From de8d587838d4fdc0477d31584172a95bcaae2b30 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 23:54:54 -0700 Subject: [PATCH 01/14] ipu-acpi: initial ipu-acpi PDATA PTL_Beta_IoT Signed-off-by: Florent Pirou --- Makefile | 20 +- dkms.conf | 45 +- drivers/media/platform/intel/Kconfig | 19 + drivers/media/platform/intel/Makefile | 17 + .../media/platform/intel/ipu-acpi-common.c | 448 +++++++++++++ drivers/media/platform/intel/ipu-acpi-pdata.c | 594 ++++++++++++++++++ drivers/media/platform/intel/ipu-acpi.c | 224 +++++++ include/media/ipu-acpi-pdata.h | 120 ++++ include/media/ipu-acpi.h | 186 ++++++ 9 files changed, 1660 insertions(+), 13 deletions(-) create mode 100644 drivers/media/platform/intel/Kconfig create mode 100644 drivers/media/platform/intel/Makefile create mode 100644 drivers/media/platform/intel/ipu-acpi-common.c create mode 100644 drivers/media/platform/intel/ipu-acpi-pdata.c create mode 100644 drivers/media/platform/intel/ipu-acpi.c create mode 100644 include/media/ipu-acpi-pdata.h create mode 100644 include/media/ipu-acpi.h diff --git a/Makefile b/Makefile index 5609587..e65edb1 100644 --- a/Makefile +++ b/Makefile @@ -3,10 +3,21 @@ KERNELRELEASE ?= $(shell uname -r) KERNEL_SRC ?= /lib/modules/$(KERNELRELEASE)/build +KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') +BUILD_EXCLUSIVE_KERNEL="^(6\.(1[278])\.)" + MODSRC := $(shell pwd) -export EXTERNAL_BUILD = 1 +subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + +# Define config macros for conditional compilation in ipu6-acpi.c +# IS_ENABLED() checks for CONFIG_XXX or CONFIG_XXX_MODULE +subdir-ccflags-y += -DCONFIG_VIDEO_ISX031_MODULE=1 +subdir-ccflags-y += -DCONFIG_VIDEO_AR0820_MODULE=1 +subdir-ccflags-y += -DCONFIG_VIDEO_AR0234_MODULE=1 +subdir-ccflags-y += -DCONFIG_IPU_BRIDGE_MODULE=1 +export EXTERNAL_BUILD = 1 export CONFIG_IPU_BRIDGE=m export CONFIG_VIDEO_AR0820=m export CONFIG_VIDEO_AR0234=m @@ -15,6 +26,13 @@ export 
CONFIG_VIDEO_ISX031=m obj-m += drivers/media/pci/intel/ obj-m += drivers/media/i2c/ +export CONFIG_INTEL_IPU_ACPI = m +obj-y += drivers/media/platform/intel/ + +subdir-ccflags-$(CONFIG_INTEL_IPU_ACPI) += \ + -DCONFIG_INTEL_IPU_ACPI + +subdir-ccflags-y += $(subdir-ccflags-m) subdir-ccflags-y += -iquote $(src)/include/ -I$(src)/include/ diff --git a/dkms.conf b/dkms.conf index a9ec111..347c190 100644 --- a/dkms.conf +++ b/dkms.conf @@ -8,18 +8,39 @@ AUTOINSTALL="yes" MODULE_PATH="drivers/media/i2c" MODULE_DEST="/kernel/drivers/media/i2c/" -BUILT_MODULE_NAME[0]="isx031" -BUILT_MODULE_LOCATION[0]="$MODULE_PATH" -DEST_MODULE_LOCATION[0]="$MODULE_DEST" +i=0 -BUILT_MODULE_NAME[1]="ar0234" -BUILT_MODULE_LOCATION[1]="$MODULE_PATH" -DEST_MODULE_LOCATION[1]="$MODULE_DEST" +BUILT_MODULE_NAME[$i]="ipu-acpi" +BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no -BUILT_MODULE_NAME[2]="ar0820" -BUILT_MODULE_LOCATION[2]="$MODULE_PATH" -DEST_MODULE_LOCATION[2]="$MODULE_DEST" +BUILT_MODULE_NAME[$((++i))]="ipu-acpi-pdata" +BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no -BUILT_MODULE_NAME[3]="ipu-bridge" -BUILT_MODULE_LOCATION[3]="drivers/media/pci/intel" -DEST_MODULE_LOCATION[3]="/kernel/drivers/media/pci/intel/" \ No newline at end of file +BUILT_MODULE_NAME[$((++i))]="ipu-acpi-common" +BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="ar0234" +BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="ar0820" +BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="ipu-bridge" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="isx031" 
+BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no diff --git a/drivers/media/platform/intel/Kconfig b/drivers/media/platform/intel/Kconfig new file mode 100644 index 0000000..f8affa6 --- /dev/null +++ b/drivers/media/platform/intel/Kconfig @@ -0,0 +1,19 @@ +config INTEL_IPU7_FPGA_PDATA + bool "Enable built in platform data for ipu7 fpga" + depends on VIDEO_INTEL_IPU7 + help + Pre-ACPI system platform data is compiled inside kernel. + + This platform data is only used for PSS stage software + development and mainly used for pixter or sensor enablement + without ACPI support. + +config INTEL_IPU7_ACPI + tristate "Enable IPU ACPI driver" + default VIDEO_INTEL_IPU7 + depends on I2C + depends on ACPI + help + Driver to read ACPI data from BIOS + + This driver is used to read ACPI data from BIOS diff --git a/drivers/media/platform/intel/Makefile b/drivers/media/platform/intel/Makefile new file mode 100644 index 0000000..c898c5e --- /dev/null +++ b/drivers/media/platform/intel/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2026 Intel Corporation. + +is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) +ifeq ($(is_kernel_lt_6_10), 1) +ifneq ($(EXTERNAL_BUILD), 1) +src := $(srctree)/$(src) +endif +endif + +ccflags-y += -I$(src)/../../../../ipu7-drivers/drivers/media/pci/intel/ipu7 + +ifneq ($(filter y m, $(CONFIG_INTEL_IPU7_ACPI)),) +obj-$(CONFIG_INTEL_IPU7_ACPI) += ipu-acpi.o \ + ipu-acpi-pdata.o \ + ipu-acpi-common.o +endif diff --git a/drivers/media/platform/intel/ipu-acpi-common.c b/drivers/media/platform/intel/ipu-acpi-common.c new file mode 100644 index 0000000..936c158 --- /dev/null +++ b/drivers/media/platform/intel/ipu-acpi-common.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2024 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include + +static int get_integer_dsdt_data(struct device *dev, const u8 *dsdt, + int func, u64 *out) +{ + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + union acpi_object *obj; + + obj = acpi_evaluate_dsm(dev_handle, (void *)dsdt, 0, func, NULL); + if (!obj) { + pr_err("IPU ACPI: Could not find corresponding DSM\n"); + return -ENODEV; + } + + if (obj->type != ACPI_TYPE_INTEGER) { + ACPI_FREE(obj); + return -ENODEV; + } + *out = obj->integer.value; + ACPI_FREE(obj); + return 0; +} + +static int read_acpi_block(struct device *dev, char *id, void *data, u32 size) +{ + union acpi_object *obj; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + int status; + u32 buffer_length; + + status = acpi_evaluate_object(dev_handle, id, NULL, &buffer); + if (!ACPI_SUCCESS(status)) { + pr_err("IPU ACPI: Could not find ACPI block with err %d", status); + return -ENODEV; + } + + obj = (union acpi_object *)buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_BUFFER) { + pr_err("IPU ACPI: Could not read ACPI buffer\n"); + status = -ENODEV; + goto err; + } + + if (obj->buffer.length > size) { + pr_err("IPU ACPI: Given buffer is too small\n"); + status = -ENODEV; + goto err; + } + + memcpy(data, obj->buffer.pointer, min(size, obj->buffer.length)); + buffer_length = obj->buffer.length; + kfree(buffer.pointer); + + return buffer_length; +err: + kfree(buffer.pointer); + return status; +} + +static int ipu_acpi_get_gpio_data(struct device 
*dev, struct ipu_gpio_info *gpio, int size, + u64 *gpio_num) +{ + const u8 dsdt_cam_gpio[] = { + 0x40, 0x46, 0x23, 0x79, 0x10, 0x9e, 0xea, 0x4f, + 0xa5, 0xc1, 0xB5, 0xaa, 0x8b, 0x19, 0x75, 0x6f }; + + int i = 0, j = 0, retries = 0, loop = 0; + u64 num_gpio; + int rval; + + rval = get_integer_dsdt_data(dev, dsdt_cam_gpio, 1, &num_gpio); + + if (rval < 0) { + pr_err("IPU ACPI: Failed to get number of GPIO pins\n"); + return rval; + } + + pr_info("IPU APCI: Num of gpio found = %lld", num_gpio); + + if (num_gpio == 0) { + *gpio_num = num_gpio; + return rval; + } + + if (num_gpio > size) { + pr_err("IPU ACPI: Incorrect number of GPIO pins\n"); + return rval; + } + + /* repeat until all gpio pin is saved */ + while (i < num_gpio && loop <= LOOP_SIZE) { + u64 data; + struct gpio_desc *desc = NULL; + + rval = get_integer_dsdt_data(dev, dsdt_cam_gpio, i + 2, &data); + + if (rval < 0) { + pr_err("IPU ACPI: Failed to get GPIO data\n"); + return -ENODEV; + } + + gpio[i].func = (data & 0xff); + gpio[i].valid = FALSE; + + desc = gpiod_get_index(dev, NULL, i + retries, GPIOD_ASIS); + + if (!IS_ERR(desc)) { + unsigned short pin = desc_to_gpio(desc); + bool save = TRUE; + + /* always save first GPIO pin */ + if (i == 0) + save = TRUE; + + /* check subsequent GPIO pin for replicate */ + else { + for (j = 0; j <= i; j++) { + /* retry if same as previous pin */ + if (gpio[j].pin == pin) { + retries++; + save = FALSE; + gpiod_put(desc); + break; + } + } + } + + /* save into array */ + if (save == TRUE) { + pr_info("IPU ACPI: DSM: Pin number = %d. 
Func = %x.", pin, gpio[i].func); + gpio[i].pin = pin; + gpio[i].valid = TRUE; + gpiod_put(desc); + i++; + retries = 0; + } + } + loop++; + } + *gpio_num = num_gpio; + + return rval; +} + +// Callback to parse I2C resources from _CRS +static acpi_status parse_i2c_resource(struct acpi_resource *res, void *data) +{ + char **controller_path = data; + struct acpi_resource_i2c_serialbus *i2c; + + if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) + return AE_OK; + + i2c = &res->data.i2c_serial_bus; + if (i2c->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) + return AE_OK; + + *controller_path = kstrdup(i2c->resource_source.string_ptr, GFP_KERNEL); + return AE_CTRL_TERMINATE; +} + +// Get I2C bdf via _CRS +static int ipu_get_i2c_bdf_crs(struct device *dev, struct ipu_i2c_info *info) +{ + acpi_handle handle = ACPI_HANDLE(dev); + acpi_status status; + struct acpi_device *controller_adev; + struct pci_dev *pci_dev; + char *controller_path = NULL; + + if (!handle) { + dev_err(dev, "No ACPI handle\n"); + return -EINVAL; + } + + // Parse _CRS for I2C resource + status = acpi_walk_resources(handle, "_CRS", parse_i2c_resource, &controller_path); + if (ACPI_FAILURE(status) || !controller_path) { + dev_err(dev, "Failed to parse _CRS: %s\n", acpi_format_exception(status)); + return -EIO; + } + + // Get the I2C controller's ACPI device + status = acpi_get_handle(NULL, controller_path, &handle); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Invalid controller path: %s\n", controller_path); + kfree(controller_path); + controller_path = NULL; + return -ENODEV; + } + + controller_adev = acpi_fetch_acpi_dev(handle); + if (!controller_adev) { + dev_err(dev, "No ACPI device for controller: %s\n", controller_path); + kfree(controller_path); + return -ENODEV; + } + + // Map to PCI device + pci_dev = acpi_get_pci_dev(handle); + if (!pci_dev) { + dev_err(dev, "No PCI device for controller: %s\n", controller_path); + kfree(controller_path); + return -ENODEV; + } + + // Extract bdf using pci_domain_nr() + 
snprintf(info->bdf, sizeof(info->bdf), "%04x:%02x:%02x.%x", + pci_domain_nr(pci_dev->bus), pci_dev->bus->number, + PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); + pci_dev_put(pci_dev); + + return 0; +} + +static int ipu_acpi_get_i2c_info(struct device *dev, struct ipu_i2c_info *i2c, int size, u64 *num) +{ + const u8 dsdt_cam_i2c[] = { + 0x49, 0x75, 0x25, 0x26, 0x71, 0x92, 0xA4, 0x4C, + 0xBB, 0x43, 0xC4, 0x89, 0x9D, 0x5A, 0x48, 0x81}; + + u64 num_i2c; + int i; + int rval; + + rval = get_integer_dsdt_data(dev, dsdt_cam_i2c, 1, &num_i2c); + + if (rval < 0) { + pr_err("IPU ACPI: Failed to get number of I2C\n"); + return -ENODEV; + } + + for (i = 0; i < num_i2c && i < size; i++) { + u64 data; + + rval = get_integer_dsdt_data(dev, dsdt_cam_i2c, i + 2, + &data); + + if (rval < 0) { + pr_err("IPU ACPI: Failed to get I2C data\n"); + return -ENODEV; + } + + i2c[i].bus = ((data >> 24) & 0xff); + i2c[i].addr = (data >> 8) & 0xff; + rval = ipu_get_i2c_bdf_crs(dev, i2c + i); + if (rval < 0) { + pr_err("IPU ACPI: Failed to get i2c bdf\n"); + return -ENODEV; + } + pr_info("IPU ACPI: DSM: i2c bus %d addr %x bdf: %s\n", + i2c[i].bus, i2c[i].addr, i2c[i].bdf); + } + + *num = num_i2c; + + return 0; +} + +static int match_depend(struct device *dev, const void *data) +{ + return (dev && dev->fwnode == data) ? 
1 : 0; +} + +int ipu_acpi_get_control_logic_data(struct device *dev, + struct control_logic_data **ctl_data) +{ + /* CLDB data */ + struct control_logic_data_packed ctl_logic_data; + int ret; + + ret = read_acpi_block(dev, "CLDB", &ctl_logic_data, + sizeof(ctl_logic_data)); + + if (ret < 0) { + pr_err("IPU ACPI: Fail to read from CLDB"); + return ret; + } + + (*ctl_data)->type = ctl_logic_data.controllogictype; + (*ctl_data)->id = ctl_logic_data.controllogicid; + (*ctl_data)->sku = ctl_logic_data.sensorcardsku; + + pr_info("IPU ACPI: CLDB: version %d clid %d cltype %d sku %d", + ctl_logic_data.version, + ctl_logic_data.controllogicid, + ctl_logic_data.controllogictype, + ctl_logic_data.sensorcardsku); + + /* GPIO data */ + ret = ipu_acpi_get_gpio_data(dev, (*ctl_data)->gpio, ARRAY_SIZE((*ctl_data)->gpio), + &((*ctl_data)->gpio_num)); + + if (ret < 0) { + dev_err(dev, "Failed to get GPIO data"); + return ret; + } + return 0; +} + +int ipu_acpi_get_dep_data(struct device *dev, + struct control_logic_data *ctl_data) +{ + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + struct acpi_handle_list dep_devices; + acpi_status status; + int i; + int rval; + + ctl_data->completed = false; + + if (!acpi_has_method(dev_handle, "_DEP")) { + pr_err("IPU ACPI: %s does not have _DEP method", dev_name(dev)); + return 0; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 9, 0) + status = acpi_evaluate_reference(dev_handle, "_DEP", NULL, + &dep_devices); + + if (ACPI_FAILURE(status)) { +#else + if (!acpi_evaluate_reference(dev_handle, "_DEP", NULL, &dep_devices)) { +#endif + pr_err("IPU ACPI: %s failed to evaluate _DEP", dev_name(dev)); + return -ENODEV; + } + + for (i = 0; i < dep_devices.count; i++) { + struct acpi_device *device; + struct acpi_device_info *info; + struct device *p_dev; + int match; + + status = acpi_get_object_info(dep_devices.handles[i], &info); + if (ACPI_FAILURE(status)) { + pr_err("IPU ACPI: %s error reading _DEP device info", dev_name(dev)); + continue; + } 
+ + match = info->valid & ACPI_VALID_HID && + !strcmp(info->hardware_id.string, "INT3472"); + + kfree(info); + + if (!match) + continue; + + /* Process device IN3472 created by acpi */ + device = acpi_fetch_acpi_dev(dep_devices.handles[i]); + if (!device) { + pr_err("IPU ACPI: Failed to get ACPI device"); + return -ENODEV; + } + + pr_info("IPU ACPI: Dependent ACPI device found: %s\n", + dev_name(&device->dev)); + + p_dev = bus_find_device(&platform_bus_type, NULL, + &device->fwnode, match_depend); + + if (p_dev) { + pr_info("IPU ACPI: Dependent platform device found %s\n", + dev_name(p_dev)); + + /* obtain Control Logic Data from BIOS */ + rval = ipu_acpi_get_control_logic_data(p_dev, &ctl_data); + + if (rval) { + pr_err("IPU ACPI: Error getting Control Logic Data"); + return rval; + } + + ctl_data->completed = true; + } else + pr_err("IPU ACPI: Dependent platform device not found for %s\n", dev_name(dev)); + } + + if (!ctl_data->completed) { + ctl_data->type = CL_EMPTY; + pr_err("IPU APCI: No control logic data available"); + } + + return 0; +} +EXPORT_SYMBOL(ipu_acpi_get_dep_data); + +int ipu_acpi_get_cam_data(struct device *dev, + struct sensor_bios_data *sensor) +{ + /* SSDB */ + struct sensor_bios_data_packed sensor_data; + int ret; + + ret = read_acpi_block(dev, "SSDB", &sensor_data, + sizeof(sensor_data)); + + if (ret < 0) { + pr_err("IPU ACPI: Fail to read from SSDB with err %d", ret); + return ret; + } + + /* Xshutdown is not part of the ssdb data */ + sensor->link = sensor_data.link; + sensor->lanes = sensor_data.lanes; + sensor->pprval = sensor_data.pprval; + sensor->pprunit = sensor_data.pprunit; + sensor->bus_type = sensor_data.phyconfig; + sensor->degree = sensor_data.degree; + + pr_info("IPU ACPI: SSDB: name %s. link %d. lanes %d. pprval %d. pprunit %x. 
degree %d", + dev_name(dev), sensor->link, sensor->lanes, sensor->pprval, sensor->pprunit, + sensor->degree); + + /* I2C */ + ret = ipu_acpi_get_i2c_info(dev, sensor->i2c, ARRAY_SIZE(sensor->i2c), &sensor->i2c_num); + + if (ret < 0) { + pr_err("IPU ACPI: Failed to get I2C info"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ipu_acpi_get_cam_data); + +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Khai Wen Ng "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IPU ACPI support"); diff --git a/drivers/media/platform/intel/ipu-acpi-pdata.c b/drivers/media/platform/intel/ipu-acpi-pdata.c new file mode 100644 index 0000000..0a84dcc --- /dev/null +++ b/drivers/media/platform/intel/ipu-acpi-pdata.c @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2025 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include + +#define MIN_SENSOR_I2C 1 +#define MIN_SERDES_I2C 3 +#define SUFFIX_BASE 97 + +struct ipu7_isys_subdev_pdata acpi_subdev_pdata = { + .subdevs = (struct ipu7_isys_subdev_info *[]) { + NULL, + } +}; + +struct serdes_local serdes_info; + +/* + * The dev_id was hard code in platform data, as i2c bus number + * may change dynamiclly, we need to update this bus id + * accordingly. 
+ * + * @adapter_id: hardware i2c adapter id, this was fixed in platform data + * return: i2c bus id registered in system + */ +static int get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) +{ + struct i2c_adapter *adapter; + char name[32]; + int i = 0; + + if (adapter_bdf) { + while ((adapter = i2c_get_adapter(i)) != NULL) { + struct device *parent = adapter->dev.parent; + struct device *pp = parent->parent; + + if (pp && !strncmp(adapter_bdf, dev_name(pp), bdf_len)) + return i; + i++; + } + } + + i = 0; + snprintf(name, sizeof(name), "i2c_designware.%d", adapter_id); + while ((adapter = i2c_get_adapter(i)) != NULL) { + struct device *parent = adapter->dev.parent; + + if (parent && !strncmp(name, dev_name(parent), sizeof(name))) + return i; + i++; + } + + /* Not found, should never happen! */ + WARN_ON_ONCE(1); + return -1; +} + +/* + * update i2c bus here to avoid deadlock between i2c_for_each_dev + * and i2c_get_adapter + */ +static void update_i2c_bus_id(void) +{ + struct ipu7_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; + for (int i = 0; subdevs[i] != NULL; i++) { + subdevs[i]->i2c.i2c_adapter_id = + get_i2c_bus_id(subdevs[i]->i2c.i2c_adapter_id, + subdevs[i]->i2c.i2c_adapter_bdf, + sizeof(subdevs[i]->i2c.i2c_adapter_bdf)); + } +} + +struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void) +{ + struct ipu7_isys_subdev_pdata *ptr; + + update_i2c_bus_id(); + ptr = &acpi_subdev_pdata; + return ptr; +} +EXPORT_SYMBOL(get_acpi_subdev_pdata); + +static void print_serdes_sdinfo(struct serdes_subdev_info *sdinfo) +{ + int i; + struct serdes_module_pdata *sd_mpdata = sdinfo->board_info.platform_data; + + if (!sd_mpdata) { + pr_err("Empty serdes module pdata"); + return; + } + + pr_debug("\t\trx_port \t\t= %d", sdinfo->rx_port); + pr_debug("\t\tphy_i2c_addr \t\t= 0x%x", sdinfo->phy_i2c_addr); + pr_debug("\t\tser_alias \t\t= 0x%x", sdinfo->ser_alias); + pr_debug("\t\tser_phys_addr \t\t= 0x%x", sdinfo->ser_phys_addr); + pr_debug("\t\tsuffix 
\t\t\t= %s", sdinfo->suffix); + pr_debug("\t\tboard_info.type \t= %s", sdinfo->board_info.type); + pr_debug("\t\tboard_info.addr \t= 0x%x", sdinfo->board_info.addr); + + pr_debug("serdes board_info.platform_data"); + pr_debug("\t\tlanes \t\t\t= %d", sd_mpdata->lanes); + pr_debug("\t\tmodule_name \t\t= %s", sd_mpdata->module_name); + pr_debug("\t\tfsin \t\t\t= %d", sd_mpdata->fsin); + + if (serdes_info.gpio_powerup_seq > 0) + for (i = 0; i < serdes_info.gpio_powerup_seq; i++) + pr_debug("\t\t gpio_powerup_seq[%d] \t= %d", i, + (int)sd_mpdata->gpio_powerup_seq[i]); +} + +static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) +{ + struct serdes_platform_data *sd_pdata = sd->i2c.board_info.platform_data; + int i; + struct serdes_subdev_info *sd_sdinfo; + struct serdes_module_pdata *sd_mpdata; + + if (!sd_pdata) { + pr_err("Empty serdes subdev pdata"); + return; + } + + pr_debug("IPU ACPI: %s", __func__); + pr_debug("sd_csi2"); + pr_debug("\t\tnlanes \t\t\t= %d", sd->csi2->nlanes); + pr_debug("\t\tport \t\t\t= %d", sd->csi2->port); + pr_debug("\t\ttype \t\t\t= %d", sd->csi2->bus_type); + + pr_debug("sd->i2c"); + pr_debug("\t\ti2c_adapter_bdf \t= %s", sd->i2c.i2c_adapter_bdf); + pr_debug("\t\tboard_info.type \t= %s", sd->i2c.board_info.type); + pr_debug("\t\tboard_info.addr \t= 0x%x", sd->i2c.board_info.addr); + + pr_debug("sd->i2c.board_info.platform_data"); + pr_debug("\t\treset_gpio \t\t= %d", sd_pdata->reset_gpio); + pr_debug("\t\tFPD_gpio \t\t= %d", sd_pdata->FPD_gpio); + pr_debug("\t\tsuffix \t\t\t= %c", sd_pdata->suffix); + + pr_debug("\t\tlink_freq_mbps \t\t= %d", sd_pdata->link_freq_mbps); + pr_debug("\t\tdeser_nlanes \t\t= %d", sd_pdata->deser_nlanes); + pr_debug("\t\tser_nlanes \t\t= %d", sd_pdata->ser_nlanes); + + for (i = 0; i < serdes_info.rx_port; i++) { + sd_sdinfo = &sd_pdata->subdev_info[i]; + sd_mpdata = sd_sdinfo->board_info.platform_data; + + if (!sd_mpdata) + continue; + + pr_debug("serdes subdev_info[%d]", i); + 
print_serdes_sdinfo(sd_sdinfo); + } + +} + +static void print_subdev(struct ipu7_isys_subdev_info *sd) +{ + struct sensor_platform_data *spdata = sd->i2c.board_info.platform_data; + int i; + + if (!spdata) { + pr_err("IPU ACPI: Empty sensor subdev"); + return; + } + + pr_debug("IPU ACPI: %s", __func__); + pr_debug("sd->csi2"); + pr_debug("\t\tnlanes \t\t\t= %d", sd->csi2->nlanes); + pr_debug("\t\tport \t\t\t= %d", sd->csi2->port); + pr_debug("\t\ttype \t\t\t= %d", sd->csi2->bus_type); + + pr_debug("sd->i2c"); + pr_debug("\t\ti2c_adapter_bdf \t= %s", sd->i2c.i2c_adapter_bdf); + pr_debug("\t\tboard_info.type \t= %s", sd->i2c.board_info.type); + pr_debug("\t\tboard_info.addr \t= 0x%x", sd->i2c.board_info.addr); + + pr_debug("sd->i2c.platform_data"); + pr_debug("\t\tport \t\t\t= %d", spdata->port); + pr_debug("\t\tlanes \t\t\t= %d", spdata->lanes); + pr_debug("\t\ti2c_slave_address \t= 0x%x", spdata->i2c_slave_address); + pr_debug("\t\tirq_pin \t\t= %d", spdata->irq_pin); + pr_debug("\t\tirq_pin_name \t\t= %s", spdata->irq_pin_name); + pr_debug("\t\tsuffix \t\t\t= %c", spdata->suffix); + pr_debug("\t\treset_pin \t\t= %d", spdata->reset_pin); + pr_debug("\t\tdetect_pin \t\t= %d", spdata->detect_pin); + + for (i = 0; i < IPU7_SPDATA_GPIO_NUM; i++) + pr_debug("\t\tgpios[%d] \t\t= %d", i, spdata->gpios[i]); +} + +static void set_common_gpio(struct control_logic_data *ctl_data, + struct sensor_platform_data **pdata) +{ + int i; + + /* TODO: consider remove specific naming such as irq_pin, and use gpios[] */ + (*pdata)->irq_pin = -1; + (*pdata)->reset_pin = -1; + (*pdata)->detect_pin = -1; + + (*pdata)->gpios[0] = -1; + (*pdata)->gpios[1] = 0; + (*pdata)->gpios[2] = 0; + (*pdata)->gpios[3] = 0; + + /* all sensors should have RESET GPIO */ + if (ctl_data->completed && ctl_data->gpio_num > 0) + for (i = 0; i < ctl_data->gpio_num; i++) + if (ctl_data->gpio[i].func != GPIO_RESET) + dev_err(ctl_data->dev, + "IPU ACPI: Invalid GPIO func: %d\n", + ctl_data->gpio[i].func); +} + 
+static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, + unsigned int lanes, unsigned int port, + unsigned int bus_type) +{ + struct ipu7_isys_csi2_config *csi2_config; + + csi2_config = kzalloc(sizeof(*csi2_config), GFP_KERNEL); + if (!csi2_config) + return -ENOMEM; + + csi2_config->nlanes = lanes; + csi2_config->port = port; + if (bus_type == PHY_MODE_DPHY) + csi2_config->bus_type = V4L2_MBUS_CSI2_DPHY; + else if (bus_type == PHY_MODE_CPHY) + csi2_config->bus_type = V4L2_MBUS_CSI2_CPHY; + else + csi2_config->bus_type = V4L2_MBUS_UNKNOWN; + + (*sensor_sd)->csi2 = csi2_config; + + return 0; +} + +static void set_i2c(struct ipu7_isys_subdev_info **sensor_sd, + struct device *dev, + const char *sensor_name, + unsigned int addr, + const char *i2c_adapter_bdf) +{ + dev_info(dev, "IPU ACPI: kernel I2C BDF: %s", i2c_adapter_bdf); + (*sensor_sd)->i2c.board_info.addr = addr; + strscpy((*sensor_sd)->i2c.board_info.type, sensor_name, I2C_NAME_SIZE); + strscpy((*sensor_sd)->i2c.i2c_adapter_bdf, i2c_adapter_bdf, + sizeof((*sensor_sd)->i2c.i2c_adapter_bdf)); +} + +static void set_serdes_sd_pdata(struct serdes_module_pdata **module_pdata, + const char *sensor_name, const char *hid_name, + unsigned int lanes) +{ + /* general */ + (*module_pdata)->lanes = lanes; + strscpy((*module_pdata)->module_name, sensor_name, I2C_NAME_SIZE); +} + +#define PORT_NR 8 + +static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, + struct device *dev, + struct serdes_platform_data **pdata, + const char *sensor_name, + const char *hid_name, + unsigned int lanes, + unsigned int addr, + unsigned int subdev_num) +{ + int i; + struct serdes_module_pdata *module_pdata[PORT_NR]; + struct serdes_subdev_info *serdes_sdinfo; + size_t subdev_size = subdev_num * sizeof(*serdes_sdinfo); + unsigned int port = (*pdata)->suffix - SUFFIX_BASE; + + serdes_sdinfo = kzalloc(subdev_size, GFP_KERNEL); + if (!serdes_sdinfo) + return -ENOMEM; + + for (i = 0; i < subdev_num; i++) { + module_pdata[i] = 
kzalloc(sizeof(*module_pdata[i]), GFP_KERNEL); + if (!module_pdata[i]) { + kfree(serdes_sdinfo); + return -ENOMEM; + } + + set_serdes_sd_pdata(&module_pdata[i], sensor_name, hid_name, lanes); + + /* board info */ + strscpy(serdes_sdinfo[i].board_info.type, sensor_name, I2C_NAME_SIZE); + serdes_sdinfo[i].board_info.addr = serdes_info.sensor_map_addr + i; + serdes_sdinfo[i].board_info.platform_data = module_pdata[i]; + + /* serdes_subdev_info */ + serdes_sdinfo[i].rx_port = i; + serdes_sdinfo[i].ser_alias = serdes_info.ser_map_addr + i; + + serdes_sdinfo[i].phy_i2c_addr = serdes_info.phy_i2c_addr; + snprintf(serdes_sdinfo[i].suffix, sizeof(serdes_sdinfo[i].suffix), "%c-%d", + SUFFIX_BASE + i, port); +#if IS_ENABLED(CONFIG_VIDEO_ISX031) + serdes_sdinfo[i].ser_phys_addr = 0x40; + serdes_sdinfo[i].sensor_dt = 0x1e; +#endif + } + + (*pdata)->subdev_info = serdes_sdinfo; + (*pdata)->subdev_num = subdev_num; + + return 0; +} + +static int set_pdata(struct ipu7_isys_subdev_info **sensor_sd, + struct device *dev, + const char *sensor_name, + const char *hid_name, + struct control_logic_data *ctl_data, + unsigned int port, + unsigned int lanes, + unsigned int addr, + unsigned int subdev_num, + unsigned int deser_lanes, + bool is_dummy, + enum connection_type connect, + int link_freq, + int des_port) +{ + if (connect == TYPE_DIRECT) { + struct sensor_platform_data *pdata; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pr_debug("IPU ACPI: %s - Direct connection", __func__); + /* use ascii */ + /* port for start from 0 */ + if (port >= 0) { + pdata->suffix = port + SUFFIX_BASE; + pr_info("IPU ACPI: create %s on port %d", + sensor_name, port); + } else + dev_err(dev, "INVALID MIPI PORT"); + + pdata->port = port; + pdata->lanes = lanes; + pdata->i2c_slave_address = addr; + + /* gpio */ + set_common_gpio(ctl_data, &pdata); + + (*sensor_sd)->i2c.board_info.platform_data = pdata; + } else if (connect == TYPE_SERDES) { + struct 
serdes_platform_data *pdata; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pr_debug("IPU ACPI: %s - Serdes connection", __func__); + /* use ascii */ + if (port >= 0) { + pdata->suffix = port + SUFFIX_BASE; + pr_info("IPU ACPI: create %s on mipi port %d", + sensor_name, port); + } else + pr_err("IPU ACPI: Invalid MIPI Port : %d", port); + + pdata->link_freq_mbps = link_freq; + pdata->bus_type = (*sensor_sd)->csi2->bus_type; + pdata->deser_nlanes = deser_lanes; + pdata->ser_nlanes = lanes; + pdata->des_port = des_port; + strscpy(pdata->ser_name, (*sensor_sd)->i2c.board_info.type, I2C_NAME_SIZE); + set_serdes_subdev(sensor_sd, dev, &pdata, sensor_name, hid_name, lanes, addr, subdev_num); + + (*sensor_sd)->i2c.board_info.platform_data = pdata; + pdata->deser_board_info = &(*sensor_sd)->i2c.board_info; + } + + return 0; +} + +static void set_serdes_info(struct device *dev, const char *sensor_name, + const char *serdes_name, + struct sensor_bios_data *cam_data, + int sensor_physical_addr) +{ + int i; + + serdes_info.deser_num = 0; + /* pprunit as num of sensor connected to deserializer */ + serdes_info.rx_port = cam_data->pprunit; + + /* i2c devices */ + serdes_info.i2c_num = cam_data->i2c_num; + + i = 1; + /* serializer mapped addr */ + serdes_info.ser_map_addr = cam_data->i2c[i++].addr; + /* sensor mapped addr */ + serdes_info.sensor_map_addr = cam_data->i2c[i++].addr; + + serdes_info.gpio_powerup_seq = 0; + + serdes_info.phy_i2c_addr = sensor_physical_addr; +} + +static int populate_sensor_pdata(struct device *dev, + struct ipu7_isys_subdev_info **sensor_sd, + struct sensor_bios_data *cam_data, + struct control_logic_data *ctl_data, + enum connection_type connect, + const char *sensor_name, + const char *serdes_name, + const char *hid_name, + int sensor_physical_addr, + int link_freq) +{ + struct ipu7_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; + int i = 0; + int ret; + + if (connect == TYPE_DIRECT) { + /* 
sensor csi2 info */ + ret = set_csi2(sensor_sd, cam_data->lanes, cam_data->link, cam_data->bus_type); + if (ret) + return ret; + + /* sensor i2c info */ + if (cam_data->i2c_num == MIN_SENSOR_I2C) { + pr_debug("IPU ACPI: num of I2C device for Direct connection: %lld is Correct.", + cam_data->i2c_num); + set_i2c(sensor_sd, dev, sensor_name, cam_data->i2c[0].addr, cam_data->i2c[0].bdf); + } else { + pr_err("IPU ACPI: num of I2C device for Direct connection : %lld is Incorrect", + cam_data->i2c_num); + return -1; + } + /* Others use DISCRETE Control Logic */ + if (ctl_data->type != CL_DISCRETE) { + dev_err(dev, "IPU ACPI: Control Logic Type\n"); + dev_err(dev, "for %s: %d is Incorrect\n", + sensor_name, ctl_data->type); + return -EINVAL; + } + } else if (connect == TYPE_SERDES) { + /* serdes csi2 info. pprval as deserializer lane */ + ret = set_csi2(sensor_sd, cam_data->pprval, cam_data->link, cam_data->bus_type); + if (ret) + return ret; + + /* Use DISCRETE Control Logic or No Control Logic for serdes */ + if (ctl_data->type != CL_DISCRETE && ctl_data->type != CL_EMPTY) { + pr_err("IPU ACPI: Control Logic Type for serdes: %d is Incorrect", + ctl_data->type); + return -1; + } + + /* serdes i2c info */ + if (cam_data->i2c_num >= MIN_SERDES_I2C) { + pr_debug("IPU ACPI: num of I2C device for Serdes connection: %lld is Correct", + cam_data->i2c_num); + set_i2c(sensor_sd, dev, serdes_name, cam_data->i2c[0].addr, cam_data->i2c[0].bdf); + } else { + pr_err("IPU ACPI: num of I2C device for Serdes connection: %lld is Incorrect", + cam_data->i2c_num); + return -1; + } + + /* local serdes info */ + set_serdes_info(dev, sensor_name, serdes_name, cam_data, sensor_physical_addr); + } + + /* Use last I2C device */ + ret = set_pdata(sensor_sd, dev, sensor_name, hid_name, ctl_data, cam_data->link, + cam_data->lanes, cam_data->i2c[cam_data->i2c_num - 1].addr, + cam_data->pprunit, cam_data->pprval, false, connect, link_freq, cam_data->degree); + if (ret) + return ret; + + /* update local 
ipu7_isys_subdev_pdata */ + while (i <= MAX_ACPI_SENSOR_NUM) { + if (!ptr_acpi_subdev_pdata->subdevs[i]) { + ptr_acpi_subdev_pdata->subdevs[i] = *sensor_sd; + ptr_acpi_subdev_pdata->subdevs[i+1] = NULL; + break; + } + i++; + } + + /* print new subdev */ + if (connect == TYPE_DIRECT) { + pr_debug("New sensor subdev\n"); + print_subdev(*sensor_sd); + } else { + pr_debug("New serdes subdev\n"); + print_serdes_subdev(*sensor_sd); + } + + /* update total num of sensor connected */ + if (connect == TYPE_SERDES) + serdes_info.deser_num++; + + return 0; +} + +int get_sensor_pdata(struct device *dev, + struct ipu_camera_module_data *data, + void *priv, size_t size, + enum connection_type connect, const char *sensor_name, + const char *serdes_name, const char *hid_name, + int sensor_physical_addr, int link_freq) +{ + struct sensor_bios_data *cam_data; + struct control_logic_data *ctl_data; + struct ipu7_isys_subdev_info *sensor_sd; + int rval; + + cam_data = kzalloc(sizeof(*cam_data), GFP_KERNEL); + if (!cam_data) + return -ENOMEM; + cam_data->dev = dev; + + ctl_data = kzalloc(sizeof(*ctl_data), GFP_KERNEL); + if (!ctl_data) { + kfree(cam_data); + return -ENOMEM; + } + ctl_data->dev = dev; + + sensor_sd = kzalloc(sizeof(*sensor_sd), GFP_KERNEL); + if (!sensor_sd) { + kfree(cam_data); + kfree(ctl_data); + return -ENOMEM; + } + + /* camera info */ + rval = ipu_acpi_get_cam_data(dev, cam_data); + if (rval) { + kfree(sensor_sd); + kfree(cam_data); + kfree(ctl_data); + return rval; + } + + /* control logic info */ + rval = ipu_acpi_get_dep_data(dev, ctl_data); + if (rval) { + kfree(sensor_sd); + kfree(cam_data); + kfree(ctl_data); + return rval; + } + + /* populate pdata */ + rval = populate_sensor_pdata(dev, &sensor_sd, cam_data, ctl_data, + connect, sensor_name, serdes_name, hid_name, + sensor_physical_addr, link_freq); + if (rval) { + kfree(sensor_sd); + kfree(cam_data); + kfree(ctl_data); + return rval; + } + + dev->platform_data = sensor_sd; + + kfree(cam_data); + 
kfree(ctl_data); + return rval; +} +EXPORT_SYMBOL(get_sensor_pdata); + +MODULE_AUTHOR("Khai Wen, Ng "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IPU ACPI support"); diff --git a/drivers/media/platform/intel/ipu-acpi.c b/drivers/media/platform/intel/ipu-acpi.c new file mode 100644 index 0000000..deed3bb --- /dev/null +++ b/drivers/media/platform/intel/ipu-acpi.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2025 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_VIDEO_ISX031) +#include +#endif + +#include "ipu7.h" +#include "ipu7-isys.h" + +static LIST_HEAD(devices); + +static struct ipu_camera_module_data *add_device_to_list( + struct list_head *devices) +{ + struct ipu_camera_module_data *cam_device; + + cam_device = kzalloc(sizeof(*cam_device), GFP_KERNEL); + if (!cam_device) + return NULL; + + list_add(&cam_device->list, devices); + return cam_device; +} + +static const struct ipu_acpi_devices supported_devices[] = { +/* + * { "ACPI ID", sensor_name, get_sensor_pdata, NULL, 0, TYPE, serdes_name, + * sensor_physical_addr, link_freq(mbps) }, // Custom HID + */ + +#if IS_ENABLED(CONFIG_VIDEO_MAX9X) +#if IS_ENABLED(CONFIG_VIDEO_ISX031) + { "INTC031M", ISX031_NAME, get_sensor_pdata, NULL, 0, TYPE_SERDES, "max9x", + ISX031_I2C_ADDRESS, 1600 }, // D3 ISX031 HID +#endif 
+#endif +}; + +static int get_table_index(const char *acpi_name) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { + if (!strncmp(supported_devices[i].hid_name, acpi_name, + strlen(supported_devices[i].hid_name))) + return i; + } + + return -ENODEV; +} + +/* List of ACPI devices what we can handle */ +/* Must match with HID in BIOS option. Add new sensor if required */ +static const struct acpi_device_id ipu_acpi_match[] = { +/* + * { "AR0234A", 0 }, // Custom HID + */ +#if IS_ENABLED(CONFIG_VIDEO_ISX031) + { "INTC1031", 0 }, // ISX031 HID + { "INTC031M", 0 }, // D3CMC68N-115-084 ISX031 HID +#endif + {}, +}; + +static int ipu_acpi_get_pdata(struct device *dev, int index) +{ + struct ipu_camera_module_data *camdata; + int rval; + + if (index < 0) { + pr_err("Device is not in supported devices list\n"); + return -ENODEV; + } + + camdata = add_device_to_list(&devices); + if (!camdata) + return -ENOMEM; + + pr_info("IPU ACPI: Getting BIOS data for %s (%s)", + supported_devices[index].real_driver, dev_name(dev)); + + rval = supported_devices[index].get_platform_data( + dev, camdata, + supported_devices[index].priv_data, + supported_devices[index].priv_size, + supported_devices[index].connect, + supported_devices[index].real_driver, + supported_devices[index].serdes_name, + supported_devices[index].hid_name, + supported_devices[index].sensor_physical_addr, + supported_devices[index].link_freq); + + if (rval) + return -EPROBE_DEFER; + + return 0; +} + +/* + * different acpi devices may have same HID, so acpi_dev_get_first_match_dev + * will always match device to simple fwnode. 
+ */ +static int ipu_acpi_test(struct device *dev, void *priv) +{ + struct acpi_device *adev = NULL; + int rval; + int acpi_idx = get_table_index(dev_name(dev)); + + if (acpi_idx < 0) + return 0; + else + dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); + + const char *target_hid = supported_devices[acpi_idx].hid_name; + + if (!ACPI_COMPANION(dev)) { + while ((adev = acpi_dev_get_next_match_dev(adev, target_hid, + NULL, -1))) { + if (adev->flags.reserved == 0) { + adev->flags.reserved = 1; + break; + } + acpi_dev_put(adev); + } + + if (!adev) { + dev_dbg(dev, "No ACPI device found for %s\n", target_hid); + return 0; + } else { + set_primary_fwnode(dev, &adev->fwnode); + dev_dbg(dev, "Assigned fwnode to %s\n", dev_name(dev)); + } + } + + if (ACPI_COMPANION(dev) != adev) { + dev_err(dev, "Failed to set ACPI companion for %s\n", + dev_name(dev)); + acpi_dev_put(adev); + return 0; + } + + acpi_dev_put(adev); + + rval = ipu_acpi_get_pdata(dev, acpi_idx); + if (rval) { + pr_err("IPU6 ACPI: Failed to process ACPI data"); + return rval; + } + + return 0; /* Continue iteration */ +} + +/* Try to get all IPU related devices mentioned in BIOS and all related information + * return a new generated existing pdata + */ + +int ipu_get_acpi_devices(void **spdata) +{ + struct ipu_i2c_helper helper = {0}; + int rval; + struct ipu7_isys_subdev_pdata *ptr = NULL; + + rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); + if (rval < 0) + return rval; + + ptr = get_acpi_subdev_pdata(); + if (ptr && *ptr->subdevs) + *spdata = ptr; + + return 0; +} +EXPORT_SYMBOL(ipu_get_acpi_devices); + +static int __init ipu_acpi_init(void) +{ + return 0; +} + +static void __exit ipu_acpi_exit(void) +{ +} + +module_init(ipu_acpi_init); +module_exit(ipu_acpi_exit); + +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IPU ACPI support"); diff --git a/include/media/ipu-acpi-pdata.h b/include/media/ipu-acpi-pdata.h new file mode 100644 index 0000000..e207441 --- /dev/null 
+++ b/include/media/ipu-acpi-pdata.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2023-2025 Intel Corporation */ +#include +#include +#include +#if IS_ENABLED(CONFIG_VIDEO_ISX031) +#include +#endif + +#define CL_EMPTY 0 +#define CL_DISCRETE 1 +#define SERDES_MAX_GPIO_POWERUP_SEQ 4 +#define LOOP_SIZE 10 + +/* CPHY is supported since ipu7*/ +#define PHY_MODE_DPHY 0 +#define PHY_MODE_CPHY 1 + +int get_sensor_pdata(struct device *dev, + struct ipu_camera_module_data *data, + void *priv, size_t size, + enum connection_type connect, + const char *sensor_name, + const char *serdes_name, + const char *hid_name, + int sensor_physical_addr, + int link_freq); + +struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void); + +struct sensor_platform_data { + unsigned int port; + unsigned int lanes; + uint32_t i2c_slave_address; + int irq_pin; + unsigned int irq_pin_flags; + char irq_pin_name[IPU7_SPDATA_IRQ_PIN_NAME_LEN]; + int reset_pin; + int detect_pin; + char suffix; + int gpios[IPU7_SPDATA_GPIO_NUM]; +}; + +struct serdes_platform_data { + unsigned int subdev_num; + struct serdes_subdev_info *subdev_info; + unsigned int reset_gpio; + unsigned int FPD_gpio; + char suffix; + unsigned int link_freq_mbps; + enum v4l2_mbus_type bus_type; + unsigned int deser_nlanes; + unsigned int ser_nlanes; + unsigned int des_port; + char ser_name[I2C_NAME_SIZE]; + struct i2c_board_info *deser_board_info; +}; + +struct serdes_subdev_info { + struct i2c_board_info board_info; + int i2c_adapter_id; + unsigned short rx_port; + unsigned short phy_i2c_addr; + unsigned short ser_alias; + char suffix[5]; /* suffix for subdevs */ + unsigned short ser_phys_addr; + unsigned int sensor_dt; +}; + +struct serdes_module_pdata { + unsigned short i2c_addr; + unsigned short i2c_adapter; + unsigned int lanes; + int xshutdown; + int fsin; + int reset; + char gpio_powerup_seq[SERDES_MAX_GPIO_POWERUP_SEQ]; + unsigned int module_flags; + char module_name[I2C_NAME_SIZE]; + char suffix; 
+}; + +struct serdes_local { + /* num of camera sensor connected to current mipi port */ + unsigned int rx_port; + + /* num of i2c addr for current ACPI device */ + unsigned int i2c_num; + + /* current sensor_addr */ + unsigned short sensor_addr; + + /* physical i2c addr */ + unsigned short phy_i2c_addr; + + /* last mapped addr */ + unsigned short sensor_map_addr; + + /* current serializer_addr */ + unsigned short ser_addr; + + /* last mapped addr */ + unsigned short ser_map_addr; + + /* 2nd group of mapped addr for 2x sensors */ + unsigned short sensor_map_addr_2; + unsigned short ser_map_addr_2; + + /* current gpio_powerup_seq */ + unsigned int gpio_powerup_seq; + + /* current module flag */ + unsigned int module_flags; + + /* counter for total camera sensor connected */ + unsigned int sensor_num; + + /* counter for total deser connected */ + unsigned int deser_num; +}; diff --git a/include/media/ipu-acpi.h b/include/media/ipu-acpi.h new file mode 100644 index 0000000..bc63240 --- /dev/null +++ b/include/media/ipu-acpi.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2025 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef MEDIA_INTEL_IPU_ACPI_H +#define MEDIA_INTEL_IPU_ACPI_H + +#include "ipu7-isys.h" + +#define MAX_ACPI_SENSOR_NUM 4 +#define MAX_ACPI_I2C_NUM 12 +#define MAX_ACPI_GPIO_NUM 12 + +#define GPIO_RESET 0x0 + +#define IPU7_SPDATA_GPIO_NUM 4 +#define IPU7_SPDATA_IRQ_PIN_NAME_LEN 16 + +enum connection_type { + TYPE_DIRECT, + TYPE_SERDES +}; + +/* Data representation as it is in ACPI SSDB buffer */ +struct sensor_bios_data_packed { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 mipidataformat; + u8 siliconversion; + u8 customerid; + u8 mclkport; + u8 pmicpos; + u8 voltagerail; + u8 pprval; + u8 pprunit; + u8 flashid; + u8 phyconfig; + u8 reserved2[7]; +} __attribute__((__packed__)); + +struct ipu_i2c_info { + unsigned short bus; + unsigned short addr; + char bdf[32]; +}; + +/* Fields needed by ipu driver */ +/* Each I2C client can have 12 device */ +struct sensor_bios_data { + struct device *dev; + u8 link; + u8 lanes; + u8 vcmtype; + u8 flash; + u8 degree; + u8 mclkport; + u32 mclkspeed; + u16 xshutdown; + u8 controllogicid; + u8 pprval; + u8 pprunit; + struct ipu_i2c_info i2c[MAX_ACPI_I2C_NUM]; + u64 i2c_num; + u8 bus_type; +}; + +struct control_logic_data_packed { + u8 version; + u8 controllogictype; + u8 controllogicid; + u8 sensorcardsku; + u8 inputclk; + u8 platformid; + u8 subplatformid; + u8 customerid; + u8 wled1_maxflashcurrent; + u8 wled1_maxtorchcurrent; + u8 wled2_maxflashcurrent; + u8 wled2_maxtorchcurrent; + u8 wled1_type; + u8 wled2_type; + u8 pch_clk_src; + u8 reserved2[17]; +} __attribute__((__packed__)); + +struct ipu_gpio_info { + unsigned short init_state; + unsigned short pin; + 
unsigned short func; + bool valid; +}; + +/* Each I2C client linked to 1 set of CTL Logic */ +struct control_logic_data { + struct device *dev; + u8 id; + u8 type; + u8 sku; + u64 gpio_num; + struct ipu_gpio_info gpio[MAX_ACPI_GPIO_NUM]; + bool completed; +}; + +int ipu_get_acpi_devices(void **spdata); + +struct ipu7_isys_subdev_pdata *get_built_in_pdata(void); + +int ipu_acpi_get_cam_data(struct device *dev, + struct sensor_bios_data *sensor); + +int ipu_acpi_get_dep_data(struct device *dev, + struct control_logic_data *ctl_data); + +int ipu_acpi_get_control_logic_data(struct device *dev, + struct control_logic_data **ctl_data); + +struct ipu_i2c_helper { + int (*fn)(struct device *dev, void *priv, + struct ipu7_isys_csi2_config *csi2, + bool reprobe); + void *driver_data; +}; + +struct ipu_camera_module_data { + struct list_head list; + struct ipu7_isys_subdev_info sd; + struct ipu7_isys_csi2_config csi2; + unsigned int ext_clk; + void *pdata; /* Ptr to generated platform data*/ + void *priv; /* Private for specific subdevice */ +}; + +struct ipu_acpi_devices { + const char *hid_name; + const char *real_driver; + int (*get_platform_data)(struct device *dev, + struct ipu_camera_module_data *data, + void *priv, + size_t size, + enum connection_type type, + const char *sensor_name, + const char *serdes_name, + const char *hid_name, + int sensor_physical_addr, + int link_freq); + void *priv_data; + size_t priv_size; + enum connection_type connect; + const char *serdes_name; + int sensor_physical_addr; + int link_freq; /* in mbps */ +}; + +#endif From 54ea2db74c7586adf17f0c52433190eae6ae6692 Mon Sep 17 00:00:00 2001 From: hepengpx Date: Wed, 5 Nov 2025 10:37:43 +0800 Subject: [PATCH 02/14] ipu-acpi: decouple the PLATFORM ACPI driver and IPU driver Signed-off-by: hepengpx --- drivers/media/platform/intel/Kconfig | 2 +- drivers/media/platform/intel/Makefile | 4 +- .../media/platform/intel/ipu-acpi-common.c | 9 ++- drivers/media/platform/intel/ipu-acpi-pdata.c | 57 
++++++++++----- drivers/media/platform/intel/ipu-acpi.c | 61 +++++++++++++--- include/media/ipu-acpi-pdata.h | 7 +- include/media/ipu-acpi.h | 70 ++++++++++++++++--- include/media/ipu-get-acpi.h | 30 ++++++++ 8 files changed, 195 insertions(+), 45 deletions(-) create mode 100644 include/media/ipu-get-acpi.h diff --git a/drivers/media/platform/intel/Kconfig b/drivers/media/platform/intel/Kconfig index f8affa6..b8a7a36 100644 --- a/drivers/media/platform/intel/Kconfig +++ b/drivers/media/platform/intel/Kconfig @@ -8,7 +8,7 @@ config INTEL_IPU7_FPGA_PDATA development and mainly used for pixter or sensor enablement without ACPI support. -config INTEL_IPU7_ACPI +config INTEL_IPU_ACPI tristate "Enable IPU ACPI driver" default VIDEO_INTEL_IPU7 depends on I2C diff --git a/drivers/media/platform/intel/Makefile b/drivers/media/platform/intel/Makefile index c898c5e..9f210f8 100644 --- a/drivers/media/platform/intel/Makefile +++ b/drivers/media/platform/intel/Makefile @@ -10,8 +10,8 @@ endif ccflags-y += -I$(src)/../../../../ipu7-drivers/drivers/media/pci/intel/ipu7 -ifneq ($(filter y m, $(CONFIG_INTEL_IPU7_ACPI)),) -obj-$(CONFIG_INTEL_IPU7_ACPI) += ipu-acpi.o \ +ifneq ($(filter y m, $(CONFIG_INTEL_IPU_ACPI)),) +obj-$(CONFIG_INTEL_IPU_ACPI) += ipu-acpi.o \ ipu-acpi-pdata.o \ ipu-acpi-common.o endif diff --git a/drivers/media/platform/intel/ipu-acpi-common.c b/drivers/media/platform/intel/ipu-acpi-common.c index 936c158..1804932 100644 --- a/drivers/media/platform/intel/ipu-acpi-common.c +++ b/drivers/media/platform/intel/ipu-acpi-common.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2016-2024 Intel Corporation. + * Copyright (c) 2016-2025 Intel Corporation. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version @@ -13,7 +13,10 @@ * */ #include +#include #include +#include + #include #include @@ -365,8 +368,12 @@ int ipu_acpi_get_dep_data(struct device *dev, continue; /* Process device IN3472 created by acpi */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) + if (acpi_bus_get_device(dep_devices.handles[i], &device)) { +#else device = acpi_fetch_acpi_dev(dep_devices.handles[i]); if (!device) { +#endif pr_err("IPU ACPI: Failed to get ACPI device"); return -ENODEV; } diff --git a/drivers/media/platform/intel/ipu-acpi-pdata.c b/drivers/media/platform/intel/ipu-acpi-pdata.c index 0a84dcc..be75bb4 100644 --- a/drivers/media/platform/intel/ipu-acpi-pdata.c +++ b/drivers/media/platform/intel/ipu-acpi-pdata.c @@ -12,18 +12,31 @@ * GNU General Public License for more details. * */ - #include #include #define MIN_SENSOR_I2C 1 #define MIN_SERDES_I2C 3 #define SUFFIX_BASE 97 +#define MSG_LEN 128 -struct ipu7_isys_subdev_pdata acpi_subdev_pdata = { - .subdevs = (struct ipu7_isys_subdev_info *[]) { - NULL, - } +static struct ipu_isys_subdev_pdata *ptr_built_in_pdata; + +void set_built_in_pdata(struct ipu_isys_subdev_pdata *pdata) +{ + ptr_built_in_pdata = pdata; +}; +EXPORT_SYMBOL(set_built_in_pdata); + +static struct ipu_isys_clk_mapping clk_mapping[] = { + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +struct ipu_isys_subdev_pdata acpi_subdev_pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { + NULL, NULL, NULL, NULL, NULL, + }, + .clk_map = clk_mapping, }; struct serdes_local serdes_info; @@ -74,7 +87,8 @@ static int get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) */ static void update_i2c_bus_id(void) { - struct ipu7_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; + struct ipu_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; + for (int i = 0; subdevs[i] != NULL; i++) { subdevs[i]->i2c.i2c_adapter_id = 
get_i2c_bus_id(subdevs[i]->i2c.i2c_adapter_id, @@ -83,9 +97,9 @@ static void update_i2c_bus_id(void) } } -struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void) +struct ipu_isys_subdev_pdata *get_acpi_subdev_pdata(void) { - struct ipu7_isys_subdev_pdata *ptr; + struct ipu_isys_subdev_pdata *ptr; update_i2c_bus_id(); ptr = &acpi_subdev_pdata; @@ -122,7 +136,7 @@ static void print_serdes_sdinfo(struct serdes_subdev_info *sdinfo) (int)sd_mpdata->gpio_powerup_seq[i]); } -static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) +static void print_serdes_subdev(struct ipu_isys_subdev_info *sd) { struct serdes_platform_data *sd_pdata = sd->i2c.board_info.platform_data; int i; @@ -153,6 +167,7 @@ static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) pr_debug("\t\tlink_freq_mbps \t\t= %d", sd_pdata->link_freq_mbps); pr_debug("\t\tdeser_nlanes \t\t= %d", sd_pdata->deser_nlanes); pr_debug("\t\tser_nlanes \t\t= %d", sd_pdata->ser_nlanes); + pr_debug("\t\tser_name \t\t= %s", sd_pdata->ser_name); for (i = 0; i < serdes_info.rx_port; i++) { sd_sdinfo = &sd_pdata->subdev_info[i]; @@ -167,7 +182,7 @@ static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) } -static void print_subdev(struct ipu7_isys_subdev_info *sd) +static void print_subdev(struct ipu_isys_subdev_info *sd) { struct sensor_platform_data *spdata = sd->i2c.board_info.platform_data; int i; @@ -198,7 +213,7 @@ static void print_subdev(struct ipu7_isys_subdev_info *sd) pr_debug("\t\treset_pin \t\t= %d", spdata->reset_pin); pr_debug("\t\tdetect_pin \t\t= %d", spdata->detect_pin); - for (i = 0; i < IPU7_SPDATA_GPIO_NUM; i++) + for (i = 0; i < IPU_SPDATA_GPIO_NUM; i++) pr_debug("\t\tgpios[%d] \t\t= %d", i, spdata->gpios[i]); } @@ -226,11 +241,11 @@ static void set_common_gpio(struct control_logic_data *ctl_data, ctl_data->gpio[i].func); } -static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, +static int set_csi2(struct ipu_isys_subdev_info **sensor_sd, unsigned int lanes, 
unsigned int port, unsigned int bus_type) { - struct ipu7_isys_csi2_config *csi2_config; + struct ipu_isys_csi2_config *csi2_config; csi2_config = kzalloc(sizeof(*csi2_config), GFP_KERNEL); if (!csi2_config) @@ -250,7 +265,7 @@ static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, return 0; } -static void set_i2c(struct ipu7_isys_subdev_info **sensor_sd, +static void set_i2c(struct ipu_isys_subdev_info **sensor_sd, struct device *dev, const char *sensor_name, unsigned int addr, @@ -274,7 +289,7 @@ static void set_serdes_sd_pdata(struct serdes_module_pdata **module_pdata, #define PORT_NR 8 -static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, +static int set_serdes_subdev(struct ipu_isys_subdev_info **serdes_sd, struct device *dev, struct serdes_platform_data **pdata, const char *sensor_name, @@ -305,6 +320,7 @@ static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, /* board info */ strscpy(serdes_sdinfo[i].board_info.type, sensor_name, I2C_NAME_SIZE); serdes_sdinfo[i].board_info.addr = serdes_info.sensor_map_addr + i; + serdes_sdinfo[i].board_info.platform_data = module_pdata[i]; /* serdes_subdev_info */ @@ -326,7 +342,7 @@ static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, return 0; } -static int set_pdata(struct ipu7_isys_subdev_info **sensor_sd, +static int set_pdata(struct ipu_isys_subdev_info **sensor_sd, struct device *dev, const char *sensor_name, const char *hid_name, @@ -423,7 +439,7 @@ static void set_serdes_info(struct device *dev, const char *sensor_name, } static int populate_sensor_pdata(struct device *dev, - struct ipu7_isys_subdev_info **sensor_sd, + struct ipu_isys_subdev_info **sensor_sd, struct sensor_bios_data *cam_data, struct control_logic_data *ctl_data, enum connection_type connect, @@ -433,7 +449,7 @@ static int populate_sensor_pdata(struct device *dev, int sensor_physical_addr, int link_freq) { - struct ipu7_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; + struct 
ipu_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; int i = 0; int ret; @@ -453,6 +469,7 @@ static int populate_sensor_pdata(struct device *dev, cam_data->i2c_num); return -1; } + /* Others use DISCRETE Control Logic */ if (ctl_data->type != CL_DISCRETE) { dev_err(dev, "IPU ACPI: Control Logic Type\n"); @@ -530,12 +547,13 @@ int get_sensor_pdata(struct device *dev, { struct sensor_bios_data *cam_data; struct control_logic_data *ctl_data; - struct ipu7_isys_subdev_info *sensor_sd; + struct ipu_isys_subdev_info *sensor_sd; int rval; cam_data = kzalloc(sizeof(*cam_data), GFP_KERNEL); if (!cam_data) return -ENOMEM; + cam_data->dev = dev; ctl_data = kzalloc(sizeof(*ctl_data), GFP_KERNEL); @@ -543,6 +561,7 @@ int get_sensor_pdata(struct device *dev, kfree(cam_data); return -ENOMEM; } + ctl_data->dev = dev; sensor_sd = kzalloc(sizeof(*sensor_sd), GFP_KERNEL); diff --git a/drivers/media/platform/intel/ipu-acpi.c b/drivers/media/platform/intel/ipu-acpi.c index deed3bb..a5a54b4 100644 --- a/drivers/media/platform/intel/ipu-acpi.c +++ b/drivers/media/platform/intel/ipu-acpi.c @@ -32,6 +32,8 @@ #include #include #include +#include + #include #include @@ -73,7 +75,7 @@ static const struct ipu_acpi_devices supported_devices[] = { static int get_table_index(const char *acpi_name) { - unsigned int i; + int i; for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { if (!strncmp(supported_devices[i].hid_name, acpi_name, @@ -143,8 +145,8 @@ static int ipu_acpi_test(struct device *dev, void *priv) if (acpi_idx < 0) return 0; - else - dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); + + dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); const char *target_hid = supported_devices[acpi_idx].hid_name; @@ -161,10 +163,10 @@ static int ipu_acpi_test(struct device *dev, void *priv) if (!adev) { dev_dbg(dev, "No ACPI device found for %s\n", target_hid); return 0; - } else { - set_primary_fwnode(dev, &adev->fwnode); - dev_dbg(dev, "Assigned fwnode to %s\n", 
dev_name(dev)); } + + set_primary_fwnode(dev, &adev->fwnode); + dev_dbg(dev, "Assigned fwnode to %s\n", dev_name(dev)); } if (ACPI_COMPANION(dev) != adev) { @@ -185,15 +187,53 @@ static int ipu_acpi_test(struct device *dev, void *priv) return 0; /* Continue iteration */ } +/* Scan all i2c devices and pick ones which we can handle */ + /* Try to get all IPU related devices mentioned in BIOS and all related information - * return a new generated existing pdata + * If there is existing ipu_isys_subdev_pdata, update the existing pdata + * If not, return a new generated existing pdata */ -int ipu_get_acpi_devices(void **spdata) +int ipu_get_acpi_devices(void *driver_data, + struct device *dev, + void **spdata, + void **built_in_pdata, + int (*fn) + (struct device *, void *, + void *csi2, + bool reprobe)) { struct ipu_i2c_helper helper = {0}; int rval; - struct ipu7_isys_subdev_pdata *ptr = NULL; + + if (!built_in_pdata) + dev_dbg(dev, "Built-in pdata not found"); + else { + dev_dbg(dev, "Built-in pdata found"); + set_built_in_pdata(*built_in_pdata); + } + + if ((!fn) || (!driver_data)) + return -ENODEV; + + rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); + if (rval < 0) + return rval; + + if (!built_in_pdata) { + dev_dbg(dev, "Return ACPI generated pdata"); + *spdata = get_acpi_subdev_pdata(); + } else + dev_dbg(dev, "Return updated built-in pdata"); + + return 0; +} +EXPORT_SYMBOL(ipu_get_acpi_devices); + +int ipu_get_acpi_devices_new(void **spdata) +{ + int rval; + struct ipu_isys_subdev_pdata *ptr = NULL; rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); if (rval < 0) @@ -205,10 +245,11 @@ int ipu_get_acpi_devices(void **spdata) return 0; } -EXPORT_SYMBOL(ipu_get_acpi_devices); +EXPORT_SYMBOL(ipu_get_acpi_devices_new); static int __init ipu_acpi_init(void) { + set_built_in_pdata(NULL); return 0; } diff --git a/include/media/ipu-acpi-pdata.h b/include/media/ipu-acpi-pdata.h index e207441..c6ec18e 100644 --- a/include/media/ipu-acpi-pdata.h +++ 
b/include/media/ipu-acpi-pdata.h @@ -9,6 +9,7 @@ #define CL_EMPTY 0 #define CL_DISCRETE 1 +#define SERDES_MAX_PORT 4 #define SERDES_MAX_GPIO_POWERUP_SEQ 4 #define LOOP_SIZE 10 @@ -26,7 +27,7 @@ int get_sensor_pdata(struct device *dev, int sensor_physical_addr, int link_freq); -struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void); +struct ipu_isys_subdev_pdata *get_acpi_subdev_pdata(void); struct sensor_platform_data { unsigned int port; @@ -34,11 +35,11 @@ struct sensor_platform_data { uint32_t i2c_slave_address; int irq_pin; unsigned int irq_pin_flags; - char irq_pin_name[IPU7_SPDATA_IRQ_PIN_NAME_LEN]; + char irq_pin_name[IPU_SPDATA_IRQ_PIN_NAME_LEN]; int reset_pin; int detect_pin; char suffix; - int gpios[IPU7_SPDATA_GPIO_NUM]; + int gpios[IPU_SPDATA_GPIO_NUM]; }; struct serdes_platform_data { diff --git a/include/media/ipu-acpi.h b/include/media/ipu-acpi.h index bc63240..1f8e09e 100644 --- a/include/media/ipu-acpi.h +++ b/include/media/ipu-acpi.h @@ -16,16 +16,53 @@ #ifndef MEDIA_INTEL_IPU_ACPI_H #define MEDIA_INTEL_IPU_ACPI_H -#include "ipu7-isys.h" +#include +#include + +#include + +struct ipu_isys_csi2_config { + unsigned int nlanes; + unsigned int port; + enum v4l2_mbus_type bus_type; +}; + +struct ipu_isys_subdev_i2c_info { + struct i2c_board_info board_info; + int i2c_adapter_id; + char i2c_adapter_bdf[32]; +}; + +struct ipu_isys_subdev_info { + struct ipu_isys_csi2_config *csi2; + struct ipu_isys_subdev_i2c_info i2c; +#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) + char *acpi_hid; +#endif +}; + +struct ipu_isys_clk_mapping { + struct clk_lookup clkdev_data; + char *platform_clock_name; +}; + +struct ipu_isys_subdev_pdata { + struct ipu_isys_subdev_info **subdevs; + struct ipu_isys_clk_mapping *clk_map; +}; #define MAX_ACPI_SENSOR_NUM 4 #define MAX_ACPI_I2C_NUM 12 #define MAX_ACPI_GPIO_NUM 12 #define GPIO_RESET 0x0 +#define GPIO_POWER_EN 0xb +#define GPIO_READY_STAT 0x13 + +#define IPU_SPDATA_GPIO_NUM 4 +#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 -#define 
IPU7_SPDATA_GPIO_NUM 4 -#define IPU7_SPDATA_IRQ_PIN_NAME_LEN 16 +void set_built_in_pdata(struct ipu_isys_subdev_pdata *pdata); enum connection_type { TYPE_DIRECT, @@ -122,6 +159,11 @@ struct ipu_gpio_info { bool valid; }; +struct ipu_irq_info { + int irq_pin; + char irq_pin_name[IPU_SPDATA_IRQ_PIN_NAME_LEN]; +}; + /* Each I2C client linked to 1 set of CTL Logic */ struct control_logic_data { struct device *dev; @@ -133,9 +175,7 @@ struct control_logic_data { bool completed; }; -int ipu_get_acpi_devices(void **spdata); - -struct ipu7_isys_subdev_pdata *get_built_in_pdata(void); +struct ipu_isys_subdev_pdata *get_built_in_pdata(void); int ipu_acpi_get_cam_data(struct device *dev, struct sensor_bios_data *sensor); @@ -146,17 +186,29 @@ int ipu_acpi_get_dep_data(struct device *dev, int ipu_acpi_get_control_logic_data(struct device *dev, struct control_logic_data **ctl_data); +struct intel_ipu6_regulator { + char *src_dev_name; + char *src_rail; + char *dest_rail; +}; + struct ipu_i2c_helper { int (*fn)(struct device *dev, void *priv, - struct ipu7_isys_csi2_config *csi2, + struct ipu_isys_csi2_config *csi2, bool reprobe); void *driver_data; }; +struct ipu_i2c_new_dev { + struct list_head list; + struct i2c_board_info info; + unsigned short int bus; +}; + struct ipu_camera_module_data { struct list_head list; - struct ipu7_isys_subdev_info sd; - struct ipu7_isys_csi2_config csi2; + struct ipu_isys_subdev_info sd; + struct ipu_isys_csi2_config csi2; unsigned int ext_clk; void *pdata; /* Ptr to generated platform data*/ void *priv; /* Private for specific subdevice */ diff --git a/include/media/ipu-get-acpi.h b/include/media/ipu-get-acpi.h new file mode 100644 index 0000000..da2ed51 --- /dev/null +++ b/include/media/ipu-get-acpi.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2025 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef MEDIA_INTEL_IPU_GET_ACPI_H +#define MEDIA_INTEL_IPU_GET_ACPI_H + +int ipu_get_acpi_devices(void *driver_data, + struct device *dev, + void **spdata, + void **built_in_pdata, + int (*fn) + (struct device *, void *, + void *csi2, + bool reprobe)); + +int ipu_get_acpi_devices_new(void **spdata); + +#endif /* MEDIA_INTEL_IPU_GET_ACPI_H */ From 4c896e236bf4f06d33d68443d8e5db5866897ef2 Mon Sep 17 00:00:00 2001 From: florent pirou Date: Mon, 17 Nov 2025 07:01:20 -0700 Subject: [PATCH 03/14] v4l2-core : add v6.17 v4l2_subdev_enable_streams_api support Signed-off-by: florent pirou --- 6.17.0/drivers/media/v4l2-core/Kconfig | 84 + 6.17.0/drivers/media/v4l2-core/Makefile | 37 + 6.17.0/drivers/media/v4l2-core/tuner-core.c | 1424 +++++++ 6.17.0/drivers/media/v4l2-core/v4l2-async.c | 976 +++++ 6.17.0/drivers/media/v4l2-core/v4l2-cci.c | 203 + 6.17.0/drivers/media/v4l2-core/v4l2-common.c | 707 ++++ .../media/v4l2-core/v4l2-compat-ioctl32.c | 1210 ++++++ .../drivers/media/v4l2-core/v4l2-ctrls-api.c | 1358 +++++++ .../drivers/media/v4l2-core/v4l2-ctrls-core.c | 2721 +++++++++++++ .../drivers/media/v4l2-core/v4l2-ctrls-defs.c | 1685 ++++++++ .../drivers/media/v4l2-core/v4l2-ctrls-priv.h | 95 + .../media/v4l2-core/v4l2-ctrls-request.c | 501 +++ 6.17.0/drivers/media/v4l2-core/v4l2-dev.c | 1247 ++++++ 6.17.0/drivers/media/v4l2-core/v4l2-device.c | 295 ++ .../drivers/media/v4l2-core/v4l2-dv-timings.c | 1271 ++++++ 6.17.0/drivers/media/v4l2-core/v4l2-event.c | 373 ++ 6.17.0/drivers/media/v4l2-core/v4l2-fh.c | 117 + 
.../media/v4l2-core/v4l2-flash-led-class.c | 746 ++++ 6.17.0/drivers/media/v4l2-core/v4l2-fwnode.c | 1299 ++++++ 6.17.0/drivers/media/v4l2-core/v4l2-h264.c | 453 +++ 6.17.0/drivers/media/v4l2-core/v4l2-i2c.c | 185 + 6.17.0/drivers/media/v4l2-core/v4l2-ioctl.c | 3538 +++++++++++++++++ 6.17.0/drivers/media/v4l2-core/v4l2-jpeg.c | 713 ++++ 6.17.0/drivers/media/v4l2-core/v4l2-mc.c | 614 +++ 6.17.0/drivers/media/v4l2-core/v4l2-mem2mem.c | 1643 ++++++++ 6.17.0/drivers/media/v4l2-core/v4l2-spi.c | 78 + .../media/v4l2-core/v4l2-subdev-priv.h | 14 + 6.17.0/drivers/media/v4l2-core/v4l2-subdev.c | 2617 ++++++++++++ 6.17.0/drivers/media/v4l2-core/v4l2-trace.c | 12 + 6.17.0/drivers/media/v4l2-core/v4l2-vp9.c | 1850 +++++++++ 6.17.0/include-overrides/media/cec-notifier.h | 166 + 6.17.0/include-overrides/media/cec-pin.h | 79 + 6.17.0/include-overrides/media/cec.h | 597 +++ 6.17.0/include-overrides/media/demux.h | 600 +++ 6.17.0/include-overrides/media/dmxdev.h | 213 + 6.17.0/include-overrides/media/dvb-usb-ids.h | 471 +++ .../include-overrides/media/dvb_ca_en50221.h | 142 + 6.17.0/include-overrides/media/dvb_demux.h | 354 ++ 6.17.0/include-overrides/media/dvb_frontend.h | 834 ++++ 6.17.0/include-overrides/media/dvb_net.h | 95 + .../include-overrides/media/dvb_ringbuffer.h | 280 ++ 6.17.0/include-overrides/media/dvb_vb2.h | 280 ++ 6.17.0/include-overrides/media/dvbdev.h | 493 +++ 6.17.0/include-overrides/media/frame_vector.h | 47 + 6.17.0/include-overrides/media/imx.h | 11 + 6.17.0/include-overrides/media/ipu-bridge.h | 182 + .../include-overrides/media/ipu6-pci-table.h | 28 + 6.17.0/include-overrides/media/jpeg.h | 20 + .../media/media-dev-allocator.h | 63 + 6.17.0/include-overrides/media/media-device.h | 518 +++ .../include-overrides/media/media-devnode.h | 168 + 6.17.0/include-overrides/media/media-entity.h | 1450 +++++++ .../include-overrides/media/media-request.h | 442 ++ 6.17.0/include-overrides/media/mipi-csi2.h | 47 + 6.17.0/include-overrides/media/rc-core.h | 377 ++ 
6.17.0/include-overrides/media/rc-map.h | 357 ++ 6.17.0/include-overrides/media/rcar-fcp.h | 43 + 6.17.0/include-overrides/media/tuner-types.h | 205 + 6.17.0/include-overrides/media/tuner.h | 229 ++ 6.17.0/include-overrides/media/tveeprom.h | 116 + 6.17.0/include-overrides/media/v4l2-async.h | 346 ++ 6.17.0/include-overrides/media/v4l2-cci.h | 141 + 6.17.0/include-overrides/media/v4l2-common.h | 672 ++++ 6.17.0/include-overrides/media/v4l2-ctrls.h | 1633 ++++++++ 6.17.0/include-overrides/media/v4l2-dev.h | 665 ++++ 6.17.0/include-overrides/media/v4l2-device.h | 569 +++ .../include-overrides/media/v4l2-dv-timings.h | 309 ++ 6.17.0/include-overrides/media/v4l2-event.h | 208 + 6.17.0/include-overrides/media/v4l2-fh.h | 161 + .../media/v4l2-flash-led-class.h | 186 + 6.17.0/include-overrides/media/v4l2-fwnode.h | 414 ++ 6.17.0/include-overrides/media/v4l2-h264.h | 89 + .../media/v4l2-image-sizes.h | 46 + 6.17.0/include-overrides/media/v4l2-ioctl.h | 785 ++++ 6.17.0/include-overrides/media/v4l2-jpeg.h | 180 + 6.17.0/include-overrides/media/v4l2-mc.h | 232 ++ .../include-overrides/media/v4l2-mediabus.h | 278 ++ 6.17.0/include-overrides/media/v4l2-mem2mem.h | 902 +++++ 6.17.0/include-overrides/media/v4l2-rect.h | 207 + 6.17.0/include-overrides/media/v4l2-subdev.h | 2017 ++++++++++ 6.17.0/include-overrides/media/v4l2-vp9.h | 233 ++ .../include-overrides/media/videobuf2-core.h | 1348 +++++++ .../media/videobuf2-dma-contig.h | 32 + .../media/videobuf2-dma-sg.h | 26 + .../include-overrides/media/videobuf2-dvb.h | 69 + .../media/videobuf2-memops.h | 41 + .../include-overrides/media/videobuf2-v4l2.h | 392 ++ .../media/videobuf2-vmalloc.h | 20 + 6.17.0/include-overrides/media/vsp1.h | 213 + dkms.conf | 51 +- ...s-v4l2-core-makefile-adaptation-6.17.patch | 37 + ...t-v4l2_subdev_enable_streams_api-tru.patch | 28 + 92 files changed, 49496 insertions(+), 7 deletions(-) create mode 100644 6.17.0/drivers/media/v4l2-core/Kconfig create mode 100644 6.17.0/drivers/media/v4l2-core/Makefile 
create mode 100644 6.17.0/drivers/media/v4l2-core/tuner-core.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-async.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-cci.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-common.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-ctrls-api.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-ctrls-core.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-ctrls-request.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-dev.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-device.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-dv-timings.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-event.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-fh.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-flash-led-class.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-fwnode.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-h264.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-i2c.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-ioctl.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-jpeg.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-mc.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-mem2mem.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-spi.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-subdev-priv.h create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-subdev.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-trace.c create mode 100644 6.17.0/drivers/media/v4l2-core/v4l2-vp9.c create mode 100644 6.17.0/include-overrides/media/cec-notifier.h create mode 100644 6.17.0/include-overrides/media/cec-pin.h create mode 100644 6.17.0/include-overrides/media/cec.h create 
mode 100644 6.17.0/include-overrides/media/demux.h create mode 100644 6.17.0/include-overrides/media/dmxdev.h create mode 100644 6.17.0/include-overrides/media/dvb-usb-ids.h create mode 100644 6.17.0/include-overrides/media/dvb_ca_en50221.h create mode 100644 6.17.0/include-overrides/media/dvb_demux.h create mode 100644 6.17.0/include-overrides/media/dvb_frontend.h create mode 100644 6.17.0/include-overrides/media/dvb_net.h create mode 100644 6.17.0/include-overrides/media/dvb_ringbuffer.h create mode 100644 6.17.0/include-overrides/media/dvb_vb2.h create mode 100644 6.17.0/include-overrides/media/dvbdev.h create mode 100644 6.17.0/include-overrides/media/frame_vector.h create mode 100644 6.17.0/include-overrides/media/imx.h create mode 100644 6.17.0/include-overrides/media/ipu-bridge.h create mode 100644 6.17.0/include-overrides/media/ipu6-pci-table.h create mode 100644 6.17.0/include-overrides/media/jpeg.h create mode 100644 6.17.0/include-overrides/media/media-dev-allocator.h create mode 100644 6.17.0/include-overrides/media/media-device.h create mode 100644 6.17.0/include-overrides/media/media-devnode.h create mode 100644 6.17.0/include-overrides/media/media-entity.h create mode 100644 6.17.0/include-overrides/media/media-request.h create mode 100644 6.17.0/include-overrides/media/mipi-csi2.h create mode 100644 6.17.0/include-overrides/media/rc-core.h create mode 100644 6.17.0/include-overrides/media/rc-map.h create mode 100644 6.17.0/include-overrides/media/rcar-fcp.h create mode 100644 6.17.0/include-overrides/media/tuner-types.h create mode 100644 6.17.0/include-overrides/media/tuner.h create mode 100644 6.17.0/include-overrides/media/tveeprom.h create mode 100644 6.17.0/include-overrides/media/v4l2-async.h create mode 100644 6.17.0/include-overrides/media/v4l2-cci.h create mode 100644 6.17.0/include-overrides/media/v4l2-common.h create mode 100644 6.17.0/include-overrides/media/v4l2-ctrls.h create mode 100644 6.17.0/include-overrides/media/v4l2-dev.h create 
mode 100644 6.17.0/include-overrides/media/v4l2-device.h create mode 100644 6.17.0/include-overrides/media/v4l2-dv-timings.h create mode 100644 6.17.0/include-overrides/media/v4l2-event.h create mode 100644 6.17.0/include-overrides/media/v4l2-fh.h create mode 100644 6.17.0/include-overrides/media/v4l2-flash-led-class.h create mode 100644 6.17.0/include-overrides/media/v4l2-fwnode.h create mode 100644 6.17.0/include-overrides/media/v4l2-h264.h create mode 100644 6.17.0/include-overrides/media/v4l2-image-sizes.h create mode 100644 6.17.0/include-overrides/media/v4l2-ioctl.h create mode 100644 6.17.0/include-overrides/media/v4l2-jpeg.h create mode 100644 6.17.0/include-overrides/media/v4l2-mc.h create mode 100644 6.17.0/include-overrides/media/v4l2-mediabus.h create mode 100644 6.17.0/include-overrides/media/v4l2-mem2mem.h create mode 100644 6.17.0/include-overrides/media/v4l2-rect.h create mode 100644 6.17.0/include-overrides/media/v4l2-subdev.h create mode 100644 6.17.0/include-overrides/media/v4l2-vp9.h create mode 100644 6.17.0/include-overrides/media/videobuf2-core.h create mode 100644 6.17.0/include-overrides/media/videobuf2-dma-contig.h create mode 100644 6.17.0/include-overrides/media/videobuf2-dma-sg.h create mode 100644 6.17.0/include-overrides/media/videobuf2-dvb.h create mode 100644 6.17.0/include-overrides/media/videobuf2-memops.h create mode 100644 6.17.0/include-overrides/media/videobuf2-v4l2.h create mode 100644 6.17.0/include-overrides/media/videobuf2-vmalloc.h create mode 100644 6.17.0/include-overrides/media/vsp1.h create mode 100644 patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch create mode 100644 patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch diff --git a/6.17.0/drivers/media/v4l2-core/Kconfig b/6.17.0/drivers/media/v4l2-core/Kconfig new file mode 100644 index 0000000..331b8e5 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/Kconfig @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Generic 
video config states +# + +config VIDEO_V4L2_I2C + bool + depends on I2C && VIDEO_DEV + default y + +config VIDEO_V4L2_SUBDEV_API + bool + depends on VIDEO_DEV && MEDIA_CONTROLLER + help + Enables the V4L2 sub-device pad-level userspace API used to configure + video format, size and frame rate between hardware blocks. + + This API is mostly used by camera interfaces in embedded platforms. + +config VIDEO_ADV_DEBUG + bool "Enable advanced debug functionality on V4L2 drivers" + help + Say Y here to enable advanced debugging functionality on some + V4L devices. + In doubt, say N. + +config VIDEO_FIXED_MINOR_RANGES + bool "Enable old-style fixed minor ranges on drivers/video devices" + help + Say Y here to enable the old-style fixed-range minor assignments. + Only useful if you rely on the old behavior and use mknod instead of udev. + + When in doubt, say N. + +# Used by drivers that need tuner.ko +config VIDEO_TUNER + tristate + +# Used by drivers that need v4l2-jpeg.ko +config V4L2_JPEG_HELPER + tristate + +# Used by drivers that need v4l2-h264.ko +config V4L2_H264 + tristate + +# Used by drivers that need v4l2-vp9.ko +config V4L2_VP9 + tristate + +# Used by drivers that need v4l2-mem2mem.ko +config V4L2_MEM2MEM_DEV + tristate + depends on VIDEOBUF2_CORE + +# Used by LED subsystem flash drivers +config V4L2_FLASH_LED_CLASS + tristate "V4L2 flash API for LED flash class devices" + depends on VIDEO_DEV + depends on LEDS_CLASS_FLASH + select MEDIA_CONTROLLER + select V4L2_ASYNC + select VIDEO_V4L2_SUBDEV_API + help + Say Y here to enable V4L2 flash API support for LED flash + class drivers. + + When in doubt, say N. 
+ +config V4L2_FWNODE + tristate + select V4L2_ASYNC + +config V4L2_ASYNC + tristate + +config V4L2_CCI + tristate + +config V4L2_CCI_I2C + tristate + depends on I2C + select REGMAP_I2C + select V4L2_CCI diff --git a/6.17.0/drivers/media/v4l2-core/Makefile b/6.17.0/drivers/media/v4l2-core/Makefile new file mode 100644 index 0000000..2177b9d --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the V4L2 core +# + +ccflags-y += -I$(srctree)/drivers/media/dvb-frontends +ccflags-y += -I$(srctree)/drivers/media/tuners + +tuner-objs := tuner-core.o + +videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \ + v4l2-event.o v4l2-subdev.o v4l2-common.o \ + v4l2-ctrls-core.o v4l2-ctrls-api.o \ + v4l2-ctrls-request.o v4l2-ctrls-defs.o + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. LC_ALL=C sort Makefile) +videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o +videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o +videodev-$(CONFIG_SPI) += v4l2-spi.o +videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o +videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. 
LC_ALL=C sort Makefile) + +obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o +obj-$(CONFIG_V4L2_CCI) += v4l2-cci.o +obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o +obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o +obj-$(CONFIG_V4L2_H264) += v4l2-h264.o +obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o +obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o +obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o + +obj-$(CONFIG_VIDEO_TUNER) += tuner.o +obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o diff --git a/6.17.0/drivers/media/v4l2-core/tuner-core.c b/6.17.0/drivers/media/v4l2-core/tuner-core.c new file mode 100644 index 0000000..5687089 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/tuner-core.c @@ -0,0 +1,1424 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * i2c tv tuner chip device driver + * core core, i.e. kernel interfaces, registering and so on + * + * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer + * + * Copyright(c) 2005-2011 by Mauro Carvalho Chehab + * - Added support for a separate Radio tuner + * - Major rework and cleanups at the code + * + * This driver supports many devices and the idea is to let the driver + * detect which device is present. So rather than listing all supported + * devices here, we pretend to support a single, fake device type that will + * handle both radio and analog TV tuning. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mt20xx.h" +#include "tda8290.h" +#include "tea5761.h" +#include "tea5767.h" +#include "xc2028.h" +#include "tuner-simple.h" +#include "tda9887.h" +#include "xc5000.h" +#include "tda18271.h" +#include "xc4000.h" + +#define UNSET (-1U) + +/* + * Driver modprobe parameters + */ + +/* insmod options used at init time => read/only */ +static unsigned int addr; +static unsigned int no_autodetect; +static unsigned int show_i2c; + +module_param(addr, int, 0444); +module_param(no_autodetect, int, 0444); +module_param(show_i2c, int, 0444); + +/* insmod options used at runtime => read/write */ +static int tuner_debug; +static unsigned int tv_range[2] = { 44, 958 }; +static unsigned int radio_range[2] = { 65, 108 }; +static char pal[] = "--"; +static char secam[] = "--"; +static char ntsc[] = "-"; + +module_param_named(debug, tuner_debug, int, 0644); +module_param_array(tv_range, int, NULL, 0644); +module_param_array(radio_range, int, NULL, 0644); +module_param_string(pal, pal, sizeof(pal), 0644); +module_param_string(secam, secam, sizeof(secam), 0644); +module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); + +/* + * Static vars + */ + +static LIST_HEAD(tuner_list); +static const struct v4l2_subdev_ops tuner_ops; + +/* + * Debug macros + */ + +#undef pr_fmt + +#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \ + i2c_adapter_id(t->i2c->adapter), t->i2c->addr + + +#define dprintk(fmt, arg...) do { \ + if (tuner_debug) \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \ +} while (0) + +/* + * Internal enums/struct used inside the driver + */ + +/** + * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER + * + * @TUNER_PAD_RF_INPUT: + * Radiofrequency (RF) sink pad, usually linked to a RF connector entity. + * @TUNER_PAD_OUTPUT: + * tuner video output source pad. 
Contains the video chrominance + * and luminance or the hole bandwidth of the signal converted to + * an Intermediate Frequency (IF) or to baseband (on zero-IF tuners). + * @TUNER_PAD_AUD_OUT: + * Tuner audio output source pad. Tuners used to decode analog TV + * signals have an extra pad for audio output. Old tuners use an + * analog stage with a saw filter for the audio IF frequency. The + * output of the pad is, in this case, the audio IF, with should be + * decoded either by the bridge chipset (that's the case of cx2388x + * chipsets) or may require an external IF sound processor, like + * msp34xx. On modern silicon tuners, the audio IF decoder is usually + * incorporated at the tuner. On such case, the output of this pad + * is an audio sampled data. + * @TUNER_NUM_PADS: + * Number of pads of the tuner. + */ +enum tuner_pad_index { + TUNER_PAD_RF_INPUT, + TUNER_PAD_OUTPUT, + TUNER_PAD_AUD_OUT, + TUNER_NUM_PADS +}; + +/** + * enum if_vid_dec_pad_index - video IF-PLL pad index + * for MEDIA_ENT_F_IF_VID_DECODER + * + * @IF_VID_DEC_PAD_IF_INPUT: + * video Intermediate Frequency (IF) sink pad + * @IF_VID_DEC_PAD_OUT: + * IF-PLL video output source pad. Contains the video chrominance + * and luminance IF signals. + * @IF_VID_DEC_PAD_NUM_PADS: + * Number of pads of the video IF-PLL. 
+ */ +enum if_vid_dec_pad_index { + IF_VID_DEC_PAD_IF_INPUT, + IF_VID_DEC_PAD_OUT, + IF_VID_DEC_PAD_NUM_PADS +}; + +struct tuner { + /* device */ + struct dvb_frontend fe; + struct i2c_client *i2c; + struct v4l2_subdev sd; + struct list_head list; + + /* keep track of the current settings */ + v4l2_std_id std; + unsigned int tv_freq; + unsigned int radio_freq; + unsigned int audmode; + + enum v4l2_tuner_type mode; + unsigned int mode_mask; /* Combination of allowable modes */ + + bool standby; /* Standby mode */ + + unsigned int type; /* chip type id */ + void *config; + const char *name; + +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_pad pad[TUNER_NUM_PADS]; +#endif +}; + +/* + * Function prototypes + */ + +static void set_tv_freq(struct i2c_client *c, unsigned int freq); +static void set_radio_freq(struct i2c_client *c, unsigned int freq); + +/* + * tuner attach/detach logic + */ + +/* This macro allows us to probe dynamically, avoiding static links */ +#ifdef CONFIG_MEDIA_ATTACH +#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \ + int __r = -EINVAL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (int) __a(ARGS); \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "TUNER: Unable to find " \ + "symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +static void tuner_detach(struct dvb_frontend *fe) +{ + if (fe->ops.tuner_ops.release) { + fe->ops.tuner_ops.release(fe); + symbol_put_addr(fe->ops.tuner_ops.release); + } + if (fe->ops.analog_ops.release) { + fe->ops.analog_ops.release(fe); + symbol_put_addr(fe->ops.analog_ops.release); + } +} +#else +#define tuner_symbol_probe(FUNCTION, ARGS...) 
({ \ + FUNCTION(ARGS); \ +}) + +static void tuner_detach(struct dvb_frontend *fe) +{ + if (fe->ops.tuner_ops.release) + fe->ops.tuner_ops.release(fe); + if (fe->ops.analog_ops.release) + fe->ops.analog_ops.release(fe); +} +#endif + + +static inline struct tuner *to_tuner(struct v4l2_subdev *sd) +{ + return container_of(sd, struct tuner, sd); +} + +/* + * struct analog_demod_ops callbacks + */ + +static void fe_set_params(struct dvb_frontend *fe, + struct analog_parameters *params) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct tuner *t = fe->analog_demod_priv; + + if (NULL == fe_tuner_ops->set_analog_params) { + pr_warn("Tuner frontend module has no way to set freq\n"); + return; + } + fe_tuner_ops->set_analog_params(fe, params); +} + +static void fe_standby(struct dvb_frontend *fe) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + + if (fe_tuner_ops->sleep) + fe_tuner_ops->sleep(fe); +} + +static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct tuner *t = fe->analog_demod_priv; + + if (fe_tuner_ops->set_config) + return fe_tuner_ops->set_config(fe, priv_cfg); + + pr_warn("Tuner frontend module has no way to set config\n"); + + return 0; +} + +static void tuner_status(struct dvb_frontend *fe); + +static const struct analog_demod_ops tuner_analog_ops = { + .set_params = fe_set_params, + .standby = fe_standby, + .set_config = fe_set_config, + .tuner_status = tuner_status +}; + +/* + * Functions to select between radio and TV and tuner probe/remove functions + */ + +/** + * set_type - Sets the tuner type for a given device + * + * @c: i2c_client descriptor + * @type: type of the tuner (e. g. 
tuner number) + * @new_mode_mask: Indicates if tuner supports TV and/or Radio + * @new_config: an optional parameter used by a few tuners to adjust + * internal parameters, like LNA mode + * @tuner_callback: an optional function to be called when switching + * to analog mode + * + * This function applies the tuner config to tuner specified + * by tun_setup structure. It contains several per-tuner initialization "magic" + */ +static void set_type(struct i2c_client *c, unsigned int type, + unsigned int new_mode_mask, void *new_config, + int (*tuner_callback) (void *dev, int component, int cmd, int arg)) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + unsigned char buffer[4]; + int tune_now = 1; + + if (type == UNSET || type == TUNER_ABSENT) { + dprintk("tuner 0x%02x: Tuner type absent\n", c->addr); + return; + } + + t->type = type; + t->config = new_config; + if (tuner_callback != NULL) { + dprintk("defining GPIO callback\n"); + t->fe.callback = tuner_callback; + } + + /* discard private data, in case set_type() was previously called */ + tuner_detach(&t->fe); + t->fe.analog_demod_priv = NULL; + + switch (t->type) { + case TUNER_MT2032: + if (!dvb_attach(microtune_attach, + &t->fe, t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + break; + case TUNER_PHILIPS_TDA8290: + { + if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter, + t->i2c->addr, t->config)) + goto attach_failed; + break; + } + case TUNER_TEA5767: + if (!dvb_attach(tea5767_attach, &t->fe, + t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + t->mode_mask = T_RADIO; + break; + case TUNER_TEA5761: + if (!dvb_attach(tea5761_attach, &t->fe, + t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + t->mode_mask = T_RADIO; + break; + case TUNER_PHILIPS_FMD1216ME_MK3: + case TUNER_PHILIPS_FMD1216MEX_MK3: + buffer[0] = 0x0b; + buffer[1] = 0xdc; + buffer[2] = 0x9c; + 
buffer[3] = 0x60; + i2c_master_send(c, buffer, 4); + mdelay(1); + buffer[2] = 0x86; + buffer[3] = 0x54; + i2c_master_send(c, buffer, 4); + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + break; + case TUNER_PHILIPS_TD1316: + buffer[0] = 0x0b; + buffer[1] = 0xdc; + buffer[2] = 0x86; + buffer[3] = 0xa4; + i2c_master_send(c, buffer, 4); + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + break; + case TUNER_XC2028: + { + struct xc2028_config cfg = { + .i2c_adap = t->i2c->adapter, + .i2c_addr = t->i2c->addr, + }; + if (!dvb_attach(xc2028_attach, &t->fe, &cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_TDA9887: + if (!dvb_attach(tda9887_attach, + &t->fe, t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + break; + case TUNER_XC5000: + { + struct xc5000_config xc5000_cfg = { + .i2c_address = t->i2c->addr, + /* if_khz will be set at dvb_attach() */ + .if_khz = 0, + }; + + if (!dvb_attach(xc5000_attach, + &t->fe, t->i2c->adapter, &xc5000_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_XC5000C: + { + struct xc5000_config xc5000c_cfg = { + .i2c_address = t->i2c->addr, + /* if_khz will be set at dvb_attach() */ + .if_khz = 0, + .chip_id = XC5000C, + }; + + if (!dvb_attach(xc5000_attach, + &t->fe, t->i2c->adapter, &xc5000c_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_NXP_TDA18271: + { + struct tda18271_config cfg = { + .small_i2c = TDA18271_03_BYTE_CHUNK_INIT, + }; + + if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr, + t->i2c->adapter, &cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_XC4000: + { + struct xc4000_config xc4000_cfg = { + .i2c_address = t->i2c->addr, + /* FIXME: the correct parameters will be set */ + /* only when the digital dvb_attach() occurs */ + .default_pm = 0, + .dvb_amplitude = 0, + .set_smoothedcvbs = 0, + .if_khz = 0 + }; + if 
(!dvb_attach(xc4000_attach, + &t->fe, t->i2c->adapter, &xc4000_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + default: + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + + break; + } + + if ((NULL == analog_ops->set_params) && + (fe_tuner_ops->set_analog_params)) { + + t->name = fe_tuner_ops->info.name; + + t->fe.analog_demod_priv = t; + memcpy(analog_ops, &tuner_analog_ops, + sizeof(struct analog_demod_ops)); + + if (fe_tuner_ops->get_rf_strength) + analog_ops->has_signal = fe_tuner_ops->get_rf_strength; + if (fe_tuner_ops->get_afc) + analog_ops->get_afc = fe_tuner_ops->get_afc; + + } else { + t->name = analog_ops->info.name; + } + +#ifdef CONFIG_MEDIA_CONTROLLER + t->sd.entity.name = t->name; +#endif + + dprintk("type set to %s\n", t->name); + + t->mode_mask = new_mode_mask; + + /* Some tuners require more initialization setup before use, + such as firmware download or device calibration. + trying to set a frequency here will just fail + FIXME: better to move set_freq to the tuner code. This is needed + on analog tuners for PLL to properly work + */ + if (tune_now) { + if (V4L2_TUNER_RADIO == t->mode) + set_radio_freq(c, t->radio_freq); + else + set_tv_freq(c, t->tv_freq); + } + + dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", + c->adapter->name, c->dev.driver->name, c->addr << 1, type, + t->mode_mask); + return; + +attach_failed: + dprintk("Tuner attach for type = %d failed.\n", t->type); + t->type = TUNER_ABSENT; + + return; +} + +/** + * tuner_s_type_addr - Sets the tuner type for a device + * + * @sd: subdev descriptor + * @tun_setup: type to be associated to a given tuner i2c address + * + * This function applies the tuner config to tuner specified + * by tun_setup structure. + * If tuner I2C address is UNSET, then it will only set the device + * if the tuner supports the mode specified in the call. 
+ * If the address is specified, the change will be applied only if + * tuner I2C address matches. + * The call can change the tuner number and the tuner mode. + */ +static int tuner_s_type_addr(struct v4l2_subdev *sd, + struct tuner_setup *tun_setup) +{ + struct tuner *t = to_tuner(sd); + struct i2c_client *c = v4l2_get_subdevdata(sd); + + dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n", + tun_setup->type, + tun_setup->addr, + tun_setup->mode_mask, + tun_setup->config); + + if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && + (t->mode_mask & tun_setup->mode_mask))) || + (tun_setup->addr == c->addr)) { + set_type(c, tun_setup->type, tun_setup->mode_mask, + tun_setup->config, tun_setup->tuner_callback); + } else + dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n", + t->type, t->mode_mask, + tun_setup->addr, tun_setup->mode_mask); + + return 0; +} + +/** + * tuner_s_config - Sets tuner configuration + * + * @sd: subdev descriptor + * @cfg: tuner configuration + * + * Calls tuner set_config() private function to set some tuner-internal + * parameters + */ +static int tuner_s_config(struct v4l2_subdev *sd, + const struct v4l2_priv_tun_config *cfg) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (t->type != cfg->tuner) + return 0; + + if (analog_ops->set_config) { + analog_ops->set_config(&t->fe, cfg->priv); + return 0; + } + + dprintk("Tuner frontend module has no way to set config\n"); + return 0; +} + +/** + * tuner_lookup - Seek for tuner adapters + * + * @adap: i2c_adapter struct + * @radio: pointer to be filled if the adapter is radio + * @tv: pointer to be filled if the adapter is TV + * + * Search for existing radio and/or TV tuners on the given I2C adapter, + * discarding demod-only adapters (tda9887). 
+ * + * Note that when this function is called from tuner_probe you can be + * certain no other devices will be added/deleted at the same time, I2C + * core protects against that. + */ +static void tuner_lookup(struct i2c_adapter *adap, + struct tuner **radio, struct tuner **tv) +{ + struct tuner *pos; + + *radio = NULL; + *tv = NULL; + + list_for_each_entry(pos, &tuner_list, list) { + int mode_mask; + + if (pos->i2c->adapter != adap || + strcmp(pos->i2c->dev.driver->name, "tuner")) + continue; + + mode_mask = pos->mode_mask; + if (*radio == NULL && mode_mask == T_RADIO) + *radio = pos; + /* Note: currently TDA9887 is the only demod-only + device. If other devices appear then we need to + make this test more general. */ + else if (*tv == NULL && pos->type != TUNER_TDA9887 && + (pos->mode_mask & T_ANALOG_TV)) + *tv = pos; + } +} + +/** + *tuner_probe - Probes the existing tuners on an I2C bus + * + * @client: i2c_client descriptor + * + * This routine probes for tuners at the expected I2C addresses. On most + * cases, if a device answers to a given I2C address, it assumes that the + * device is a tuner. On a few cases, however, an additional logic is needed + * to double check if the device is really a tuner, or to identify the tuner + * type, like on tea5767/5761 devices. + * + * During client attach, set_type is called by adapter's attach_inform callback. + * set_type must then be completed by tuner_probe. 
+ */ +static int tuner_probe(struct i2c_client *client) +{ + struct tuner *t; + struct tuner *radio; + struct tuner *tv; +#ifdef CONFIG_MEDIA_CONTROLLER + int ret; +#endif + + t = kzalloc(sizeof(struct tuner), GFP_KERNEL); + if (NULL == t) + return -ENOMEM; + v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops); + t->i2c = client; + t->name = "(tuner unset)"; + t->type = UNSET; + t->audmode = V4L2_TUNER_MODE_STEREO; + t->standby = true; + t->radio_freq = 87.5 * 16000; /* Initial freq range */ + t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */ + + if (show_i2c) { + unsigned char buffer[16]; + int rc; + + memset(buffer, 0, sizeof(buffer)); + rc = i2c_master_recv(client, buffer, sizeof(buffer)); + if (rc >= 0) + pr_info("I2C RECV = %*ph\n", rc, buffer); + } + + /* autodetection code based on the i2c addr */ + if (!no_autodetect) { + switch (client->addr) { + case 0x10: + if (tuner_symbol_probe(tea5761_autodetection, + t->i2c->adapter, + t->i2c->addr) >= 0) { + t->type = TUNER_TEA5761; + t->mode_mask = T_RADIO; + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv) + tv->mode_mask &= ~T_RADIO; + + goto register_client; + } + kfree(t); + return -ENODEV; + case 0x42: + case 0x43: + case 0x4a: + case 0x4b: + /* If chip is not tda8290, don't register. + since it can be tda9887*/ + if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter, + t->i2c->addr) >= 0) { + dprintk("tda829x detected\n"); + } else { + /* Default is being tda9887 */ + t->type = TUNER_TDA9887; + t->mode_mask = T_RADIO | T_ANALOG_TV; + goto register_client; + } + break; + case 0x60: + if (tuner_symbol_probe(tea5767_autodetection, + t->i2c->adapter, t->i2c->addr) + >= 0) { + t->type = TUNER_TEA5767; + t->mode_mask = T_RADIO; + /* Sets freq to FM range */ + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv) + tv->mode_mask &= ~T_RADIO; + + goto register_client; + } + break; + } + } + + /* Initializes only the first TV tuner on this adapter. Why only the + first? 
Because there are some devices (notably the ones with TI + tuners) that have more than one i2c address for the *same* device. + Experience shows that, except for just one case, the first + address is the right one. The exception is a Russian tuner + (ACORP_Y878F). So, the desired behavior is just to enable the + first found TV tuner. */ + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv == NULL) { + t->mode_mask = T_ANALOG_TV; + if (radio == NULL) + t->mode_mask |= T_RADIO; + dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask); + } + + /* Should be just before return */ +register_client: +#if defined(CONFIG_MEDIA_CONTROLLER) + t->sd.entity.name = t->name; + /* + * Handle the special case where the tuner has actually + * two stages: the PLL to tune into a frequency and the + * IF-PLL demodulator (tda988x). + */ + if (t->type == TUNER_TDA9887) { + t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG; + ret = media_entity_pads_init(&t->sd.entity, + IF_VID_DEC_PAD_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER; + } else { + t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO; + ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_TUNER; + } + + if (ret < 0) { + pr_err("failed to initialize media entity!\n"); + kfree(t); + return ret; + } +#endif + /* Sets a default mode */ + if (t->mode_mask & T_ANALOG_TV) + t->mode = V4L2_TUNER_ANALOG_TV; + else + t->mode = V4L2_TUNER_RADIO; + set_type(client, t->type, 
t->mode_mask, t->config, t->fe.callback); + list_add_tail(&t->list, &tuner_list); + + pr_info("Tuner %d found with type(s)%s%s.\n", + t->type, + t->mode_mask & T_RADIO ? " Radio" : "", + t->mode_mask & T_ANALOG_TV ? " TV" : ""); + return 0; +} + +/** + * tuner_remove - detaches a tuner + * + * @client: i2c_client descriptor + */ + +static void tuner_remove(struct i2c_client *client) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(client)); + + v4l2_device_unregister_subdev(&t->sd); + tuner_detach(&t->fe); + t->fe.analog_demod_priv = NULL; + + list_del(&t->list); + kfree(t); +} + +/* + * Functions to switch between Radio and TV + * + * A few cards have a separate I2C tuner for radio. Those routines + * take care of switching between TV/Radio mode, filtering only the + * commands that apply to the Radio or TV tuner. + */ + +/** + * check_mode - Verify if tuner supports the requested mode + * @t: a pointer to the module's internal struct_tuner + * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type. + * + * This function checks if the tuner is capable of tuning analog TV, + * digital TV or radio, depending on what the caller wants. If the + * tuner can't support that mode, it returns -EINVAL. Otherwise, it + * returns 0. + * This function is needed for boards that have a separate tuner for + * radio (like devices with tea5767). + * + * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to + * select a TV frequency. So, t_mode = T_ANALOG_TV could actually + * be used to represent a Digital TV too. + */ +static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode) +{ + int t_mode; + if (mode == V4L2_TUNER_RADIO) + t_mode = T_RADIO; + else + t_mode = T_ANALOG_TV; + + if ((t_mode & t->mode_mask) == 0) + return -EINVAL; + + return 0; +} + +/** + * set_mode - Switch tuner to other mode. 
+ * @t: a pointer to the module's internal struct_tuner + * @mode: enum v4l2_type (radio or TV) + * + * If tuner doesn't support the needed mode (radio or TV), prints a + * debug message and returns -EINVAL, changing its state to standby. + * Otherwise, changes the mode and returns 0. + */ +static int set_mode(struct tuner *t, enum v4l2_tuner_type mode) +{ + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (mode != t->mode) { + if (check_mode(t, mode) == -EINVAL) { + dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n", + mode); + t->standby = true; + if (analog_ops->standby) + analog_ops->standby(&t->fe); + return -EINVAL; + } + t->mode = mode; + dprintk("Changing to mode %d\n", mode); + } + return 0; +} + +/** + * set_freq - Set the tuner to the desired frequency. + * @t: a pointer to the module's internal struct_tuner + * @freq: frequency to set (0 means to use the current frequency) + */ +static void set_freq(struct tuner *t, unsigned int freq) +{ + struct i2c_client *client = v4l2_get_subdevdata(&t->sd); + + if (t->mode == V4L2_TUNER_RADIO) { + if (!freq) + freq = t->radio_freq; + set_radio_freq(client, freq); + } else { + if (!freq) + freq = t->tv_freq; + set_tv_freq(client, freq); + } +} + +/* + * Functions that are specific for TV mode + */ + +/** + * set_tv_freq - Set tuner frequency, freq in Units of 62.5 kHz = 1/16MHz + * + * @c: i2c_client descriptor + * @freq: frequency + */ +static void set_tv_freq(struct i2c_client *c, unsigned int freq) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + struct analog_parameters params = { + .mode = t->mode, + .audmode = t->audmode, + .std = t->std + }; + + if (t->type == UNSET) { + pr_warn("tuner type not set\n"); + return; + } + if (NULL == analog_ops->set_params) { + pr_warn("Tuner has no way to set tv freq\n"); + return; + } + if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) { + dprintk("TV freq 
(%d.%02d) out of range (%d-%d)\n",
+			freq / 16, freq % 16 * 100 / 16, tv_range[0],
+			tv_range[1]);
+		/* V4L2 spec: if the freq is not possible then the closest
+		   possible value should be selected */
+		if (freq < tv_range[0] * 16)
+			freq = tv_range[0] * 16;
+		else
+			freq = tv_range[1] * 16;
+	}
+	params.frequency = freq;
+	dprintk("tv freq set to %d.%02d\n",
+		freq / 16, freq % 16 * 100 / 16);
+	t->tv_freq = freq;
+	t->standby = false;
+
+	analog_ops->set_params(&t->fe, &params);
+}
+
+/**
+ * tuner_fixup_std - force a given video standard variant
+ *
+ * @t: tuner internal struct
+ * @std: TV standard
+ *
+ * A few devices or drivers have problem to detect some standard variations.
+ * On other operational systems, the drivers generally have a per-country
+ * code, and some logic to apply per-country hacks. V4L2 API doesn't provide
+ * such hacks. Instead, it relies on a proper video standard selection from
+ * the userspace application. However, as some apps are buggy, not allowing
+ * to distinguish all video standard variations, a modprobe parameter can
+ * be used to force a video standard match.
+ */ +static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std) +{ + if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) { + switch (pal[0]) { + case '6': + return V4L2_STD_PAL_60; + case 'b': + case 'B': + case 'g': + case 'G': + return V4L2_STD_PAL_BG; + case 'i': + case 'I': + return V4L2_STD_PAL_I; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_PAL_DK; + case 'M': + case 'm': + return V4L2_STD_PAL_M; + case 'N': + case 'n': + if (pal[1] == 'c' || pal[1] == 'C') + return V4L2_STD_PAL_Nc; + return V4L2_STD_PAL_N; + default: + pr_warn("pal= argument not recognised\n"); + break; + } + } + if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) { + switch (secam[0]) { + case 'b': + case 'B': + case 'g': + case 'G': + case 'h': + case 'H': + return V4L2_STD_SECAM_B | + V4L2_STD_SECAM_G | + V4L2_STD_SECAM_H; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_SECAM_DK; + case 'l': + case 'L': + if ((secam[1] == 'C') || (secam[1] == 'c')) + return V4L2_STD_SECAM_LC; + return V4L2_STD_SECAM_L; + default: + pr_warn("secam= argument not recognised\n"); + break; + } + } + + if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) { + switch (ntsc[0]) { + case 'm': + case 'M': + return V4L2_STD_NTSC_M; + case 'j': + case 'J': + return V4L2_STD_NTSC_M_JP; + case 'k': + case 'K': + return V4L2_STD_NTSC_M_KR; + default: + pr_info("ntsc= argument not recognised\n"); + break; + } + } + return std; +} + +/* + * Functions that are specific for Radio mode + */ + +/** + * set_radio_freq - Set tuner frequency, freq in Units of 62.5 Hz = 1/16kHz + * + * @c: i2c_client descriptor + * @freq: frequency + */ +static void set_radio_freq(struct i2c_client *c, unsigned int freq) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + struct analog_parameters params = { + .mode = t->mode, + .audmode = t->audmode, + .std = t->std + }; + + if (t->type == UNSET) { + 
pr_warn("tuner type not set\n");
+		return;
+	}
+	if (NULL == analog_ops->set_params) {
+		pr_warn("tuner has no way to set radio frequency\n");
+		return;
+	}
+	if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
+		dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
+			freq / 16000, freq % 16000 * 100 / 16000,
+			radio_range[0], radio_range[1]);
+		/* V4L2 spec: if the freq is not possible then the closest
+		   possible value should be selected */
+		if (freq < radio_range[0] * 16000)
+			freq = radio_range[0] * 16000;
+		else
+			freq = radio_range[1] * 16000;
+	}
+	params.frequency = freq;
+	dprintk("radio freq set to %d.%02d\n",
+		freq / 16000, freq % 16000 * 100 / 16000);
+	t->radio_freq = freq;
+	t->standby = false;
+
+	analog_ops->set_params(&t->fe, &params);
+	/*
+	 * The tuner driver might decide to change the audmode if it only
+	 * supports stereo, so update t->audmode.
+	 */
+	t->audmode = params.audmode;
+}
+
+/*
+ * Debug function for reporting tuner status to userspace
+ */
+
+/**
+ * tuner_status - Dumps the current tuner status at dmesg
+ * @fe: pointer to struct dvb_frontend
+ *
+ * This callback is used only for driver debug purposes, answering to
+ * VIDIOC_LOG_STATUS. No changes should happen on this call.
+ */
+static void tuner_status(struct dvb_frontend *fe)
+{
+	struct tuner *t = fe->analog_demod_priv;
+	unsigned long freq, freq_fraction;
+	struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+	struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
+	const char *p;
+
+	switch (t->mode) {
+	case V4L2_TUNER_RADIO:
+		p = "radio";
+		break;
+	case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
+		p = "digital TV";
+		break;
+	case V4L2_TUNER_ANALOG_TV:
+	default:
+		p = "analog TV";
+		break;
+	}
+	if (t->mode == V4L2_TUNER_RADIO) {
+		freq = t->radio_freq / 16000;
+		freq_fraction = (t->radio_freq % 16000) * 100 / 16000;
+	} else {
+		freq = t->tv_freq / 16;
+		freq_fraction = (t->tv_freq % 16) * 100 / 16;
+	}
+	pr_info("Tuner mode: %s%s\n", p,
+		t->standby ? " on standby mode" : "");
+	pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+	pr_info("Standard: 0x%08lx\n", (unsigned long)t->std);
+	if (t->mode != V4L2_TUNER_RADIO)
+		return;
+	if (fe_tuner_ops->get_status) {
+		u32 tuner_status = 0;
+
+		fe_tuner_ops->get_status(&t->fe, &tuner_status);
+		if (tuner_status & TUNER_STATUS_LOCKED)
+			pr_info("Tuner is locked.\n");
+		if (tuner_status & TUNER_STATUS_STEREO)
+			pr_info("Stereo: yes\n");
+	}
+	if (analog_ops->has_signal) {
+		u16 signal;
+
+		if (!analog_ops->has_signal(fe, &signal))
+			pr_info("Signal strength: %hu\n", signal);
+	}
+}
+
+/*
+ * Function to explicitly change mode to radio.
Probably not needed anymore + */ + +static int tuner_s_radio(struct v4l2_subdev *sd) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, V4L2_TUNER_RADIO) == 0) + set_freq(t, 0); + return 0; +} + +/* + * Tuner callbacks to handle userspace ioctl's + */ + +/** + * tuner_standby - places the tuner in standby mode + * @sd: pointer to struct v4l2_subdev + */ +static int tuner_standby(struct v4l2_subdev *sd) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + dprintk("Putting tuner to sleep\n"); + t->standby = true; + if (analog_ops->standby) + analog_ops->standby(&t->fe); + return 0; +} + +static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, V4L2_TUNER_ANALOG_TV)) + return 0; + + t->std = tuner_fixup_std(t, std); + if (t->std != std) + dprintk("Fixup standard %llx to %llx\n", std, t->std); + set_freq(t, 0); + return 0; +} + +static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, f->type) == 0) + set_freq(t, f->frequency); + return 0; +} + +/** + * tuner_g_frequency - Get the tuned frequency for the tuner + * @sd: pointer to struct v4l2_subdev + * @f: pointer to struct v4l2_frequency + * + * At return, the structure f will be filled with tuner frequency + * if the tuner matches the f->type. + * Note: f->type should be initialized before calling it. + * This is done by either video_ioctl2 or by the bridge driver. + */ +static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f) +{ + struct tuner *t = to_tuner(sd); + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + + if (check_mode(t, f->type) == -EINVAL) + return 0; + if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) { + u32 abs_freq; + + fe_tuner_ops->get_frequency(&t->fe, &abs_freq); + f->frequency = (V4L2_TUNER_RADIO == t->mode) ? 
+ DIV_ROUND_CLOSEST(abs_freq * 2, 125) : + DIV_ROUND_CLOSEST(abs_freq, 62500); + } else { + f->frequency = (V4L2_TUNER_RADIO == f->type) ? + t->radio_freq : t->tv_freq; + } + return 0; +} + +/** + * tuner_g_tuner - Fill in tuner information + * @sd: pointer to struct v4l2_subdev + * @vt: pointer to struct v4l2_tuner + * + * At return, the structure vt will be filled with tuner information + * if the tuner matches vt->type. + * Note: vt->type should be initialized before calling it. + * This is done by either video_ioctl2 or by the bridge driver. + */ +static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + + if (check_mode(t, vt->type) == -EINVAL) + return 0; + if (vt->type == t->mode && analog_ops->get_afc) + analog_ops->get_afc(&t->fe, &vt->afc); + if (vt->type == t->mode && analog_ops->has_signal) { + u16 signal = (u16)vt->signal; + + if (!analog_ops->has_signal(&t->fe, &signal)) + vt->signal = signal; + } + if (vt->type != V4L2_TUNER_RADIO) { + vt->capability |= V4L2_TUNER_CAP_NORM; + vt->rangelow = tv_range[0] * 16; + vt->rangehigh = tv_range[1] * 16; + return 0; + } + + /* radio mode */ + if (vt->type == t->mode) { + vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; + if (fe_tuner_ops->get_status) { + u32 tuner_status = 0; + + fe_tuner_ops->get_status(&t->fe, &tuner_status); + vt->rxsubchans = + (tuner_status & TUNER_STATUS_STEREO) ? + V4L2_TUNER_SUB_STEREO : + V4L2_TUNER_SUB_MONO; + } + vt->audmode = t->audmode; + } + vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; + vt->rangelow = radio_range[0] * 16000; + vt->rangehigh = radio_range[1] * 16000; + + return 0; +} + +/** + * tuner_s_tuner - Set the tuner's audio mode + * @sd: pointer to struct v4l2_subdev + * @vt: pointer to struct v4l2_tuner + * + * Sets the audio mode if the tuner matches vt->type. 
+ * Note: vt->type should be initialized before calling it. + * This is done by either video_ioctl2 or by the bridge driver. + */ +static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, vt->type)) + return 0; + + if (t->mode == V4L2_TUNER_RADIO) { + t->audmode = vt->audmode; + /* + * For radio audmode can only be mono or stereo. Map any + * other values to stereo. The actual tuner driver that is + * called in set_radio_freq can decide to limit the audmode to + * mono if only mono is supported. + */ + if (t->audmode != V4L2_TUNER_MODE_MONO && + t->audmode != V4L2_TUNER_MODE_STEREO) + t->audmode = V4L2_TUNER_MODE_STEREO; + } + set_freq(t, 0); + + return 0; +} + +static int tuner_log_status(struct v4l2_subdev *sd) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (analog_ops->tuner_status) + analog_ops->tuner_status(&t->fe); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tuner_suspend(struct device *dev) +{ + struct i2c_client *c = to_i2c_client(dev); + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + dprintk("suspend\n"); + + if (t->fe.ops.tuner_ops.suspend) + t->fe.ops.tuner_ops.suspend(&t->fe); + else if (!t->standby && analog_ops->standby) + analog_ops->standby(&t->fe); + + return 0; +} + +static int tuner_resume(struct device *dev) +{ + struct i2c_client *c = to_i2c_client(dev); + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + + dprintk("resume\n"); + + if (t->fe.ops.tuner_ops.resume) + t->fe.ops.tuner_ops.resume(&t->fe); + else if (!t->standby) + if (set_mode(t, t->mode) == 0) + set_freq(t, 0); + + return 0; +} +#endif + +static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + + /* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have + to handle it here. 
+ There must be a better way of doing this... */ + switch (cmd) { + case TUNER_SET_CONFIG: + return tuner_s_config(sd, arg); + } + return -ENOIOCTLCMD; +} + +/* + * Callback structs + */ + +static const struct v4l2_subdev_core_ops tuner_core_ops = { + .log_status = tuner_log_status, +}; + +static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = { + .standby = tuner_standby, + .s_radio = tuner_s_radio, + .g_tuner = tuner_g_tuner, + .s_tuner = tuner_s_tuner, + .s_frequency = tuner_s_frequency, + .g_frequency = tuner_g_frequency, + .s_type_addr = tuner_s_type_addr, + .s_config = tuner_s_config, +}; + +static const struct v4l2_subdev_video_ops tuner_video_ops = { + .s_std = tuner_s_std, +}; + +static const struct v4l2_subdev_ops tuner_ops = { + .core = &tuner_core_ops, + .tuner = &tuner_tuner_ops, + .video = &tuner_video_ops, +}; + +/* + * I2C structs and module init functions + */ + +static const struct dev_pm_ops tuner_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume) +}; + +static const struct i2c_device_id tuner_id[] = { + { "tuner", }, /* autodetect */ + { } +}; +MODULE_DEVICE_TABLE(i2c, tuner_id); + +static struct i2c_driver tuner_driver = { + .driver = { + .name = "tuner", + .pm = &tuner_pm_ops, + }, + .probe = tuner_probe, + .remove = tuner_remove, + .command = tuner_command, + .id_table = tuner_id, +}; + +module_i2c_driver(tuner_driver); + +MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners"); +MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer"); +MODULE_LICENSE("GPL"); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-async.c b/6.17.0/drivers/media/v4l2-core/v4l2-async.c new file mode 100644 index 0000000..ee884a8 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-async.c @@ -0,0 +1,976 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 asynchronous subdevice registration API + * + * Copyright (C) 2012-2013, Guennadi Liakhovetski + */ + +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "v4l2-subdev-priv.h" + +static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc) +{ + if (!n->ops || !n->ops->bound) + return 0; + + return n->ops->bound(n, subdev, asc); +} + +static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc) +{ + if (!n->ops || !n->ops->unbind) + return; + + n->ops->unbind(n, subdev, asc); +} + +static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n) +{ + if (!n->ops || !n->ops->complete) + return 0; + + return n->ops->complete(n); +} + +static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n, + struct v4l2_async_connection *asc) +{ + if (!n->ops || !n->ops->destroy) + return; + + n->ops->destroy(asc); +} + +static bool match_i2c(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_match_desc *match) +{ +#if IS_ENABLED(CONFIG_I2C) + struct i2c_client *client = i2c_verify_client(sd->dev); + + return client && + match->i2c.adapter_id == client->adapter->nr && + match->i2c.address == client->addr; +#else + return false; +#endif +} + +static struct device *notifier_dev(struct v4l2_async_notifier *notifier) +{ + if (notifier->sd) + return notifier->sd->dev; + + if (notifier->v4l2_dev) + return notifier->v4l2_dev->dev; + + return NULL; +} + +static bool +match_fwnode_one(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode, + struct v4l2_async_match_desc *match) +{ + struct fwnode_handle *asd_dev_fwnode; + bool ret; + + dev_dbg(notifier_dev(notifier), + "v4l2-async: fwnode match: need %pfw, trying %pfw\n", + sd_fwnode, match->fwnode); + + if (sd_fwnode == match->fwnode) { + dev_dbg(notifier_dev(notifier), + "v4l2-async: direct match found\n"); + return true; + } + + if 
(!fwnode_graph_is_endpoint(match->fwnode)) { + dev_dbg(notifier_dev(notifier), + "v4l2-async: direct match not found\n"); + return false; + } + + asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode); + + ret = sd_fwnode == asd_dev_fwnode; + + fwnode_handle_put(asd_dev_fwnode); + + dev_dbg(notifier_dev(notifier), + "v4l2-async: device--endpoint match %sfound\n", + ret ? "" : "not "); + + return ret; +} + +static bool match_fwnode(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_match_desc *match) +{ + dev_dbg(notifier_dev(notifier), + "v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n", + dev_fwnode(notifier_dev(notifier)), sd->fwnode); + + if (!list_empty(&sd->async_subdev_endpoint_list)) { + struct v4l2_async_subdev_endpoint *ase; + + dev_dbg(sd->dev, + "v4l2-async: endpoint fwnode list available, looking for %pfw\n", + match->fwnode); + + list_for_each_entry(ase, &sd->async_subdev_endpoint_list, + async_subdev_endpoint_entry) { + bool matched = ase->endpoint == match->fwnode; + + dev_dbg(sd->dev, + "v4l2-async: endpoint-endpoint match %sfound with %pfw\n", + matched ? "" : "not ", ase->endpoint); + + if (matched) + return true; + } + + dev_dbg(sd->dev, "async: no endpoint matched\n"); + + return false; + } + + if (match_fwnode_one(notifier, sd, sd->fwnode, match)) + return true; + + /* Also check the secondary fwnode. 
*/
+	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
+		return false;
+
+	dev_dbg(notifier_dev(notifier),
+		"v4l2-async: trying secondary fwnode match\n");
+
+	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
+}
+
+static LIST_HEAD(subdev_list);
+static LIST_HEAD(notifier_list);
+static DEFINE_MUTEX(list_lock);
+
+static struct v4l2_async_connection *
+v4l2_async_find_match(struct v4l2_async_notifier *notifier,
+		      struct v4l2_subdev *sd)
+{
+	bool (*match)(struct v4l2_async_notifier *notifier,
+		      struct v4l2_subdev *sd,
+		      struct v4l2_async_match_desc *match);
+	struct v4l2_async_connection *asc;
+
+	list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
+		/* bus_type has been verified valid before */
+		switch (asc->match.type) {
+		case V4L2_ASYNC_MATCH_TYPE_I2C:
+			match = match_i2c;
+			break;
+		case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+			match = match_fwnode;
+			break;
+		default:
+			/* Cannot happen, unless someone breaks us */
+			WARN_ON(true);
+			return NULL;
+		}
+
+		/* match cannot be NULL here */
+		if (match(notifier, sd, &asc->match))
+			return asc;
+	}
+
+	return NULL;
+}
+
+/* Compare two async match descriptors for equivalence */
+static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
+				   struct v4l2_async_match_desc *match2)
+{
+	if (match1->type != match2->type)
+		return false;
+
+	switch (match1->type) {
+	case V4L2_ASYNC_MATCH_TYPE_I2C:
+		return match1->i2c.adapter_id == match2->i2c.adapter_id &&
+			match1->i2c.address == match2->i2c.address;
+	case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+		return match1->fwnode == match2->fwnode;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+/* Find the sub-device notifier registered by a sub-device driver. */
+static struct v4l2_async_notifier *
+v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
+{
+	struct v4l2_async_notifier *n;
+
+	list_for_each_entry(n, &notifier_list, notifier_entry)
+		if (n->sd == sd)
+			return n;
+
+	return NULL;
+}
+
+/* Get v4l2_device related to the notifier if one can be found.
*/
+static struct v4l2_device *
+v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
+{
+	while (notifier->parent)
+		notifier = notifier->parent;
+
+	return notifier->v4l2_dev;
+}
+
+/*
+ * Return true if all child sub-device notifiers are complete, false otherwise.
+ */
+static bool
+v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
+{
+	struct v4l2_async_connection *asc;
+
+	if (!list_empty(&notifier->waiting_list))
+		return false;
+
+	list_for_each_entry(asc, &notifier->done_list, asc_entry) {
+		struct v4l2_async_notifier *subdev_notifier =
+			v4l2_async_find_subdev_notifier(asc->sd);
+
+		if (subdev_notifier &&
+		    !v4l2_async_nf_can_complete(subdev_notifier))
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Complete the master notifier if possible. This is done when all async
+ * sub-devices have been bound; v4l2_device is also available then.
+ */
+static int
+v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
+{
+	struct v4l2_async_notifier *__notifier = notifier;
+
+	/* Quick check whether there are still more sub-devices here. */
+	if (!list_empty(&notifier->waiting_list))
+		return 0;
+
+	if (notifier->sd)
+		dev_dbg(notifier_dev(notifier),
+			"v4l2-async: trying to complete\n");
+
+	/* Check the entire notifier tree; find the root notifier first. */
+	while (notifier->parent)
+		notifier = notifier->parent;
+
+	/* This is root if it has v4l2_dev. */
+	if (!notifier->v4l2_dev) {
+		dev_dbg(notifier_dev(__notifier),
+			"v4l2-async: V4L2 device not available\n");
+		return 0;
+	}
+
+	/* Is everything ready?
*/ + if (!v4l2_async_nf_can_complete(notifier)) + return 0; + + dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n"); + + return v4l2_async_nf_call_complete(notifier); +} + +static int +v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier); + +static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n, + struct v4l2_subdev *sd) +{ +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) + struct media_link *link; + + if (sd->entity.function != MEDIA_ENT_F_LENS && + sd->entity.function != MEDIA_ENT_F_FLASH) + return 0; + + if (!n->sd) { + dev_warn(notifier_dev(n), + "not a sub-device notifier, not creating an ancillary link for %s!\n", + dev_name(sd->dev)); + return 0; + } + + link = media_create_ancillary_link(&n->sd->entity, &sd->entity); + + return IS_ERR(link) ? PTR_ERR(link) : 0; +#else + return 0; +#endif +} + +static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct v4l2_async_connection *asc) +{ + struct v4l2_async_notifier *subdev_notifier; + bool registered = false; + int ret; + + if (list_empty(&sd->asc_list)) { + ret = __v4l2_device_register_subdev(v4l2_dev, sd, sd->owner); + if (ret < 0) + return ret; + registered = true; + } + + ret = v4l2_async_nf_call_bound(notifier, sd, asc); + if (ret < 0) { + if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) + dev_dbg(notifier_dev(notifier), + "failed binding %pfw (%d)\n", + asc->match.fwnode, ret); + goto err_unregister_subdev; + } + + if (registered) { + /* + * Depending of the function of the entities involved, we may + * want to create links between them (for example between a + * sensor and its lens or between a sensor's source pad and the + * connected device's sink pad). 
+		 */
+		ret = v4l2_async_create_ancillary_links(notifier, sd);
+		if (ret) {
+			if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+				dev_dbg(notifier_dev(notifier),
+					"failed creating links for %pfw (%d)\n",
+					asc->match.fwnode, ret);
+			goto err_call_unbind;
+		}
+	}
+
+	list_add(&asc->asc_subdev_entry, &sd->asc_list);
+	asc->sd = sd;
+
+	/* Move from the waiting list to notifier's done */
+	list_move(&asc->asc_entry, &notifier->done_list);
+
+	dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
+		dev_name(sd->dev), ret);
+
+	/*
+	 * See if the sub-device has a notifier. If not, return here.
+	 */
+	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+	if (!subdev_notifier || subdev_notifier->parent)
+		return 0;
+
+	/*
+	 * Proceed with checking for the sub-device notifier's async
+	 * sub-devices, and return the result. The error will be handled by the
+	 * caller.
+	 */
+	subdev_notifier->parent = notifier;
+
+	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
+
+err_call_unbind:
+	v4l2_async_nf_call_unbind(notifier, sd, asc);
+	list_del(&asc->asc_subdev_entry);
+
+err_unregister_subdev:
+	if (registered)
+		v4l2_device_unregister_subdev(sd);
+
+	return ret;
+}
+
+/* Test all async sub-devices in a notifier for a match.
*/
+static int
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
+{
+	struct v4l2_device *v4l2_dev =
+		v4l2_async_nf_find_v4l2_dev(notifier);
+	struct v4l2_subdev *sd;
+
+	if (!v4l2_dev)
+		return 0;
+
+	dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");
+
+again:
+	list_for_each_entry(sd, &subdev_list, async_list) {
+		struct v4l2_async_connection *asc;
+		int ret;
+
+		asc = v4l2_async_find_match(notifier, sd);
+		if (!asc)
+			continue;
+
+		dev_dbg(notifier_dev(notifier),
+			"v4l2-async: match found, subdev %s\n", sd->name);
+
+		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * v4l2_async_match_notify() may lead to registering a
+		 * new notifier and thus changing the async subdevs
+		 * list. In order to proceed safely from here, restart
+		 * parsing the list from the beginning.
+		 */
+		goto again;
+	}
+
+	return 0;
+}
+
+static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
+					 struct v4l2_async_connection *asc)
+{
+	list_move_tail(&asc->asc_entry, &notifier->waiting_list);
+	if (list_is_singular(&asc->asc_subdev_entry)) {
+		v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
+		v4l2_device_unregister_subdev(asc->sd);
+		asc->sd = NULL;
+	}
+	list_del(&asc->asc_subdev_entry);
+}
+
+/* Unbind all sub-devices in the notifier tree. */
+static void
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+{
+	struct v4l2_async_connection *asc, *asc_tmp;
+
+	list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list,
+				 asc_entry) {
+		struct v4l2_async_notifier *subdev_notifier =
+			v4l2_async_find_subdev_notifier(asc->sd);
+
+		if (subdev_notifier)
+			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
+
+		v4l2_async_unbind_subdev_one(notifier, asc);
+	}
+
+	notifier->parent = NULL;
+}
+
+/* See if an async sub-device can be found in a notifier's lists.
*/
+static bool
+v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
+				    struct v4l2_async_match_desc *match)
+{
+	struct v4l2_async_connection *asc;
+
+	list_for_each_entry(asc, &notifier->waiting_list, asc_entry)
+		if (v4l2_async_match_equal(&asc->match, match))
+			return true;
+
+	list_for_each_entry(asc, &notifier->done_list, asc_entry)
+		if (v4l2_async_match_equal(&asc->match, match))
+			return true;
+
+	return false;
+}
+
+/*
+ * Find out whether an async sub-device was set up already or whether it exists
+ * in a given notifier.
+ */
+static bool
+v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
+			      struct v4l2_async_match_desc *match)
+{
+	struct list_head *heads[] = {
+		&notifier->waiting_list,
+		&notifier->done_list,
+	};
+	unsigned int i;
+
+	lockdep_assert_held(&list_lock);
+
+	/* Check that an asd is not being added more than once. */
+	for (i = 0; i < ARRAY_SIZE(heads); i++) {
+		struct v4l2_async_connection *asc;
+
+		list_for_each_entry(asc, heads[i], asc_entry) {
+			if (&asc->match == match)
+				continue;
+			if (v4l2_async_match_equal(&asc->match, match))
+				return true;
+		}
+	}
+
+	/* Check that an asc does not exist in other notifiers.
*/ + list_for_each_entry(notifier, ¬ifier_list, notifier_entry) + if (v4l2_async_nf_has_async_match_entry(notifier, match)) + return true; + + return false; +} + +static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier, + struct v4l2_async_match_desc *match) +{ + struct device *dev = notifier_dev(notifier); + + switch (match->type) { + case V4L2_ASYNC_MATCH_TYPE_I2C: + case V4L2_ASYNC_MATCH_TYPE_FWNODE: + if (v4l2_async_nf_has_async_match(notifier, match)) { + dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n"); + return -EEXIST; + } + break; + default: + dev_err(dev, "v4l2-async: Invalid match type %u on %p\n", + match->type, match); + return -EINVAL; + } + + return 0; +} + +void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev) +{ + INIT_LIST_HEAD(¬ifier->waiting_list); + INIT_LIST_HEAD(¬ifier->done_list); + INIT_LIST_HEAD(¬ifier->notifier_entry); + notifier->v4l2_dev = v4l2_dev; +} +EXPORT_SYMBOL(v4l2_async_nf_init); + +void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd) +{ + INIT_LIST_HEAD(¬ifier->waiting_list); + INIT_LIST_HEAD(¬ifier->done_list); + INIT_LIST_HEAD(¬ifier->notifier_entry); + notifier->sd = sd; +} +EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init); + +static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_connection *asc; + int ret; + + mutex_lock(&list_lock); + + list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry) { + ret = v4l2_async_nf_match_valid(notifier, &asc->match); + if (ret) + goto err_unlock; + } + + ret = v4l2_async_nf_try_all_subdevs(notifier); + if (ret < 0) + goto err_unbind; + + ret = v4l2_async_nf_try_complete(notifier); + if (ret < 0) + goto err_unbind; + + /* Keep also completed notifiers on the list */ + list_add(¬ifier->notifier_entry, ¬ifier_list); + + mutex_unlock(&list_lock); + + return 0; + +err_unbind: + /* + * On failure, unbind all sub-devices registered 
through this notifier. + */ + v4l2_async_nf_unbind_all_subdevs(notifier); + +err_unlock: + mutex_unlock(&list_lock); + + return ret; +} + +int v4l2_async_nf_register(struct v4l2_async_notifier *notifier) +{ + if (WARN_ON(!notifier->v4l2_dev == !notifier->sd)) + return -EINVAL; + + return __v4l2_async_nf_register(notifier); +} +EXPORT_SYMBOL(v4l2_async_nf_register); + +static void +__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) +{ + if (!notifier || (!notifier->v4l2_dev && !notifier->sd)) + return; + + v4l2_async_nf_unbind_all_subdevs(notifier); + + list_del_init(¬ifier->notifier_entry); +} + +void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) +{ + mutex_lock(&list_lock); + + __v4l2_async_nf_unregister(notifier); + + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(v4l2_async_nf_unregister); + +static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_connection *asc, *tmp; + + if (!notifier || !notifier->waiting_list.next) + return; + + WARN_ON(!list_empty(¬ifier->done_list)); + + list_for_each_entry_safe(asc, tmp, ¬ifier->waiting_list, asc_entry) { + list_del(&asc->asc_entry); + v4l2_async_nf_call_destroy(notifier, asc); + + if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) + fwnode_handle_put(asc->match.fwnode); + + kfree(asc); + } + + notifier->sd = NULL; + notifier->v4l2_dev = NULL; +} + +void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier) +{ + mutex_lock(&list_lock); + + __v4l2_async_nf_cleanup(notifier); + + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup); + +static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier, + struct v4l2_async_connection *asc) +{ + mutex_lock(&list_lock); + + list_add_tail(&asc->asc_entry, ¬ifier->waiting_list); + + mutex_unlock(&list_lock); +} + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier, + struct fwnode_handle *fwnode, + unsigned int asc_struct_size) 
+{ + struct v4l2_async_connection *asc; + + asc = kzalloc(asc_struct_size, GFP_KERNEL); + if (!asc) + return ERR_PTR(-ENOMEM); + + asc->notifier = notifier; + asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE; + asc->match.fwnode = fwnode_handle_get(fwnode); + + __v4l2_async_nf_add_connection(notifier, asc); + + return asc; +} +EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode); + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + unsigned int asc_struct_size) +{ + struct v4l2_async_connection *asc; + struct fwnode_handle *remote; + + remote = fwnode_graph_get_remote_endpoint(endpoint); + if (!remote) + return ERR_PTR(-ENOTCONN); + + asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size); + /* + * Calling __v4l2_async_nf_add_fwnode grabs a refcount, + * so drop the one we got in fwnode_graph_get_remote_port_parent. + */ + fwnode_handle_put(remote); + return asc; +} +EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote); + +struct v4l2_async_connection * +__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id, + unsigned short address, unsigned int asc_struct_size) +{ + struct v4l2_async_connection *asc; + + asc = kzalloc(asc_struct_size, GFP_KERNEL); + if (!asc) + return ERR_PTR(-ENOMEM); + + asc->notifier = notifier; + asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C; + asc->match.i2c.adapter_id = adapter_id; + asc->match.i2c.address = address; + + __v4l2_async_nf_add_connection(notifier, asc); + + return asc; +} +EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c); + +int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd, + struct fwnode_handle *fwnode) +{ + struct v4l2_async_subdev_endpoint *ase; + + ase = kmalloc(sizeof(*ase), GFP_KERNEL); + if (!ase) + return -ENOMEM; + + ase->endpoint = fwnode; + list_add(&ase->async_subdev_endpoint_entry, + &sd->async_subdev_endpoint_list); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add); + +struct 
v4l2_async_connection * +v4l2_async_connection_unique(struct v4l2_subdev *sd) +{ + if (!list_is_singular(&sd->asc_list)) + return NULL; + + return list_first_entry(&sd->asc_list, + struct v4l2_async_connection, asc_subdev_entry); +} +EXPORT_SYMBOL_GPL(v4l2_async_connection_unique); + +int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module) +{ + struct v4l2_async_notifier *subdev_notifier; + struct v4l2_async_notifier *notifier; + struct v4l2_async_connection *asc; + int ret; + + INIT_LIST_HEAD(&sd->asc_list); + + /* + * No reference taken. The reference is held by the device (struct + * v4l2_subdev.dev), and async sub-device does not exist independently + * of the device at any point of time. + * + * The async sub-device shall always be registered for its device node, + * not the endpoint node. + */ + if (!sd->fwnode && sd->dev) { + sd->fwnode = dev_fwnode(sd->dev); + } else if (fwnode_graph_is_endpoint(sd->fwnode)) { + dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n"); + return -EINVAL; + } + + sd->owner = module; + + mutex_lock(&list_lock); + + list_for_each_entry(notifier, ¬ifier_list, notifier_entry) { + struct v4l2_device *v4l2_dev = + v4l2_async_nf_find_v4l2_dev(notifier); + + if (!v4l2_dev) + continue; + + while ((asc = v4l2_async_find_match(notifier, sd))) { + ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, + asc); + if (ret) + goto err_unbind; + + ret = v4l2_async_nf_try_complete(notifier); + if (ret) + goto err_unbind; + } + } + + /* None matched, wait for hot-plugging */ + list_add(&sd->async_list, &subdev_list); + + mutex_unlock(&list_lock); + + return 0; + +err_unbind: + /* + * Complete failed. Unbind the sub-devices bound through registering + * this async sub-device. 
+ */ + subdev_notifier = v4l2_async_find_subdev_notifier(sd); + if (subdev_notifier) + v4l2_async_nf_unbind_all_subdevs(subdev_notifier); + + if (asc) + v4l2_async_unbind_subdev_one(notifier, asc); + + mutex_unlock(&list_lock); + + sd->owner = NULL; + + return ret; +} +EXPORT_SYMBOL(__v4l2_async_register_subdev); + +void v4l2_async_unregister_subdev(struct v4l2_subdev *sd) +{ + struct v4l2_async_connection *asc, *asc_tmp; + + if (!sd->async_list.next) + return; + + v4l2_subdev_put_privacy_led(sd); + + mutex_lock(&list_lock); + + __v4l2_async_nf_unregister(sd->subdev_notifier); + __v4l2_async_nf_cleanup(sd->subdev_notifier); + kfree(sd->subdev_notifier); + sd->subdev_notifier = NULL; + + if (sd->asc_list.next) { + list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list, + asc_subdev_entry) { + v4l2_async_unbind_subdev_one(asc->notifier, asc); + } + } + + list_del(&sd->async_list); + sd->async_list.next = NULL; + + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(v4l2_async_unregister_subdev); + +static void print_waiting_match(struct seq_file *s, + struct v4l2_async_match_desc *match) +{ + switch (match->type) { + case V4L2_ASYNC_MATCH_TYPE_I2C: + seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id, + match->i2c.address); + break; + case V4L2_ASYNC_MATCH_TYPE_FWNODE: { + struct fwnode_handle *devnode, *fwnode = match->fwnode; + + devnode = fwnode_graph_is_endpoint(fwnode) ? + fwnode_graph_get_port_parent(fwnode) : + fwnode_handle_get(fwnode); + + seq_printf(s, " [fwnode] dev=%s, node=%pfw\n", + devnode->dev ? 
dev_name(devnode->dev) : "nil", + fwnode); + + fwnode_handle_put(devnode); + break; + } + } +} + +static const char * +v4l2_async_nf_name(struct v4l2_async_notifier *notifier) +{ + if (notifier->v4l2_dev) + return notifier->v4l2_dev->name; + else if (notifier->sd) + return notifier->sd->name; + else + return "nil"; +} + +static int pending_subdevs_show(struct seq_file *s, void *data) +{ + struct v4l2_async_notifier *notif; + struct v4l2_async_connection *asc; + + mutex_lock(&list_lock); + + list_for_each_entry(notif, ¬ifier_list, notifier_entry) { + seq_printf(s, "%s:\n", v4l2_async_nf_name(notif)); + list_for_each_entry(asc, ¬if->waiting_list, asc_entry) + print_waiting_match(s, &asc->match); + } + + mutex_unlock(&list_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(pending_subdevs); + +static struct dentry *v4l2_async_debugfs_dir; + +static int __init v4l2_async_init(void) +{ + v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL); + debugfs_create_file("pending_async_subdevices", 0444, + v4l2_async_debugfs_dir, NULL, + &pending_subdevs_fops); + + return 0; +} + +static void __exit v4l2_async_exit(void) +{ + debugfs_remove_recursive(v4l2_async_debugfs_dir); +} + +subsys_initcall(v4l2_async_init); +module_exit(v4l2_async_exit); + +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Ezequiel Garcia "); +MODULE_DESCRIPTION("V4L2 asynchronous subdevice registration API"); +MODULE_LICENSE("GPL"); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-cci.c b/6.17.0/drivers/media/v4l2-core/v4l2-cci.c new file mode 100644 index 0000000..e9ecf47 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-cci.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MIPI Camera Control Interface (CCI) register access helpers. 
+ * + * Copyright (C) 2023 Hans de Goede + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +int cci_read(struct regmap *map, u32 reg, u64 *val, int *err) +{ + bool little_endian; + unsigned int len; + u8 buf[8]; + int ret; + + /* + * TODO: Fix smatch. Assign *val to 0 here in order to avoid + * failing a smatch check on caller when the caller proceeds to + * read *val without initialising it on caller's side. *val is set + * to a valid value whenever this function returns 0 but smatch + * can't figure that out currently. + */ + *val = 0; + + if (err && *err) + return *err; + + little_endian = reg & CCI_REG_LE; + len = CCI_REG_WIDTH_BYTES(reg); + reg = CCI_REG_ADDR(reg); + + ret = regmap_bulk_read(map, reg, buf, len); + if (ret) { + dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n", + reg, ret); + goto out; + } + + switch (len) { + case 1: + *val = buf[0]; + break; + case 2: + if (little_endian) + *val = get_unaligned_le16(buf); + else + *val = get_unaligned_be16(buf); + break; + case 3: + if (little_endian) + *val = get_unaligned_le24(buf); + else + *val = get_unaligned_be24(buf); + break; + case 4: + if (little_endian) + *val = get_unaligned_le32(buf); + else + *val = get_unaligned_be32(buf); + break; + case 8: + if (little_endian) + *val = get_unaligned_le64(buf); + else + *val = get_unaligned_be64(buf); + break; + default: + dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n", + len, reg); + ret = -EINVAL; + break; + } + +out: + if (ret && err) + *err = ret; + + return ret; +} +EXPORT_SYMBOL_GPL(cci_read); + +int cci_write(struct regmap *map, u32 reg, u64 val, int *err) +{ + bool little_endian; + unsigned int len; + u8 buf[8]; + int ret; + + if (err && *err) + return *err; + + little_endian = reg & CCI_REG_LE; + len = CCI_REG_WIDTH_BYTES(reg); + reg = CCI_REG_ADDR(reg); + + switch (len) { + case 1: + buf[0] = val; + break; + case 2: + if (little_endian) + put_unaligned_le16(val, 
buf); + else + put_unaligned_be16(val, buf); + break; + case 3: + if (little_endian) + put_unaligned_le24(val, buf); + else + put_unaligned_be24(val, buf); + break; + case 4: + if (little_endian) + put_unaligned_le32(val, buf); + else + put_unaligned_be32(val, buf); + break; + case 8: + if (little_endian) + put_unaligned_le64(val, buf); + else + put_unaligned_be64(val, buf); + break; + default: + dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n", + len, reg); + ret = -EINVAL; + goto out; + } + + ret = regmap_bulk_write(map, reg, buf, len); + if (ret) + dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n", + reg, ret); + +out: + if (ret && err) + *err = ret; + + return ret; +} +EXPORT_SYMBOL_GPL(cci_write); + +int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err) +{ + u64 readval; + int ret; + + ret = cci_read(map, reg, &readval, err); + if (ret) + return ret; + + val = (readval & ~mask) | (val & mask); + + return cci_write(map, reg, val, err); +} +EXPORT_SYMBOL_GPL(cci_update_bits); + +int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs, + unsigned int num_regs, int *err) +{ + unsigned int i; + int ret; + + for (i = 0; i < num_regs; i++) { + ret = cci_write(map, regs[i].reg, regs[i].val, err); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cci_multi_reg_write); + +#if IS_ENABLED(CONFIG_V4L2_CCI_I2C) +struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client, + int reg_addr_bits) +{ + struct regmap_config config = { + .reg_bits = reg_addr_bits, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .disable_locking = true, + }; + + return devm_regmap_init_i2c(client, &config); +} +EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c); +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support"); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-common.c 
b/6.17.0/drivers/media/v4l2-core/v4l2-common.c new file mode 100644 index 0000000..6e585bc --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-common.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video for Linux Two + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * This file replaces the videodev.c file that comes with the + * regular kernel distribution. + * + * Author: Bill Dirks + * based on code by Alan Cox, + */ + +/* + * Video capture interface for Linux + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * Author: Alan Cox, + * + * Fixes: + */ + +/* + * Video4linux 1/2 integration by Justin Schoeman + * + * 2.4 PROCFS support ported from 2.4 kernels by + * Iñaki García Etxebarria + * Makefile fix by "W. Michael Petullo" + * 2.4 devfs support ported from 2.4 kernels by + * Dan Merillat + * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + */ + +/* + * Video Standard Operations (contributed by Michael Schimek) + */ + +/* Helper functions for control handling */ + +/* Fill in a struct v4l2_queryctrl */ +int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def) +{ + const char *name; + s64 min = _min; + s64 max = _max; + u64 step = _step; + s64 def = _def; + + v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type, + &min, &max, &step, &def, &qctrl->flags); + + if (name == NULL) + return -EINVAL; + + qctrl->minimum = min; + qctrl->maximum = max; + qctrl->step = step; + qctrl->default_value = def; + qctrl->reserved[0] = qctrl->reserved[1] = 0; + strscpy(qctrl->name, name, sizeof(qctrl->name)); + return 0; +} 
+EXPORT_SYMBOL(v4l2_ctrl_query_fill); + +/* Clamp x to be between min and max, aligned to a multiple of 2^align. min + * and max don't have to be aligned, but there must be at least one valid + * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples + * of 16 between 17 and 31. */ +static unsigned int clamp_align(unsigned int x, unsigned int min, + unsigned int max, unsigned int align) +{ + /* Bits that must be zero to be aligned */ + unsigned int mask = ~((1 << align) - 1); + + /* Clamp to aligned min and max */ + x = clamp(x, (min + ~mask) & mask, max & mask); + + /* Round to nearest aligned value */ + if (align) + x = (x + (1 << (align - 1))) & mask; + + return x; +} + +static unsigned int clamp_roundup(unsigned int x, unsigned int min, + unsigned int max, unsigned int alignment) +{ + x = clamp(x, min, max); + if (alignment) + x = round_up(x, alignment); + + return x; +} + +void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, + unsigned int walign, + u32 *h, unsigned int hmin, unsigned int hmax, + unsigned int halign, unsigned int salign) +{ + *w = clamp_align(*w, wmin, wmax, walign); + *h = clamp_align(*h, hmin, hmax, halign); + + /* Usually we don't need to align the size and are done now. */ + if (!salign) + return; + + /* How much alignment do we have? */ + walign = __ffs(*w); + halign = __ffs(*h); + /* Enough to satisfy the image alignment? 
*/ + if (walign + halign < salign) { + /* Max walign where there is still a valid width */ + unsigned int wmaxa = __fls(wmax ^ (wmin - 1)); + /* Max halign where there is still a valid height */ + unsigned int hmaxa = __fls(hmax ^ (hmin - 1)); + + /* up the smaller alignment until we have enough */ + do { + if (halign >= hmaxa || + (walign <= halign && walign < wmaxa)) { + *w = clamp_align(*w, wmin, wmax, walign + 1); + walign = __ffs(*w); + } else { + *h = clamp_align(*h, hmin, hmax, halign + 1); + halign = __ffs(*h); + } + } while (halign + walign < salign); + } +} +EXPORT_SYMBOL_GPL(v4l_bound_align_image); + +const void * +__v4l2_find_nearest_size_conditional(const void *array, size_t array_size, + size_t entry_size, size_t width_offset, + size_t height_offset, s32 width, + s32 height, + bool (*func)(const void *array, + size_t index, + const void *context), + const void *context) +{ + u32 error, min_error = U32_MAX; + const void *best = NULL; + size_t i; + + if (!array) + return NULL; + + for (i = 0; i < array_size; i++, array += entry_size) { + const u32 *entry_width = array + width_offset; + const u32 *entry_height = array + height_offset; + + if (func && !func(array, i, context)) + continue; + + error = abs(*entry_width - width) + abs(*entry_height - height); + if (error > min_error) + continue; + + min_error = error; + best = array; + if (!error) + break; + } + + return best; +} +EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size_conditional); + +int v4l2_g_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a) +{ + struct v4l2_subdev_frame_interval ival = { 0 }; + int ret; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + + if (vdev->device_caps & V4L2_CAP_READWRITE) + a->parm.capture.readbuffers = 2; + if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + ret = v4l2_subdev_call_state_active(sd, pad, 
get_frame_interval, &ival); + if (!ret) + a->parm.capture.timeperframe = ival.interval; + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_g_parm_cap); + +int v4l2_s_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a) +{ + struct v4l2_subdev_frame_interval ival = { + .interval = a->parm.capture.timeperframe + }; + int ret; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + + memset(&a->parm, 0, sizeof(a->parm)); + if (vdev->device_caps & V4L2_CAP_READWRITE) + a->parm.capture.readbuffers = 2; + else + a->parm.capture.readbuffers = 0; + + if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival); + if (!ret) + a->parm.capture.timeperframe = ival.interval; + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_s_parm_cap); + +const struct v4l2_format_info *v4l2_format_info(u32 format) +{ + static const struct v4l2_format_info formats[] = { + /* RGB formats */ + { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc 
= V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB565X, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 
1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* YUV packed formats */ + { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 
2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + + /* YUV planar formats */ + { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV15, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = 
V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV20, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, + { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, + { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, 
.mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* Tiled YUV formats */ + { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }}, + { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + /* YUV planar formats, non contiguous variant */ + { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 
}, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + /* Tiled YUV formats, non contiguous variant */ + { .format = V4L2_PIX_FMT_NV12MT, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 64, 32, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + { .format = V4L2_PIX_FMT_NV12MT_16X16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 16, 8, 0, 0 }}, + + /* Bayer RGB formats */ + { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 
}, + { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { 
.format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, 
.vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, 
+ { .format = V4L2_PIX_FMT_SRGGB14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* Renesas Camera Data Receiver Unit formats, bayer order agnostic */ + { .format = V4L2_PIX_FMT_RAW_CRU10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 6, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RAW_CRU12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 5, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RAW_CRU14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RAW_CRU20, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 3, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(formats); ++i) + if (formats[i].format == format) + return &formats[i]; + return NULL; +} +EXPORT_SYMBOL(v4l2_format_info); + +static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane) +{ + if 
(!info->block_w[plane]) + return 1; + return info->block_w[plane]; +} + +static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane) +{ + if (!info->block_h[plane]) + return 1; + return info->block_h[plane]; +} + +static inline unsigned int v4l2_format_plane_stride(const struct v4l2_format_info *info, int plane, + unsigned int width) +{ + unsigned int hdiv = plane ? info->hdiv : 1; + unsigned int aligned_width = + ALIGN(width, v4l2_format_block_width(info, plane)); + + return DIV_ROUND_UP(aligned_width, hdiv) * + info->bpp[plane] / info->bpp_div[plane]; +} + +static inline unsigned int v4l2_format_plane_height(const struct v4l2_format_info *info, int plane, + unsigned int height) +{ + unsigned int vdiv = plane ? info->vdiv : 1; + unsigned int aligned_height = + ALIGN(height, v4l2_format_block_height(info, plane)); + + return DIV_ROUND_UP(aligned_height, vdiv); +} + +static inline unsigned int v4l2_format_plane_size(const struct v4l2_format_info *info, int plane, + unsigned int width, unsigned int height) +{ + return v4l2_format_plane_stride(info, plane, width) * + v4l2_format_plane_height(info, plane, height); +} + +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize) +{ + if (!frmsize) + return; + + /* + * Clamp width/height to meet min/max constraints and round it up to + * macroblock alignment. 
+ */ + *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width, + frmsize->step_width); + *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height, + frmsize->step_height); +} +EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints); + +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, + u32 pixelformat, u32 width, u32 height) +{ + const struct v4l2_format_info *info; + struct v4l2_plane_pix_format *plane; + int i; + + info = v4l2_format_info(pixelformat); + if (!info) + return -EINVAL; + + pixfmt->width = width; + pixfmt->height = height; + pixfmt->pixelformat = pixelformat; + pixfmt->num_planes = info->mem_planes; + + if (info->mem_planes == 1) { + plane = &pixfmt->plane_fmt[0]; + plane->bytesperline = v4l2_format_plane_stride(info, 0, width); + plane->sizeimage = 0; + + for (i = 0; i < info->comp_planes; i++) + plane->sizeimage += + v4l2_format_plane_size(info, i, width, height); + } else { + for (i = 0; i < info->comp_planes; i++) { + plane = &pixfmt->plane_fmt[i]; + plane->bytesperline = + v4l2_format_plane_stride(info, i, width); + plane->sizeimage = plane->bytesperline * + v4l2_format_plane_height(info, i, height); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp); + +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height) +{ + const struct v4l2_format_info *info; + int i; + + info = v4l2_format_info(pixelformat); + if (!info) + return -EINVAL; + + /* Single planar API cannot be used for multi plane formats. 
*/ + if (info->mem_planes > 1) + return -EINVAL; + + pixfmt->width = width; + pixfmt->height = height; + pixfmt->pixelformat = pixelformat; + pixfmt->bytesperline = v4l2_format_plane_stride(info, 0, width); + pixfmt->sizeimage = 0; + + for (i = 0; i < info->comp_planes; i++) + pixfmt->sizeimage += + v4l2_format_plane_size(info, i, width, height); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt); + +s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler, + unsigned int mul, unsigned int div) +{ + struct v4l2_ctrl *ctrl; + s64 freq; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ); + if (ctrl) { + struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ }; + int ret; + + qm.index = v4l2_ctrl_g_ctrl(ctrl); + + ret = v4l2_querymenu(handler, &qm); + if (ret) + return -ENOENT; + + freq = qm.value; + } else { + if (!mul || !div) + return -ENOENT; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE); + if (!ctrl) + return -ENOENT; + + freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div); + + pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", + __func__); + pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", + __func__); + } + + return freq > 0 ? freq : -EINVAL; +} +EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_ctrl); + +#ifdef CONFIG_MEDIA_CONTROLLER +s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul, + unsigned int div) +{ + struct v4l2_mbus_config mbus_config = {}; + struct v4l2_subdev *sd; + int ret; + + sd = media_entity_to_v4l2_subdev(pad->entity); + ret = v4l2_subdev_call(sd, pad, get_mbus_config, pad->index, + &mbus_config); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + + if (mbus_config.link_freq) + return mbus_config.link_freq; + + /* + * Fall back to using the link frequency control if the media bus config + * doesn't provide a link frequency. 
+ */ + return __v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div); +} +EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_pad); +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/* + * Simplify a fraction using a simple continued fraction decomposition. The + * idea here is to convert fractions such as 333333/10000000 to 1/30 using + * 32 bit arithmetic only. The algorithm is not perfect and relies upon two + * arbitrary parameters to remove non-significative terms from the simple + * continued fraction decomposition. Using 8 and 333 for n_terms and threshold + * respectively seems to give nice results. + */ +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold) +{ + u32 *an; + u32 x, y, r; + unsigned int i, n; + + an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL); + if (an == NULL) + return; + + /* + * Convert the fraction to a simple continued fraction. See + * https://en.wikipedia.org/wiki/Continued_fraction + * Stop if the current term is bigger than or equal to the given + * threshold. + */ + x = *numerator; + y = *denominator; + + for (n = 0; n < n_terms && y != 0; ++n) { + an[n] = x / y; + if (an[n] >= threshold) { + if (n < 2) + n++; + break; + } + + r = x - an[n] * y; + x = y; + y = r; + } + + /* Expand the simple continued fraction back to an integer fraction. */ + x = 0; + y = 1; + + for (i = n; i > 0; --i) { + r = y; + y = an[i-1] * y + x; + x = r; + } + + *numerator = y; + *denominator = x; + kfree(an); +} +EXPORT_SYMBOL_GPL(v4l2_simplify_fraction); + +/* + * Convert a fraction to a frame interval in 100ns multiples. The idea here is + * to compute numerator / denominator * 10000000 using 32 bit fixed point + * arithmetic only. + */ +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator) +{ + u32 multiplier; + + /* Saturate the result if the operation would overflow. 
*/ + if (denominator == 0 || + numerator/denominator >= ((u32)-1)/10000000) + return (u32)-1; + + /* + * Divide both the denominator and the multiplier by two until + * numerator * multiplier doesn't overflow. If anyone knows a better + * algorithm please let me know. + */ + multiplier = 10000000; + while (numerator > ((u32)-1)/multiplier) { + multiplier /= 2; + denominator /= 2; + } + + return denominator ? numerator * multiplier / denominator : 0; +} +EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval); + +int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, + unsigned int num_of_fw_link_freqs, + const s64 *driver_link_freqs, + unsigned int num_of_driver_link_freqs, + unsigned long *bitmap) +{ + unsigned int i; + + *bitmap = 0; + + if (!num_of_fw_link_freqs) { + dev_err(dev, "no link frequencies in firmware\n"); + return -ENODATA; + } + + for (i = 0; i < num_of_fw_link_freqs; i++) { + unsigned int j; + + for (j = 0; j < num_of_driver_link_freqs; j++) { + if (fw_link_freqs[i] != driver_link_freqs[j]) + continue; + + dev_dbg(dev, "enabling link frequency %lld Hz\n", + driver_link_freqs[j]); + *bitmap |= BIT(j); + break; + } + } + + if (!*bitmap) { + dev_err(dev, "no matching link frequencies found\n"); + + dev_dbg(dev, "specified in firmware:\n"); + for (i = 0; i < num_of_fw_link_freqs; i++) + dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]); + + dev_dbg(dev, "driver supported:\n"); + for (i = 0; i < num_of_driver_link_freqs; i++) + dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]); + + return -ENOENT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/6.17.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c new file mode 100644 index 0000000..8c07400 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -0,0 +1,1210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ioctl32.c: Conversion between 32bit and 64bit native ioctls. 
+ * Separated from fs stuff by Arnd Bergmann + * + * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com) + * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs + * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz) + * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be) + * Copyright (C) 2008 Hans Verkuil + * + * These routines maintain argument size conversion between 32bit and 64bit + * ioctls. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Per-ioctl data copy handlers. + * + * Those come in pairs, with a get_v4l2_foo() and a put_v4l2_foo() routine, + * where "v4l2_foo" is the name of the V4L2 struct. + * + * They basically get two __user pointers, one with a 32-bits struct that + * came from the userspace call and a 64-bits struct, also allocated as + * userspace, but filled internally by do_video_ioctl(). + * + * For ioctls that have pointers inside it, the functions will also + * receive an ancillary buffer with extra space, used to pass extra + * data to the routine. 
+ */ + +struct v4l2_window32 { + struct v4l2_rect w; + __u32 field; /* enum v4l2_field */ + __u32 chromakey; + compat_caddr_t clips; /* always NULL */ + __u32 clipcount; /* always 0 */ + compat_caddr_t bitmap; /* always NULL */ + __u8 global_alpha; +}; + +static int get_v4l2_window32(struct v4l2_window *p64, + struct v4l2_window32 __user *p32) +{ + struct v4l2_window32 w32; + + if (copy_from_user(&w32, p32, sizeof(w32))) + return -EFAULT; + + *p64 = (struct v4l2_window) { + .w = w32.w, + .field = w32.field, + .chromakey = w32.chromakey, + .clips = NULL, + .clipcount = 0, + .bitmap = NULL, + .global_alpha = w32.global_alpha, + }; + + return 0; +} + +static int put_v4l2_window32(struct v4l2_window *p64, + struct v4l2_window32 __user *p32) +{ + struct v4l2_window32 w32; + + memset(&w32, 0, sizeof(w32)); + w32 = (struct v4l2_window32) { + .w = p64->w, + .field = p64->field, + .chromakey = p64->chromakey, + .clips = 0, + .clipcount = 0, + .bitmap = 0, + .global_alpha = p64->global_alpha, + }; + + if (copy_to_user(p32, &w32, sizeof(w32))) + return -EFAULT; + + return 0; +} + +struct v4l2_format32 { + __u32 type; /* enum v4l2_buf_type */ + union { + struct v4l2_pix_format pix; + struct v4l2_pix_format_mplane pix_mp; + struct v4l2_window32 win; + struct v4l2_vbi_format vbi; + struct v4l2_sliced_vbi_format sliced; + struct v4l2_sdr_format sdr; + struct v4l2_meta_format meta; + __u8 raw_data[200]; /* user-defined */ + } fmt; +}; + +/** + * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument + * @index: on return, index of the first created buffer + * @count: entry: number of requested buffers, + * return: number of created buffers + * @memory: buffer memory type + * @format: frame format, for which buffers are requested + * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and + * configured for MMAP streaming I/O). 
+ * @max_num_buffers: if V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set + * this field indicate the maximum possible number of buffers + * for this queue. + * @reserved: future extensions + */ +struct v4l2_create_buffers32 { + __u32 index; + __u32 count; + __u32 memory; /* enum v4l2_memory */ + struct v4l2_format32 format; + __u32 capabilities; + __u32 flags; + __u32 max_num_buffers; + __u32 reserved[5]; +}; + +static int get_v4l2_format32(struct v4l2_format *p64, + struct v4l2_format32 __user *p32) +{ + if (get_user(p64->type, &p32->type)) + return -EFAULT; + + switch (p64->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return copy_from_user(&p64->fmt.pix, &p32->fmt.pix, + sizeof(p64->fmt.pix)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return copy_from_user(&p64->fmt.pix_mp, &p32->fmt.pix_mp, + sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return get_v4l2_window32(&p64->fmt.win, &p32->fmt.win); + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + return copy_from_user(&p64->fmt.vbi, &p32->fmt.vbi, + sizeof(p64->fmt.vbi)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return copy_from_user(&p64->fmt.sliced, &p32->fmt.sliced, + sizeof(p64->fmt.sliced)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + return copy_from_user(&p64->fmt.sdr, &p32->fmt.sdr, + sizeof(p64->fmt.sdr)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return copy_from_user(&p64->fmt.meta, &p32->fmt.meta, + sizeof(p64->fmt.meta)) ? 
-EFAULT : 0; + default: + return -EINVAL; + } +} + +static int get_v4l2_create32(struct v4l2_create_buffers *p64, + struct v4l2_create_buffers32 __user *p32) +{ + if (copy_from_user(p64, p32, + offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; + if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags))) + return -EFAULT; + if (copy_from_user(&p64->max_num_buffers, &p32->max_num_buffers, + sizeof(p32->max_num_buffers))) + return -EFAULT; + return get_v4l2_format32(&p64->format, &p32->format); +} + +static int put_v4l2_format32(struct v4l2_format *p64, + struct v4l2_format32 __user *p32) +{ + switch (p64->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return copy_to_user(&p32->fmt.pix, &p64->fmt.pix, + sizeof(p64->fmt.pix)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return copy_to_user(&p32->fmt.pix_mp, &p64->fmt.pix_mp, + sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return put_v4l2_window32(&p64->fmt.win, &p32->fmt.win); + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + return copy_to_user(&p32->fmt.vbi, &p64->fmt.vbi, + sizeof(p64->fmt.vbi)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return copy_to_user(&p32->fmt.sliced, &p64->fmt.sliced, + sizeof(p64->fmt.sliced)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + return copy_to_user(&p32->fmt.sdr, &p64->fmt.sdr, + sizeof(p64->fmt.sdr)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return copy_to_user(&p32->fmt.meta, &p64->fmt.meta, + sizeof(p64->fmt.meta)) ? 
-EFAULT : 0; + default: + return -EINVAL; + } +} + +static int put_v4l2_create32(struct v4l2_create_buffers *p64, + struct v4l2_create_buffers32 __user *p32) +{ + if (copy_to_user(p32, p64, + offsetof(struct v4l2_create_buffers32, format)) || + put_user(p64->capabilities, &p32->capabilities) || + put_user(p64->flags, &p32->flags) || + put_user(p64->max_num_buffers, &p32->max_num_buffers) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) + return -EFAULT; + return put_v4l2_format32(&p64->format, &p32->format); +} + +struct v4l2_standard32 { + __u32 index; + compat_u64 id; + __u8 name[24]; + struct v4l2_fract frameperiod; /* Frames, not fields */ + __u32 framelines; + __u32 reserved[4]; +}; + +static int get_v4l2_standard32(struct v4l2_standard *p64, + struct v4l2_standard32 __user *p32) +{ + /* other fields are not set by the user, nor used by the driver */ + return get_user(p64->index, &p32->index); +} + +static int put_v4l2_standard32(struct v4l2_standard *p64, + struct v4l2_standard32 __user *p32) +{ + if (put_user(p64->index, &p32->index) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->name, p64->name, sizeof(p32->name)) || + copy_to_user(&p32->frameperiod, &p64->frameperiod, + sizeof(p32->frameperiod)) || + put_user(p64->framelines, &p32->framelines) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} + +struct v4l2_plane32 { + __u32 bytesused; + __u32 length; + union { + __u32 mem_offset; + compat_long_t userptr; + __s32 fd; + } m; + __u32 data_offset; + __u32 reserved[11]; +}; + +/* + * This is correct for all architectures including i386, but not x32, + * which has different alignment requirements for timestamp + */ +struct v4l2_buffer32 { + __u32 index; + __u32 type; /* enum v4l2_buf_type */ + __u32 bytesused; + __u32 flags; + __u32 field; /* enum v4l2_field */ + struct { + compat_s64 tv_sec; + compat_s64 tv_usec; + } timestamp; + struct v4l2_timecode timecode; + __u32 
sequence; + + /* memory location */ + __u32 memory; /* enum v4l2_memory */ + union { + __u32 offset; + compat_long_t userptr; + compat_caddr_t planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + __s32 request_fd; +}; + +#ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_buffer32_time32 { + __u32 index; + __u32 type; /* enum v4l2_buf_type */ + __u32 bytesused; + __u32 flags; + __u32 field; /* enum v4l2_field */ + struct old_timeval32 timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; /* enum v4l2_memory */ + union { + __u32 offset; + compat_long_t userptr; + compat_caddr_t planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + __s32 request_fd; +}; +#endif + +static int get_v4l2_plane32(struct v4l2_plane *p64, + struct v4l2_plane32 __user *p32, + enum v4l2_memory memory) +{ + struct v4l2_plane32 plane32; + typeof(p64->m) m = {}; + + if (copy_from_user(&plane32, p32, sizeof(plane32))) + return -EFAULT; + + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + m.mem_offset = plane32.m.mem_offset; + break; + case V4L2_MEMORY_USERPTR: + m.userptr = (unsigned long)compat_ptr(plane32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + m.fd = plane32.m.fd; + break; + } + + memset(p64, 0, sizeof(*p64)); + *p64 = (struct v4l2_plane) { + .bytesused = plane32.bytesused, + .length = plane32.length, + .m = m, + .data_offset = plane32.data_offset, + }; + + return 0; +} + +static int put_v4l2_plane32(struct v4l2_plane *p64, + struct v4l2_plane32 __user *p32, + enum v4l2_memory memory) +{ + struct v4l2_plane32 plane32; + + memset(&plane32, 0, sizeof(plane32)); + plane32 = (struct v4l2_plane32) { + .bytesused = p64->bytesused, + .length = p64->length, + .data_offset = p64->data_offset, + }; + + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + plane32.m.mem_offset = p64->m.mem_offset; + break; + case V4L2_MEMORY_USERPTR: + plane32.m.userptr = (uintptr_t)(p64->m.userptr); + 
break; + case V4L2_MEMORY_DMABUF: + plane32.m.fd = p64->m.fd; + break; + } + + if (copy_to_user(p32, &plane32, sizeof(plane32))) + return -EFAULT; + + return 0; +} + +static int get_v4l2_buffer32(struct v4l2_buffer *vb, + struct v4l2_buffer32 __user *arg) +{ + struct v4l2_buffer32 vb32; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + memset(vb, 0, sizeof(*vb)); + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.offset = vb32.m.offset, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb->m.offset = vb32.m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb->m.fd = vb32.m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb->m.planes = (void __force *) + compat_ptr(vb32.m.planes); + + return 0; +} + +#ifdef CONFIG_COMPAT_32BIT_TIME +static int get_v4l2_buffer32_time32(struct v4l2_buffer *vb, + struct v4l2_buffer32_time32 __user *arg) +{ + struct v4l2_buffer32_time32 vb32; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.offset = vb32.m.offset, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb->m.offset = vb32.m.offset; + break; + 
case V4L2_MEMORY_USERPTR: + vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb->m.fd = vb32.m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb->m.planes = (void __force *) + compat_ptr(vb32.m.planes); + + return 0; +} +#endif + +static int put_v4l2_buffer32(struct v4l2_buffer *vb, + struct v4l2_buffer32 __user *arg) +{ + struct v4l2_buffer32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + vb32 = (struct v4l2_buffer32) { + .index = vb->index, + .type = vb->type, + .bytesused = vb->bytesused, + .flags = vb->flags, + .field = vb->field, + .timestamp.tv_sec = vb->timestamp.tv_sec, + .timestamp.tv_usec = vb->timestamp.tv_usec, + .timecode = vb->timecode, + .sequence = vb->sequence, + .memory = vb->memory, + .m.offset = vb->m.offset, + .length = vb->length, + .request_fd = vb->request_fd, + }; + + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb32.m.offset = vb->m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb32.m.userptr = (uintptr_t)(vb->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb32.m.fd = vb->m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb32.m.planes = (uintptr_t)vb->m.planes; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_COMPAT_32BIT_TIME +static int put_v4l2_buffer32_time32(struct v4l2_buffer *vb, + struct v4l2_buffer32_time32 __user *arg) +{ + struct v4l2_buffer32_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + vb32 = (struct v4l2_buffer32_time32) { + .index = vb->index, + .type = vb->type, + .bytesused = vb->bytesused, + .flags = vb->flags, + .field = vb->field, + .timestamp.tv_sec = vb->timestamp.tv_sec, + .timestamp.tv_usec = vb->timestamp.tv_usec, + .timecode = vb->timecode, + .sequence = vb->sequence, + .memory = vb->memory, + .m.offset = vb->m.offset, + .length = vb->length, + .request_fd = vb->request_fd, + }; + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case 
V4L2_MEMORY_OVERLAY: + vb32.m.offset = vb->m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb32.m.userptr = (uintptr_t)(vb->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb32.m.fd = vb->m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb32.m.planes = (uintptr_t)vb->m.planes; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + + return 0; +} +#endif + +struct v4l2_framebuffer32 { + __u32 capability; + __u32 flags; + compat_caddr_t base; + struct { + __u32 width; + __u32 height; + __u32 pixelformat; + __u32 field; + __u32 bytesperline; + __u32 sizeimage; + __u32 colorspace; + __u32 priv; + } fmt; +}; + +static int get_v4l2_framebuffer32(struct v4l2_framebuffer *p64, + struct v4l2_framebuffer32 __user *p32) +{ + if (get_user(p64->capability, &p32->capability) || + get_user(p64->flags, &p32->flags) || + copy_from_user(&p64->fmt, &p32->fmt, sizeof(p64->fmt))) + return -EFAULT; + p64->base = NULL; + + return 0; +} + +static int put_v4l2_framebuffer32(struct v4l2_framebuffer *p64, + struct v4l2_framebuffer32 __user *p32) +{ + if (put_user((uintptr_t)p64->base, &p32->base) || + put_user(p64->capability, &p32->capability) || + put_user(p64->flags, &p32->flags) || + copy_to_user(&p32->fmt, &p64->fmt, sizeof(p64->fmt))) + return -EFAULT; + + return 0; +} + +struct v4l2_input32 { + __u32 index; /* Which input */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of input */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 tuner; /* Associated tuner */ + compat_u64 std; + __u32 status; + __u32 capabilities; + __u32 reserved[3]; +}; + +/* + * The 64-bit v4l2_input struct has extra padding at the end of the struct. + * Otherwise it is identical to the 32-bit version. 
+ */ +static inline int get_v4l2_input32(struct v4l2_input *p64, + struct v4l2_input32 __user *p32) +{ + if (copy_from_user(p64, p32, sizeof(*p32))) + return -EFAULT; + return 0; +} + +static inline int put_v4l2_input32(struct v4l2_input *p64, + struct v4l2_input32 __user *p32) +{ + if (copy_to_user(p32, p64, sizeof(*p32))) + return -EFAULT; + return 0; +} + +struct v4l2_ext_controls32 { + __u32 which; + __u32 count; + __u32 error_idx; + __s32 request_fd; + __u32 reserved[1]; + compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */ +}; + +struct v4l2_ext_control32 { + __u32 id; + __u32 size; + __u32 reserved2[1]; + union { + __s32 value; + __s64 value64; + compat_caddr_t string; /* actually char * */ + }; +} __attribute__ ((packed)); + +/* Return true if this control is a pointer type. */ +static inline bool ctrl_is_pointer(struct file *file, u32 id) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_fh *fh = NULL; + struct v4l2_ctrl_handler *hdl = NULL; + struct v4l2_query_ext_ctrl qec = { id }; + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + fh = file->private_data; + + if (fh && fh->ctrl_handler) + hdl = fh->ctrl_handler; + else if (vdev->ctrl_handler) + hdl = vdev->ctrl_handler; + + if (hdl) { + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); + + return ctrl && ctrl->is_ptr; + } + + if (!ops || !ops->vidioc_query_ext_ctrl) + return false; + + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); +} + +static int get_v4l2_ext_controls32(struct v4l2_ext_controls *p64, + struct v4l2_ext_controls32 __user *p32) +{ + struct v4l2_ext_controls32 ec32; + + if (copy_from_user(&ec32, p32, sizeof(ec32))) + return -EFAULT; + + *p64 = (struct v4l2_ext_controls) { + .which = ec32.which, + .count = ec32.count, + .error_idx = ec32.error_idx, + .request_fd = ec32.request_fd, + .reserved[0] = ec32.reserved[0], + .controls = (void __force 
*)compat_ptr(ec32.controls), + }; + + return 0; +} + +static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64, + struct v4l2_ext_controls32 __user *p32) +{ + struct v4l2_ext_controls32 ec32; + + memset(&ec32, 0, sizeof(ec32)); + ec32 = (struct v4l2_ext_controls32) { + .which = p64->which, + .count = p64->count, + .error_idx = p64->error_idx, + .request_fd = p64->request_fd, + .reserved[0] = p64->reserved[0], + .controls = (uintptr_t)p64->controls, + }; + + if (copy_to_user(p32, &ec32, sizeof(ec32))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_X86_64 +/* + * x86 is the only compat architecture with different struct alignment + * between 32-bit and 64-bit tasks. + */ +struct v4l2_event32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct { + compat_s64 tv_sec; + compat_s64 tv_nsec; + } timestamp; + __u32 id; + __u32 reserved[8]; +}; + +static int put_v4l2_event32(struct v4l2_event *p64, + struct v4l2_event32 __user *p32) +{ + if (put_user(p64->type, &p32->type) || + copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) || + put_user(p64->pending, &p32->pending) || + put_user(p64->sequence, &p32->sequence) || + put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) || + put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} + +#endif + +#ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_event32_time32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + +static int put_v4l2_event32_time32(struct v4l2_event *p64, + struct v4l2_event32_time32 __user *p32) +{ + if (put_user(p64->type, &p32->type) || + copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) || + put_user(p64->pending, &p32->pending) || + put_user(p64->sequence, &p32->sequence) || + 
put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) || + put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} +#endif + +struct v4l2_edid32 { + __u32 pad; + __u32 start_block; + __u32 blocks; + __u32 reserved[5]; + compat_caddr_t edid; +}; + +static int get_v4l2_edid32(struct v4l2_edid *p64, + struct v4l2_edid32 __user *p32) +{ + compat_uptr_t edid; + + if (copy_from_user(p64, p32, offsetof(struct v4l2_edid32, edid)) || + get_user(edid, &p32->edid)) + return -EFAULT; + + p64->edid = (void __force *)compat_ptr(edid); + return 0; +} + +static int put_v4l2_edid32(struct v4l2_edid *p64, + struct v4l2_edid32 __user *p32) +{ + if (copy_to_user(p32, p64, offsetof(struct v4l2_edid32, edid))) + return -EFAULT; + return 0; +} + +/* + * List of ioctls that require 32-bits/64-bits conversion + * + * The V4L2 ioctls that aren't listed there don't have pointer arguments + * and the struct size is identical for both 32 and 64 bits versions, so + * they don't need translations. 
+ */ + +#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32) +#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32) +#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32) +#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32) +#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32) +#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32) +#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32) +#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32) +#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32) +#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32) +#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32) +#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32) +#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) +#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) +#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) +#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32) +#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32) +#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32) + +#ifdef CONFIG_COMPAT_32BIT_TIME +#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32) +#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32) +#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32) +#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32) +#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32) +#endif + +unsigned int v4l2_compat_translate_cmd(unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + return VIDIOC_G_FMT; + case VIDIOC_S_FMT32: + return VIDIOC_S_FMT; + case VIDIOC_TRY_FMT32: + return VIDIOC_TRY_FMT; + case VIDIOC_G_FBUF32: + return VIDIOC_G_FBUF; + case VIDIOC_S_FBUF32: + return VIDIOC_S_FBUF; +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + 
return VIDIOC_QUERYBUF; + case VIDIOC_QBUF32_TIME32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF32_TIME32: + return VIDIOC_DQBUF; + case VIDIOC_PREPARE_BUF32_TIME32: + return VIDIOC_PREPARE_BUF; +#endif + case VIDIOC_QUERYBUF32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF32: + return VIDIOC_DQBUF; + case VIDIOC_CREATE_BUFS32: + return VIDIOC_CREATE_BUFS; + case VIDIOC_G_EXT_CTRLS32: + return VIDIOC_G_EXT_CTRLS; + case VIDIOC_S_EXT_CTRLS32: + return VIDIOC_S_EXT_CTRLS; + case VIDIOC_TRY_EXT_CTRLS32: + return VIDIOC_TRY_EXT_CTRLS; + case VIDIOC_PREPARE_BUF32: + return VIDIOC_PREPARE_BUF; + case VIDIOC_ENUMSTD32: + return VIDIOC_ENUMSTD; + case VIDIOC_ENUMINPUT32: + return VIDIOC_ENUMINPUT; + case VIDIOC_G_EDID32: + return VIDIOC_G_EDID; + case VIDIOC_S_EDID32: + return VIDIOC_S_EDID; +#ifdef CONFIG_X86_64 + case VIDIOC_DQEVENT32: + return VIDIOC_DQEVENT; +#endif +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_DQEVENT32_TIME32: + return VIDIOC_DQEVENT; +#endif + } + return cmd; +} + +int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: + return get_v4l2_format32(parg, arg); + + case VIDIOC_S_FBUF32: + return get_v4l2_framebuffer32(parg, arg); +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: + return get_v4l2_buffer32_time32(parg, arg); +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: + return get_v4l2_buffer32(parg, arg); + + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: + return get_v4l2_ext_controls32(parg, arg); + + case VIDIOC_CREATE_BUFS32: + return get_v4l2_create32(parg, arg); + + case VIDIOC_ENUMSTD32: + return get_v4l2_standard32(parg, arg); + + case VIDIOC_ENUMINPUT32: + return 
get_v4l2_input32(parg, arg); + + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: + return get_v4l2_edid32(parg, arg); + } + return 0; +} + +int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: + return put_v4l2_format32(parg, arg); + + case VIDIOC_G_FBUF32: + return put_v4l2_framebuffer32(parg, arg); +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: + return put_v4l2_buffer32_time32(parg, arg); +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: + return put_v4l2_buffer32(parg, arg); + + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: + return put_v4l2_ext_controls32(parg, arg); + + case VIDIOC_CREATE_BUFS32: + return put_v4l2_create32(parg, arg); + + case VIDIOC_ENUMSTD32: + return put_v4l2_standard32(parg, arg); + + case VIDIOC_ENUMINPUT32: + return put_v4l2_input32(parg, arg); + + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: + return put_v4l2_edid32(parg, arg); +#ifdef CONFIG_X86_64 + case VIDIOC_DQEVENT32: + return put_v4l2_event32(parg, arg); +#endif +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_DQEVENT32_TIME32: + return put_v4l2_event32_time32(parg, arg); +#endif + } + return 0; +} + +int v4l2_compat_get_array_args(struct file *file, void *mbuf, + void __user *user_ptr, size_t array_size, + unsigned int cmd, void *arg) +{ + int err = 0; + + memset(mbuf, 0, array_size); + + switch (cmd) { +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: { + struct v4l2_buffer *b64 = arg; + struct v4l2_plane *p64 = mbuf; + struct v4l2_plane32 __user 
*p32 = user_ptr; + + if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) { + u32 num_planes = b64->length; + + if (num_planes == 0) + return 0; + + while (num_planes--) { + err = get_v4l2_plane32(p64, p32, b64->memory); + if (err) + return err; + ++p64; + ++p32; + } + } + break; + } + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: { + struct v4l2_ext_controls *ecs64 = arg; + struct v4l2_ext_control *ec64 = mbuf; + struct v4l2_ext_control32 __user *ec32 = user_ptr; + int n; + + for (n = 0; n < ecs64->count; n++) { + if (copy_from_user(ec64, ec32, sizeof(*ec32))) + return -EFAULT; + + if (ctrl_is_pointer(file, ec64->id)) { + compat_uptr_t p; + + if (get_user(p, &ec32->string)) + return -EFAULT; + ec64->string = compat_ptr(p); + } + ec32++; + ec64++; + } + break; + } + default: + if (copy_from_user(mbuf, user_ptr, array_size)) + err = -EFAULT; + break; + } + + return err; +} + +int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr, + void *mbuf, size_t array_size, + unsigned int cmd, void *arg) +{ + int err = 0; + + switch (cmd) { +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: { + struct v4l2_buffer *b64 = arg; + struct v4l2_plane *p64 = mbuf; + struct v4l2_plane32 __user *p32 = user_ptr; + + if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) { + u32 num_planes = b64->length; + + if (num_planes == 0) + return 0; + + while (num_planes--) { + err = put_v4l2_plane32(p64, p32, b64->memory); + if (err) + return err; + ++p64; + ++p32; + } + } + break; + } + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: { + struct v4l2_ext_controls *ecs64 = arg; + struct v4l2_ext_control *ec64 = mbuf; + struct v4l2_ext_control32 __user *ec32 = user_ptr; + int n; + + for (n = 0; n < ecs64->count; 
n++) { + unsigned int size = sizeof(*ec32); + /* + * Do not modify the pointer when copying a pointer + * control. The contents of the pointer was changed, + * not the pointer itself. + * The structures are otherwise compatible. + */ + if (ctrl_is_pointer(file, ec64->id)) + size -= sizeof(ec32->value64); + + if (copy_to_user(ec32, ec64, size)) + return -EFAULT; + + ec32++; + ec64++; + } + break; + } + default: + if (copy_to_user(user_ptr, mbuf, array_size)) + err = -EFAULT; + break; + } + + return err; +} + +/** + * v4l2_compat_ioctl32() - Handles a compat32 ioctl call + * + * @file: pointer to &struct file with the file handler + * @cmd: ioctl to be called + * @arg: arguments passed from/to the ioctl handler + * + * This function is meant to be used as .compat_ioctl fops at v4l2-dev.c + * in order to deal with 32-bit calls on a 64-bits Kernel. + * + * This function calls do_video_ioctl() for non-private V4L2 ioctls. + * If the function is a private one it calls vdev->fops->compat_ioctl32 + * instead. 
+ */ +long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct video_device *vdev = video_devdata(file); + long ret = -ENOIOCTLCMD; + + if (!file->f_op->unlocked_ioctl) + return ret; + + if (!video_is_registered(vdev)) + return -ENODEV; + + if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + ret = file->f_op->unlocked_ioctl(file, cmd, + (unsigned long)compat_ptr(arg)); + else if (vdev->fops->compat_ioctl32) + ret = vdev->fops->compat_ioctl32(file, cmd, arg); + + if (ret == -ENOIOCTLCMD) + pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n", + _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd); + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-api.c b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-api.c new file mode 100644 index 0000000..d49a68b --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -0,0 +1,1358 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework uAPI implementation: + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#define pr_fmt(fmt) "v4l2-ctrls: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +/* Internal temporary helper struct, one for each v4l2_ext_control */ +struct v4l2_ctrl_helper { + /* Pointer to the control reference of the master control */ + struct v4l2_ctrl_ref *mref; + /* The control ref corresponding to the v4l2_ext_control ID field. */ + struct v4l2_ctrl_ref *ref; + /* + * v4l2_ext_control index of the next control belonging to the + * same cluster, or 0 if there isn't any. + */ + u32 next; +}; + +/* + * Helper functions to copy control payload data from kernel space to + * user space and vice versa. 
+ */ + +/* Helper function: copy the given control value back to the caller */ +static int ptr_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr) +{ + u32 len; + + if (ctrl->is_ptr && !ctrl->is_string) + return copy_to_user(c->ptr, ptr.p_const, c->size) ? + -EFAULT : 0; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_STRING: + len = strlen(ptr.p_char); + if (c->size < len + 1) { + c->size = ctrl->elem_size; + return -ENOSPC; + } + return copy_to_user(c->string, ptr.p_char, len + 1) ? + -EFAULT : 0; + case V4L2_CTRL_TYPE_INTEGER64: + c->value64 = *ptr.p_s64; + break; + default: + c->value = *ptr.p_s32; + break; + } + return 0; +} + +/* Helper function: copy the current control value back to the caller */ +static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + return ptr_to_user(c, ctrl, ctrl->p_cur); +} + +/* Helper function: copy the new control value back to the caller */ +static int new_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl *ctrl) +{ + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the request value back to the caller */ +static int req_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl_ref *ref) +{ + return ptr_to_user(c, ref->ctrl, ref->p_req); +} + +/* Helper function: copy the initial control value back to the caller */ +static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->init(ctrl, 0, ctrl->p_new); + + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the minimum control value back to the caller */ +static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new); + + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the maximum control value back to the caller */ +static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new); + + return 
ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the caller-provider value as the new control value */ +static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + int ret; + u32 size; + + ctrl->is_new = 0; + if (ctrl->is_dyn_array && + c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) { + void *old = ctrl->p_array; + void *tmp = kvzalloc(2 * c->size, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); + memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); + ctrl->p_new.p = tmp; + ctrl->p_cur.p = tmp + c->size; + ctrl->p_array = tmp; + ctrl->p_array_alloc_elems = c->size / ctrl->elem_size; + kvfree(old); + } + + if (ctrl->is_ptr && !ctrl->is_string) { + unsigned int elems = c->size / ctrl->elem_size; + + if (copy_from_user(ctrl->p_new.p, c->ptr, c->size)) + return -EFAULT; + ctrl->is_new = 1; + if (ctrl->is_dyn_array) + ctrl->new_elems = elems; + else if (ctrl->is_array) + ctrl->type_ops->init(ctrl, elems, ctrl->p_new); + return 0; + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER64: + *ctrl->p_new.p_s64 = c->value64; + break; + case V4L2_CTRL_TYPE_STRING: + size = c->size; + if (size == 0) + return -ERANGE; + if (size > ctrl->maximum + 1) + size = ctrl->maximum + 1; + ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0; + if (!ret) { + char last = ctrl->p_new.p_char[size - 1]; + + ctrl->p_new.p_char[size - 1] = 0; + /* + * If the string was longer than ctrl->maximum, + * then return an error. + */ + if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last) + return -ERANGE; + ctrl->is_new = 1; + } + return ret; + default: + *ctrl->p_new.p_s32 = c->value; + break; + } + ctrl->is_new = 1; + return 0; +} + +/* + * VIDIOC_G/TRY/S_EXT_CTRLS implementation + */ + +/* + * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: + * + * It is not a fully atomic operation, just best-effort only. 
After all, if + * multiple controls have to be set through multiple i2c writes (for example) + * then some initial writes may succeed while others fail. Thus leaving the + * system in an inconsistent state. The question is how much effort you are + * willing to spend on trying to make something atomic that really isn't. + * + * From the point of view of an application the main requirement is that + * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an + * error should be returned without actually affecting any controls. + * + * If all the values are correct, then it is acceptable to just give up + * in case of low-level errors. + * + * It is important though that the application can tell when only a partial + * configuration was done. The way we do that is through the error_idx field + * of struct v4l2_ext_controls: if that is equal to the count field then no + * controls were affected. Otherwise all controls before that index were + * successful in performing their 'get' or 'set' operation, the control at + * the given index failed, and you don't know what happened with the controls + * after the failed one. Since if they were part of a control cluster they + * could have been successfully processed (if a cluster member was encountered + * at index < error_idx), they could have failed (if a cluster member was at + * error_idx), or they may not have been processed yet (if the first cluster + * member appeared after error_idx). + * + * It is all fairly theoretical, though. In practice all you can do is to + * bail out. If error_idx == count, then it is an application bug. If + * error_idx < count then it is only an application bug if the error code was + * EBUSY. That usually means that something started streaming just when you + * tried to set the controls. In all other cases it is a driver/hardware + * problem and all you can do is to retry or bail out. 
+ * + * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that + * never modifies controls the error_idx is just set to whatever control + * has an invalid value. + */ + +/* + * Prepare for the extended g/s/try functions. + * Find the controls in the control array and do some basic checks. + */ +static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct v4l2_ctrl_helper *helpers, + struct video_device *vdev, + bool get) +{ + struct v4l2_ctrl_helper *h; + bool have_clusters = false; + u32 i; + + for (i = 0, h = helpers; i < cs->count; i++, h++) { + struct v4l2_ext_control *c = &cs->controls[i]; + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl *ctrl; + u32 id = c->id & V4L2_CTRL_ID_MASK; + + cs->error_idx = i; + + if (cs->which && + (cs->which < V4L2_CTRL_WHICH_DEF_VAL || + cs->which > V4L2_CTRL_WHICH_MAX_VAL) && + V4L2_CTRL_ID2WHICH(id) != cs->which) { + dprintk(vdev, + "invalid which 0x%x or control id 0x%x\n", + cs->which, id); + return -EINVAL; + } + + /* + * Old-style private controls are not allowed for + * extended controls. 
+ */ + if (id >= V4L2_CID_PRIVATE_BASE) { + dprintk(vdev, + "old-style private controls not allowed\n"); + return -EINVAL; + } + ref = find_ref_lock(hdl, id); + if (!ref) { + dprintk(vdev, "cannot find control id 0x%x\n", id); + return -EINVAL; + } + h->ref = ref; + ctrl = ref->ctrl; + if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) { + dprintk(vdev, "control id 0x%x is disabled\n", id); + return -EINVAL; + } + + if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) && + (cs->which == V4L2_CTRL_WHICH_MIN_VAL || + cs->which == V4L2_CTRL_WHICH_MAX_VAL)) { + dprintk(vdev, + "invalid which 0x%x or control id 0x%x\n", + cs->which, id); + return -EINVAL; + } + + if (ctrl->cluster[0]->ncontrols > 1) + have_clusters = true; + if (ctrl->cluster[0] != ctrl) + ref = find_ref_lock(hdl, ctrl->cluster[0]->id); + if (ctrl->is_dyn_array) { + unsigned int max_size = ctrl->dims[0] * ctrl->elem_size; + unsigned int tot_size = ctrl->elem_size; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + tot_size *= ref->p_req_elems; + else + tot_size *= ctrl->elems; + + c->size = ctrl->elem_size * (c->size / ctrl->elem_size); + if (get) { + if (c->size < tot_size) { + c->size = tot_size; + return -ENOSPC; + } + c->size = tot_size; + } else { + if (c->size > max_size) { + c->size = max_size; + return -ENOSPC; + } + if (!c->size) + return -EFAULT; + } + } else if (ctrl->is_ptr && !ctrl->is_string) { + unsigned int tot_size = ctrl->elems * ctrl->elem_size; + + if (c->size < tot_size) { + /* + * In the get case the application first + * queries to obtain the size of the control. 
+ */ + if (get) { + c->size = tot_size; + return -ENOSPC; + } + dprintk(vdev, + "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n", + id, c->size, tot_size); + return -EFAULT; + } + c->size = tot_size; + } + /* Store the ref to the master control of the cluster */ + h->mref = ref; + /* + * Initially set next to 0, meaning that there is no other + * control in this helper array belonging to the same + * cluster. + */ + h->next = 0; + } + + /* + * We are done if there were no controls that belong to a multi- + * control cluster. + */ + if (!have_clusters) + return 0; + + /* + * The code below figures out in O(n) time which controls in the list + * belong to the same cluster. + */ + + /* This has to be done with the handler lock taken. */ + mutex_lock(hdl->lock); + + /* First zero the helper field in the master control references */ + for (i = 0; i < cs->count; i++) + helpers[i].mref->helper = NULL; + for (i = 0, h = helpers; i < cs->count; i++, h++) { + struct v4l2_ctrl_ref *mref = h->mref; + + /* + * If the mref->helper is set, then it points to an earlier + * helper that belongs to the same cluster. + */ + if (mref->helper) { + /* + * Set the next field of mref->helper to the current + * index: this means that the earlier helper now + * points to the next helper in the same cluster. + */ + mref->helper->next = i; + /* + * mref should be set only for the first helper in the + * cluster, clear the others. + */ + h->mref = NULL; + } + /* Point the mref helper to the current helper struct. */ + mref->helper = h; + } + mutex_unlock(hdl->lock); + return 0; +} + +/* + * Handles the corner case where cs->count == 0. It checks whether the + * specified control class exists. If that class ID is 0, then it checks + * whether there are any controls at all. 
+ */ +static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) +{ + if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL && + which <= V4L2_CTRL_WHICH_MAX_VAL)) + return 0; + return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; +} + +/* + * Get extended controls. Allocates the helpers array if needed. + * + * Note that v4l2_g_ext_ctrls_common() with 'which' set to + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was + * completed, and in that case p_req_valid is true for all controls. + */ +int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev) +{ + struct v4l2_ctrl_helper helper[4]; + struct v4l2_ctrl_helper *helpers = helper; + int ret; + int i, j; + bool is_default, is_request, is_min, is_max; + + is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL); + is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL); + is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL); + is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL); + + cs->error_idx = cs->count; + cs->which = V4L2_CTRL_ID2WHICH(cs->which); + + if (!hdl) + return -EINVAL; + + if (cs->count == 0) + return class_check(hdl, cs->which); + + if (cs->count > ARRAY_SIZE(helper)) { + helpers = kvmalloc_array(cs->count, sizeof(helper[0]), + GFP_KERNEL); + if (!helpers) + return -ENOMEM; + } + + ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true); + cs->error_idx = cs->count; + + for (i = 0; !ret && i < cs->count; i++) + if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + ret = -EACCES; + + for (i = 0; !ret && i < cs->count; i++) { + struct v4l2_ctrl *master; + bool is_volatile = false; + u32 idx = i; + + if (!helpers[i].mref) + continue; + + master = helpers[i].mref->ctrl; + cs->error_idx = i; + + v4l2_ctrl_lock(master); + + /* + * g_volatile_ctrl will update the new control values. + * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL, + * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and + * V4L2_CTRL_WHICH_REQUEST_VAL. 
In the case of requests + * it is v4l2_ctrl_request_complete() that copies the + * volatile controls at the time of request completion + * to the request, so you don't want to do that again. + */ + if (!is_default && !is_request && !is_min && !is_max && + ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || + (master->has_volatiles && !is_cur_manual(master)))) { + for (j = 0; j < master->ncontrols; j++) + cur_to_new(master->cluster[j]); + ret = call_op(master, g_volatile_ctrl); + is_volatile = true; + } + + if (ret) { + v4l2_ctrl_unlock(master); + break; + } + + /* + * Copy the default value (if is_default is true), the + * request value (if is_request is true and p_req is valid), + * the new volatile value (if is_volatile is true) or the + * current value. + */ + do { + struct v4l2_ctrl_ref *ref = helpers[idx].ref; + + if (is_default) + ret = def_to_user(cs->controls + idx, ref->ctrl); + else if (is_request && ref->p_req_array_enomem) + ret = -ENOMEM; + else if (is_request && ref->p_req_valid) + ret = req_to_user(cs->controls + idx, ref); + else if (is_min) + ret = min_to_user(cs->controls + idx, ref->ctrl); + else if (is_max) + ret = max_to_user(cs->controls + idx, ref->ctrl); + else if (is_volatile) + ret = new_to_user(cs->controls + idx, ref->ctrl); + else + ret = cur_to_user(cs->controls + idx, ref->ctrl); + idx = helpers[idx].next; + } while (!ret && idx); + + v4l2_ctrl_unlock(master); + } + + if (cs->count > ARRAY_SIZE(helper)) + kvfree(helpers); + return ret; +} + +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs) +{ + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs); + + return v4l2_g_ext_ctrls_common(hdl, cs, vdev); +} +EXPORT_SYMBOL(v4l2_g_ext_ctrls); + +/* Validate a new control */ +static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) +{ + return ctrl->type_ops->validate(ctrl, p_new); +} + +/* 
Validate controls. */ +static int validate_ctrls(struct v4l2_ext_controls *cs, + struct v4l2_ctrl_helper *helpers, + struct video_device *vdev, + bool set) +{ + unsigned int i; + int ret = 0; + + cs->error_idx = cs->count; + for (i = 0; i < cs->count; i++) { + struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; + union v4l2_ctrl_ptr p_new; + + cs->error_idx = i; + + if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) { + dprintk(vdev, + "control id 0x%x is read-only\n", + ctrl->id); + return -EACCES; + } + /* + * This test is also done in try_set_control_cluster() which + * is called in atomic context, so that has the final say, + * but it makes sense to do an up-front check as well. Once + * an error occurs in try_set_control_cluster() some other + * controls may have been set already and we want to do a + * best-effort to avoid that. + */ + if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) { + dprintk(vdev, + "control id 0x%x is grabbed, cannot set\n", + ctrl->id); + return -EBUSY; + } + /* + * Skip validation for now if the payload needs to be copied + * from userspace into kernelspace. We'll validate those later. 
+ */ + if (ctrl->is_ptr) + continue; + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + p_new.p_s64 = &cs->controls[i].value64; + else + p_new.p_s32 = &cs->controls[i].value; + ret = validate_new(ctrl, p_new); + if (ret) + return ret; + } + return 0; +} + +/* Try or try-and-set controls */ +int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev, bool set) +{ + struct v4l2_ctrl_helper helper[4]; + struct v4l2_ctrl_helper *helpers = helper; + unsigned int i, j; + int ret; + + cs->error_idx = cs->count; + + /* Default/minimum/maximum values cannot be changed */ + if (cs->which == V4L2_CTRL_WHICH_DEF_VAL || + cs->which == V4L2_CTRL_WHICH_MIN_VAL || + cs->which == V4L2_CTRL_WHICH_MAX_VAL) { + dprintk(vdev, "%s: cannot change default/min/max value\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + cs->which = V4L2_CTRL_ID2WHICH(cs->which); + + if (!hdl) { + dprintk(vdev, "%s: invalid null control handler\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + if (cs->count == 0) + return class_check(hdl, cs->which); + + if (cs->count > ARRAY_SIZE(helper)) { + helpers = kvmalloc_array(cs->count, sizeof(helper[0]), + GFP_KERNEL); + if (!helpers) + return -ENOMEM; + } + ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false); + if (!ret) + ret = validate_ctrls(cs, helpers, vdev, set); + if (ret && set) + cs->error_idx = cs->count; + for (i = 0; !ret && i < cs->count; i++) { + struct v4l2_ctrl *master; + u32 idx = i; + + if (!helpers[i].mref) + continue; + + cs->error_idx = i; + master = helpers[i].mref->ctrl; + v4l2_ctrl_lock(master); + + /* Reset the 'is_new' flags of the cluster */ + for (j = 0; j < master->ncontrols; j++) + if (master->cluster[j]) + master->cluster[j]->is_new = 0; + + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. 
+ * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + /* Pick an initial non-manual value */ + s32 new_auto_val = master->manual_mode_value + 1; + u32 tmp_idx = idx; + + do { + /* + * Check if the auto control is part of the + * list, and remember the new value. + */ + if (helpers[tmp_idx].ref->ctrl == master) + new_auto_val = cs->controls[tmp_idx].value; + tmp_idx = helpers[tmp_idx].next; + } while (tmp_idx); + /* + * If the new value == the manual value, then copy + * the current volatile values. + */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + /* + * Copy the new caller-supplied control values. + * user_to_new() sets 'is_new' to 1. + */ + do { + struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; + + ret = user_to_new(cs->controls + idx, ctrl); + if (!ret && ctrl->is_ptr) { + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + dprintk(vdev, + "failed to validate control %s (%d)\n", + v4l2_ctrl_get_name(ctrl->id), ret); + } + idx = helpers[idx].next; + } while (!ret && idx); + + if (!ret) + ret = try_or_set_cluster(fh, master, + !hdl->req_obj.req && set, 0); + if (!ret && hdl->req_obj.req && set) { + for (j = 0; j < master->ncontrols; j++) { + struct v4l2_ctrl_ref *ref = + find_ref(hdl, master->cluster[j]->id); + + new_to_req(ref); + } + } + + /* Copy the new values back to userspace. 
*/ + if (!ret) { + idx = i; + do { + ret = new_to_user(cs->controls + idx, + helpers[idx].ref->ctrl); + idx = helpers[idx].next; + } while (!ret && idx); + } + v4l2_ctrl_unlock(master); + } + + if (cs->count > ARRAY_SIZE(helper)) + kvfree(helpers); + return ret; +} + +static int try_set_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + int ret; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set); + + ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); + if (ret) + dprintk(vdev, + "%s: try_set_ext_ctrls_common failed (%d)\n", + video_device_node_name(vdev), ret); + + return ret; +} + +int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false); +} +EXPORT_SYMBOL(v4l2_try_ext_ctrls); + +int v4l2_s_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true); +} +EXPORT_SYMBOL(v4l2_s_ext_ctrls); + +/* + * VIDIOC_G/S_CTRL implementation + */ + +/* Helper function to get a single control */ +static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) +{ + struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret = 0; + int i; + + /* Compound controls are not supported. The new_to_user() and + * cur_to_user() calls below would need to be modified not to access + * userspace memory when called from get_ctrl(). 
+ */ + if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64) + return -EINVAL; + + if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + return -EACCES; + + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + ret = call_op(master, g_volatile_ctrl); + if (!ret) + ret = new_to_user(c, ctrl); + } else { + ret = cur_to_user(c, ctrl); + } + v4l2_ctrl_unlock(master); + return ret; +} + +int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); + struct v4l2_ext_control c; + int ret; + + if (!ctrl || !ctrl->is_int) + return -EINVAL; + ret = get_ctrl(ctrl, &c); + + if (!ret) + control->value = c.value; + + return ret; +} +EXPORT_SYMBOL(v4l2_g_ctrl); + +/* Helper function for VIDIOC_S_CTRL compatibility */ +static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) +{ + struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret; + int i; + + /* Reset the 'is_new' flags of the cluster */ + for (i = 0; i < master->ncontrols; i++) + if (master->cluster[i]) + master->cluster[i]->is_new = 0; + + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + return ret; + + /* + * For autoclusters with volatiles that are switched from auto to + * manual mode we have to update the current volatile values since + * those will become the initial manual values after such a switch. 
+ */ + if (master->is_auto && master->has_volatiles && ctrl == master && + !is_cur_manual(master) && ctrl->val == master->manual_mode_value) + update_from_auto_cluster(master); + + ctrl->is_new = 1; + return try_or_set_cluster(fh, master, true, ch_flags); +} + +/* Helper function for VIDIOC_S_CTRL compatibility */ +static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, + struct v4l2_ext_control *c) +{ + int ret; + + v4l2_ctrl_lock(ctrl); + ret = user_to_new(c, ctrl); + if (!ret) + ret = set_ctrl(fh, ctrl, 0); + if (!ret) + ret = cur_to_user(c, ctrl); + v4l2_ctrl_unlock(ctrl); + return ret; +} + +int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct v4l2_control *control) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); + struct v4l2_ext_control c = { control->id }; + int ret; + + if (!ctrl || !ctrl->is_int) + return -EINVAL; + + if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) + return -EACCES; + + c.value = control->value; + ret = set_ctrl_lock(fh, ctrl, &c); + control->value = c.value; + return ret; +} +EXPORT_SYMBOL(v4l2_s_ctrl); + +/* + * Helper functions for drivers to get/set controls. + */ + +s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl) +{ + struct v4l2_ext_control c; + + /* It's a driver bug if this happens. */ + if (WARN_ON(!ctrl->is_int)) + return 0; + c.value = 0; + get_ctrl(ctrl, &c); + return c.value; +} +EXPORT_SYMBOL(v4l2_ctrl_g_ctrl); + +s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl) +{ + struct v4l2_ext_control c; + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) + return 0; + c.value64 = 0; + get_ctrl(ctrl, &c); + return c.value64; +} +EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64); + +int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. 
*/ + if (WARN_ON(!ctrl->is_int)) + return -EINVAL; + ctrl->val = val; + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); + +int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) + return -EINVAL; + *ctrl->p_new.p_s64 = val; + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); + +int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING)) + return -EINVAL; + strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); + +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->type != type)) + return -EINVAL; + /* Setting dynamic arrays is not (yet?) supported. */ + if (WARN_ON(ctrl->is_dyn_array)) + return -EINVAL; + memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size); + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound); + +/* + * Modify the range of a control. 
+ */ +int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def) +{ + bool value_changed; + bool range_changed = false; + int ret; + + lockdep_assert_held(ctrl->handler->lock); + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER64: + case V4L2_CTRL_TYPE_BOOLEAN: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_BITMASK: + case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: + if (ctrl->is_array) + return -EINVAL; + ret = check_range(ctrl->type, min, max, step, def); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + if (ctrl->minimum != min || ctrl->maximum != max || + ctrl->step != step || ctrl->default_value != def) { + range_changed = true; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + } + cur_to_new(ctrl); + if (validate_new(ctrl, ctrl->p_new)) { + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + *ctrl->p_new.p_s64 = def; + else + *ctrl->p_new.p_s32 = def; + } + + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; + else + value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; + if (value_changed) + ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); + else if (range_changed) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); + return ret; +} +EXPORT_SYMBOL(__v4l2_ctrl_modify_range); + +int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]) +{ + unsigned int elems = 1; + unsigned int i; + void *p_array; + + lockdep_assert_held(ctrl->handler->lock); + + if (!ctrl->is_array || ctrl->is_dyn_array) + return -EINVAL; + + for (i = 0; i < ctrl->nr_of_dims; i++) + elems *= dims[i]; + if (elems == 0) + return -EINVAL; + p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL); + if (!p_array) + return -ENOMEM; + kvfree(ctrl->p_array); + ctrl->p_array_alloc_elems = elems; + 
ctrl->elems = elems; + ctrl->new_elems = elems; + ctrl->p_array = p_array; + ctrl->p_new.p = p_array; + ctrl->p_cur.p = p_array + elems * ctrl->elem_size; + for (i = 0; i < ctrl->nr_of_dims; i++) + ctrl->dims[i] = dims[i]; + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); + cur_to_new(ctrl); + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | + V4L2_EVENT_CTRL_CH_DIMENSIONS); + return 0; +} +EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions); + +/* Implement VIDIOC_QUERY_EXT_CTRL */ +int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc) +{ + const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND; + u32 id = qc->id & V4L2_CTRL_ID_MASK; + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl *ctrl; + + if (!hdl) + return -EINVAL; + + mutex_lock(hdl->lock); + + /* Try to find it */ + ref = find_ref(hdl, id); + + if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) { + bool is_compound; + /* Match any control that is not hidden */ + unsigned int mask = 1; + bool match = false; + + if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) { + /* Match any hidden control */ + match = true; + } else if ((qc->id & next_flags) == next_flags) { + /* Match any control, compound or not */ + mask = 0; + } + + /* Find the next control with ID > qc->id */ + + /* Did we reach the end of the control list? */ + if (id >= node2id(hdl->ctrl_refs.prev)) { + ref = NULL; /* Yes, so there is no next control */ + } else if (ref) { + struct v4l2_ctrl_ref *pos = ref; + + /* + * We found a control with the given ID, so just get + * the next valid one in the list. 
+ */ + ref = NULL; + list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) { + is_compound = pos->ctrl->is_array || + pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; + if (id < pos->ctrl->id && + (is_compound & mask) == match) { + ref = pos; + break; + } + } + } else { + struct v4l2_ctrl_ref *pos; + + /* + * No control with the given ID exists, so start + * searching for the next largest ID. We know there + * is one, otherwise the first 'if' above would have + * been true. + */ + list_for_each_entry(pos, &hdl->ctrl_refs, node) { + is_compound = pos->ctrl->is_array || + pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; + if (id < pos->ctrl->id && + (is_compound & mask) == match) { + ref = pos; + break; + } + } + } + } + mutex_unlock(hdl->lock); + + if (!ref) + return -EINVAL; + + ctrl = ref->ctrl; + memset(qc, 0, sizeof(*qc)); + if (id >= V4L2_CID_PRIVATE_BASE) + qc->id = id; + else + qc->id = ctrl->id; + strscpy(qc->name, ctrl->name, sizeof(qc->name)); + qc->flags = user_flags(ctrl); + qc->type = ctrl->type; + qc->elem_size = ctrl->elem_size; + qc->elems = ctrl->elems; + qc->nr_of_dims = ctrl->nr_of_dims; + memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0])); + qc->minimum = ctrl->minimum; + qc->maximum = ctrl->maximum; + qc->default_value = ctrl->default_value; + if (ctrl->type == V4L2_CTRL_TYPE_MENU || + ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) + qc->step = 1; + else + qc->step = ctrl->step; + return 0; +} +EXPORT_SYMBOL(v4l2_query_ext_ctrl); + +void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to, + const struct v4l2_query_ext_ctrl *from) +{ + to->id = from->id; + to->type = from->type; + to->flags = from->flags; + strscpy(to->name, from->name, sizeof(to->name)); + + switch (from->type) { + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_BOOLEAN: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_STRING: + case V4L2_CTRL_TYPE_BITMASK: + to->minimum = from->minimum; + to->maximum = from->maximum; + 
to->step = from->step; + to->default_value = from->default_value; + break; + default: + to->minimum = 0; + to->maximum = 0; + to->step = 0; + to->default_value = 0; + break; + } +} +EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl); + +/* Implement VIDIOC_QUERYCTRL */ +int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc) +{ + struct v4l2_query_ext_ctrl qec = { qc->id }; + int rc; + + rc = v4l2_query_ext_ctrl(hdl, &qec); + if (rc) + return rc; + + v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec); + + return 0; +} +EXPORT_SYMBOL(v4l2_queryctrl); + +/* Implement VIDIOC_QUERYMENU */ +int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) +{ + struct v4l2_ctrl *ctrl; + u32 i = qm->index; + + ctrl = v4l2_ctrl_find(hdl, qm->id); + if (!ctrl) + return -EINVAL; + + qm->reserved = 0; + /* Sanity checks */ + switch (ctrl->type) { + case V4L2_CTRL_TYPE_MENU: + if (!ctrl->qmenu) + return -EINVAL; + break; + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (!ctrl->qmenu_int) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if (i < ctrl->minimum || i > ctrl->maximum) + return -EINVAL; + + /* Use mask to see if this menu item should be skipped */ + if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i))) + return -EINVAL; + /* Empty menu items should also be skipped */ + if (ctrl->type == V4L2_CTRL_TYPE_MENU) { + if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0') + return -EINVAL; + strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name)); + } else { + qm->value = ctrl->qmenu_int[i]; + } + return 0; +} +EXPORT_SYMBOL(v4l2_querymenu); + +/* + * VIDIOC_LOG_STATUS helpers + */ + +int v4l2_ctrl_log_status(struct file *file, void *fh) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_fh *vfh = file->private_data; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev) + v4l2_ctrl_handler_log_status(vfh->ctrl_handler, + vfd->v4l2_dev->name); + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_log_status); + +int 
v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd) +{ + v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name); + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status); + +/* + * VIDIOC_(UN)SUBSCRIBE_EVENT implementation + */ + +static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, + unsigned int elems) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); + + if (!ctrl) + return -EINVAL; + + v4l2_ctrl_lock(ctrl); + list_add_tail(&sev->node, &ctrl->ev_subs); + if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS && + (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) + send_initial_event(sev->fh, ctrl); + v4l2_ctrl_unlock(ctrl); + return 0; +} + +static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); + + if (!ctrl) + return; + + v4l2_ctrl_lock(ctrl); + list_del(&sev->node); + v4l2_ctrl_unlock(ctrl); +} + +void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new) +{ + u32 old_changes = old->u.ctrl.changes; + + old->u.ctrl = new->u.ctrl; + old->u.ctrl.changes |= old_changes; +} +EXPORT_SYMBOL(v4l2_ctrl_replace); + +void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new) +{ + new->u.ctrl.changes |= old->u.ctrl.changes; +} +EXPORT_SYMBOL(v4l2_ctrl_merge); + +const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = { + .add = v4l2_ctrl_add_event, + .del = v4l2_ctrl_del_event, + .replace = v4l2_ctrl_replace, + .merge = v4l2_ctrl_merge, +}; +EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops); + +int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_CTRL) + return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); + return -EINVAL; +} +EXPORT_SYMBOL(v4l2_ctrl_subscribe_event); + +int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + if (!sd->ctrl_handler) + return -EINVAL; + 
return v4l2_ctrl_subscribe_event(fh, sub); +} +EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event); + +/* + * poll helper + */ +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) +{ + struct v4l2_fh *fh = file->private_data; + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + return EPOLLPRI; + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_poll); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-core.c b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-core.c new file mode 100644 index 0000000..98b9607 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -0,0 +1,2721 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework core implementation. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#include +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +static const union v4l2_ctrl_ptr ptr_null; + +static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, + u32 changes) +{ + memset(ev, 0, sizeof(*ev)); + ev->type = V4L2_EVENT_CTRL; + ev->id = ctrl->id; + ev->u.ctrl.changes = changes; + ev->u.ctrl.type = ctrl->type; + ev->u.ctrl.flags = user_flags(ctrl); + if (ctrl->is_ptr) + ev->u.ctrl.value64 = 0; + else + ev->u.ctrl.value64 = *ctrl->p_cur.p_s64; + ev->u.ctrl.minimum = ctrl->minimum; + ev->u.ctrl.maximum = ctrl->maximum; + if (ctrl->type == V4L2_CTRL_TYPE_MENU + || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) + ev->u.ctrl.step = 1; + else + ev->u.ctrl.step = ctrl->step; + ev->u.ctrl.default_value = ctrl->default_value; +} + +void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl) +{ + struct v4l2_event ev; + u32 changes = V4L2_EVENT_CTRL_CH_FLAGS; + + if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)) + changes |= V4L2_EVENT_CTRL_CH_VALUE; + fill_event(&ev, ctrl, changes); + v4l2_event_queue_fh(fh, &ev); +} + +void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) +{ + struct v4l2_event ev; + struct v4l2_subscribed_event 
*sev; + + if (list_empty(&ctrl->ev_subs)) + return; + fill_event(&ev, ctrl, changes); + + list_for_each_entry(sev, &ctrl->ev_subs, node) + if (sev->fh != fh || + (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)) + v4l2_event_queue_fh(sev->fh, &ev); +} + +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2) +{ + unsigned int i; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_BUTTON: + return false; + case V4L2_CTRL_TYPE_STRING: + for (i = 0; i < ctrl->elems; i++) { + unsigned int idx = i * ctrl->elem_size; + + /* strings are always 0-terminated */ + if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx)) + return false; + } + return true; + default: + return !memcmp(ptr1.p_const, ptr2.p_const, + ctrl->elems * ctrl->elem_size); + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_equal); + +/* Default intra MPEG-2 quantisation coefficients, from the specification. */ +static const u8 mpeg2_intra_quant_matrix[64] = { + 8, 16, 16, 19, 16, 19, 22, 22, + 22, 22, 22, 22, 26, 24, 26, 27, + 27, 27, 26, 26, 26, 26, 27, 27, + 27, 29, 29, 29, 34, 34, 34, 29, + 29, 29, 27, 27, 29, 29, 32, 32, + 34, 34, 37, 38, 37, 35, 35, 34, + 35, 38, 38, 40, 40, 40, 48, 48, + 46, 46, 56, 56, 58, 69, 69, 83 +}; + +static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_def.p_const) + memcpy(p, ctrl->p_def.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + p_mpeg2_sequence = 
p; + + /* 4:2:0 */ + p_mpeg2_sequence->chroma_format = 1; + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + p_mpeg2_picture = p; + + /* interlaced top field */ + p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD; + p_mpeg2_picture->picture_coding_type = + V4L2_MPEG2_PIC_CODING_TYPE_I; + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + p_mpeg2_quant = p; + + memcpy(p_mpeg2_quant->intra_quantiser_matrix, + mpeg2_intra_quant_matrix, + ARRAY_SIZE(mpeg2_intra_quant_matrix)); + /* + * The default non-intra MPEG-2 quantisation + * coefficients are all 16, as per the specification. + */ + memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16, + sizeof(p_mpeg2_quant->non_intra_quantiser_matrix)); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + p_vp8_frame = p; + p_vp8_frame->num_dct_parts = 1; + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + p_vp9_frame = p; + p_vp9_frame->profile = 0; + p_vp9_frame->bit_depth = 8; + p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING; + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + p_av1_sequence = p; + p_av1_sequence->bit_depth = 8; + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + p_fwht_params = p; + p_fwht_params->version = V4L2_FWHT_VERSION; + p_fwht_params->width = 1280; + p_fwht_params->height = 720; + p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV | + (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + p_h264_scaling_matrix = p; + /* + * The default (flat) H.264 scaling matrix when none are + * specified in the bitstream, this is according to formulas + * (7-8) and (7-9) of the specification. 
+ */ + memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix)); + break; + } +} + +static void std_min_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_min.p_const) + memcpy(p, ctrl->p_min.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); +} + +static void std_max_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_max.p_const) + memcpy(p, ctrl->p_max.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); +} + +static void __v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + u32 which, union v4l2_ctrl_ptr ptr) +{ + unsigned int i; + u32 tot_elems = ctrl->elems; + u32 elems = tot_elems - from_idx; + s64 value; + + switch (which) { + case V4L2_CTRL_WHICH_DEF_VAL: + value = ctrl->default_value; + break; + case V4L2_CTRL_WHICH_MAX_VAL: + value = ctrl->maximum; + break; + case V4L2_CTRL_WHICH_MIN_VAL: + value = ctrl->minimum; + break; + default: + return; + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_STRING: + if (which == V4L2_CTRL_WHICH_DEF_VAL) + value = ctrl->minimum; + + for (i = from_idx; i < tot_elems; i++) { + unsigned int offset = i * ctrl->elem_size; + + memset(ptr.p_char + offset, ' ', value); + ptr.p_char[offset + value] = '\0'; + } + break; + case V4L2_CTRL_TYPE_INTEGER64: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_s64[i] = value; + } else { + memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64)); + } + break; + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_BITMASK: + case V4L2_CTRL_TYPE_BOOLEAN: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_s32[i] = value; + } else { + memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32)); + } + break; + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + memset(ptr.p_s32 + 
from_idx, 0, elems * sizeof(s32)); + break; + case V4L2_CTRL_TYPE_U8: + memset(ptr.p_u8 + from_idx, value, elems); + break; + case V4L2_CTRL_TYPE_U16: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_u16[i] = value; + } else { + memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16)); + } + break; + case V4L2_CTRL_TYPE_U32: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_u32[i] = value; + } else { + memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32)); + } + break; + default: + for (i = from_idx; i < tot_elems; i++) { + switch (which) { + case V4L2_CTRL_WHICH_DEF_VAL: + std_init_compound(ctrl, i, ptr); + break; + case V4L2_CTRL_WHICH_MAX_VAL: + std_max_compound(ctrl, i, ptr); + break; + case V4L2_CTRL_WHICH_MIN_VAL: + std_min_compound(ctrl, i, ptr); + break; + } + } + break; + } +} + +void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr) +{ + __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_DEF_VAL, ptr); +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_init); + +static void v4l2_ctrl_type_op_minimum(const struct v4l2_ctrl *ctrl, + u32 from_idx, union v4l2_ctrl_ptr ptr) +{ + __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MIN_VAL, ptr); +} + +static void v4l2_ctrl_type_op_maximum(const struct v4l2_ctrl *ctrl, + u32 from_idx, union v4l2_ctrl_ptr ptr) +{ + __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MAX_VAL, ptr); +} + +void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl) +{ + union v4l2_ctrl_ptr ptr = ctrl->p_cur; + + if (ctrl->is_array) { + unsigned i; + + for (i = 0; i < ctrl->nr_of_dims; i++) + pr_cont("[%u]", ctrl->dims[i]); + pr_cont(" "); + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + pr_cont("%d", *ptr.p_s32); + break; + case V4L2_CTRL_TYPE_BOOLEAN: + pr_cont("%s", *ptr.p_s32 ? 
"true" : "false"); + break; + case V4L2_CTRL_TYPE_MENU: + pr_cont("%s", ctrl->qmenu[*ptr.p_s32]); + break; + case V4L2_CTRL_TYPE_INTEGER_MENU: + pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]); + break; + case V4L2_CTRL_TYPE_BITMASK: + pr_cont("0x%08x", *ptr.p_s32); + break; + case V4L2_CTRL_TYPE_INTEGER64: + pr_cont("%lld", *ptr.p_s64); + break; + case V4L2_CTRL_TYPE_STRING: + pr_cont("%s", ptr.p_char); + break; + case V4L2_CTRL_TYPE_U8: + pr_cont("%u", (unsigned)*ptr.p_u8); + break; + case V4L2_CTRL_TYPE_U16: + pr_cont("%u", (unsigned)*ptr.p_u16); + break; + case V4L2_CTRL_TYPE_U32: + pr_cont("%u", (unsigned)*ptr.p_u32); + break; + case V4L2_CTRL_TYPE_AREA: + pr_cont("%ux%u", ptr.p_area->width, ptr.p_area->height); + break; + case V4L2_CTRL_TYPE_H264_SPS: + pr_cont("H264_SPS"); + break; + case V4L2_CTRL_TYPE_H264_PPS: + pr_cont("H264_PPS"); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + pr_cont("H264_SCALING_MATRIX"); + break; + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + pr_cont("H264_SLICE_PARAMS"); + break; + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + pr_cont("H264_DECODE_PARAMS"); + break; + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + pr_cont("H264_PRED_WEIGHTS"); + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + pr_cont("FWHT_PARAMS"); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + pr_cont("VP8_FRAME"); + break; + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + pr_cont("HDR10_CLL_INFO"); + break; + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + pr_cont("HDR10_MASTERING_DISPLAY"); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + pr_cont("MPEG2_QUANTISATION"); + break; + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + pr_cont("MPEG2_SEQUENCE"); + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + pr_cont("MPEG2_PICTURE"); + break; + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + pr_cont("VP9_COMPRESSED_HDR"); + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + pr_cont("VP9_FRAME"); + break; + case V4L2_CTRL_TYPE_HEVC_SPS: + pr_cont("HEVC_SPS"); + break; + case V4L2_CTRL_TYPE_HEVC_PPS: + 
pr_cont("HEVC_PPS"); + break; + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + pr_cont("HEVC_SLICE_PARAMS"); + break; + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + pr_cont("HEVC_SCALING_MATRIX"); + break; + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + pr_cont("HEVC_DECODE_PARAMS"); + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + pr_cont("AV1_SEQUENCE"); + break; + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + pr_cont("AV1_TILE_GROUP_ENTRY"); + break; + case V4L2_CTRL_TYPE_AV1_FRAME: + pr_cont("AV1_FRAME"); + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + pr_cont("AV1_FILM_GRAIN"); + break; + case V4L2_CTRL_TYPE_RECT: + pr_cont("(%d,%d)/%ux%u", + ptr.p_rect->left, ptr.p_rect->top, + ptr.p_rect->width, ptr.p_rect->height); + break; + default: + pr_cont("unknown type %d", ctrl->type); + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_log); + +/* + * Round towards the closest legal value. Be careful when we are + * close to the maximum range of the control type to prevent + * wrap-arounds. + */ +#define ROUND_TO_RANGE(val, offset_type, ctrl) \ +({ \ + offset_type offset; \ + if ((ctrl)->maximum >= 0 && \ + val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \ + val = (ctrl)->maximum; \ + else \ + val += (s32)((ctrl)->step / 2); \ + val = clamp_t(typeof(val), val, \ + (ctrl)->minimum, (ctrl)->maximum); \ + offset = (val) - (ctrl)->minimum; \ + offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \ + val = (ctrl)->minimum + offset; \ + 0; \ +}) + +/* Validate a new control */ + +#define zero_padding(s) \ + memset(&(s).padding, 0, sizeof((s).padding)) +#define zero_reserved(s) \ + memset(&(s).reserved, 0, sizeof((s).reserved)) + +static int +validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf) +{ + unsigned int i; + + if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED | + V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE)) + return -EINVAL; + + /* That all values are in the accepted range. 
*/ + if (lf->level > GENMASK(5, 0)) + return -EINVAL; + + if (lf->sharpness > GENMASK(2, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) + if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) + if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63) + return -EINVAL; + + zero_reserved(*lf); + return 0; +} + +static int +validate_vp9_quant_params(struct v4l2_vp9_quantization *quant) +{ + if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 || + quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 || + quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15) + return -EINVAL; + + zero_reserved(*quant); + return 0; +} + +static int +validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg) +{ + unsigned int i, j; + + if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED | + V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP | + V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE | + V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA | + V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) { + if (seg->feature_enabled[i] & + ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK) + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) { + static const int range[] = { 255, 63, 3, 0 }; + + for (j = 0; j < ARRAY_SIZE(seg->feature_data[j]); j++) { + if (seg->feature_data[i][j] < -range[j] || + seg->feature_data[i][j] > range[j]) + return -EINVAL; + } + } + + zero_reserved(*seg); + return 0; +} + +static int +validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr) +{ + if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT) + return -EINVAL; + + return 0; +} + +static int +validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame) +{ + int ret; + + /* Make sure we're not passed invalid flags. 
*/ + if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME | + V4L2_VP9_FRAME_FLAG_SHOW_FRAME | + V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT | + V4L2_VP9_FRAME_FLAG_INTRA_ONLY | + V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV | + V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX | + V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE | + V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING)) + return -EINVAL; + + if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT && + frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX) + return -EINVAL; + + if (frame->profile > V4L2_VP9_PROFILE_MAX) + return -EINVAL; + + if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL) + return -EINVAL; + + if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX) + return -EINVAL; + + /* + * Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10 + * and 12 bit depths. + */ + if ((frame->profile < 2 && frame->bit_depth != 8) || + (frame->profile >= 2 && + (frame->bit_depth != 10 && frame->bit_depth != 12))) + return -EINVAL; + + /* Profile 0 and 2 only accept YUV 4:2:0. */ + if ((frame->profile == 0 || frame->profile == 2) && + (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) || + !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) + return -EINVAL; + + /* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */ + if ((frame->profile == 1 || frame->profile == 3) && + ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) && + (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) + return -EINVAL; + + if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE) + return -EINVAL; + + /* + * According to the spec, tile_cols_log2 shall be less than or equal + * to 6. 
+ */ + if (frame->tile_cols_log2 > 6) + return -EINVAL; + + if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT) + return -EINVAL; + + ret = validate_vp9_lf_params(&frame->lf); + if (ret) + return ret; + + ret = validate_vp9_quant_params(&frame->quant); + if (ret) + return ret; + + ret = validate_vp9_seg_params(&frame->seg); + if (ret) + return ret; + + zero_reserved(*frame); + return 0; +} + +static int validate_av1_quantization(struct v4l2_av1_quantization *q) +{ + if (q->flags > GENMASK(2, 0)) + return -EINVAL; + + if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 || + q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 || + q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 || + q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 || + q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 || + q->delta_q_res > GENMASK(1, 0)) + return -EINVAL; + + if (q->qm_y > GENMASK(3, 0) || + q->qm_u > GENMASK(3, 0) || + q->qm_v > GENMASK(3, 0)) + return -EINVAL; + + return 0; +} + +static int validate_av1_segmentation(struct v4l2_av1_segmentation *s) +{ + u32 i; + u32 j; + + if (s->flags > GENMASK(4, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) { + static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; + static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0}; + + for (j = 0; j < ARRAY_SIZE(s->feature_data[j]); j++) { + s32 limit = segmentation_feature_max[j]; + + if (segmentation_feature_signed[j]) { + if (s->feature_data[i][j] < -limit || + s->feature_data[i][j] > limit) + return -EINVAL; + } else { + if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit) + return -EINVAL; + } + } + } + + return 0; +} + +static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf) +{ + u32 i; + + if (lf->flags > GENMASK(3, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->level); i++) { + if (lf->level[i] > GENMASK(5, 0)) + return -EINVAL; + } + + if (lf->sharpness > GENMASK(2, 0)) + return -EINVAL; + + for (i 
= 0; i < ARRAY_SIZE(lf->ref_deltas); i++) { + if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63) + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) { + if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63) + return -EINVAL; + } + + return 0; +} + +static int validate_av1_cdef(struct v4l2_av1_cdef *cdef) +{ + u32 i; + + if (cdef->damping_minus_3 > GENMASK(1, 0) || + cdef->bits > GENMASK(1, 0)) + return -EINVAL; + + for (i = 0; i < 1 << cdef->bits; i++) { + if (cdef->y_pri_strength[i] > GENMASK(3, 0) || + cdef->y_sec_strength[i] > 4 || + cdef->uv_pri_strength[i] > GENMASK(3, 0) || + cdef->uv_sec_strength[i] > 4) + return -EINVAL; + } + + return 0; +} + +static int validate_av1_loop_restauration(struct v4l2_av1_loop_restoration *lr) +{ + if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1) + return -EINVAL; + + return 0; +} + +static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg) +{ + u32 i; + + if (fg->flags > GENMASK(4, 0)) + return -EINVAL; + + if (fg->film_grain_params_ref_idx > GENMASK(2, 0) || + fg->num_y_points > 14 || + fg->num_cb_points > 10 || + fg->num_cr_points > GENMASK(3, 0) || + fg->grain_scaling_minus_8 > GENMASK(1, 0) || + fg->ar_coeff_lag > GENMASK(1, 0) || + fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) || + fg->grain_scale_shift > GENMASK(1, 0)) + return -EINVAL; + + if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) + return 0; + + for (i = 1; i < fg->num_y_points; i++) + if (fg->point_y_value[i] <= fg->point_y_value[i - 1]) + return -EINVAL; + + for (i = 1; i < fg->num_cb_points; i++) + if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1]) + return -EINVAL; + + for (i = 1; i < fg->num_cr_points; i++) + if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1]) + return -EINVAL; + + return 0; +} + +static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f) +{ + int ret = 0; + + ret = validate_av1_quantization(&f->quantization); + if (ret) + return ret; + ret = 
validate_av1_segmentation(&f->segmentation); + if (ret) + return ret; + ret = validate_av1_loop_filter(&f->loop_filter); + if (ret) + return ret; + ret = validate_av1_cdef(&f->cdef); + if (ret) + return ret; + ret = validate_av1_loop_restauration(&f->loop_restoration); + if (ret) + return ret; + + if (f->flags & + ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME | + V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME | + V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE | + V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE | + V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS | + V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV | + V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC | + V4L2_AV1_FRAME_FLAG_USE_SUPERRES | + V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV | + V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE | + V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS | + V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF | + V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION | + V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT | + V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET | + V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED | + V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT | + V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE | + V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT | + V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING)) + return -EINVAL; + + if (f->superres_denom > GENMASK(2, 0) + 9) + return -EINVAL; + + return 0; +} + +static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s) +{ + if (s->flags & + ~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE | + V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF | + 
V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION | + V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME | + V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE | + V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X | + V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y | + V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT | + V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q)) + return -EINVAL; + + if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME) + return -EINVAL; + + /* reserved */ + if (s->seq_profile > 2) + return -EINVAL; + + /* TODO: PROFILES */ + return 0; +} + +/* + * Compound controls validation requires setting unused fields/flags to zero + * in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's + * memcmp. + */ +static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_dec_params; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; + struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params; + struct v4l2_area *area; + struct v4l2_rect *rect; + void *p = ptr.p + idx * ctrl->elem_size; + unsigned int i; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + p_mpeg2_sequence = p; + + switch (p_mpeg2_sequence->chroma_format) { + case 1: /* 4:2:0 */ + case 2: /* 4:2:2 */ + case 3: /* 4:4:4 */ + break; + default: + return -EINVAL; + } + break; + + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + p_mpeg2_picture = p; + + switch (p_mpeg2_picture->intra_dc_precision) { + case 0: /* 8 bits */ + case 1: /* 9 bits */ + 
case 2: /* 10 bits */ + case 3: /* 11 bits */ + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_picture->picture_structure) { + case V4L2_MPEG2_PIC_TOP_FIELD: + case V4L2_MPEG2_PIC_BOTTOM_FIELD: + case V4L2_MPEG2_PIC_FRAME: + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_picture->picture_coding_type) { + case V4L2_MPEG2_PIC_CODING_TYPE_I: + case V4L2_MPEG2_PIC_CODING_TYPE_P: + case V4L2_MPEG2_PIC_CODING_TYPE_B: + break; + default: + return -EINVAL; + } + zero_reserved(*p_mpeg2_picture); + break; + + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + break; + + case V4L2_CTRL_TYPE_FWHT_PARAMS: + p_fwht_params = p; + if (p_fwht_params->version < V4L2_FWHT_VERSION) + return -EINVAL; + if (!p_fwht_params->width || !p_fwht_params->height) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SPS: + p_h264_sps = p; + + /* Some syntax elements are only conditionally valid */ + if (p_h264_sps->pic_order_cnt_type != 0) { + p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0; + } else if (p_h264_sps->pic_order_cnt_type != 1) { + p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0; + p_h264_sps->offset_for_non_ref_pic = 0; + p_h264_sps->offset_for_top_to_bottom_field = 0; + memset(&p_h264_sps->offset_for_ref_frame, 0, + sizeof(p_h264_sps->offset_for_ref_frame)); + } + + if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) { + p_h264_sps->chroma_format_idc = 1; + p_h264_sps->bit_depth_luma_minus8 = 0; + p_h264_sps->bit_depth_chroma_minus8 = 0; + + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS; + } + + if (p_h264_sps->chroma_format_idc < 3) + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE; + + if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD; + + /* + * Chroma 4:2:2 format require at least High 4:2:2 profile. 
+ * + * The H264 specification and well-known parser implementations + * use profile-idc values directly, as that is clearer and + * less ambiguous. We do the same here. + */ + if (p_h264_sps->profile_idc < 122 && + p_h264_sps->chroma_format_idc > 1) + return -EINVAL; + /* Chroma 4:4:4 format require at least High 4:2:2 profile */ + if (p_h264_sps->profile_idc < 244 && + p_h264_sps->chroma_format_idc > 2) + return -EINVAL; + if (p_h264_sps->chroma_format_idc > 3) + return -EINVAL; + + if (p_h264_sps->bit_depth_luma_minus8 > 6) + return -EINVAL; + if (p_h264_sps->bit_depth_chroma_minus8 > 6) + return -EINVAL; + if (p_h264_sps->log2_max_frame_num_minus4 > 12) + return -EINVAL; + if (p_h264_sps->pic_order_cnt_type > 2) + return -EINVAL; + if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12) + return -EINVAL; + if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_PPS: + p_h264_pps = p; + + if (p_h264_pps->num_slice_groups_minus1 > 7) + return -EINVAL; + if (p_h264_pps->num_ref_idx_l0_default_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_pps->num_ref_idx_l1_default_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_pps->weighted_bipred_idc > 2) + return -EINVAL; + /* + * pic_init_qp_minus26 shall be in the range of + * -(26 + QpBdOffset_y) to +25, inclusive, + * where QpBdOffset_y is 6 * bit_depth_luma_minus8 + */ + if (p_h264_pps->pic_init_qp_minus26 < -62 || + p_h264_pps->pic_init_qp_minus26 > 25) + return -EINVAL; + if (p_h264_pps->pic_init_qs_minus26 < -26 || + p_h264_pps->pic_init_qs_minus26 > 25) + return -EINVAL; + if (p_h264_pps->chroma_qp_index_offset < -12 || + p_h264_pps->chroma_qp_index_offset > 12) + return -EINVAL; + if (p_h264_pps->second_chroma_qp_index_offset < -12 || + p_h264_pps->second_chroma_qp_index_offset > 12) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + break; + + case 
V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + p_h264_pred_weights = p; + + if (p_h264_pred_weights->luma_log2_weight_denom > 7) + return -EINVAL; + if (p_h264_pred_weights->chroma_log2_weight_denom > 7) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + p_h264_slice_params = p; + + if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) + p_h264_slice_params->flags &= + ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED; + + if (p_h264_slice_params->colour_plane_id > 2) + return -EINVAL; + if (p_h264_slice_params->cabac_init_idc > 2) + return -EINVAL; + if (p_h264_slice_params->disable_deblocking_filter_idc > 2) + return -EINVAL; + if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 || + p_h264_slice_params->slice_alpha_c0_offset_div2 > 6) + return -EINVAL; + if (p_h264_slice_params->slice_beta_offset_div2 < -6 || + p_h264_slice_params->slice_beta_offset_div2 > 6) + return -EINVAL; + + if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I || + p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI) + p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0; + if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) + p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0; + + if (p_h264_slice_params->num_ref_idx_l0_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_slice_params->num_ref_idx_l1_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + zero_reserved(*p_h264_slice_params); + break; + + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + p_h264_dec_params = p; + + if (p_h264_dec_params->nal_ref_idc > 3) + return -EINVAL; + for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { + struct v4l2_h264_dpb_entry *dpb_entry = + &p_h264_dec_params->dpb[i]; + + zero_reserved(*dpb_entry); + } + zero_reserved(*p_h264_dec_params); + break; + + case V4L2_CTRL_TYPE_VP8_FRAME: + p_vp8_frame = p; + + switch (p_vp8_frame->num_dct_parts) { + case 1: + case 2: + case 4: + case 8: + break; + default: + return -EINVAL; + 
} + zero_padding(p_vp8_frame->segment); + zero_padding(p_vp8_frame->lf); + zero_padding(p_vp8_frame->quant); + zero_padding(p_vp8_frame->entropy); + zero_padding(p_vp8_frame->coder_state); + break; + + case V4L2_CTRL_TYPE_HEVC_SPS: + p_hevc_sps = p; + + if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) { + p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0; + p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0; + p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0; + p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0; + } + + if (!(p_hevc_sps->flags & + V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT)) + p_hevc_sps->num_long_term_ref_pics_sps = 0; + break; + + case V4L2_CTRL_TYPE_HEVC_PPS: + p_hevc_pps = p; + + if (!(p_hevc_pps->flags & + V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED)) + p_hevc_pps->diff_cu_qp_delta_depth = 0; + + if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) { + p_hevc_pps->num_tile_columns_minus1 = 0; + p_hevc_pps->num_tile_rows_minus1 = 0; + memset(&p_hevc_pps->column_width_minus1, 0, + sizeof(p_hevc_pps->column_width_minus1)); + memset(&p_hevc_pps->row_height_minus1, 0, + sizeof(p_hevc_pps->row_height_minus1)); + + p_hevc_pps->flags &= + ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; + } + + if (p_hevc_pps->flags & + V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) { + p_hevc_pps->pps_beta_offset_div2 = 0; + p_hevc_pps->pps_tc_offset_div2 = 0; + } + break; + + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + p_hevc_decode_params = p; + + if (p_hevc_decode_params->num_active_dpb_entries > + V4L2_HEVC_DPB_ENTRIES_NUM_MAX) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + break; + + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + break; + + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + p_hdr10_mastering = p; + + for (i = 0; i < 3; ++i) { + if (p_hdr10_mastering->display_primaries_x[i] < + V4L2_HDR10_MASTERING_PRIMARIES_X_LOW || + p_hdr10_mastering->display_primaries_x[i] > + 
V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH || + p_hdr10_mastering->display_primaries_y[i] < + V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW || + p_hdr10_mastering->display_primaries_y[i] > + V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH) + return -EINVAL; + } + + if (p_hdr10_mastering->white_point_x < + V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW || + p_hdr10_mastering->white_point_x > + V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH || + p_hdr10_mastering->white_point_y < + V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW || + p_hdr10_mastering->white_point_y > + V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH) + return -EINVAL; + + if (p_hdr10_mastering->max_display_mastering_luminance < + V4L2_HDR10_MASTERING_MAX_LUMA_LOW || + p_hdr10_mastering->max_display_mastering_luminance > + V4L2_HDR10_MASTERING_MAX_LUMA_HIGH || + p_hdr10_mastering->min_display_mastering_luminance < + V4L2_HDR10_MASTERING_MIN_LUMA_LOW || + p_hdr10_mastering->min_display_mastering_luminance > + V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) + return -EINVAL; + + /* The following restriction comes from ITU-T Rec. 
H.265 spec */ + if (p_hdr10_mastering->max_display_mastering_luminance == + V4L2_HDR10_MASTERING_MAX_LUMA_LOW && + p_hdr10_mastering->min_display_mastering_luminance == + V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) + return -EINVAL; + + break; + + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + break; + + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + return validate_vp9_compressed_hdr(p); + + case V4L2_CTRL_TYPE_VP9_FRAME: + return validate_vp9_frame(p); + case V4L2_CTRL_TYPE_AV1_FRAME: + return validate_av1_frame(p); + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + return validate_av1_sequence(p); + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + return validate_av1_film_grain(p); + + case V4L2_CTRL_TYPE_AREA: + area = p; + if (!area->width || !area->height) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_RECT: + rect = p; + if (!rect->width || !rect->height) + return -EINVAL; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + size_t len; + u64 offset; + s64 val; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); + case V4L2_CTRL_TYPE_INTEGER64: + /* + * We can't use the ROUND_TO_RANGE define here due to + * the u64 divide that needs special care. 
+ */ + val = ptr.p_s64[idx]; + if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2)) + val = ctrl->maximum; + else + val += (s64)(ctrl->step / 2); + val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum); + offset = val - ctrl->minimum; + do_div(offset, ctrl->step); + ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step; + return 0; + case V4L2_CTRL_TYPE_U8: + return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl); + case V4L2_CTRL_TYPE_U16: + return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl); + case V4L2_CTRL_TYPE_U32: + return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl); + + case V4L2_CTRL_TYPE_BOOLEAN: + ptr.p_s32[idx] = !!ptr.p_s32[idx]; + return 0; + + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum) + return -ERANGE; + if (ptr.p_s32[idx] < BITS_PER_LONG_LONG && + (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx]))) + return -EINVAL; + if (ctrl->type == V4L2_CTRL_TYPE_MENU && + ctrl->qmenu[ptr.p_s32[idx]][0] == '\0') + return -EINVAL; + return 0; + + case V4L2_CTRL_TYPE_BITMASK: + ptr.p_s32[idx] &= ctrl->maximum; + return 0; + + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + ptr.p_s32[idx] = 0; + return 0; + + case V4L2_CTRL_TYPE_STRING: + idx *= ctrl->elem_size; + len = strlen(ptr.p_char + idx); + if (len < ctrl->minimum) + return -ERANGE; + if ((len - (u32)ctrl->minimum) % (u32)ctrl->step) + return -ERANGE; + return 0; + + default: + return std_validate_compound(ctrl, idx, ptr); + } +} + +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr) +{ + unsigned int i; + int ret = 0; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_U8: + if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + case V4L2_CTRL_TYPE_U16: + if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + case V4L2_CTRL_TYPE_U32: + if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 
&& ctrl->step == 1) + return 0; + break; + + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32)); + return 0; + } + + for (i = 0; !ret && i < ctrl->new_elems; i++) + ret = std_validate_elem(ctrl, i, ptr); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_validate); + +static const struct v4l2_ctrl_type_ops std_type_ops = { + .equal = v4l2_ctrl_type_op_equal, + .init = v4l2_ctrl_type_op_init, + .minimum = v4l2_ctrl_type_op_minimum, + .maximum = v4l2_ctrl_type_op_maximum, + .log = v4l2_ctrl_type_op_log, + .validate = v4l2_ctrl_type_op_validate, +}; + +void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) +{ + if (!ctrl) + return; + if (!notify) { + ctrl->call_notify = 0; + return; + } + if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify)) + return; + ctrl->handler->notify = notify; + ctrl->handler->notify_priv = priv; + ctrl->call_notify = 1; +} +EXPORT_SYMBOL(v4l2_ctrl_notify); + +/* Copy the one value to another. */ +static void ptr_to_ptr(struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to, + unsigned int elems) +{ + if (ctrl == NULL) + return; + memcpy(to.p, from.p_const, elems * ctrl->elem_size); +} + +/* Copy the new value to the current value. */ +void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) +{ + bool changed; + + if (ctrl == NULL) + return; + + /* has_changed is set by cluster_changed */ + changed = ctrl->has_changed; + if (changed) { + if (ctrl->is_dyn_array) + ctrl->elems = ctrl->new_elems; + ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems); + } + + if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) { + /* Note: CH_FLAGS is only set for auto clusters. 
*/ + ctrl->flags &= + ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE); + if (!is_cur_manual(ctrl->cluster[0])) { + ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + if (ctrl->cluster[0]->has_volatiles) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + } + fh = NULL; + } + if (changed || ch_flags) { + /* If a control was changed that was not one of the controls + modified by the application, then send the event to all. */ + if (!ctrl->is_new) + fh = NULL; + send_event(fh, ctrl, + (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags); + if (ctrl->call_notify && changed && ctrl->handler->notify) + ctrl->handler->notify(ctrl, ctrl->handler->notify_priv); + } +} + +/* Copy the current value to the new value */ +void cur_to_new(struct v4l2_ctrl *ctrl) +{ + if (ctrl == NULL) + return; + if (ctrl->is_dyn_array) + ctrl->new_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); +} + +static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems) +{ + void *tmp; + + if (elems == ref->p_req_array_alloc_elems) + return true; + if (ref->ctrl->is_dyn_array && + elems < ref->p_req_array_alloc_elems) + return true; + + tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL); + + if (!tmp) { + ref->p_req_array_enomem = true; + return false; + } + ref->p_req_array_enomem = false; + kvfree(ref->p_req.p); + ref->p_req.p = tmp; + ref->p_req_array_alloc_elems = elems; + return true; +} + +/* Copy the new value to the request value */ +void new_to_req(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return; + + ctrl = ref->ctrl; + if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems)) + return; + + ref->p_req_elems = ctrl->new_elems; + ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems); + ref->p_req_valid = true; +} + +/* Copy the current value to the request value */ +void cur_to_req(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return; + + ctrl = ref->ctrl; + if (ctrl->is_array && 
!req_alloc_array(ref, ctrl->elems)) + return; + + ref->p_req_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems); + ref->p_req_valid = true; +} + +/* Copy the request value to the new value */ +int req_to_new(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return 0; + + ctrl = ref->ctrl; + + /* + * This control was never set in the request, so just use the current + * value. + */ + if (!ref->p_req_valid) { + if (ctrl->is_dyn_array) + ctrl->new_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); + return 0; + } + + /* Not an array, so just copy the request value */ + if (!ctrl->is_array) { + ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); + return 0; + } + + /* Sanity check, should never happen */ + if (WARN_ON(!ref->p_req_array_alloc_elems)) + return -ENOMEM; + + if (!ctrl->is_dyn_array && + ref->p_req_elems != ctrl->p_array_alloc_elems) + return -ENOMEM; + + /* + * Check if the number of elements in the request is more than the + * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array. + * Note that p_array is allocated with twice the number of elements + * in the dynamic array since it has to store both the current and + * new value of such a control. 
+ */ + if (ref->p_req_elems > ctrl->p_array_alloc_elems) { + unsigned int sz = ref->p_req_elems * ctrl->elem_size; + void *old = ctrl->p_array; + void *tmp = kvzalloc(2 * sz, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); + memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); + ctrl->p_new.p = tmp; + ctrl->p_cur.p = tmp + sz; + ctrl->p_array = tmp; + ctrl->p_array_alloc_elems = ref->p_req_elems; + kvfree(old); + } + + ctrl->new_elems = ref->p_req_elems; + ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); + return 0; +} + +/* Control range checking */ +int check_range(enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def) +{ + switch (type) { + case V4L2_CTRL_TYPE_BOOLEAN: + if (step != 1 || max > 1 || min < 0) + return -ERANGE; + fallthrough; + case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER64: + if (step == 0 || min > max || def < min || def > max) + return -ERANGE; + return 0; + case V4L2_CTRL_TYPE_BITMASK: + if (step || min || !max || (def & ~max)) + return -ERANGE; + return 0; + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (min > max || def < min || def > max || + min < 0 || (step && max >= BITS_PER_LONG_LONG)) + return -ERANGE; + /* Note: step == menu_skip_mask for menu controls. + So here we check if the default value is masked out. 
*/ + if (def < BITS_PER_LONG_LONG && (step & BIT_ULL(def))) + return -EINVAL; + return 0; + case V4L2_CTRL_TYPE_STRING: + if (min > max || min < 0 || step < 1 || def) + return -ERANGE; + return 0; + default: + return 0; + } +} + +/* Set the handler's error code if it wasn't set earlier already */ +static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err) +{ + if (hdl->error == 0) + hdl->error = err; + return err; +} + +/* Initialize the handler */ +int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, + unsigned nr_of_controls_hint, + struct lock_class_key *key, const char *name) +{ + mutex_init(&hdl->_lock); + hdl->lock = &hdl->_lock; + lockdep_set_class_and_name(hdl->lock, key, name); + INIT_LIST_HEAD(&hdl->ctrls); + INIT_LIST_HEAD(&hdl->ctrl_refs); + hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; + hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]), + GFP_KERNEL); + hdl->error = hdl->buckets ? 0 : -ENOMEM; + v4l2_ctrl_handler_init_request(hdl); + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_init_class); + +/* Free all controls and control refs */ +int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl_ref *ref, *next_ref; + struct v4l2_ctrl *ctrl, *next_ctrl; + struct v4l2_subscribed_event *sev, *next_sev; + + if (!hdl) + return 0; + + if (!hdl->buckets) + return hdl->error; + + v4l2_ctrl_handler_free_request(hdl); + + mutex_lock(hdl->lock); + /* Free all nodes */ + list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { + list_del(&ref->node); + if (ref->p_req_array_alloc_elems) + kvfree(ref->p_req.p); + kfree(ref); + } + /* Free all controls owned by the handler */ + list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) { + list_del(&ctrl->node); + list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node) + list_del(&sev->node); + kvfree(ctrl->p_array); + kvfree(ctrl); + } + kvfree(hdl->buckets); + hdl->buckets = NULL; + hdl->cached = NULL; + 
mutex_unlock(hdl->lock); + mutex_destroy(&hdl->_lock); + + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_free); + +/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer + be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing + with applications that do not use the NEXT_CTRL flag. + + We just find the n-th private user control. It's O(N), but that should not + be an issue in this particular case. */ +static struct v4l2_ctrl_ref *find_private_ref( + struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref; + + id -= V4L2_CID_PRIVATE_BASE; + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + /* Search for private user controls that are compatible with + VIDIOC_G/S_CTRL. */ + if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && + V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) { + if (!ref->ctrl->is_int) + continue; + if (id == 0) + return ref; + id--; + } + } + return NULL; +} + +/* Find a control with the given ID. */ +struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref; + int bucket; + + id &= V4L2_CTRL_ID_MASK; + + /* Old-style private controls need special handling */ + if (id >= V4L2_CID_PRIVATE_BASE) + return find_private_ref(hdl, id); + bucket = id % hdl->nr_of_buckets; + + /* Simple optimization: cache the last control found */ + if (hdl->cached && hdl->cached->ctrl->id == id) + return hdl->cached; + + /* Not in cache, search the hash */ + ref = hdl->buckets ? hdl->buckets[bucket] : NULL; + while (ref && ref->ctrl->id != id) + ref = ref->next; + + if (ref) + hdl->cached = ref; /* cache it! */ + return ref; +} + +/* Find a control with the given ID. Take the handler's lock first. */ +struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = NULL; + + if (hdl) { + mutex_lock(hdl->lock); + ref = find_ref(hdl, id); + mutex_unlock(hdl->lock); + } + return ref; +} + +/* Find a control with the given ID. 
*/ +struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return ref ? ref->ctrl : NULL; +} +EXPORT_SYMBOL(v4l2_ctrl_find); + +/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */ +int handler_new_ref(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req) +{ + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl_ref *new_ref; + u32 id = ctrl->id; + u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; + int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ + unsigned int size_extra_req = 0; + + if (ctrl_ref) + *ctrl_ref = NULL; + + /* + * Automatically add the control class if it is not yet present and + * the new control is not a compound control. + */ + if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES && + id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL) + if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0)) + return hdl->error; + + if (hdl->error) + return hdl->error; + + if (allocate_req && !ctrl->is_array) + size_extra_req = ctrl->elems * ctrl->elem_size; + new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL); + if (!new_ref) + return handler_set_err(hdl, -ENOMEM); + new_ref->ctrl = ctrl; + new_ref->from_other_dev = from_other_dev; + if (size_extra_req) + new_ref->p_req.p = &new_ref[1]; + + INIT_LIST_HEAD(&new_ref->node); + + mutex_lock(hdl->lock); + + /* Add immediately at the end of the list if the list is empty, or if + the last element in the list has a lower ID. + This ensures that when elements are added in ascending order the + insertion is an O(1) operation. 
*/ + if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) { + list_add_tail(&new_ref->node, &hdl->ctrl_refs); + goto insert_in_hash; + } + + /* Find insert position in sorted list */ + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + if (ref->ctrl->id < id) + continue; + /* Don't add duplicates */ + if (ref->ctrl->id == id) { + kfree(new_ref); + goto unlock; + } + list_add(&new_ref->node, ref->node.prev); + break; + } + +insert_in_hash: + /* Insert the control node in the hash */ + new_ref->next = hdl->buckets[bucket]; + hdl->buckets[bucket] = new_ref; + if (ctrl_ref) + *ctrl_ref = new_ref; + if (ctrl->handler == hdl) { + /* By default each control starts in a cluster of its own. + * new_ref->ctrl is basically a cluster array with one + * element, so that's perfect to use as the cluster pointer. + * But only do this for the handler that owns the control. + */ + ctrl->cluster = &new_ref->ctrl; + ctrl->ncontrols = 1; + } + +unlock: + mutex_unlock(hdl->lock); + return 0; +} + +/* Add a new control */ +static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + const struct v4l2_ctrl_type_ops *type_ops, + u32 id, const char *name, enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def, + const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size, + u32 flags, const char * const *qmenu, + const s64 *qmenu_int, + const union v4l2_ctrl_ptr p_def, + const union v4l2_ctrl_ptr p_min, + const union v4l2_ctrl_ptr p_max, + void *priv) +{ + struct v4l2_ctrl *ctrl; + unsigned sz_extra; + unsigned nr_of_dims = 0; + unsigned elems = 1; + bool is_array; + unsigned tot_ctrl_size; + void *data; + int err; + + if (hdl->error) + return NULL; + + while (dims && dims[nr_of_dims]) { + elems *= dims[nr_of_dims]; + nr_of_dims++; + if (nr_of_dims == V4L2_CTRL_MAX_DIMS) + break; + } + is_array = nr_of_dims > 0; + + /* Prefill elem_size for all types handled by std_type_ops */ + switch ((u32)type) { + case 
V4L2_CTRL_TYPE_INTEGER64: + elem_size = sizeof(s64); + break; + case V4L2_CTRL_TYPE_STRING: + elem_size = max + 1; + break; + case V4L2_CTRL_TYPE_U8: + elem_size = sizeof(u8); + break; + case V4L2_CTRL_TYPE_U16: + elem_size = sizeof(u16); + break; + case V4L2_CTRL_TYPE_U32: + elem_size = sizeof(u32); + break; + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence); + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation); + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_fwht_params); + break; + case V4L2_CTRL_TYPE_H264_SPS: + elem_size = sizeof(struct v4l2_ctrl_h264_sps); + break; + case V4L2_CTRL_TYPE_H264_PPS: + elem_size = sizeof(struct v4l2_ctrl_h264_pps); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix); + break; + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_h264_slice_params); + break; + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_h264_decode_params); + break; + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + elem_size = sizeof(struct v4l2_ctrl_vp8_frame); + break; + case V4L2_CTRL_TYPE_HEVC_SPS: + elem_size = sizeof(struct v4l2_ctrl_hevc_sps); + break; + case V4L2_CTRL_TYPE_HEVC_PPS: + elem_size = sizeof(struct v4l2_ctrl_hevc_pps); + break; + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params); + break; + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix); + break; + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params); + break; + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + elem_size = 
sizeof(struct v4l2_ctrl_hdr10_cll_info); + break; + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display); + break; + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr); + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + elem_size = sizeof(struct v4l2_ctrl_vp9_frame); + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + elem_size = sizeof(struct v4l2_ctrl_av1_sequence); + break; + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry); + break; + case V4L2_CTRL_TYPE_AV1_FRAME: + elem_size = sizeof(struct v4l2_ctrl_av1_frame); + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + elem_size = sizeof(struct v4l2_ctrl_av1_film_grain); + break; + case V4L2_CTRL_TYPE_AREA: + elem_size = sizeof(struct v4l2_area); + break; + case V4L2_CTRL_TYPE_RECT: + elem_size = sizeof(struct v4l2_rect); + break; + default: + if (type < V4L2_CTRL_COMPOUND_TYPES) + elem_size = sizeof(s32); + break; + } + + if (type < V4L2_CTRL_COMPOUND_TYPES && + type != V4L2_CTRL_TYPE_BUTTON && + type != V4L2_CTRL_TYPE_CTRL_CLASS && + type != V4L2_CTRL_TYPE_STRING) + flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX; + + /* Sanity checks */ + if (id == 0 || name == NULL || !elem_size || + id >= V4L2_CID_PRIVATE_BASE || + (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) || + (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) { + handler_set_err(hdl, -ERANGE); + return NULL; + } + + err = check_range(type, min, max, step, def); + if (err) { + handler_set_err(hdl, err); + return NULL; + } + if (is_array && + (type == V4L2_CTRL_TYPE_BUTTON || + type == V4L2_CTRL_TYPE_CTRL_CLASS)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) { + /* + * For now only support this for one-dimensional arrays only. + * + * This can be relaxed in the future, but this will + * require more effort. 
+ */ + if (nr_of_dims != 1) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + /* Start with just 1 element */ + elems = 1; + } + + tot_ctrl_size = elem_size * elems; + sz_extra = 0; + if (type == V4L2_CTRL_TYPE_BUTTON) + flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + else if (type == V4L2_CTRL_TYPE_CTRL_CLASS) + flags |= V4L2_CTRL_FLAG_READ_ONLY; + else if (!is_array && + (type == V4L2_CTRL_TYPE_INTEGER64 || + type == V4L2_CTRL_TYPE_STRING || + type >= V4L2_CTRL_COMPOUND_TYPES)) + sz_extra += 2 * tot_ctrl_size; + + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) + sz_extra += elem_size; + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_min.p_const) + sz_extra += elem_size; + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_max.p_const) + sz_extra += elem_size; + + ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL); + if (ctrl == NULL) { + handler_set_err(hdl, -ENOMEM); + return NULL; + } + + INIT_LIST_HEAD(&ctrl->node); + INIT_LIST_HEAD(&ctrl->ev_subs); + ctrl->handler = hdl; + ctrl->ops = ops; + ctrl->type_ops = type_ops ? 
type_ops : &std_type_ops; + ctrl->id = id; + ctrl->name = name; + ctrl->type = type; + ctrl->flags = flags; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING; + ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string; + ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64; + ctrl->is_array = is_array; + ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY); + ctrl->elems = elems; + ctrl->new_elems = elems; + ctrl->nr_of_dims = nr_of_dims; + if (nr_of_dims) + memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0])); + ctrl->elem_size = elem_size; + if (type == V4L2_CTRL_TYPE_MENU) + ctrl->qmenu = qmenu; + else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) + ctrl->qmenu_int = qmenu_int; + ctrl->priv = priv; + ctrl->cur.val = ctrl->val = def; + data = &ctrl[1]; + + if (ctrl->is_array) { + ctrl->p_array_alloc_elems = elems; + ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL); + if (!ctrl->p_array) { + kvfree(ctrl); + return NULL; + } + data = ctrl->p_array; + } + + if (!ctrl->is_int) { + ctrl->p_new.p = data; + ctrl->p_cur.p = data + tot_ctrl_size; + } else { + ctrl->p_new.p = &ctrl->val; + ctrl->p_cur.p = &ctrl->cur.val; + } + + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) { + if (ctrl->is_array) + ctrl->p_def.p = &ctrl[1]; + else + ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size; + memcpy(ctrl->p_def.p, p_def.p_const, elem_size); + } + + if (flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) { + void *ptr = ctrl->p_def.p; + + if (p_min.p_const) { + ptr += elem_size; + ctrl->p_min.p = ptr; + memcpy(ctrl->p_min.p, p_min.p_const, elem_size); + } + + if (p_max.p_const) { + ptr += elem_size; + ctrl->p_max.p = ptr; + memcpy(ctrl->p_max.p, p_max.p_const, elem_size); + } + } + + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); + cur_to_new(ctrl); + + if (handler_new_ref(hdl, ctrl, NULL, false, false)) { + kvfree(ctrl->p_array); 
+ kvfree(ctrl); + return NULL; + } + mutex_lock(hdl->lock); + list_add_tail(&ctrl->node, &hdl->ctrls); + mutex_unlock(hdl->lock); + return ctrl; +} + +struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_config *cfg, void *priv) +{ + bool is_menu; + struct v4l2_ctrl *ctrl; + const char *name = cfg->name; + const char * const *qmenu = cfg->qmenu; + const s64 *qmenu_int = cfg->qmenu_int; + enum v4l2_ctrl_type type = cfg->type; + u32 flags = cfg->flags; + s64 min = cfg->min; + s64 max = cfg->max; + u64 step = cfg->step; + s64 def = cfg->def; + + if (name == NULL) + v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step, + &def, &flags); + + is_menu = (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU); + if (is_menu) + WARN_ON(step); + else + WARN_ON(cfg->menu_skip_mask); + if (type == V4L2_CTRL_TYPE_MENU && !qmenu) { + qmenu = v4l2_ctrl_get_menu(cfg->id); + } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + + ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name, + type, min, max, + is_menu ? 
cfg->menu_skip_mask : step, def, + cfg->dims, cfg->elem_size, + flags, qmenu, qmenu_int, cfg->p_def, cfg->p_min, + cfg->p_max, priv); + if (ctrl) + ctrl->is_private = cfg->is_private; + return ctrl; +} +EXPORT_SYMBOL(v4l2_ctrl_new_custom); + +/* Helper function for standard non-menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, s64 min, s64 max, u64 step, s64 def) +{ + const char *name; + enum v4l2_ctrl_type type; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU || + type >= V4L2_CTRL_COMPOUND_TYPES) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + min, max, step, def, NULL, 0, + flags, NULL, NULL, ptr_null, ptr_null, + ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std); + +/* Helper function for standard menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 _max, u64 mask, u8 _def) +{ + const char * const *qmenu = NULL; + const s64 *qmenu_int = NULL; + unsigned int qmenu_int_len = 0; + const char *name; + enum v4l2_ctrl_type type; + s64 min; + s64 max = _max; + s64 def = _def; + u64 step; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + + if (type == V4L2_CTRL_TYPE_MENU) + qmenu = v4l2_ctrl_get_menu(id); + else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) + qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len); + + if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, mask, def, NULL, 0, + flags, qmenu, qmenu_int, ptr_null, ptr_null, + ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_menu); + +/* Helper function for standard menu controls with driver defined menu */ 
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, u32 id, u8 _max, + u64 mask, u8 _def, const char * const *qmenu) +{ + enum v4l2_ctrl_type type; + const char *name; + u32 flags; + u64 step; + s64 min; + s64 max = _max; + s64 def = _def; + + /* v4l2_ctrl_new_std_menu_items() should only be called for + * standard controls without a standard menu. + */ + if (v4l2_ctrl_get_menu(id)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, mask, def, NULL, 0, + flags, qmenu, NULL, ptr_null, ptr_null, + ptr_null, NULL); + +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items); + +/* Helper function for standard compound controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, u32 id, + const union v4l2_ctrl_ptr p_def, + const union v4l2_ctrl_ptr p_min, + const union v4l2_ctrl_ptr p_max) +{ + const char *name; + enum v4l2_ctrl_type type; + u32 flags; + s64 min, max, step, def; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type < V4L2_CTRL_COMPOUND_TYPES) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + min, max, step, def, NULL, 0, + flags, NULL, NULL, p_def, p_min, p_max, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_compound); + +/* Helper function for standard integer menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 _max, u8 _def, const s64 *qmenu_int) +{ + const char *name; + enum v4l2_ctrl_type type; + s64 min; + u64 step; + s64 max = _max; + s64 def = _def; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, 
&flags); + if (type != V4L2_CTRL_TYPE_INTEGER_MENU) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, 0, def, NULL, 0, + flags, NULL, qmenu_int, ptr_null, ptr_null, + ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_int_menu); + +/* Add the controls from another handler to our own. */ +int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *add, + bool (*filter)(const struct v4l2_ctrl *ctrl), + bool from_other_dev) +{ + struct v4l2_ctrl_ref *ref; + int ret = 0; + + /* Do nothing if either handler is NULL or if they are the same */ + if (!hdl || !add || hdl == add) + return 0; + if (hdl->error) + return hdl->error; + mutex_lock(add->lock); + list_for_each_entry(ref, &add->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + + /* Skip handler-private controls. */ + if (ctrl->is_private) + continue; + /* And control classes */ + if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + continue; + /* Filter any unwanted controls */ + if (filter && !filter(ctrl)) + continue; + ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false); + if (ret) + break; + } + mutex_unlock(add->lock); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_add_handler); + +bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) +{ + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) + return true; + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) + return true; + switch (ctrl->id) { + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_AUDIO_LOUDNESS: + return true; + default: + break; + } + return false; +} +EXPORT_SYMBOL(v4l2_ctrl_radio_filter); + +/* Cluster controls */ +void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls) +{ + bool has_volatiles = false; + int i; + + /* The first control is the master control and it must not be NULL */ + if 
(WARN_ON(ncontrols == 0 || controls[0] == NULL)) + return; + + for (i = 0; i < ncontrols; i++) { + if (controls[i]) { + controls[i]->cluster = controls; + controls[i]->ncontrols = ncontrols; + if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE) + has_volatiles = true; + } + } + controls[0]->has_volatiles = has_volatiles; +} +EXPORT_SYMBOL(v4l2_ctrl_cluster); + +void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls, + u8 manual_val, bool set_volatile) +{ + struct v4l2_ctrl *master = controls[0]; + u32 flag = 0; + int i; + + v4l2_ctrl_cluster(ncontrols, controls); + WARN_ON(ncontrols <= 1); + WARN_ON(manual_val < master->minimum || manual_val > master->maximum); + WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl)); + master->is_auto = true; + master->has_volatiles = set_volatile; + master->manual_mode_value = manual_val; + master->flags |= V4L2_CTRL_FLAG_UPDATE; + + if (!is_cur_manual(master)) + flag = V4L2_CTRL_FLAG_INACTIVE | + (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0); + + for (i = 1; i < ncontrols; i++) + if (controls[i]) + controls[i]->flags |= flag; +} +EXPORT_SYMBOL(v4l2_ctrl_auto_cluster); + +/* + * Obtain the current volatile values of an autocluster and mark them + * as new. + */ +void update_from_auto_cluster(struct v4l2_ctrl *master) +{ + int i; + + for (i = 1; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + if (!call_op(master, g_volatile_ctrl)) + for (i = 1; i < master->ncontrols; i++) + if (master->cluster[i]) + master->cluster[i]->is_new = 1; +} + +/* + * Return non-zero if one or more of the controls in the cluster has a new + * value that differs from the current value. 
+ */ +static int cluster_changed(struct v4l2_ctrl *master) +{ + bool changed = false; + int i; + + for (i = 0; i < master->ncontrols; i++) { + struct v4l2_ctrl *ctrl = master->cluster[i]; + bool ctrl_changed = false; + + if (!ctrl) + continue; + + if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) { + changed = true; + ctrl_changed = true; + } + + /* + * Set has_changed to false to avoid generating + * the event V4L2_EVENT_CTRL_CH_VALUE + */ + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + ctrl->has_changed = false; + continue; + } + + if (ctrl->elems != ctrl->new_elems) + ctrl_changed = true; + if (!ctrl_changed) + ctrl_changed = !ctrl->type_ops->equal(ctrl, + ctrl->p_cur, ctrl->p_new); + ctrl->has_changed = ctrl_changed; + changed |= ctrl->has_changed; + } + return changed; +} + +/* + * Core function that calls try/s_ctrl and ensures that the new value is + * copied to the current value on a set. + * Must be called with ctrl->handler->lock held. + */ +int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, + bool set, u32 ch_flags) +{ + bool update_flag; + int ret; + int i; + + /* + * Go through the cluster and either validate the new value or + * (if no new value was set), copy the current value to the new + * value, ensuring a consistent view for the control ops when + * called. + */ + for (i = 0; i < master->ncontrols; i++) { + struct v4l2_ctrl *ctrl = master->cluster[i]; + + if (!ctrl) + continue; + + if (!ctrl->is_new) { + cur_to_new(ctrl); + continue; + } + /* + * Check again: it may have changed since the + * previous check in try_or_set_ext_ctrls(). + */ + if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) + return -EBUSY; + } + + ret = call_op(master, try_ctrl); + + /* Don't set if there is no change */ + if (ret || !set || !cluster_changed(master)) + return ret; + ret = call_op(master, s_ctrl); + if (ret) + return ret; + + /* If OK, then make the new values permanent. 
*/ + update_flag = is_cur_manual(master) != is_new_manual(master); + + for (i = 0; i < master->ncontrols; i++) { + /* + * If we switch from auto to manual mode, and this cluster + * contains volatile controls, then all non-master controls + * have to be marked as changed. The 'new' value contains + * the volatile value (obtained by update_from_auto_cluster), + * which now has to become the current value. + */ + if (i && update_flag && is_new_manual(master) && + master->has_volatiles && master->cluster[i]) + master->cluster[i]->has_changed = true; + + new_to_cur(fh, master->cluster[i], ch_flags | + ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0)); + } + return 0; +} + +/* Activate/deactivate a control. */ +void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active) +{ + /* invert since the actual flag is called 'inactive' */ + bool inactive = !active; + bool old; + + if (ctrl == NULL) + return; + + if (inactive) + /* set V4L2_CTRL_FLAG_INACTIVE */ + old = test_and_set_bit(4, &ctrl->flags); + else + /* clear V4L2_CTRL_FLAG_INACTIVE */ + old = test_and_clear_bit(4, &ctrl->flags); + if (old != inactive) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); +} +EXPORT_SYMBOL(v4l2_ctrl_activate); + +void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) +{ + bool old; + + if (ctrl == NULL) + return; + + lockdep_assert_held(ctrl->handler->lock); + + if (grabbed) + /* set V4L2_CTRL_FLAG_GRABBED */ + old = test_and_set_bit(1, &ctrl->flags); + else + /* clear V4L2_CTRL_FLAG_GRABBED */ + old = test_and_clear_bit(1, &ctrl->flags); + if (old != grabbed) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); +} +EXPORT_SYMBOL(__v4l2_ctrl_grab); + +/* Call s_ctrl for all controls owned by the handler */ +int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl *ctrl; + int ret = 0; + + if (hdl == NULL) + return 0; + + lockdep_assert_held(hdl->lock); + + list_for_each_entry(ctrl, &hdl->ctrls, node) + ctrl->done = false; + + 
list_for_each_entry(ctrl, &hdl->ctrls, node) { + struct v4l2_ctrl *master = ctrl->cluster[0]; + int i; + + /* Skip if this control was already handled by a cluster. */ + /* Skip button controls and read-only controls. */ + if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + cur_to_new(master->cluster[i]); + master->cluster[i]->is_new = 1; + master->cluster[i]->done = true; + } + } + ret = call_op(master, s_ctrl); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup); + +int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) +{ + int ret; + + if (hdl == NULL) + return 0; + + mutex_lock(hdl->lock); + ret = __v4l2_ctrl_handler_setup(hdl); + mutex_unlock(hdl->lock); + + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_setup); + +/* Log the control name and value */ +static void log_ctrl(const struct v4l2_ctrl *ctrl, + const char *prefix, const char *colon) +{ + if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY)) + return; + if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + return; + + pr_info("%s%s%s: ", prefix, colon, ctrl->name); + + ctrl->type_ops->log(ctrl); + + if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE | + V4L2_CTRL_FLAG_GRABBED | + V4L2_CTRL_FLAG_VOLATILE)) { + if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) + pr_cont(" inactive"); + if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED) + pr_cont(" grabbed"); + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) + pr_cont(" volatile"); + } + pr_cont("\n"); +} + +/* Log all controls owned by the handler */ +void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, + const char *prefix) +{ + struct v4l2_ctrl *ctrl; + const char *colon = ""; + int len; + + if (!hdl) + return; + if (!prefix) + prefix = ""; + len = strlen(prefix); + if (len && prefix[len - 1] != ' ') + colon = ": "; + mutex_lock(hdl->lock); + list_for_each_entry(ctrl, 
&hdl->ctrls, node) + if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED)) + log_ctrl(ctrl, prefix, colon); + mutex_unlock(hdl->lock); +} +EXPORT_SYMBOL(v4l2_ctrl_handler_log_status); + +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p) +{ + if (hdl->error) + return hdl->error; + + if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) { + u32 orientation_ctrl; + + switch (p->orientation) { + case V4L2_FWNODE_ORIENTATION_FRONT: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT; + break; + case V4L2_FWNODE_ORIENTATION_BACK: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK; + break; + case V4L2_FWNODE_ORIENTATION_EXTERNAL: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL; + break; + default: + return -EINVAL; + } + if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops, + V4L2_CID_CAMERA_ORIENTATION, + V4L2_CAMERA_ORIENTATION_EXTERNAL, 0, + orientation_ctrl)) + return hdl->error; + } + + if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) { + if (!v4l2_ctrl_new_std(hdl, ctrl_ops, + V4L2_CID_CAMERA_SENSOR_ROTATION, + p->rotation, p->rotation, 1, + p->rotation)) + return hdl->error; + } + + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c new file mode 100644 index 0000000..1ea5201 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c @@ -0,0 +1,1685 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework control definitions. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#include +#include + +/* + * Returns NULL or a character pointer array containing the menu for + * the given control ID. The pointer array ends with a NULL pointer. + * An empty string signifies a menu entry that is invalid. This allows + * drivers to disable certain options if it is not supported. 
+ */ +const char * const *v4l2_ctrl_get_menu(u32 id) +{ + static const char * const mpeg_audio_sampling_freq[] = { + "44.1 kHz", + "48 kHz", + "32 kHz", + NULL + }; + static const char * const mpeg_audio_encoding[] = { + "MPEG-1/2 Layer I", + "MPEG-1/2 Layer II", + "MPEG-1/2 Layer III", + "MPEG-2/4 AAC", + "AC-3", + NULL + }; + static const char * const mpeg_audio_l1_bitrate[] = { + "32 kbps", + "64 kbps", + "96 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "288 kbps", + "320 kbps", + "352 kbps", + "384 kbps", + "416 kbps", + "448 kbps", + NULL + }; + static const char * const mpeg_audio_l2_bitrate[] = { + "32 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + "384 kbps", + NULL + }; + static const char * const mpeg_audio_l3_bitrate[] = { + "32 kbps", + "40 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + NULL + }; + static const char * const mpeg_audio_ac3_bitrate[] = { + "32 kbps", + "40 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + "384 kbps", + "448 kbps", + "512 kbps", + "576 kbps", + "640 kbps", + NULL + }; + static const char * const mpeg_audio_mode[] = { + "Stereo", + "Joint Stereo", + "Dual", + "Mono", + NULL + }; + static const char * const mpeg_audio_mode_extension[] = { + "Bound 4", + "Bound 8", + "Bound 12", + "Bound 16", + NULL + }; + static const char * const mpeg_audio_emphasis[] = { + "No Emphasis", + "50/15 us", + "CCITT J17", + NULL + }; + static const char * const mpeg_audio_crc[] = { + "No CRC", + "16-bit CRC", + NULL + }; + static const char * const mpeg_audio_dec_playback[] = { + "Auto", + "Stereo", + "Left", + "Right", + "Mono", + "Swapped 
Stereo", + NULL + }; + static const char * const mpeg_video_encoding[] = { + "MPEG-1", + "MPEG-2", + "MPEG-4 AVC", + NULL + }; + static const char * const mpeg_video_aspect[] = { + "1x1", + "4x3", + "16x9", + "2.21x1", + NULL + }; + static const char * const mpeg_video_bitrate_mode[] = { + "Variable Bitrate", + "Constant Bitrate", + "Constant Quality", + NULL + }; + static const char * const mpeg_stream_type[] = { + "MPEG-2 Program Stream", + "MPEG-2 Transport Stream", + "MPEG-1 System Stream", + "MPEG-2 DVD-compatible Stream", + "MPEG-1 VCD-compatible Stream", + "MPEG-2 SVCD-compatible Stream", + NULL + }; + static const char * const mpeg_stream_vbi_fmt[] = { + "No VBI", + "Private Packet, IVTV Format", + NULL + }; + static const char * const camera_power_line_frequency[] = { + "Disabled", + "50 Hz", + "60 Hz", + "Auto", + NULL + }; + static const char * const camera_exposure_auto[] = { + "Auto Mode", + "Manual Mode", + "Shutter Priority Mode", + "Aperture Priority Mode", + NULL + }; + static const char * const camera_exposure_metering[] = { + "Average", + "Center Weighted", + "Spot", + "Matrix", + NULL + }; + static const char * const camera_auto_focus_range[] = { + "Auto", + "Normal", + "Macro", + "Infinity", + NULL + }; + static const char * const colorfx[] = { + "None", + "Black & White", + "Sepia", + "Negative", + "Emboss", + "Sketch", + "Sky Blue", + "Grass Green", + "Skin Whiten", + "Vivid", + "Aqua", + "Art Freeze", + "Silhouette", + "Solarization", + "Antique", + "Set Cb/Cr", + NULL + }; + static const char * const auto_n_preset_white_balance[] = { + "Manual", + "Auto", + "Incandescent", + "Fluorescent", + "Fluorescent H", + "Horizon", + "Daylight", + "Flash", + "Cloudy", + "Shade", + NULL, + }; + static const char * const camera_iso_sensitivity_auto[] = { + "Manual", + "Auto", + NULL + }; + static const char * const scene_mode[] = { + "None", + "Backlight", + "Beach/Snow", + "Candle Light", + "Dusk/Dawn", + "Fall Colors", + "Fireworks", + "Landscape", + 
"Night", + "Party/Indoor", + "Portrait", + "Sports", + "Sunset", + "Text", + NULL + }; + static const char * const tune_emphasis[] = { + "None", + "50 Microseconds", + "75 Microseconds", + NULL, + }; + static const char * const header_mode[] = { + "Separate Buffer", + "Joined With 1st Frame", + NULL, + }; + static const char * const multi_slice[] = { + "Single", + "Max Macroblocks", + "Max Bytes", + NULL, + }; + static const char * const entropy_mode[] = { + "CAVLC", + "CABAC", + NULL, + }; + static const char * const mpeg_h264_level[] = { + "1", + "1b", + "1.1", + "1.2", + "1.3", + "2", + "2.1", + "2.2", + "3", + "3.1", + "3.2", + "4", + "4.1", + "4.2", + "5", + "5.1", + "5.2", + "6.0", + "6.1", + "6.2", + NULL, + }; + static const char * const h264_loop_filter[] = { + "Enabled", + "Disabled", + "Disabled at Slice Boundary", + NULL, + }; + static const char * const h264_profile[] = { + "Baseline", + "Constrained Baseline", + "Main", + "Extended", + "High", + "High 10", + "High 422", + "High 444 Predictive", + "High 10 Intra", + "High 422 Intra", + "High 444 Intra", + "CAVLC 444 Intra", + "Scalable Baseline", + "Scalable High", + "Scalable High Intra", + "Stereo High", + "Multiview High", + "Constrained High", + NULL, + }; + static const char * const vui_sar_idc[] = { + "Unspecified", + "1:1", + "12:11", + "10:11", + "16:11", + "40:33", + "24:11", + "20:11", + "32:11", + "80:33", + "18:11", + "15:11", + "64:33", + "160:99", + "4:3", + "3:2", + "2:1", + "Extended SAR", + NULL, + }; + static const char * const h264_fp_arrangement_type[] = { + "Checkerboard", + "Column", + "Row", + "Side by Side", + "Top Bottom", + "Temporal", + NULL, + }; + static const char * const h264_fmo_map_type[] = { + "Interleaved Slices", + "Scattered Slices", + "Foreground with Leftover", + "Box Out", + "Raster Scan", + "Wipe Scan", + "Explicit", + NULL, + }; + static const char * const h264_decode_mode[] = { + "Slice-Based", + "Frame-Based", + NULL, + }; + static const char * const 
h264_start_code[] = { + "No Start Code", + "Annex B Start Code", + NULL, + }; + static const char * const h264_hierarchical_coding_type[] = { + "Hier Coding B", + "Hier Coding P", + NULL, + }; + static const char * const mpeg_mpeg2_level[] = { + "Low", + "Main", + "High 1440", + "High", + NULL, + }; + static const char * const mpeg2_profile[] = { + "Simple", + "Main", + "SNR Scalable", + "Spatially Scalable", + "High", + NULL, + }; + static const char * const mpeg_mpeg4_level[] = { + "0", + "0b", + "1", + "2", + "3", + "3b", + "4", + "5", + NULL, + }; + static const char * const mpeg4_profile[] = { + "Simple", + "Advanced Simple", + "Core", + "Simple Scalable", + "Advanced Coding Efficiency", + NULL, + }; + + static const char * const vpx_golden_frame_sel[] = { + "Use Previous Frame", + "Use Previous Specific Frame", + NULL, + }; + static const char * const vp8_profile[] = { + "0", + "1", + "2", + "3", + NULL, + }; + static const char * const vp9_profile[] = { + "0", + "1", + "2", + "3", + NULL, + }; + static const char * const vp9_level[] = { + "1", + "1.1", + "2", + "2.1", + "3", + "3.1", + "4", + "4.1", + "5", + "5.1", + "5.2", + "6", + "6.1", + "6.2", + NULL, + }; + + static const char * const flash_led_mode[] = { + "Off", + "Flash", + "Torch", + NULL, + }; + static const char * const flash_strobe_source[] = { + "Software", + "External", + NULL, + }; + + static const char * const jpeg_chroma_subsampling[] = { + "4:4:4", + "4:2:2", + "4:2:0", + "4:1:1", + "4:1:0", + "Gray", + NULL, + }; + static const char * const dv_tx_mode[] = { + "DVI-D", + "HDMI", + NULL, + }; + static const char * const dv_rgb_range[] = { + "Automatic", + "RGB Limited Range (16-235)", + "RGB Full Range (0-255)", + NULL, + }; + static const char * const dv_it_content_type[] = { + "Graphics", + "Photo", + "Cinema", + "Game", + "No IT Content", + NULL, + }; + static const char * const detect_md_mode[] = { + "Disabled", + "Global", + "Threshold Grid", + "Region Grid", + NULL, + }; + + static 
const char * const av1_profile[] = { + "Main", + "High", + "Professional", + NULL, + }; + static const char * const av1_level[] = { + "2.0", + "2.1", + "2.2", + "2.3", + "3.0", + "3.1", + "3.2", + "3.3", + "4.0", + "4.1", + "4.2", + "4.3", + "5.0", + "5.1", + "5.2", + "5.3", + "6.0", + "6.1", + "6.2", + "6.3", + "7.0", + "7.1", + "7.2", + "7.3", + NULL, + }; + + static const char * const hevc_profile[] = { + "Main", + "Main Still Picture", + "Main 10", + NULL, + }; + static const char * const hevc_level[] = { + "1", + "2", + "2.1", + "3", + "3.1", + "4", + "4.1", + "5", + "5.1", + "5.2", + "6", + "6.1", + "6.2", + NULL, + }; + static const char * const hevc_hierarchial_coding_type[] = { + "B", + "P", + NULL, + }; + static const char * const hevc_refresh_type[] = { + "None", + "CRA", + "IDR", + NULL, + }; + static const char * const hevc_size_of_length_field[] = { + "0", + "1", + "2", + "4", + NULL, + }; + static const char * const hevc_tier[] = { + "Main", + "High", + NULL, + }; + static const char * const hevc_loop_filter_mode[] = { + "Disabled", + "Enabled", + "Disabled at slice boundary", + "NULL", + }; + static const char * const hevc_decode_mode[] = { + "Slice-Based", + "Frame-Based", + NULL, + }; + static const char * const hevc_start_code[] = { + "No Start Code", + "Annex B Start Code", + NULL, + }; + static const char * const camera_orientation[] = { + "Front", + "Back", + "External", + NULL, + }; + static const char * const mpeg_video_frame_skip[] = { + "Disabled", + "Level Limit", + "VBV/CPB Limit", + NULL, + }; + static const char * const intra_refresh_period_type[] = { + "Random", + "Cyclic", + NULL, + }; + + switch (id) { + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + return mpeg_audio_sampling_freq; + case V4L2_CID_MPEG_AUDIO_ENCODING: + return mpeg_audio_encoding; + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + return mpeg_audio_l1_bitrate; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + return mpeg_audio_l2_bitrate; + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + return 
mpeg_audio_l3_bitrate; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + return mpeg_audio_ac3_bitrate; + case V4L2_CID_MPEG_AUDIO_MODE: + return mpeg_audio_mode; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + return mpeg_audio_mode_extension; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + return mpeg_audio_emphasis; + case V4L2_CID_MPEG_AUDIO_CRC: + return mpeg_audio_crc; + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: + case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: + return mpeg_audio_dec_playback; + case V4L2_CID_MPEG_VIDEO_ENCODING: + return mpeg_video_encoding; + case V4L2_CID_MPEG_VIDEO_ASPECT: + return mpeg_video_aspect; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + return mpeg_video_bitrate_mode; + case V4L2_CID_MPEG_STREAM_TYPE: + return mpeg_stream_type; + case V4L2_CID_MPEG_STREAM_VBI_FMT: + return mpeg_stream_vbi_fmt; + case V4L2_CID_POWER_LINE_FREQUENCY: + return camera_power_line_frequency; + case V4L2_CID_EXPOSURE_AUTO: + return camera_exposure_auto; + case V4L2_CID_EXPOSURE_METERING: + return camera_exposure_metering; + case V4L2_CID_AUTO_FOCUS_RANGE: + return camera_auto_focus_range; + case V4L2_CID_COLORFX: + return colorfx; + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: + return auto_n_preset_white_balance; + case V4L2_CID_ISO_SENSITIVITY_AUTO: + return camera_iso_sensitivity_auto; + case V4L2_CID_SCENE_MODE: + return scene_mode; + case V4L2_CID_TUNE_PREEMPHASIS: + return tune_emphasis; + case V4L2_CID_TUNE_DEEMPHASIS: + return tune_emphasis; + case V4L2_CID_FLASH_LED_MODE: + return flash_led_mode; + case V4L2_CID_FLASH_STROBE_SOURCE: + return flash_strobe_source; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + return header_mode; + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: + return mpeg_video_frame_skip; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + return multi_slice; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + return entropy_mode; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + return mpeg_h264_level; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + return 
h264_loop_filter; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + return h264_profile; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: + return vui_sar_idc; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: + return h264_fp_arrangement_type; + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: + return h264_fmo_map_type; + case V4L2_CID_STATELESS_H264_DECODE_MODE: + return h264_decode_mode; + case V4L2_CID_STATELESS_H264_START_CODE: + return h264_start_code; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: + return h264_hierarchical_coding_type; + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: + return mpeg_mpeg2_level; + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: + return mpeg2_profile; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + return mpeg_mpeg4_level; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + return mpeg4_profile; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: + return vpx_golden_frame_sel; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + return vp8_profile; + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + return vp9_profile; + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + return vp9_level; + case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: + return jpeg_chroma_subsampling; + case V4L2_CID_DV_TX_MODE: + return dv_tx_mode; + case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_RX_RGB_RANGE: + return dv_rgb_range; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + return dv_it_content_type; + case V4L2_CID_DETECT_MD_MODE: + return detect_md_mode; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + return hevc_profile; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + return hevc_level; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: + return hevc_hierarchial_coding_type; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: + return hevc_refresh_type; + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: + return hevc_size_of_length_field; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + return hevc_tier; + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: + return hevc_loop_filter_mode; + case 
V4L2_CID_MPEG_VIDEO_AV1_PROFILE: + return av1_profile; + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: + return av1_level; + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: + return hevc_decode_mode; + case V4L2_CID_STATELESS_HEVC_START_CODE: + return hevc_start_code; + case V4L2_CID_CAMERA_ORIENTATION: + return camera_orientation; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + return intra_refresh_period_type; + default: + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_menu); + +#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); }) +/* + * Returns NULL or an s64 type array containing the menu for given + * control ID. The total number of the menu items is returned in @len. + */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) +{ + static const s64 qmenu_int_vpx_num_partitions[] = { + 1, 2, 4, 8, + }; + + static const s64 qmenu_int_vpx_num_ref_frames[] = { + 1, 2, 3, + }; + + switch (id) { + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: + return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len); + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: + return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len); + default: + *len = 0; + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); + +/* Return the control name. */ +const char *v4l2_ctrl_get_name(u32 id) +{ + switch (id) { + /* USER controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_USER_CLASS: return "User Controls"; + case V4L2_CID_BRIGHTNESS: return "Brightness"; + case V4L2_CID_CONTRAST: return "Contrast"; + case V4L2_CID_SATURATION: return "Saturation"; + case V4L2_CID_HUE: return "Hue"; + case V4L2_CID_AUDIO_VOLUME: return "Volume"; + case V4L2_CID_AUDIO_BALANCE: return "Balance"; + case V4L2_CID_AUDIO_BASS: return "Bass"; + case V4L2_CID_AUDIO_TREBLE: return "Treble"; + case V4L2_CID_AUDIO_MUTE: return "Mute"; + case V4L2_CID_AUDIO_LOUDNESS: return "Loudness"; + case V4L2_CID_BLACK_LEVEL: return "Black Level"; + case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic"; + case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance"; + case V4L2_CID_RED_BALANCE: return "Red Balance"; + case V4L2_CID_BLUE_BALANCE: return "Blue Balance"; + case V4L2_CID_GAMMA: return "Gamma"; + case V4L2_CID_EXPOSURE: return "Exposure"; + case V4L2_CID_AUTOGAIN: return "Gain, Automatic"; + case V4L2_CID_GAIN: return "Gain"; + case V4L2_CID_HFLIP: return "Horizontal Flip"; + case V4L2_CID_VFLIP: return "Vertical Flip"; + case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency"; + case V4L2_CID_HUE_AUTO: return "Hue, Automatic"; + case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature"; + case V4L2_CID_SHARPNESS: return "Sharpness"; + case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation"; + case V4L2_CID_CHROMA_AGC: return "Chroma AGC"; + case V4L2_CID_COLOR_KILLER: return "Color Killer"; + case V4L2_CID_COLORFX: return "Color Effects"; + case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic"; + case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter"; + case V4L2_CID_ROTATE: return "Rotate"; + case V4L2_CID_BG_COLOR: return "Background Color"; + case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; + case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; + case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture 
Buffers"; + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers"; + case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; + case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr"; + case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB"; + + /* + * Codec controls + * + * The MPEG controls are applicable to all codec controls + * and the 'MPEG' part of the define is historical. + * + * Keep the order of the 'case's the same as in videodev2.h! + */ + case V4L2_CID_CODEC_CLASS: return "Codec Controls"; + case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type"; + case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID"; + case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID"; + case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID"; + case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID"; + case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID"; + case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID"; + case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format"; + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency"; + case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding"; + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate"; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate"; + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate"; + case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode"; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension"; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis"; + case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC"; + case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute"; + case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate"; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate"; + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback"; + case 
V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback"; + case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding"; + case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect"; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames"; + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size"; + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure"; + case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown"; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode"; + case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality"; + case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate"; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate"; + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation"; + case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute"; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; + case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; + case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; + case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period"; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; + case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics"; + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode"; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay"; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable"; + case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters"; + case 
V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; + case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode"; + case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period"; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode"; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. 
Frame as Frame0"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type"; + case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs"; + case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering"; + case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP: + return "H264 Set QP Value for HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: + return "H264 Constrained Intra Pred"; + case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate"; + case 
V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate"; + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level"; + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile"; + case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; + case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method"; + case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; + case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS"; + case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count"; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color"; + case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control"; + case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range"; + case 
V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; + case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; + case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID"; + case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count"; + case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index"; + case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames"; + case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: return "Average QP Value"; + case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value"; + case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value"; + + /* VPX controls */ + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; + case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable"; + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame"; + case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range"; + case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control"; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period"; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator"; + case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile"; + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile"; + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level"; + + /* HEVC controls */ + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value"; + case 
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile"; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level"; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier"; + case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth"; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type"; + case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction"; + case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding"; + case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront"; + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC 
Hierarchical Layer 2 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB"; + case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID"; + case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing"; + case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split"; + case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs"; + case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode"; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR"; + case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset"; + case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset"; + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field"; + case 
V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame"; + case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR"; + + /* AV1 controls */ + case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile"; + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level"; + + /* CAMERA controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_CAMERA_CLASS: return "Camera Controls"; + case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure"; + case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute"; + case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate"; + case V4L2_CID_PAN_RELATIVE: return "Pan, Relative"; + case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative"; + case V4L2_CID_PAN_RESET: return "Pan, Reset"; + case V4L2_CID_TILT_RESET: return "Tilt, Reset"; + case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute"; + case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute"; + case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute"; + case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative"; + case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous"; + case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute"; + case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative"; + case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous"; + case V4L2_CID_PRIVACY: return "Privacy"; + case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute"; + case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative"; + case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias"; + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset"; + case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range"; + case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization"; + case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity"; + case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto"; + case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode"; + case 
V4L2_CID_SCENE_MODE: return "Scene Mode"; + case V4L2_CID_3A_LOCK: return "3A Lock"; + case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start"; + case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop"; + case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status"; + case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range"; + case V4L2_CID_PAN_SPEED: return "Pan, Speed"; + case V4L2_CID_TILT_SPEED: return "Tilt, Speed"; + case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size"; + case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation"; + case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation"; + case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode"; + + /* FM Radio Modulator controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls"; + case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation"; + case V4L2_CID_RDS_TX_PI: return "RDS Program ID"; + case V4L2_CID_RDS_TX_PTY: return "RDS Program Type"; + case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name"; + case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text"; + case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo"; + case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head"; + case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed"; + case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY"; + case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; + case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; + case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music"; + case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies"; + case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies"; + case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; + case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; + case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; + case 
V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled"; + case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; + case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; + case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; + case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time"; + case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; + case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; + case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; + case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis"; + case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; + case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; + + /* Flash controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_FLASH_CLASS: return "Flash Controls"; + case V4L2_CID_FLASH_LED_MODE: return "LED Mode"; + case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source"; + case V4L2_CID_FLASH_STROBE: return "Strobe"; + case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe"; + case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status"; + case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout"; + case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode"; + case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode"; + case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator"; + case V4L2_CID_FLASH_FAULT: return "Faults"; + case V4L2_CID_FLASH_CHARGE: return "Charge"; + case V4L2_CID_FLASH_READY: return "Ready to Strobe"; + + /* JPEG encoder controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls"; + case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling"; + case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval"; + case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality"; + case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers"; + + /* Image source controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls"; + case V4L2_CID_VBLANK: return "Vertical Blanking"; + case V4L2_CID_HBLANK: return "Horizontal Blanking"; + case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain"; + case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value"; + case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value"; + case V4L2_CID_NOTIFY_GAINS: return "Notify Gains"; + + /* Image processing controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls"; + case V4L2_CID_LINK_FREQ: return "Link Frequency"; + case V4L2_CID_PIXEL_RATE: return "Pixel Rate"; + case V4L2_CID_TEST_PATTERN: return "Test Pattern"; + case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode"; + case V4L2_CID_DIGITAL_GAIN: return "Digital Gain"; + + /* DV controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_DV_CLASS: return "Digital Video Controls"; + case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present"; + case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present"; + case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present"; + case V4L2_CID_DV_TX_MODE: return "Transmit Mode"; + case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range"; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type"; + case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present"; + case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range"; + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type"; + + case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls"; + case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis"; + case V4L2_CID_RDS_RECEPTION: return "RDS Reception"; + case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls"; + case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain"; + case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto"; + case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain"; + case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto"; + case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain"; + case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto"; + case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain"; + case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto"; + case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth"; + case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock"; + case V4L2_CID_RDS_RX_PTY: return "RDS Program Type"; + case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name"; + case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text"; + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; + case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music"; + + /* Detection controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_DETECT_CLASS: return "Detection Controls"; + case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode"; + case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold"; + case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid"; + case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid"; + + /* Stateless Codec controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls"; + case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode"; + case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code"; + case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set"; + case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set"; + case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix"; + case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table"; + case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters"; + case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters"; + case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters"; + case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters"; + case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header"; + case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header"; + case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices"; + case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates"; + case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters"; + case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set"; + case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set"; + case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters"; + case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix"; + case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: 
return "HEVC Decode Parameters"; + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode"; + case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code"; + case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets"; + case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters"; + case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry"; + case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters"; + case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain"; + + /* Colorimetry controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls"; + case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info"; + case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display"; + default: + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_name); + +void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags) +{ + *name = v4l2_ctrl_get_name(id); + *flags = 0; + + switch (id) { + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_LOUDNESS: + case V4L2_CID_AUTO_WHITE_BALANCE: + case V4L2_CID_AUTOGAIN: + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + case V4L2_CID_HUE_AUTO: + case V4L2_CID_CHROMA_AGC: + case V4L2_CID_COLOR_KILLER: + case V4L2_CID_AUTOBRIGHTNESS: + case V4L2_CID_MPEG_AUDIO_MUTE: + case V4L2_CID_MPEG_VIDEO_MUTE: + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + case V4L2_CID_MPEG_VIDEO_PULLDOWN: + case V4L2_CID_EXPOSURE_AUTO_PRIORITY: + case V4L2_CID_FOCUS_AUTO: + case V4L2_CID_PRIVACY: + case V4L2_CID_AUDIO_LIMITER_ENABLED: + case V4L2_CID_AUDIO_COMPRESSION_ENABLED: + case V4L2_CID_PILOT_TONE_ENABLED: + case V4L2_CID_ILLUMINATORS_1: + case V4L2_CID_ILLUMINATORS_2: + case V4L2_CID_FLASH_STROBE_STATUS: + case V4L2_CID_FLASH_CHARGE: + case V4L2_CID_FLASH_READY: + case 
V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: + case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: + case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: + case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: + case V4L2_CID_WIDE_DYNAMIC_RANGE: + case V4L2_CID_IMAGE_STABILIZATION: + case V4L2_CID_RDS_RECEPTION: + case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: + case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: + case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: + case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: + case V4L2_CID_RF_TUNER_PLL_LOCK: + case V4L2_CID_RDS_TX_MONO_STEREO: + case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: + case V4L2_CID_RDS_TX_COMPRESSED: + case V4L2_CID_RDS_TX_DYNAMIC_PTY: + case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_TX_MUSIC_SPEECH: + case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_RX_MUSIC_SPEECH: + *type = V4L2_CTRL_TYPE_BOOLEAN; + *min = 0; + *max = *step = 1; + break; + case V4L2_CID_ROTATE: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; + break; + case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: + case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + case V4L2_CID_MPEG_VIDEO_LTR_COUNT: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: + *type = V4L2_CTRL_TYPE_BITMASK; + *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: + 
case V4L2_CID_PAN_RESET: + case V4L2_CID_TILT_RESET: + case V4L2_CID_FLASH_STROBE: + case V4L2_CID_FLASH_STROBE_STOP: + case V4L2_CID_AUTO_FOCUS_START: + case V4L2_CID_AUTO_FOCUS_STOP: + case V4L2_CID_DO_WHITE_BALANCE: + *type = V4L2_CTRL_TYPE_BUTTON; + *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + *min = *max = *step = *def = 0; + break; + case V4L2_CID_POWER_LINE_FREQUENCY: + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + case V4L2_CID_MPEG_AUDIO_ENCODING: + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + case V4L2_CID_MPEG_AUDIO_MODE: + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + case V4L2_CID_MPEG_AUDIO_CRC: + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: + case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: + case V4L2_CID_MPEG_VIDEO_ENCODING: + case V4L2_CID_MPEG_VIDEO_ASPECT: + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_STREAM_TYPE: + case V4L2_CID_MPEG_STREAM_VBI_FMT: + case V4L2_CID_EXPOSURE_AUTO: + case V4L2_CID_AUTO_FOCUS_RANGE: + case V4L2_CID_COLORFX: + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: + case V4L2_CID_TUNE_PREEMPHASIS: + case V4L2_CID_FLASH_LED_MODE: + case V4L2_CID_FLASH_STROBE_SOURCE: + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case 
V4L2_CID_JPEG_CHROMA_SUBSAMPLING: + case V4L2_CID_ISO_SENSITIVITY_AUTO: + case V4L2_CID_EXPOSURE_METERING: + case V4L2_CID_SCENE_MODE: + case V4L2_CID_DV_TX_MODE: + case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_RGB_RANGE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + case V4L2_CID_TEST_PATTERN: + case V4L2_CID_DEINTERLACING_MODE: + case V4L2_CID_TUNE_DEEMPHASIS: + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + case V4L2_CID_DETECT_MD_MODE: + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: + case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: + case V4L2_CID_STATELESS_HEVC_START_CODE: + case V4L2_CID_STATELESS_H264_DECODE_MODE: + case V4L2_CID_STATELESS_H264_START_CODE: + case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + case V4L2_CID_HDR_SENSOR_MODE: + *type = V4L2_CTRL_TYPE_MENU; + break; + case V4L2_CID_LINK_FREQ: + *type = V4L2_CTRL_TYPE_INTEGER_MENU; + break; + case V4L2_CID_RDS_TX_PS_NAME: + case V4L2_CID_RDS_TX_RADIO_TEXT: + case V4L2_CID_RDS_RX_PS_NAME: + case V4L2_CID_RDS_RX_RADIO_TEXT: + *type = V4L2_CTRL_TYPE_STRING; + break; + case V4L2_CID_ISO_SENSITIVITY: + case V4L2_CID_AUTO_EXPOSURE_BIAS: + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: + *type = V4L2_CTRL_TYPE_INTEGER_MENU; + break; + case V4L2_CID_USER_CLASS: + case V4L2_CID_CAMERA_CLASS: + case V4L2_CID_CODEC_CLASS: + case V4L2_CID_FM_TX_CLASS: + case V4L2_CID_FLASH_CLASS: + case V4L2_CID_JPEG_CLASS: + case 
V4L2_CID_IMAGE_SOURCE_CLASS: + case V4L2_CID_IMAGE_PROC_CLASS: + case V4L2_CID_DV_CLASS: + case V4L2_CID_FM_RX_CLASS: + case V4L2_CID_RF_TUNER_CLASS: + case V4L2_CID_DETECT_CLASS: + case V4L2_CID_CODEC_STATELESS_CLASS: + case V4L2_CID_COLORIMETRY_CLASS: + *type = V4L2_CTRL_TYPE_CTRL_CLASS; + /* You can neither read nor write these */ + *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; + *min = *max = *step = *def = 0; + break; + case V4L2_CID_BG_COLOR: + case V4L2_CID_COLORFX_RGB: + *type = V4L2_CTRL_TYPE_INTEGER; + *step = 1; + *min = 0; + /* Max is calculated as RGB888 that is 2^24 - 1 */ + *max = 0xffffff; + break; + case V4L2_CID_COLORFX_CBCR: + *type = V4L2_CTRL_TYPE_INTEGER; + *step = 1; + *min = 0; + *max = 0xffff; + break; + case V4L2_CID_FLASH_FAULT: + case V4L2_CID_JPEG_ACTIVE_MARKER: + case V4L2_CID_3A_LOCK: + case V4L2_CID_AUTO_FOCUS_STATUS: + case V4L2_CID_DV_TX_HOTPLUG: + case V4L2_CID_DV_TX_RXSENSE: + case V4L2_CID_DV_TX_EDID_PRESENT: + case V4L2_CID_DV_RX_POWER_PRESENT: + *type = V4L2_CTRL_TYPE_BITMASK; + break; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_MPEG_VIDEO_DEC_PTS: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; + *min = *def = 0; + *max = 0x1ffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_DEC_FRAME: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; + *min = *def = 0; + *max = 0x7fffffffffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: + *type = V4L2_CTRL_TYPE_INTEGER64; + *min = 0; + /* default for 8 bit black, luma is 16, chroma is 128 */ + *def = 0x8000800010LL; + *max = 0xffffffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case 
V4L2_CID_PIXEL_RATE: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_DETECT_MD_REGION_GRID: + *type = V4L2_CTRL_TYPE_U8; + break; + case V4L2_CID_DETECT_MD_THRESHOLD_GRID: + *type = V4L2_CTRL_TYPE_U16; + break; + case V4L2_CID_RDS_TX_ALT_FREQS: + *type = V4L2_CTRL_TYPE_U32; + break; + case V4L2_CID_STATELESS_MPEG2_SEQUENCE: + *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE; + break; + case V4L2_CID_STATELESS_MPEG2_PICTURE: + *type = V4L2_CTRL_TYPE_MPEG2_PICTURE; + break; + case V4L2_CID_STATELESS_MPEG2_QUANTISATION: + *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION; + break; + case V4L2_CID_STATELESS_FWHT_PARAMS: + *type = V4L2_CTRL_TYPE_FWHT_PARAMS; + break; + case V4L2_CID_STATELESS_H264_SPS: + *type = V4L2_CTRL_TYPE_H264_SPS; + break; + case V4L2_CID_STATELESS_H264_PPS: + *type = V4L2_CTRL_TYPE_H264_PPS; + break; + case V4L2_CID_STATELESS_H264_SCALING_MATRIX: + *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX; + break; + case V4L2_CID_STATELESS_H264_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS; + break; + case V4L2_CID_STATELESS_H264_DECODE_PARAMS: + *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS; + break; + case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: + *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS; + break; + case V4L2_CID_STATELESS_VP8_FRAME: + *type = V4L2_CTRL_TYPE_VP8_FRAME; + break; + case V4L2_CID_STATELESS_HEVC_SPS: + *type = V4L2_CTRL_TYPE_HEVC_SPS; + break; + case V4L2_CID_STATELESS_HEVC_PPS: + *type = V4L2_CTRL_TYPE_HEVC_PPS; + break; + case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: + *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX; + break; + case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: + *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS; + break; + case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: + *type = V4L2_CTRL_TYPE_U32; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case 
V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: + *type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR; + break; + case V4L2_CID_STATELESS_VP9_FRAME: + *type = V4L2_CTRL_TYPE_VP9_FRAME; + break; + case V4L2_CID_STATELESS_AV1_SEQUENCE: + *type = V4L2_CTRL_TYPE_AV1_SEQUENCE; + break; + case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: + *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case V4L2_CID_STATELESS_AV1_FRAME: + *type = V4L2_CTRL_TYPE_AV1_FRAME; + break; + case V4L2_CID_STATELESS_AV1_FILM_GRAIN: + *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN; + break; + case V4L2_CID_UNIT_CELL_SIZE: + *type = V4L2_CTRL_TYPE_AREA; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: + *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO; + break; + case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: + *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY; + break; + default: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + } + switch (id) { + case V4L2_CID_MPEG_AUDIO_ENCODING: + case V4L2_CID_MPEG_AUDIO_MODE: + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + case V4L2_CID_MPEG_STREAM_TYPE: + *flags |= V4L2_CTRL_FLAG_UPDATE; + break; + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_BRIGHTNESS: + case V4L2_CID_CONTRAST: + case V4L2_CID_SATURATION: + case V4L2_CID_HUE: + case V4L2_CID_RED_BALANCE: + case V4L2_CID_BLUE_BALANCE: + case V4L2_CID_GAMMA: + case V4L2_CID_SHARPNESS: + case V4L2_CID_CHROMA_GAIN: + case V4L2_CID_RDS_TX_DEVIATION: + case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: + case V4L2_CID_AUDIO_LIMITER_DEVIATION: + case V4L2_CID_AUDIO_COMPRESSION_GAIN: + case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: + case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: + case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: + case V4L2_CID_PILOT_TONE_DEVIATION: + case V4L2_CID_PILOT_TONE_FREQUENCY: + case V4L2_CID_TUNE_POWER_LEVEL: + case 
V4L2_CID_TUNE_ANTENNA_CAPACITOR: + case V4L2_CID_RF_TUNER_RF_GAIN: + case V4L2_CID_RF_TUNER_LNA_GAIN: + case V4L2_CID_RF_TUNER_MIXER_GAIN: + case V4L2_CID_RF_TUNER_IF_GAIN: + case V4L2_CID_RF_TUNER_BANDWIDTH: + case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: + *flags |= V4L2_CTRL_FLAG_SLIDER; + break; + case V4L2_CID_PAN_RELATIVE: + case V4L2_CID_TILT_RELATIVE: + case V4L2_CID_FOCUS_RELATIVE: + case V4L2_CID_IRIS_RELATIVE: + case V4L2_CID_ZOOM_RELATIVE: + *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_FLASH_STROBE_STATUS: + case V4L2_CID_AUTO_FOCUS_STATUS: + case V4L2_CID_FLASH_READY: + case V4L2_CID_DV_TX_HOTPLUG: + case V4L2_CID_DV_TX_RXSENSE: + case V4L2_CID_DV_TX_EDID_PRESENT: + case V4L2_CID_DV_RX_POWER_PRESENT: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + case V4L2_CID_RDS_RX_PTY: + case V4L2_CID_RDS_RX_PS_NAME: + case V4L2_CID_RDS_RX_RADIO_TEXT: + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_RX_MUSIC_SPEECH: + case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_CAMERA_SENSOR_ROTATION: + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_RF_TUNER_PLL_LOCK: + *flags |= V4L2_CTRL_FLAG_VOLATILE; + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_fill); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h new file mode 100644 index 0000000..aba6176 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 controls framework private header. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#ifndef _V4L2_CTRLS_PRIV_H_ +#define _V4L2_CTRLS_PRIV_H_ + +#define dprintk(vdev, fmt, arg...) 
do { \ + if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \ + printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \ + __func__, video_device_node_name(vdev), ##arg); \ +} while (0) + +#define has_op(master, op) \ + ((master)->ops && (master)->ops->op) +#define call_op(master, op) \ + (has_op(master, op) ? (master)->ops->op(master) : 0) + +static inline u32 node2id(struct list_head *node) +{ + return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id; +} + +/* + * Small helper function to determine if the autocluster is set to manual + * mode. + */ +static inline bool is_cur_manual(const struct v4l2_ctrl *master) +{ + return master->is_auto && master->cur.val == master->manual_mode_value; +} + +/* + * Small helper function to determine if the autocluster will be set to manual + * mode. + */ +static inline bool is_new_manual(const struct v4l2_ctrl *master) +{ + return master->is_auto && master->val == master->manual_mode_value; +} + +static inline u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + +/* v4l2-ctrls-core.c */ +void cur_to_new(struct v4l2_ctrl *ctrl); +void cur_to_req(struct v4l2_ctrl_ref *ref); +void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags); +void new_to_req(struct v4l2_ctrl_ref *ref); +int req_to_new(struct v4l2_ctrl_ref *ref); +void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl); +void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes); +int handler_new_ref(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req); +struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id); +struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id); +int check_range(enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def); +void update_from_auto_cluster(struct v4l2_ctrl *master); 
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, + bool set, u32 ch_flags); + +/* v4l2-ctrls-api.c */ +int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev); +int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev, bool set); + +/* v4l2-ctrls-request.c */ +void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl); +void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl); +int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs); +int try_set_ext_ctrls_request(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set); + +#endif diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-request.c b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-request.c new file mode 100644 index 0000000..c637049 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-ctrls-request.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework Request API implementation. 
+ * + * Copyright (C) 2018-2021 Hans Verkuil + */ + +#define pr_fmt(fmt) "v4l2-ctrls: " fmt + +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +/* Initialize the request-related fields in a control handler */ +void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl) +{ + INIT_LIST_HEAD(&hdl->requests); + INIT_LIST_HEAD(&hdl->requests_queued); + hdl->request_is_queued = false; + media_request_object_init(&hdl->req_obj); +} + +/* Free the request-related fields in a control handler */ +void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl_handler *req, *next_req; + + /* + * Do nothing if this isn't the main handler or the main + * handler is not used in any request. + * + * The main handler can be identified by having a NULL ops pointer in + * the request object. + */ + if (hdl->req_obj.ops || list_empty(&hdl->requests)) + return; + + /* + * If the main handler is freed and it is used by handler objects in + * outstanding requests, then unbind and put those objects before + * freeing the main handler. 
+ */ + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { + media_request_object_unbind(&req->req_obj); + media_request_object_put(&req->req_obj); + } +} + +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_handler *from) +{ + struct v4l2_ctrl_ref *ref; + int err = 0; + + if (WARN_ON(!hdl || hdl == from)) + return -EINVAL; + + if (hdl->error) + return hdl->error; + + WARN_ON(hdl->lock != &hdl->_lock); + + mutex_lock(from->lock); + list_for_each_entry(ref, &from->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl_ref *new_ref; + + /* Skip refs inherited from other devices */ + if (ref->from_other_dev) + continue; + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); + if (err) + break; + } + mutex_unlock(from->lock); + return err; +} + +static void v4l2_ctrl_request_queue(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + + mutex_lock(main_hdl->lock); + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); + hdl->request_is_queued = true; + mutex_unlock(main_hdl->lock); +} + +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + + mutex_lock(main_hdl->lock); + list_del_init(&hdl->requests); + if (hdl->request_is_queued) { + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + } + mutex_unlock(main_hdl->lock); +} + +static void v4l2_ctrl_request_release(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + v4l2_ctrl_handler_free(hdl); + kfree(hdl); +} + +static const struct media_request_object_ops req_ops = { + .queue = v4l2_ctrl_request_queue, + .unbind = v4l2_ctrl_request_unbind, + 
.release = v4l2_ctrl_request_release, +}; + +struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, + struct v4l2_ctrl_handler *parent) +{ + struct media_request_object *obj; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING && + req->state != MEDIA_REQUEST_STATE_QUEUED)) + return NULL; + + obj = media_request_object_find(req, &req_ops, parent); + if (obj) + return container_of(obj, struct v4l2_ctrl_handler, req_obj); + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find); + +struct v4l2_ctrl * +v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return (ref && ref->p_req_valid) ? ref->ctrl : NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find); + +static int v4l2_ctrl_request_bind(struct media_request *req, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *from) +{ + int ret; + + ret = v4l2_ctrl_request_clone(hdl, from); + + if (!ret) { + ret = media_request_object_bind(req, &req_ops, + from, false, &hdl->req_obj); + if (!ret) { + mutex_lock(from->lock); + list_add_tail(&hdl->requests, &from->requests); + mutex_unlock(from->lock); + } + } + return ret; +} + +static struct media_request_object * +v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl, + struct media_request *req, bool set) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *new_hdl; + int ret; + + if (IS_ERR(req)) + return ERR_CAST(req); + + if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) + return ERR_PTR(-EBUSY); + + obj = media_request_object_find(req, &req_ops, hdl); + if (obj) + return obj; + /* + * If there are no controls in this completed request, + * then that can only happen if: + * + * 1) no controls were present in the queued request, and + * 2) v4l2_ctrl_request_complete() could not allocate a + * control handler object to store the completed state in. 
+ * + * So return ENOMEM to indicate that there was an out-of-memory + * error. + */ + if (!set) + return ERR_PTR(-ENOMEM); + + new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL); + if (!new_hdl) + return ERR_PTR(-ENOMEM); + + obj = &new_hdl->req_obj; + ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, new_hdl, hdl); + if (ret) { + v4l2_ctrl_handler_free(new_hdl); + kfree(new_hdl); + return ERR_PTR(ret); + } + + media_request_object_get(obj); + return obj; +} + +int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (!mdev || cs->request_fd < 0) + return -EINVAL; + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) + return PTR_ERR(req); + + if (req->state != MEDIA_REQUEST_STATE_COMPLETE) { + media_request_put(req); + return -EACCES; + } + + ret = media_request_lock_for_access(req); + if (ret) { + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, false); + if (IS_ERR(obj)) { + media_request_unlock_for_access(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev); + + media_request_unlock_for_access(req); + media_request_object_put(obj); + media_request_put(req); + return ret; +} + +int try_set_ext_ctrls_request(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (!mdev) { + dprintk(vdev, "%s: missing media device\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + if (cs->request_fd < 0) { + dprintk(vdev, "%s: invalid request fd 
%d\n", + video_device_node_name(vdev), cs->request_fd); + return -EINVAL; + } + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) { + dprintk(vdev, "%s: cannot find request fd %d\n", + video_device_node_name(vdev), cs->request_fd); + return PTR_ERR(req); + } + + ret = media_request_lock_for_update(req); + if (ret) { + dprintk(vdev, "%s: cannot lock request fd %d\n", + video_device_node_name(vdev), cs->request_fd); + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, set); + if (IS_ERR(obj)) { + dprintk(vdev, + "%s: cannot find request object for request fd %d\n", + video_device_node_name(vdev), + cs->request_fd); + media_request_unlock_for_update(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); + if (ret) + dprintk(vdev, + "%s: try_set_ext_ctrls_common failed (%d)\n", + video_device_node_name(vdev), ret); + + media_request_unlock_for_update(req); + media_request_object_put(obj); + media_request_put(req); + + return ret; +} + +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + /* + * Note that it is valid if nothing was found. It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. 
+ */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) { + int ret; + + /* Create a new request so the driver can return controls */ + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) + return; + + ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, hdl, main_hdl); + if (ret) { + v4l2_ctrl_handler_free(hdl); + kfree(hdl); + return; + } + hdl->request_is_queued = true; + obj = media_request_object_find(req, &req_ops, main_hdl); + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + unsigned int i; + + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + call_op(master, g_volatile_ctrl); + new_to_req(ref); + v4l2_ctrl_unlock(master); + continue; + } + if (ref->p_req_valid) + continue; + + /* Copy the current control value into the request */ + v4l2_ctrl_lock(ctrl); + cur_to_req(ref); + v4l2_ctrl_unlock(ctrl); + } + + mutex_lock(main_hdl->lock); + WARN_ON(!hdl->request_is_queued); + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + mutex_unlock(main_hdl->lock); + media_request_object_complete(obj); + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_complete); + +int v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + int ret = 0; + + if (!req || !main_hdl) + return 0; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) + return -EBUSY; + + /* + * Note that it is valid if nothing was found. 
It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. + */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return 0; + if (obj->completed) { + media_request_object_put(obj); + return -EBUSY; + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) + ref->req_done = false; + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + bool have_new_data = false; + int i; + + /* + * Skip if this control was already handled by a cluster. + * Skip button controls and read-only controls. + */ + if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + v4l2_ctrl_lock(master); + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + if (r->p_req_valid) { + have_new_data = true; + break; + } + } + } + if (!have_new_data) { + v4l2_ctrl_unlock(master); + continue; + } + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + ret = req_to_new(r); + if (ret) { + v4l2_ctrl_unlock(master); + goto error; + } + master->cluster[i]->is_new = 1; + r->req_done = true; + } + } + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. + * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + s32 new_auto_val = *master->p_new.p_s32; + + /* + * If the new value == the manual value, then copy + * the current volatile values. 
+ */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + ret = try_or_set_cluster(NULL, master, true, 0); + v4l2_ctrl_unlock(master); + + if (ret) + break; + } + +error: + media_request_object_put(obj); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_request_setup); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c new file mode 100644 index 0000000..c369235 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c @@ -0,0 +1,1247 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video capture interface for Linux version 2 + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * Authors: Alan Cox, (version 1) + * Mauro Carvalho Chehab (version 2) + * + * Fixes: 20000516 Claudio Matsuoka + * - Added procfs support + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define VIDEO_NUM_DEVICES 256 +#define VIDEO_NAME "video4linux" + +#define dprintk(fmt, arg...) 
do { \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), \ + __func__, ##arg); \ +} while (0) + +/* + * sysfs stuff + */ + +static ssize_t index_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%i\n", vdev->index); +} +static DEVICE_ATTR_RO(index); + +static ssize_t dev_debug_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%i\n", vdev->dev_debug); +} + +static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct video_device *vdev = to_video_device(cd); + int res = 0; + u16 value; + + res = kstrtou16(buf, 0, &value); + if (res) + return res; + + vdev->dev_debug = value; + return len; +} +static DEVICE_ATTR_RW(dev_debug); + +static ssize_t name_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name); +} +static DEVICE_ATTR_RO(name); + +static struct attribute *video_device_attrs[] = { + &dev_attr_name.attr, + &dev_attr_dev_debug.attr, + &dev_attr_index.attr, + NULL, +}; +ATTRIBUTE_GROUPS(video_device); + +static struct dentry *v4l2_debugfs_root_dir; + +/* + * Active devices + */ +static struct video_device *video_devices[VIDEO_NUM_DEVICES]; +static DEFINE_MUTEX(videodev_lock); +static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES); + +/* Device node utility functions */ + +/* Note: these utility functions all assume that vfl_type is in the range + [0, VFL_TYPE_MAX-1]. */ + +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES +/* Return the bitmap corresponding to vfl_type. 
*/ +static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type) +{ + /* Any types not assigned to fixed minor ranges must be mapped to + one single bitmap for the purposes of finding a free node number + since all those unassigned types use the same minor range. */ + int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type; + + return devnode_nums[idx]; +} +#else +/* Return the bitmap corresponding to vfl_type. */ +static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type) +{ + return devnode_nums[vfl_type]; +} +#endif + +/* Mark device node number vdev->num as used */ +static inline void devnode_set(struct video_device *vdev) +{ + set_bit(vdev->num, devnode_bits(vdev->vfl_type)); +} + +/* Mark device node number vdev->num as unused */ +static inline void devnode_clear(struct video_device *vdev) +{ + clear_bit(vdev->num, devnode_bits(vdev->vfl_type)); +} + +/* Try to find a free device node number in the range [from, to> */ +static inline int devnode_find(struct video_device *vdev, int from, int to) +{ + return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from); +} + +struct video_device *video_device_alloc(void) +{ + return kzalloc(sizeof(struct video_device), GFP_KERNEL); +} +EXPORT_SYMBOL(video_device_alloc); + +void video_device_release(struct video_device *vdev) +{ + kfree(vdev); +} +EXPORT_SYMBOL(video_device_release); + +void video_device_release_empty(struct video_device *vdev) +{ + /* Do nothing */ + /* Only valid when the video_device struct is a static. */ +} +EXPORT_SYMBOL(video_device_release_empty); + +static inline void video_get(struct video_device *vdev) +{ + get_device(&vdev->dev); +} + +static inline void video_put(struct video_device *vdev) +{ + put_device(&vdev->dev); +} + +/* Called when the last user of the video device exits. 
*/ +static void v4l2_device_release(struct device *cd) +{ + struct video_device *vdev = to_video_device(cd); + struct v4l2_device *v4l2_dev = vdev->v4l2_dev; + + mutex_lock(&videodev_lock); + if (WARN_ON(video_devices[vdev->minor] != vdev)) { + /* should not happen */ + mutex_unlock(&videodev_lock); + return; + } + + /* Free up this device for reuse */ + video_devices[vdev->minor] = NULL; + + /* Delete the cdev on this minor as well */ + cdev_del(vdev->cdev); + /* Just in case some driver tries to access this from + the release() callback. */ + vdev->cdev = NULL; + + /* Mark device node number as free */ + devnode_clear(vdev); + + mutex_unlock(&videodev_lock); + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) { + /* Remove interfaces and interface links */ + media_devnode_remove(vdev->intf_devnode); + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) + media_device_unregister_entity(&vdev->entity); + } +#endif + + /* Do not call v4l2_device_put if there is no release callback set. + * Drivers that have no v4l2_device release callback might free the + * v4l2_dev instance in the video_device release callback below, so we + * must perform this check here. + * + * TODO: In the long run all drivers that use v4l2_device should use the + * v4l2_device release callback. This check will then be unnecessary. + */ + if (v4l2_dev->release == NULL) + v4l2_dev = NULL; + + /* Release video_device and perform other + cleanups as needed. 
*/ + vdev->release(vdev); + + /* Decrease v4l2_device refcount */ + if (v4l2_dev) + v4l2_device_put(v4l2_dev); +} + +static const struct class video_class = { + .name = VIDEO_NAME, + .dev_groups = video_device_groups, +}; + +struct video_device *video_devdata(struct file *file) +{ + return video_devices[iminor(file_inode(file))]; +} +EXPORT_SYMBOL(video_devdata); + + +/* Priority handling */ + +static inline bool prio_is_valid(enum v4l2_priority prio) +{ + return prio == V4L2_PRIORITY_BACKGROUND || + prio == V4L2_PRIORITY_INTERACTIVE || + prio == V4L2_PRIORITY_RECORD; +} + +void v4l2_prio_init(struct v4l2_prio_state *global) +{ + memset(global, 0, sizeof(*global)); +} +EXPORT_SYMBOL(v4l2_prio_init); + +int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, + enum v4l2_priority new) +{ + if (!prio_is_valid(new)) + return -EINVAL; + if (*local == new) + return 0; + + atomic_inc(&global->prios[new]); + if (prio_is_valid(*local)) + atomic_dec(&global->prios[*local]); + *local = new; + return 0; +} +EXPORT_SYMBOL(v4l2_prio_change); + +void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local) +{ + v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT); +} +EXPORT_SYMBOL(v4l2_prio_open); + +void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local) +{ + if (prio_is_valid(local)) + atomic_dec(&global->prios[local]); +} +EXPORT_SYMBOL(v4l2_prio_close); + +enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global) +{ + if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0) + return V4L2_PRIORITY_RECORD; + if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0) + return V4L2_PRIORITY_INTERACTIVE; + if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0) + return V4L2_PRIORITY_BACKGROUND; + return V4L2_PRIORITY_UNSET; +} +EXPORT_SYMBOL(v4l2_prio_max); + +int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local) +{ + return (local < v4l2_prio_max(global)) ? 
-EBUSY : 0; +} +EXPORT_SYMBOL(v4l2_prio_check); + + +static ssize_t v4l2_read(struct file *filp, char __user *buf, + size_t sz, loff_t *off) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->read) + return -EINVAL; + if (video_is_registered(vdev)) + ret = vdev->fops->read(filp, buf, sz, off); + if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) && + (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING)) + dprintk("%s: read: %zd (%d)\n", + video_device_node_name(vdev), sz, ret); + return ret; +} + +static ssize_t v4l2_write(struct file *filp, const char __user *buf, + size_t sz, loff_t *off) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->write) + return -EINVAL; + if (video_is_registered(vdev)) + ret = vdev->fops->write(filp, buf, sz, off); + if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) && + (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING)) + dprintk("%s: write: %zd (%d)\n", + video_device_node_name(vdev), sz, ret); + return ret; +} + +static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll) +{ + struct video_device *vdev = video_devdata(filp); + __poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI; + + if (video_is_registered(vdev)) { + if (!vdev->fops->poll) + res = DEFAULT_POLLMASK; + else + res = vdev->fops->poll(filp, poll); + } + if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL) + dprintk("%s: poll: %08x %08x\n", + video_device_node_name(vdev), res, + poll_requested_events(poll)); + return res; +} + +static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (vdev->fops->unlocked_ioctl) { + if (video_is_registered(vdev)) + ret = vdev->fops->unlocked_ioctl(filp, cmd, arg); + } else + ret = -ENOTTY; + + return ret; +} + +#ifdef CONFIG_MMU +#define v4l2_get_unmapped_area NULL +#else +static unsigned long v4l2_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, 
unsigned long pgoff, + unsigned long flags) +{ + struct video_device *vdev = video_devdata(filp); + int ret; + + if (!vdev->fops->get_unmapped_area) + return -ENOSYS; + if (!video_is_registered(vdev)) + return -ENODEV; + ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags); + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: get_unmapped_area (%d)\n", + video_device_node_name(vdev), ret); + return ret; +} +#endif + +static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->mmap) + return -ENODEV; + if (video_is_registered(vdev)) + ret = vdev->fops->mmap(filp, vm); + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: mmap (%d)\n", + video_device_node_name(vdev), ret); + return ret; +} + +/* Override for the open function */ +static int v4l2_open(struct inode *inode, struct file *filp) +{ + struct video_device *vdev; + int ret = 0; + + /* Check if the video device is available */ + mutex_lock(&videodev_lock); + vdev = video_devdata(filp); + /* return ENODEV if the video device has already been removed. */ + if (vdev == NULL || !video_is_registered(vdev)) { + mutex_unlock(&videodev_lock); + return -ENODEV; + } + /* and increase the device refcount */ + video_get(vdev); + mutex_unlock(&videodev_lock); + if (vdev->fops->open) { + if (video_is_registered(vdev)) + ret = vdev->fops->open(filp); + else + ret = -ENODEV; + } + + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: open (%d)\n", + video_device_node_name(vdev), ret); + /* decrease the refcount in case of an error */ + if (ret) + video_put(vdev); + return ret; +} + +/* Override for the release function */ +static int v4l2_release(struct inode *inode, struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + int ret = 0; + + /* + * We need to serialize the release() with queueing new requests. 
+ * The release() may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. + */ + if (vdev->fops->release) { + if (v4l2_device_supports_requests(vdev->v4l2_dev)) { + mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex); + ret = vdev->fops->release(filp); + mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex); + } else { + ret = vdev->fops->release(filp); + } + } + + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: release\n", + video_device_node_name(vdev)); + + /* decrease the refcount unconditionally since the release() + return value is ignored. */ + video_put(vdev); + return ret; +} + +static const struct file_operations v4l2_fops = { + .owner = THIS_MODULE, + .read = v4l2_read, + .write = v4l2_write, + .open = v4l2_open, + .get_unmapped_area = v4l2_get_unmapped_area, + .mmap = v4l2_mmap, + .unlocked_ioctl = v4l2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = v4l2_compat_ioctl32, +#endif + .release = v4l2_release, + .poll = v4l2_poll, +}; + +/** + * get_index - assign stream index number based on v4l2_dev + * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned + * + * Note that when this is called the new device has not yet been registered + * in the video_device array, but it was able to obtain a minor number. + * + * This means that we can always obtain a free stream index number since + * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots in + * use of the video_device array. + * + * Returns a free index number. + */ +static int get_index(struct video_device *vdev) +{ + /* This can be static since this function is called with the global + videodev_lock held. 
*/ + static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES); + int i; + + bitmap_zero(used, VIDEO_NUM_DEVICES); + + for (i = 0; i < VIDEO_NUM_DEVICES; i++) { + if (video_devices[i] != NULL && + video_devices[i]->v4l2_dev == vdev->v4l2_dev) { + __set_bit(video_devices[i]->index, used); + } + } + + return find_first_zero_bit(used, VIDEO_NUM_DEVICES); +} + +#define SET_VALID_IOCTL(ops, cmd, op) \ + do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0) + +/* This determines which ioctls are actually implemented in the driver. + It's a one-time thing which simplifies video_ioctl2 as it can just do + a bit test. + + Note that drivers can override this by setting bits to 1 in + vdev->valid_ioctls. If an ioctl is marked as 1 when this function is + called, then that ioctl will actually be marked as unimplemented. + + It does that by first setting up the local valid_ioctls bitmap, and + at the end do a: + + vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls) + */ +static void determine_valid_ioctls(struct video_device *vdev) +{ + const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; + const u32 meta_caps = V4L2_CAP_META_CAPTURE | + V4L2_CAP_META_OUTPUT; + DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO && + (vdev->device_caps & vid_caps); + bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI; + bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO; + bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR; + bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH; + bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO && + (vdev->device_caps & meta_caps); + bool is_rx = vdev->vfl_dir != VFL_DIR_TX; + bool is_tx = vdev->vfl_dir != VFL_DIR_RX; + bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC; + bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING; + 
bool is_edid = vdev->device_caps & V4L2_CAP_EDID; + + bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE); + + /* vfl_type and vfl_dir independent ioctls */ + + SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap); + __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls); + + /* Note: the control handler can also be passed through the filehandle, + and that can't be tested here. If the bit for these control ioctls + is set, then the ioctl is valid. But if it is 0, then it can still + be valid if the filehandle passed the control handler. */ + if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) + __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) + __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_querymenu) + __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls); + if (!is_tch) { + SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency); + SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency); + } + SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status); +#ifdef CONFIG_VIDEO_ADV_DEBUG + __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls); +#endif + /* yes, really vidioc_subscribe_event */ + SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event); 
+ SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event); + SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event); + if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator) + __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls); + + if (is_vid) { + /* video specific ioctls */ + if ((is_rx && (ops->vidioc_enum_fmt_vid_cap || + ops->vidioc_enum_fmt_vid_overlay)) || + (is_tx && ops->vidioc_enum_fmt_vid_out)) + __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_g_fmt_vid_cap || + ops->vidioc_g_fmt_vid_cap_mplane || + ops->vidioc_g_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_g_fmt_vid_out || + ops->vidioc_g_fmt_vid_out_mplane || + ops->vidioc_g_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_s_fmt_vid_cap || + ops->vidioc_s_fmt_vid_cap_mplane || + ops->vidioc_s_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_s_fmt_vid_out || + ops->vidioc_s_fmt_vid_out_mplane || + ops->vidioc_s_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_try_fmt_vid_cap || + ops->vidioc_try_fmt_vid_cap_mplane || + ops->vidioc_try_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_try_fmt_vid_out || + ops->vidioc_try_fmt_vid_out_mplane || + ops->vidioc_try_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay); + SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf); + SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf); + SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp); + SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp); + SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index); + SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd); + SET_VALID_IOCTL(ops, 
VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); + if (ops->vidioc_g_selection && + !test_bit(_IOC_NR(VIDIOC_G_SELECTION), vdev->valid_ioctls)) { + __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls); + } + if (ops->vidioc_s_selection && + !test_bit(_IOC_NR(VIDIOC_S_SELECTION), vdev->valid_ioctls)) + __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection); + SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection); + } + if (is_meta && is_rx) { + /* metadata capture specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap); + } else if (is_meta && is_tx) { + /* metadata output specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out); + } + if (is_vbi) { + /* vbi specific ioctls */ + if ((is_rx && (ops->vidioc_g_fmt_vbi_cap || + ops->vidioc_g_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_g_fmt_vbi_out || + ops->vidioc_g_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_s_fmt_vbi_cap || + ops->vidioc_s_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_s_fmt_vbi_out || + ops->vidioc_s_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_try_fmt_vbi_cap || + ops->vidioc_try_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_try_fmt_vbi_out || + ops->vidioc_try_fmt_sliced_vbi_out))) + 
__set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap); + } else if (is_tch) { + /* touch specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm); + SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); + } else if (is_sdr && is_rx) { + /* SDR receiver specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap); + } else if (is_sdr && is_tx) { + /* SDR transmitter specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out); + } + + if (has_streaming) { + /* ioctls valid for streaming I/O */ + SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs); + SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf); + SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf); + SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf); + SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf); + SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs); + SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf); + SET_VALID_IOCTL(ops, 
VIDIOC_STREAMON, vidioc_streamon); + SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff); + /* VIDIOC_CREATE_BUFS support is mandatory to enable VIDIOC_REMOVE_BUFS */ + if (ops->vidioc_create_bufs) + SET_VALID_IOCTL(ops, VIDIOC_REMOVE_BUFS, vidioc_remove_bufs); + } + + if (is_vid || is_vbi || is_meta) { + /* ioctls valid for video, vbi and metadata */ + if (ops->vidioc_s_std) + __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std); + SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std); + if (is_rx) { + SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd); + if (is_io_mc) { + __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls); + } else { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + } + SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio); + SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio); + SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio); + SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + if (is_tx) { + if (is_io_mc) { + __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls); + } else { + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + } + SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout); + SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout); + SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout); + } + if (ops->vidioc_g_parm || ops->vidioc_g_std) + __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_S_PARM, 
vidioc_s_parm); + SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + } + if (is_tx && (is_radio || is_sdr)) { + /* radio transmitter only ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator); + SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator); + } + if (is_rx && !is_tch) { + /* receiver only ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner); + SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner); + SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek); + } + if (is_edid) { + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + if (is_tx) { + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + } + if (is_rx) { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + } + + bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls, + BASE_VIDIOC_PRIVATE); +} + +static int video_register_media_controller(struct video_device *vdev) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + u32 intf_type; + int ret; + + /* Memory-to-memory devices are more complex and use + * their own function to register its mc entities. 
+ */ + if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M) + return 0; + + vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE; + vdev->entity.function = MEDIA_ENT_F_UNKNOWN; + + switch (vdev->vfl_type) { + case VFL_TYPE_VIDEO: + intf_type = MEDIA_INTF_T_V4L_VIDEO; + vdev->entity.function = MEDIA_ENT_F_IO_V4L; + break; + case VFL_TYPE_VBI: + intf_type = MEDIA_INTF_T_V4L_VBI; + vdev->entity.function = MEDIA_ENT_F_IO_VBI; + break; + case VFL_TYPE_SDR: + intf_type = MEDIA_INTF_T_V4L_SWRADIO; + vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO; + break; + case VFL_TYPE_TOUCH: + intf_type = MEDIA_INTF_T_V4L_TOUCH; + vdev->entity.function = MEDIA_ENT_F_IO_V4L; + break; + case VFL_TYPE_RADIO: + intf_type = MEDIA_INTF_T_V4L_RADIO; + /* + * Radio doesn't have an entity at the V4L2 side to represent + * radio input or output. Instead, the audio input/output goes + * via either physical wires or ALSA. + */ + break; + case VFL_TYPE_SUBDEV: + intf_type = MEDIA_INTF_T_V4L_SUBDEV; + /* Entity will be created via v4l2_device_register_subdev() */ + break; + default: + return 0; + } + + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { + vdev->entity.name = vdev->name; + + /* Needed just for backward compatibility with legacy MC API */ + vdev->entity.info.dev.major = VIDEO_MAJOR; + vdev->entity.info.dev.minor = vdev->minor; + + ret = media_device_register_entity(vdev->v4l2_dev->mdev, + &vdev->entity); + if (ret < 0) { + pr_warn("%s: media_device_register_entity failed\n", + __func__); + return ret; + } + } + + vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev, + intf_type, + 0, VIDEO_MAJOR, + vdev->minor); + if (!vdev->intf_devnode) { + media_device_unregister_entity(&vdev->entity); + return -ENOMEM; + } + + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { + struct media_link *link; + + link = media_create_intf_link(&vdev->entity, + &vdev->intf_devnode->intf, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (!link) { + 
media_devnode_remove(vdev->intf_devnode); + media_device_unregister_entity(&vdev->entity); + return -ENOMEM; + } + } + + /* FIXME: how to create the other interface links? */ + +#endif + return 0; +} + +int __video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr, int warn_if_nr_in_use, + struct module *owner) +{ + int i = 0; + int ret; + int minor_offset = 0; + int minor_cnt = VIDEO_NUM_DEVICES; + const char *name_base; + + /* A minor value of -1 marks this video device as never + having been registered */ + vdev->minor = -1; + + /* the release callback MUST be present */ + if (WARN_ON(!vdev->release)) + return -EINVAL; + /* the v4l2_dev pointer MUST be present */ + if (WARN_ON(!vdev->v4l2_dev)) + return -EINVAL; + /* the device_caps field MUST be set for all but subdevs */ + if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps)) + return -EINVAL; + + /* v4l2_fh support */ + spin_lock_init(&vdev->fh_lock); + INIT_LIST_HEAD(&vdev->fh_list); + + /* Part 1: check device type */ + switch (type) { + case VFL_TYPE_VIDEO: + name_base = "video"; + break; + case VFL_TYPE_VBI: + name_base = "vbi"; + break; + case VFL_TYPE_RADIO: + name_base = "radio"; + break; + case VFL_TYPE_SUBDEV: + name_base = "v4l-subdev"; + break; + case VFL_TYPE_SDR: + /* Use device name 'swradio' because 'sdr' was already taken. */ + name_base = "swradio"; + break; + case VFL_TYPE_TOUCH: + name_base = "v4l-touch"; + break; + default: + pr_err("%s called with unknown type: %d\n", + __func__, type); + return -EINVAL; + } + + vdev->vfl_type = type; + vdev->cdev = NULL; + if (vdev->dev_parent == NULL) + vdev->dev_parent = vdev->v4l2_dev->dev; + if (vdev->ctrl_handler == NULL) + vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler; + /* If the prio state pointer is NULL, then use the v4l2_device + prio state. */ + if (vdev->prio == NULL) + vdev->prio = &vdev->v4l2_dev->prio; + + /* Part 2: find a free minor, device node number and device index. 
*/ +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES + /* Keep the ranges for the first four types for historical + * reasons. + * Newer devices (not yet in place) should use the range + * of 128-191 and just pick the first free minor there + * (new style). */ + switch (type) { + case VFL_TYPE_VIDEO: + minor_offset = 0; + minor_cnt = 64; + break; + case VFL_TYPE_RADIO: + minor_offset = 64; + minor_cnt = 64; + break; + case VFL_TYPE_VBI: + minor_offset = 224; + minor_cnt = 32; + break; + default: + minor_offset = 128; + minor_cnt = 64; + break; + } +#endif + + /* Pick a device node number */ + mutex_lock(&videodev_lock); + nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt); + if (nr == minor_cnt) + nr = devnode_find(vdev, 0, minor_cnt); + if (nr == minor_cnt) { + pr_err("could not get a free device node number\n"); + mutex_unlock(&videodev_lock); + return -ENFILE; + } +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES + /* 1-on-1 mapping of device node number to minor number */ + i = nr; +#else + /* The device node number and minor numbers are independent, so + we just find the first free minor number. 
*/ + for (i = 0; i < VIDEO_NUM_DEVICES; i++) + if (video_devices[i] == NULL) + break; + if (i == VIDEO_NUM_DEVICES) { + mutex_unlock(&videodev_lock); + pr_err("could not get a free minor\n"); + return -ENFILE; + } +#endif + vdev->minor = i + minor_offset; + vdev->num = nr; + + /* Should not happen since we thought this minor was free */ + if (WARN_ON(video_devices[vdev->minor])) { + mutex_unlock(&videodev_lock); + pr_err("video_device not empty!\n"); + return -ENFILE; + } + devnode_set(vdev); + vdev->index = get_index(vdev); + video_devices[vdev->minor] = vdev; + mutex_unlock(&videodev_lock); + + if (vdev->ioctl_ops) + determine_valid_ioctls(vdev); + + /* Part 3: Initialize the character device */ + vdev->cdev = cdev_alloc(); + if (vdev->cdev == NULL) { + ret = -ENOMEM; + goto cleanup; + } + vdev->cdev->ops = &v4l2_fops; + vdev->cdev->owner = owner; + ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1); + if (ret < 0) { + pr_err("%s: cdev_add failed\n", __func__); + kfree(vdev->cdev); + vdev->cdev = NULL; + goto cleanup; + } + + /* Part 4: register the device with sysfs */ + vdev->dev.class = &video_class; + vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); + vdev->dev.parent = vdev->dev_parent; + vdev->dev.release = v4l2_device_release; + dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); + + /* Increase v4l2_device refcount */ + v4l2_device_get(vdev->v4l2_dev); + + mutex_lock(&videodev_lock); + ret = device_register(&vdev->dev); + if (ret < 0) { + mutex_unlock(&videodev_lock); + pr_err("%s: device_register failed\n", __func__); + put_device(&vdev->dev); + return ret; + } + + if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) + pr_warn("%s: requested %s%d, got %s\n", __func__, + name_base, nr, video_device_node_name(vdev)); + + /* Part 5: Register the entity. */ + ret = video_register_media_controller(vdev); + + /* Part 6: Activate this minor. The char device can now be used. 
*/ + set_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); + + return 0; + +cleanup: + mutex_lock(&videodev_lock); + if (vdev->cdev) + cdev_del(vdev->cdev); + video_devices[vdev->minor] = NULL; + devnode_clear(vdev); + mutex_unlock(&videodev_lock); + /* Mark this video device as never having been registered. */ + vdev->minor = -1; + return ret; +} +EXPORT_SYMBOL(__video_register_device); + +/** + * video_unregister_device - unregister a video4linux device + * @vdev: the device to unregister + * + * This unregisters the passed device. Future open calls will + * be met with errors. + */ +void video_unregister_device(struct video_device *vdev) +{ + /* Check if vdev was ever registered at all */ + if (!vdev || !video_is_registered(vdev)) + return; + + mutex_lock(&videodev_lock); + /* This must be in a critical section to prevent a race with v4l2_open. + * Once this bit has been cleared video_get may never be called again. + */ + clear_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + v4l2_event_wake_all(vdev); + device_unregister(&vdev->dev); +} +EXPORT_SYMBOL(video_unregister_device); + +#ifdef CONFIG_DEBUG_FS +struct dentry *v4l2_debugfs_root(void) +{ + if (!v4l2_debugfs_root_dir) + v4l2_debugfs_root_dir = debugfs_create_dir("v4l2", NULL); + return v4l2_debugfs_root_dir; +} +EXPORT_SYMBOL_GPL(v4l2_debugfs_root); +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) + +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_start); + +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + 
+ return __media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_start); + +void video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_stop); + +void __video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return __media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_stop); + +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_alloc_start(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start); + +struct media_pipeline *video_device_pipeline(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return NULL; + + return media_pad_pipeline(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/* + * Initialise video for linux + */ +static int __init videodev_init(void) +{ + dev_t dev = MKDEV(VIDEO_MAJOR, 0); + int ret; + + pr_info("Linux video capture interface: v2.00\n"); + ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME); + if (ret < 0) { + pr_warn("videodev: unable to get major %d\n", + VIDEO_MAJOR); + return ret; + } + + ret = class_register(&video_class); + if (ret < 0) { + unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); + pr_warn("video_dev: class_register failed\n"); + return -EIO; + } + + return 0; +} + +static void __exit videodev_exit(void) +{ + dev_t dev = MKDEV(VIDEO_MAJOR, 0); + + class_unregister(&video_class); + unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); + 
debugfs_remove_recursive(v4l2_debugfs_root_dir); + v4l2_debugfs_root_dir = NULL; +} + +subsys_initcall(videodev_init); +module_exit(videodev_exit) + +MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, Justin Schoeman, Gerd Knorr"); +MODULE_DESCRIPTION("Video4Linux2 core driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-device.c b/6.17.0/drivers/media/v4l2-core/v4l2-device.c new file mode 100644 index 0000000..5e53745 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-device.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + V4L2 device support. + + Copyright (C) 2008 Hans Verkuil + + */ + +#include +#include +#include +#include +#include +#include +#include + +int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) +{ + if (v4l2_dev == NULL) + return -EINVAL; + + INIT_LIST_HEAD(&v4l2_dev->subdevs); + spin_lock_init(&v4l2_dev->lock); + v4l2_prio_init(&v4l2_dev->prio); + kref_init(&v4l2_dev->ref); + get_device(dev); + v4l2_dev->dev = dev; + if (dev == NULL) { + /* If dev == NULL, then name must be filled in by the caller */ + if (WARN_ON(!v4l2_dev->name[0])) + return -EINVAL; + return 0; + } + + /* Set name to driver name + device name if it is empty. 
*/ + if (!v4l2_dev->name[0]) + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s", + dev->driver->name, dev_name(dev)); + if (!dev_get_drvdata(dev)) + dev_set_drvdata(dev, v4l2_dev); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_device_register); + +static void v4l2_device_release(struct kref *ref) +{ + struct v4l2_device *v4l2_dev = + container_of(ref, struct v4l2_device, ref); + + if (v4l2_dev->release) + v4l2_dev->release(v4l2_dev); +} + +int v4l2_device_put(struct v4l2_device *v4l2_dev) +{ + return kref_put(&v4l2_dev->ref, v4l2_device_release); +} +EXPORT_SYMBOL_GPL(v4l2_device_put); + +int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, + atomic_t *instance) +{ + int num = atomic_inc_return(instance) - 1; + int len = strlen(basename); + + if (basename[len - 1] >= '0' && basename[len - 1] <= '9') + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), + "%s-%d", basename, num); + else + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), + "%s%d", basename, num); + return num; +} +EXPORT_SYMBOL_GPL(v4l2_device_set_name); + +void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) +{ + if (v4l2_dev->dev == NULL) + return; + + if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) + dev_set_drvdata(v4l2_dev->dev, NULL); + put_device(v4l2_dev->dev); + v4l2_dev->dev = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_device_disconnect); + +void v4l2_device_unregister(struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd, *next; + + /* Just return if v4l2_dev is NULL or if it was already + * unregistered before. 
*/ + if (v4l2_dev == NULL || !v4l2_dev->name[0]) + return; + v4l2_device_disconnect(v4l2_dev); + + /* Unregister subdevs */ + list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) { + v4l2_device_unregister_subdev(sd); + if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) + v4l2_i2c_subdev_unregister(sd); + else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) + v4l2_spi_subdev_unregister(sd); + } + /* Mark as unregistered, thus preventing duplicate unregistrations */ + v4l2_dev->name[0] = '\0'; +} +EXPORT_SYMBOL_GPL(v4l2_device_unregister); + +int __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, struct module *module) +{ + int err; + + /* Check for valid input */ + if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0]) + return -EINVAL; + + /* + * The reason to acquire the module here is to avoid unloading + * a module of sub-device which is registered to a media + * device. To make it possible to unload modules for media + * devices that also register sub-devices, do not + * try_module_get() such sub-device owners. + */ + sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver && + module == v4l2_dev->dev->driver->owner; + + if (!sd->owner_v4l2_dev && !try_module_get(module)) + return -ENODEV; + + sd->v4l2_dev = v4l2_dev; + /* This just returns 0 if either of the two args is NULL */ + err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, + NULL, true); + if (err) + goto error_module; + +#if defined(CONFIG_MEDIA_CONTROLLER) + /* Register the entity. 
*/ + if (v4l2_dev->mdev) { + err = media_device_register_entity(v4l2_dev->mdev, &sd->entity); + if (err < 0) + goto error_module; + } +#endif + + if (sd->internal_ops && sd->internal_ops->registered) { + err = sd->internal_ops->registered(sd); + if (err) + goto error_unregister; + } + + sd->owner = module; + + spin_lock(&v4l2_dev->lock); + list_add_tail(&sd->list, &v4l2_dev->subdevs); + spin_unlock(&v4l2_dev->lock); + + return 0; + +error_unregister: +#if defined(CONFIG_MEDIA_CONTROLLER) + media_device_unregister_entity(&sd->entity); +#endif +error_module: + if (!sd->owner_v4l2_dev) + module_put(sd->owner); + sd->v4l2_dev = NULL; + return err; +} +EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev); + +static void v4l2_subdev_release(struct v4l2_subdev *sd) +{ + struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL; + + if (sd->internal_ops && sd->internal_ops->release) + sd->internal_ops->release(sd); + sd->devnode = NULL; + module_put(owner); +} + +static void v4l2_device_release_subdev_node(struct video_device *vdev) +{ + v4l2_subdev_release(video_get_drvdata(vdev)); + kfree(vdev); +} + +int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only) +{ + struct video_device *vdev; + struct v4l2_subdev *sd; + int err; + + /* Register a device node for every subdev marked with the + * V4L2_SUBDEV_FL_HAS_DEVNODE flag. 
+ */ + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) + continue; + + if (sd->devnode) + continue; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + err = -ENOMEM; + goto clean_up; + } + + video_set_drvdata(vdev, sd); + strscpy(vdev->name, sd->name, sizeof(vdev->name)); + vdev->dev_parent = sd->dev; + vdev->v4l2_dev = v4l2_dev; + vdev->fops = &v4l2_subdev_fops; + vdev->release = v4l2_device_release_subdev_node; + vdev->ctrl_handler = sd->ctrl_handler; + if (read_only) + set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); + sd->devnode = vdev; + err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1, + sd->owner); + if (err < 0) { + sd->devnode = NULL; + kfree(vdev); + goto clean_up; + } +#if defined(CONFIG_MEDIA_CONTROLLER) + sd->entity.info.dev.major = VIDEO_MAJOR; + sd->entity.info.dev.minor = vdev->minor; + + /* Interface is created by __video_register_device() */ + if (vdev->v4l2_dev->mdev) { + struct media_link *link; + + link = media_create_intf_link(&sd->entity, + &vdev->intf_devnode->intf, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (!link) { + err = -ENOMEM; + goto clean_up; + } + } +#endif + } + return 0; + +clean_up: + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (!sd->devnode) + break; + video_unregister_device(sd->devnode); + } + + return err; +} +EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes); + +void v4l2_device_unregister_subdev(struct v4l2_subdev *sd) +{ + struct v4l2_device *v4l2_dev; + + /* return if it isn't registered */ + if (sd == NULL || sd->v4l2_dev == NULL) + return; + + v4l2_dev = sd->v4l2_dev; + + spin_lock(&v4l2_dev->lock); + list_del(&sd->list); + spin_unlock(&v4l2_dev->lock); + + if (sd->internal_ops && sd->internal_ops->unregistered) + sd->internal_ops->unregistered(sd); + sd->v4l2_dev = NULL; + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (v4l2_dev->mdev) { + /* + * No need to explicitly remove links, as both pads and + * links are 
removed by the function below, in the right order + */ + media_device_unregister_entity(&sd->entity); + } +#endif + if (sd->devnode) + video_unregister_device(sd->devnode); + else + v4l2_subdev_release(sd); +} +EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-dv-timings.c b/6.17.0/drivers/media/v4l2-core/v4l2-dv-timings.c new file mode 100644 index 0000000..7710cb2 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -0,0 +1,1271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-dv-timings - dv-timings helper functions + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Hans Verkuil"); +MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions"); +MODULE_LICENSE("GPL"); + +const struct v4l2_dv_timings v4l2_dv_timings_presets[] = { + V4L2_DV_BT_CEA_640X480P59_94, + V4L2_DV_BT_CEA_720X480I59_94, + V4L2_DV_BT_CEA_720X480P59_94, + V4L2_DV_BT_CEA_720X576I50, + V4L2_DV_BT_CEA_720X576P50, + V4L2_DV_BT_CEA_1280X720P24, + V4L2_DV_BT_CEA_1280X720P25, + V4L2_DV_BT_CEA_1280X720P30, + V4L2_DV_BT_CEA_1280X720P50, + V4L2_DV_BT_CEA_1280X720P60, + V4L2_DV_BT_CEA_1920X1080P24, + V4L2_DV_BT_CEA_1920X1080P25, + V4L2_DV_BT_CEA_1920X1080P30, + V4L2_DV_BT_CEA_1920X1080I50, + V4L2_DV_BT_CEA_1920X1080P50, + V4L2_DV_BT_CEA_1920X1080I60, + V4L2_DV_BT_CEA_1920X1080P60, + V4L2_DV_BT_DMT_640X350P85, + V4L2_DV_BT_DMT_640X400P85, + V4L2_DV_BT_DMT_720X400P85, + V4L2_DV_BT_DMT_640X480P72, + V4L2_DV_BT_DMT_640X480P75, + V4L2_DV_BT_DMT_640X480P85, + V4L2_DV_BT_DMT_800X600P56, + V4L2_DV_BT_DMT_800X600P60, + V4L2_DV_BT_DMT_800X600P72, + V4L2_DV_BT_DMT_800X600P75, + V4L2_DV_BT_DMT_800X600P85, + V4L2_DV_BT_DMT_800X600P120_RB, + V4L2_DV_BT_DMT_848X480P60, + V4L2_DV_BT_DMT_1024X768I43, + V4L2_DV_BT_DMT_1024X768P60, + V4L2_DV_BT_DMT_1024X768P70, + 
V4L2_DV_BT_DMT_1024X768P75, + V4L2_DV_BT_DMT_1024X768P85, + V4L2_DV_BT_DMT_1024X768P120_RB, + V4L2_DV_BT_DMT_1152X864P75, + V4L2_DV_BT_DMT_1280X768P60_RB, + V4L2_DV_BT_DMT_1280X768P60, + V4L2_DV_BT_DMT_1280X768P75, + V4L2_DV_BT_DMT_1280X768P85, + V4L2_DV_BT_DMT_1280X768P120_RB, + V4L2_DV_BT_DMT_1280X800P60_RB, + V4L2_DV_BT_DMT_1280X800P60, + V4L2_DV_BT_DMT_1280X800P75, + V4L2_DV_BT_DMT_1280X800P85, + V4L2_DV_BT_DMT_1280X800P120_RB, + V4L2_DV_BT_DMT_1280X960P60, + V4L2_DV_BT_DMT_1280X960P85, + V4L2_DV_BT_DMT_1280X960P120_RB, + V4L2_DV_BT_DMT_1280X1024P60, + V4L2_DV_BT_DMT_1280X1024P75, + V4L2_DV_BT_DMT_1280X1024P85, + V4L2_DV_BT_DMT_1280X1024P120_RB, + V4L2_DV_BT_DMT_1360X768P60, + V4L2_DV_BT_DMT_1360X768P120_RB, + V4L2_DV_BT_DMT_1366X768P60, + V4L2_DV_BT_DMT_1366X768P60_RB, + V4L2_DV_BT_DMT_1400X1050P60_RB, + V4L2_DV_BT_DMT_1400X1050P60, + V4L2_DV_BT_DMT_1400X1050P75, + V4L2_DV_BT_DMT_1400X1050P85, + V4L2_DV_BT_DMT_1400X1050P120_RB, + V4L2_DV_BT_DMT_1440X900P60_RB, + V4L2_DV_BT_DMT_1440X900P60, + V4L2_DV_BT_DMT_1440X900P75, + V4L2_DV_BT_DMT_1440X900P85, + V4L2_DV_BT_DMT_1440X900P120_RB, + V4L2_DV_BT_DMT_1600X900P60_RB, + V4L2_DV_BT_DMT_1600X1200P60, + V4L2_DV_BT_DMT_1600X1200P65, + V4L2_DV_BT_DMT_1600X1200P70, + V4L2_DV_BT_DMT_1600X1200P75, + V4L2_DV_BT_DMT_1600X1200P85, + V4L2_DV_BT_DMT_1600X1200P120_RB, + V4L2_DV_BT_DMT_1680X1050P60_RB, + V4L2_DV_BT_DMT_1680X1050P60, + V4L2_DV_BT_DMT_1680X1050P75, + V4L2_DV_BT_DMT_1680X1050P85, + V4L2_DV_BT_DMT_1680X1050P120_RB, + V4L2_DV_BT_DMT_1792X1344P60, + V4L2_DV_BT_DMT_1792X1344P75, + V4L2_DV_BT_DMT_1792X1344P120_RB, + V4L2_DV_BT_DMT_1856X1392P60, + V4L2_DV_BT_DMT_1856X1392P75, + V4L2_DV_BT_DMT_1856X1392P120_RB, + V4L2_DV_BT_DMT_1920X1200P60_RB, + V4L2_DV_BT_DMT_1920X1200P60, + V4L2_DV_BT_DMT_1920X1200P75, + V4L2_DV_BT_DMT_1920X1200P85, + V4L2_DV_BT_DMT_1920X1200P120_RB, + V4L2_DV_BT_DMT_1920X1440P60, + V4L2_DV_BT_DMT_1920X1440P75, + V4L2_DV_BT_DMT_1920X1440P120_RB, + V4L2_DV_BT_DMT_2048X1152P60_RB, + 
V4L2_DV_BT_DMT_2560X1600P60_RB, + V4L2_DV_BT_DMT_2560X1600P60, + V4L2_DV_BT_DMT_2560X1600P75, + V4L2_DV_BT_DMT_2560X1600P85, + V4L2_DV_BT_DMT_2560X1600P120_RB, + V4L2_DV_BT_CEA_3840X2160P24, + V4L2_DV_BT_CEA_3840X2160P25, + V4L2_DV_BT_CEA_3840X2160P30, + V4L2_DV_BT_CEA_3840X2160P50, + V4L2_DV_BT_CEA_3840X2160P60, + V4L2_DV_BT_CEA_4096X2160P24, + V4L2_DV_BT_CEA_4096X2160P25, + V4L2_DV_BT_CEA_4096X2160P30, + V4L2_DV_BT_CEA_4096X2160P50, + V4L2_DV_BT_DMT_4096X2160P59_94_RB, + V4L2_DV_BT_CEA_4096X2160P60, + { } +}; +EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets); + +bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *dvcap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + const struct v4l2_bt_timings *bt = &t->bt; + const struct v4l2_bt_timings_cap *cap = &dvcap->bt; + u32 caps = cap->capabilities; + const u32 max_vert = 10240; + u32 max_hor = 3 * bt->width; + + if (t->type != V4L2_DV_BT_656_1120) + return false; + if (t->type != dvcap->type || + bt->height < cap->min_height || + bt->height > cap->max_height || + bt->width < cap->min_width || + bt->width > cap->max_width || + bt->pixelclock < cap->min_pixelclock || + bt->pixelclock > cap->max_pixelclock || + (!(caps & V4L2_DV_BT_CAP_CUSTOM) && + cap->standards && bt->standards && + !(bt->standards & cap->standards)) || + (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || + (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) + return false; + + /* sanity checks for the blanking timings */ + if (!bt->interlaced && + (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch)) + return false; + /* + * Some video receivers cannot properly separate the frontporch, + * backporch and sync values, and instead they only have the total + * blanking. That can be assigned to any of these three fields. + * So just check that none of these are way out of range. 
+ */ + if (bt->hfrontporch > max_hor || + bt->hsync > max_hor || bt->hbackporch > max_hor) + return false; + if (bt->vfrontporch > max_vert || + bt->vsync > max_vert || bt->vbackporch > max_vert) + return false; + if (bt->interlaced && (bt->il_vfrontporch > max_vert || + bt->il_vsync > max_vert || bt->il_vbackporch > max_vert)) + return false; + return fnc == NULL || fnc(t, fnc_handle); +} +EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings); + +int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + u32 i, idx; + + memset(t->reserved, 0, sizeof(t->reserved)); + for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, + fnc, fnc_handle) && + idx++ == t->index) { + t->timings = v4l2_dv_timings_presets[i]; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap); + +bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + unsigned pclock_delta, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + int i; + + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle)) + return false; + + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, + fnc, fnc_handle) && + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i, + pclock_delta, false)) { + u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS; + + *t = v4l2_dv_timings_presets[i]; + if (can_reduce_fps(&t->bt)) + t->bt.flags |= flags; + + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap); + +bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic) +{ + unsigned int i; + + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + const struct v4l2_bt_timings *bt = + &v4l2_dv_timings_presets[i].bt; + + if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) && + bt->cea861_vic == vic) { + *t = 
v4l2_dv_timings_presets[i]; + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic); + +/** + * v4l2_match_dv_timings - check if two timings match + * @t1: compare this v4l2_dv_timings struct... + * @t2: with this struct. + * @pclock_delta: the allowed pixelclock deviation. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. + * + * Compare t1 with t2 with a given margin of error for the pixelclock. + */ +bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1, + const struct v4l2_dv_timings *t2, + unsigned pclock_delta, bool match_reduced_fps) +{ + if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120) + return false; + if (t1->bt.width == t2->bt.width && + t1->bt.height == t2->bt.height && + t1->bt.interlaced == t2->bt.interlaced && + t1->bt.polarities == t2->bt.polarities && + t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta && + t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta && + t1->bt.hfrontporch == t2->bt.hfrontporch && + t1->bt.hsync == t2->bt.hsync && + t1->bt.hbackporch == t2->bt.hbackporch && + t1->bt.vfrontporch == t2->bt.vfrontporch && + t1->bt.vsync == t2->bt.vsync && + t1->bt.vbackporch == t2->bt.vbackporch && + (!match_reduced_fps || + (t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) == + (t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) && + (!t1->bt.interlaced || + (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch && + t1->bt.il_vsync == t2->bt.il_vsync && + t1->bt.il_vbackporch == t2->bt.il_vbackporch))) + return true; + return false; +} +EXPORT_SYMBOL_GPL(v4l2_match_dv_timings); + +void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, + const struct v4l2_dv_timings *t, bool detailed) +{ + const struct v4l2_bt_timings *bt = &t->bt; + u32 htot, vtot; + u32 fps; + + if (t->type != V4L2_DV_BT_656_1120) + return; + + htot = V4L2_DV_BT_FRAME_WIDTH(bt); + vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); + if (bt->interlaced) + vtot /= 2; + + fps = (htot * vtot) > 0 ? 
div_u64((100 * (u64)bt->pixelclock), + (htot * vtot)) : 0; + + if (prefix == NULL) + prefix = ""; + + pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix, + bt->width, bt->height, bt->interlaced ? "i" : "p", + fps / 100, fps % 100, htot, vtot); + + if (!detailed) + return; + + pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->hfrontporch, + (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-", + bt->hsync, bt->hbackporch); + pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->vfrontporch, + (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-", + bt->vsync, bt->vbackporch); + if (bt->interlaced) + pr_info("%s: vertical bottom field: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->il_vfrontporch, + (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-", + bt->il_vsync, bt->il_vbackporch); + pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock); + pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n", + dev_prefix, bt->flags, + (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ? + " REDUCED_BLANKING" : "", + ((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) && + bt->vsync == 8) ? " (V2)" : "", + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ? + " CAN_REDUCE_FPS" : "", + (bt->flags & V4L2_DV_FL_REDUCED_FPS) ? + " REDUCED_FPS" : "", + (bt->flags & V4L2_DV_FL_HALF_LINE) ? + " HALF_LINE" : "", + (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ? + " CE_VIDEO" : "", + (bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ? + " FIRST_FIELD_EXTRA_LINE" : "", + (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ? + " HAS_PICTURE_ASPECT" : "", + (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ? + " HAS_CEA861_VIC" : "", + (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ? + " HAS_HDMI_VIC" : ""); + pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards, + (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "", + (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "", + (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "", + (bt->standards & V4L2_DV_BT_STD_GTF) ? 
" GTF" : "", + (bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : ""); + if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) + pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix, + bt->picture_aspect.numerator, + bt->picture_aspect.denominator); + if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) + pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic); + if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) + pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic); +} +EXPORT_SYMBOL_GPL(v4l2_print_dv_timings); + +struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t) +{ + struct v4l2_fract ratio = { 1, 1 }; + unsigned long n, d; + + if (t->type != V4L2_DV_BT_656_1120) + return ratio; + if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)) + return ratio; + + ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator; + ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator; + + rational_best_approximation(ratio.numerator, ratio.denominator, + ratio.numerator, ratio.denominator, &n, &d); + ratio.numerator = n; + ratio.denominator = d; + return ratio; +} +EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio); + +/** v4l2_calc_timeperframe - helper function to calculate timeperframe based + * v4l2_dv_timings fields. + * @t - Timings for the video mode. + * + * Calculates the expected timeperframe using the pixel clock value and + * horizontal/vertical measures. This means that v4l2_dv_timings structure + * must be correctly and fully filled. 
+ */ +struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t) +{ + const struct v4l2_bt_timings *bt = &t->bt; + struct v4l2_fract fps_fract = { 1, 1 }; + unsigned long n, d; + u32 htot, vtot, fps; + u64 pclk; + + if (t->type != V4L2_DV_BT_656_1120) + return fps_fract; + + htot = V4L2_DV_BT_FRAME_WIDTH(bt); + vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); + pclk = bt->pixelclock; + + if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) && + (bt->flags & V4L2_DV_FL_REDUCED_FPS)) + pclk = div_u64(pclk * 1000ULL, 1001); + + fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0; + if (!fps) + return fps_fract; + + rational_best_approximation(fps, 100, fps, 100, &n, &d); + + fps_fract.numerator = d; + fps_fract.denominator = n; + return fps_fract; +} +EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe); + +/* + * CVT defines + * Based on Coordinated Video Timings Standard + * version 1.1 September 10, 2003 + */ + +#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */ +#define CVT_PXL_CLK_GRAN_RB_V2 1000 /* granularity for reduced blanking v2*/ + +/* Normal blanking */ +#define CVT_MIN_V_BPORCH 7 /* lines */ +#define CVT_MIN_V_PORCH_RND 3 /* lines */ +#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */ +#define CVT_HSYNC_PERCENT 8 /* nominal hsync as percentage of line */ + +/* Normal blanking for CVT uses GTF to calculate horizontal blanking */ +#define CVT_CELL_GRAN 8 /* character cell granularity */ +#define CVT_M 600 /* blanking formula gradient */ +#define CVT_C 40 /* blanking formula offset */ +#define CVT_K 128 /* blanking formula scaling factor */ +#define CVT_J 20 /* blanking formula scaling factor */ +#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J) +#define CVT_M_PRIME (CVT_K * CVT_M / 256) + +/* Reduced Blanking */ +#define CVT_RB_MIN_V_BPORCH 7 /* lines */ +#define CVT_RB_V_FPORCH 3 /* lines */ +#define CVT_RB_MIN_V_BLANK 460 /* us */ +#define CVT_RB_H_SYNC 32 /* pixels */ +#define CVT_RB_H_BLANK 160 /* pixels 
*/ +/* Reduce blanking Version 2 */ +#define CVT_RB_V2_H_BLANK 80 /* pixels */ +#define CVT_RB_MIN_V_FPORCH 3 /* lines */ +#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */ +#define CVT_RB_V_BPORCH 6 /* lines */ + +/** v4l2_detect_cvt - detect if the given timings follow the CVT standard + * @frame_height - the total height of the frame (including blanking) in lines. + * @hfreq - the horizontal frequency in Hz. + * @vsync - the height of the vertical sync in lines. + * @active_width - active width of image (does not include blanking). This + * information is needed only in case of version 2 of reduced blanking. + * In other cases, this parameter does not have any effect on timings. + * @polarities - the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced - if this flag is true, it indicates interlaced format + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid CVT format. If so, then it will return true, and fmt will be filled + * in with the found CVT timings. 
+ */ +bool v4l2_detect_cvt(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + unsigned int active_width, + u32 polarities, + bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) +{ + struct v4l2_dv_timings t = {}; + int v_fp, v_bp, h_fp, h_bp, hsync; + int frame_width, image_height, image_width; + bool reduced_blanking; + bool rb_v2 = false; + unsigned int pix_clk; + + if (vsync < 4 || vsync > 8) + return false; + + if (polarities == V4L2_DV_VSYNC_POS_POL) + reduced_blanking = false; + else if (polarities == V4L2_DV_HSYNC_POS_POL) + reduced_blanking = true; + else + return false; + + if (reduced_blanking && vsync == 8) + rb_v2 = true; + + if (rb_v2 && active_width == 0) + return false; + + if (!rb_v2 && vsync > 7) + return false; + + if (hfreq == 0) + return false; + + /* Vertical */ + if (reduced_blanking) { + if (rb_v2) { + v_bp = CVT_RB_V_BPORCH; + v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1; + v_fp -= vsync + v_bp; + + if (v_fp < CVT_RB_V2_MIN_V_FPORCH) + v_fp = CVT_RB_V2_MIN_V_FPORCH; + } else { + v_fp = CVT_RB_V_FPORCH; + v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1; + v_bp -= vsync + v_fp; + + if (v_bp < CVT_RB_MIN_V_BPORCH) + v_bp = CVT_RB_MIN_V_BPORCH; + } + } else { + v_fp = CVT_MIN_V_PORCH_RND; + v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync; + + if (v_bp < CVT_MIN_V_BPORCH) + v_bp = CVT_MIN_V_BPORCH; + } + + if (interlaced) + image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1; + else + image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1; + + if (image_height < 0) + return false; + + /* Aspect ratio based on vsync */ + switch (vsync) { + case 4: + image_width = (image_height * 4) / 3; + break; + case 5: + image_width = (image_height * 16) / 9; + break; + case 6: + image_width = (image_height * 16) / 10; + break; + case 7: + /* special case */ + if (image_height == 1024) + image_width = (image_height * 5) / 4; + else if (image_height == 768) + 
image_width = (image_height * 15) / 9; + else + return false; + break; + case 8: + image_width = active_width; + break; + default: + return false; + } + + if (!rb_v2) + image_width = image_width & ~7; + + /* Horizontal */ + if (reduced_blanking) { + int h_blank; + int clk_gran; + + h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK; + clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = (pix_clk / clk_gran) * clk_gran; + + h_bp = h_blank / 2; + hsync = CVT_RB_H_SYNC; + h_fp = h_blank - h_bp - hsync; + + frame_width = image_width + h_blank; + } else { + unsigned ideal_duty_cycle_per_myriad = + 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq; + int h_blank; + + if (ideal_duty_cycle_per_myriad < 2000) + ideal_duty_cycle_per_myriad = 2000; + + h_blank = image_width * ideal_duty_cycle_per_myriad / + (10000 - ideal_duty_cycle_per_myriad); + h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN; + + h_bp = h_blank / 2; + frame_width = image_width + h_blank; + + hsync = frame_width * CVT_HSYNC_PERCENT / 100; + hsync = (hsync / CVT_CELL_GRAN) * CVT_CELL_GRAN; + h_fp = h_blank - hsync - h_bp; + } + + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; + + if (!interlaced) { + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; + } else { + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - + 2 * vsync) / 2; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + 
t.bt.interlaced = V4L2_DV_INTERLACED; + } + + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_CVT; + + if (reduced_blanking) + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; + return true; +} +EXPORT_SYMBOL_GPL(v4l2_detect_cvt); + +/* + * GTF defines + * Based on Generalized Timing Formula Standard + * Version 1.1 September 2, 1999 + */ + +#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */ + +#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */ +#define GTF_V_FP 1 /* vertical front porch (lines) */ +#define GTF_CELL_GRAN 8 /* character cell granularity */ + +/* Default */ +#define GTF_D_M 600 /* blanking formula gradient */ +#define GTF_D_C 40 /* blanking formula offset */ +#define GTF_D_K 128 /* blanking formula scaling factor */ +#define GTF_D_J 20 /* blanking formula scaling factor */ +#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J) +#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256) + +/* Secondary */ +#define GTF_S_M 3600 /* blanking formula gradient */ +#define GTF_S_C 40 /* blanking formula offset */ +#define GTF_S_K 128 /* blanking formula scaling factor */ +#define GTF_S_J 35 /* blanking formula scaling factor */ +#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J) +#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256) + +/** v4l2_detect_gtf - detect if the given timings follow the GTF standard + * @frame_height - the total height of the frame (including blanking) in lines. + * @hfreq - the horizontal frequency in Hz. + * @vsync - the height of the vertical sync in lines. + * @polarities - the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced - if this flag is true, it indicates interlaced format + * @aspect - preferred aspect ratio. 
GTF has no method of determining the + * aspect ratio in order to derive the image width from the + * image height, so it has to be passed explicitly. Usually + * the native screen aspect ratio is used for this. If it + * is not filled in correctly, then 16:9 will be assumed. + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid GTF format. If so, then it will return true, and fmt will be filled + * in with the found GTF timings. + */ +bool v4l2_detect_gtf(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + u32 polarities, + bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) +{ + struct v4l2_dv_timings t = {}; + int pix_clk; + int v_fp, v_bp, h_fp, hsync; + int frame_width, image_height, image_width; + bool default_gtf; + int h_blank; + + if (vsync != 3) + return false; + + if (polarities == V4L2_DV_VSYNC_POS_POL) + default_gtf = true; + else if (polarities == V4L2_DV_HSYNC_POS_POL) + default_gtf = false; + else + return false; + + if (hfreq == 0) + return false; + + /* Vertical */ + v_fp = GTF_V_FP; + v_bp = (GTF_MIN_VSYNC_BP * hfreq + 500000) / 1000000 - vsync; + if (interlaced) + image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1; + else + image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1; + + if (image_height < 0) + return false; + + if (aspect.numerator == 0 || aspect.denominator == 0) { + aspect.numerator = 16; + aspect.denominator = 9; + } + image_width = ((image_height * aspect.numerator) / aspect.denominator); + image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1); + + /* Horizontal */ + if (default_gtf) { + u64 num; + u32 den; + + num = (((u64)image_width * GTF_D_C_PRIME * hfreq) - + ((u64)image_width * GTF_D_M_PRIME * 1000)); + den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) * + (2 
* GTF_CELL_GRAN); + h_blank = div_u64((num + (den >> 1)), den); + h_blank *= (2 * GTF_CELL_GRAN); + } else { + u64 num; + u32 den; + + num = (((u64)image_width * GTF_S_C_PRIME * hfreq) - + ((u64)image_width * GTF_S_M_PRIME * 1000)); + den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) * + (2 * GTF_CELL_GRAN); + h_blank = div_u64((num + (den >> 1)), den); + h_blank *= (2 * GTF_CELL_GRAN); + } + + frame_width = image_width + h_blank; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN; + + hsync = (frame_width * 8 + 50) / 100; + hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN; + + h_fp = h_blank / 2 - hsync; + + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; + + if (!interlaced) { + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; + } else { + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - + 2 * vsync) / 2; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + t.bt.interlaced = V4L2_DV_INTERLACED; + } + + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_GTF; + + if (!default_gtf) + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; + return true; +} +EXPORT_SYMBOL_GPL(v4l2_detect_gtf); + +/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes + * 0x15 and 0x16 from the EDID. + * @hor_landscape - byte 0x15 from the EDID. + * @vert_portrait - byte 0x16 from the EDID. + * + * Determines the aspect ratio from the EDID. 
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: + * "Horizontal and Vertical Screen Size or Aspect Ratio" + */ +struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait) +{ + struct v4l2_fract aspect = { 16, 9 }; + u8 ratio; + + /* Nothing filled in, fallback to 16:9 */ + if (!hor_landscape && !vert_portrait) + return aspect; + /* Both filled in, so they are interpreted as the screen size in cm */ + if (hor_landscape && vert_portrait) { + aspect.numerator = hor_landscape; + aspect.denominator = vert_portrait; + return aspect; + } + /* Only one is filled in, so interpret them as a ratio: + (val + 99) / 100 */ + ratio = hor_landscape | vert_portrait; + /* Change some rounded values into the exact aspect ratio */ + if (ratio == 79) { + aspect.numerator = 16; + aspect.denominator = 9; + } else if (ratio == 34) { + aspect.numerator = 4; + aspect.denominator = 3; + } else if (ratio == 68) { + aspect.numerator = 15; + aspect.denominator = 9; + } else { + aspect.numerator = hor_landscape + 99; + aspect.denominator = 100; + } + if (hor_landscape) + return aspect; + /* The aspect ratio is for portrait, so swap numerator and denominator */ + swap(aspect.denominator, aspect.numerator); + return aspect; +} +EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio); + +/** v4l2_hdmi_rx_colorimetry - determine HDMI colorimetry information + * based on various InfoFrames. + * @avi: the AVI InfoFrame + * @hdmi: the HDMI Vendor InfoFrame, may be NULL + * @height: the frame height + * + * Determines the HDMI colorimetry information, i.e. how the HDMI + * pixel color data should be interpreted. + * + * Note that some of the newer features (DCI-P3, HDR) are not yet + * implemented: the hdmi.h header needs to be updated to the HDMI 2.0 + * and CTA-861-G standards. 
+ */ +struct v4l2_hdmi_colorimetry +v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, + const struct hdmi_vendor_infoframe *hdmi, + unsigned int height) +{ + struct v4l2_hdmi_colorimetry c = { + V4L2_COLORSPACE_SRGB, + V4L2_YCBCR_ENC_DEFAULT, + V4L2_QUANTIZATION_FULL_RANGE, + V4L2_XFER_FUNC_SRGB + }; + bool is_ce = avi->video_code || (hdmi && hdmi->vic); + bool is_sdtv = height <= 576; + bool default_is_lim_range_rgb = avi->video_code > 1; + + switch (avi->colorspace) { + case HDMI_COLORSPACE_RGB: + /* RGB pixel encoding */ + switch (avi->colorimetry) { + case HDMI_COLORIMETRY_EXTENDED: + switch (avi->extended_colorimetry) { + case HDMI_EXTENDED_COLORIMETRY_OPRGB: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + default: + break; + } + break; + default: + break; + } + switch (avi->quantization_range) { + case HDMI_QUANTIZATION_RANGE_LIMITED: + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + break; + case HDMI_QUANTIZATION_RANGE_FULL: + break; + default: + if (default_is_lim_range_rgb) + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + break; + } + break; + + default: + /* YCbCr pixel encoding */ + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + switch (avi->colorimetry) { + case HDMI_COLORIMETRY_NONE: + if (!is_ce) + break; + if (is_sdtv) { + c.colorspace = V4L2_COLORSPACE_SMPTE170M; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + } else { + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + } + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_COLORIMETRY_ITU_601: + c.colorspace = V4L2_COLORSPACE_SMPTE170M; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_COLORIMETRY_ITU_709: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case 
HDMI_COLORIMETRY_EXTENDED: + switch (avi->extended_colorimetry) { + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_XV709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_XV601; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_S_YCC_601: + c.colorspace = V4L2_COLORSPACE_SRGB; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_SRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + default: /* fall back to ITU_709 */ + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + } + break; + default: + break; + } + /* + * YCC Quantization Range signaling is more-or-less broken, + * let's just ignore this. + */ + break; + } + return c; +} +EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry); + +/** + * v4l2_num_edid_blocks() - return the number of EDID blocks + * + * @edid: pointer to the EDID data + * @max_blocks: maximum number of supported EDID blocks + * + * Return: the number of EDID blocks based on the contents of the EDID. + * This supports the HDMI Forum EDID Extension Override Data Block. 
+ */ +unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks) +{ + unsigned int blocks; + + if (!edid || !max_blocks) + return 0; + + // The number of extension blocks is recorded at byte 126 of the + // first 128-byte block in the EDID. + // + // If there is an HDMI Forum EDID Extension Override Data Block + // present, then it is in bytes 4-6 of the first CTA-861 extension + // block of the EDID. + blocks = edid[126] + 1; + // Check for HDMI Forum EDID Extension Override Data Block + if (blocks >= 2 && // The EDID must be at least 2 blocks + max_blocks >= 3 && // The caller supports at least 3 blocks + edid[128] == 2 && // The first extension block is type CTA-861 + edid[133] == 0x78 && // Identifier for the EEODB + (edid[132] & 0xe0) == 0xe0 && // Tag Code == 7 + (edid[132] & 0x1f) >= 2 && // Length >= 2 + edid[134] > 1) // Number of extension blocks is sane + blocks = edid[134] + 1; + return blocks > max_blocks ? max_blocks : blocks; +} +EXPORT_SYMBOL_GPL(v4l2_num_edid_blocks); + +/** + * v4l2_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. 
+ */ +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + + if (offset) + *offset = loc; + if (loc == 0) + return CEC_PHYS_ADDR_INVALID; + return (edid[loc] << 8) | edid[loc + 1]; +} +EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr); + +/** + * v4l2_set_edid_phys_addr() - find and set the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @phys_addr: the new physical address + * + * This function finds the location of the physical address in the EDID + * and fills in the given physical address and updates the checksum + * at the end of the EDID block. It does nothing if the EDID doesn't + * contain a physical address. + */ +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + u8 sum = 0; + unsigned int i; + + if (loc == 0) + return; + edid[loc] = phys_addr >> 8; + edid[loc + 1] = phys_addr & 0xff; + loc &= ~0x7f; + + /* update the checksum */ + for (i = loc; i < loc + 127; i++) + sum += edid[i]; + edid[i] = 256 - sum; +} +EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr); + +/** + * v4l2_phys_addr_for_input() - calculate the PA for an input + * + * @phys_addr: the physical address of the parent + * @input: the number of the input port, must be between 1 and 15 + * + * This function calculates a new physical address based on the input + * port number. For example: + * + * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0 + * + * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0 + * + * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5 + * + * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth. + * + * Return: the new physical address or CEC_PHYS_ADDR_INVALID. 
+ */ +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input) +{ + /* Check if input is sane */ + if (WARN_ON(input == 0 || input > 0xf)) + return CEC_PHYS_ADDR_INVALID; + + if (phys_addr == 0) + return input << 12; + + if ((phys_addr & 0x0fff) == 0) + return phys_addr | (input << 8); + + if ((phys_addr & 0x00ff) == 0) + return phys_addr | (input << 4); + + if ((phys_addr & 0x000f) == 0) + return phys_addr | input; + + /* + * All nibbles are used so no valid physical addresses can be assigned + * to the input. + */ + return CEC_PHYS_ADDR_INVALID; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input); + +/** + * v4l2_phys_addr_validate() - validate a physical address from an EDID + * + * @phys_addr: the physical address to validate + * @parent: if not %NULL, then this is filled with the parents PA. + * @port: if not %NULL, then this is filled with the input port. + * + * This validates a physical address as read from an EDID. If the + * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end), + * then it will return -EINVAL. + * + * The parent PA is passed into %parent and the input port is passed into + * %port. For example: + * + * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0. + * + * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1. + * + * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2. + * + * PA = f.f.f.f: has parent f.f.f.f and input port 0. + * + * Return: 0 if the PA is valid, -EINVAL if not. 
+ */ +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port) +{ + int i; + + if (parent) + *parent = phys_addr; + if (port) + *port = 0; + if (phys_addr == CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + if (parent) + *parent = phys_addr & (0xfff0 << i); + if (port) + *port = (phys_addr >> i) & 0xf; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate); + +#ifdef CONFIG_DEBUG_FS + +#define DEBUGFS_FOPS(type, flag) \ +static ssize_t \ +infoframe_read_##type(struct file *filp, \ + char __user *ubuf, size_t count, loff_t *ppos) \ +{ \ + struct v4l2_debugfs_if *infoframes = filp->private_data; \ + \ + return infoframes->if_read((flag), infoframes->priv, filp, \ + ubuf, count, ppos); \ +} \ + \ +static const struct file_operations infoframe_##type##_fops = { \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = infoframe_read_##type, \ +} + +DEBUGFS_FOPS(avi, V4L2_DEBUGFS_IF_AVI); +DEBUGFS_FOPS(audio, V4L2_DEBUGFS_IF_AUDIO); +DEBUGFS_FOPS(spd, V4L2_DEBUGFS_IF_SPD); +DEBUGFS_FOPS(hdmi, V4L2_DEBUGFS_IF_HDMI); + +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read) +{ + struct v4l2_debugfs_if *infoframes; + + if (IS_ERR_OR_NULL(root) || !if_types || !if_read) + return NULL; + + infoframes = kzalloc(sizeof(*infoframes), GFP_KERNEL); + if (!infoframes) + return NULL; + + infoframes->if_dir = debugfs_create_dir("infoframes", root); + infoframes->priv = priv; + infoframes->if_read = if_read; + if (if_types & V4L2_DEBUGFS_IF_AVI) + debugfs_create_file("avi", 0400, infoframes->if_dir, + infoframes, &infoframe_avi_fops); + if (if_types & V4L2_DEBUGFS_IF_AUDIO) + debugfs_create_file("audio", 0400, infoframes->if_dir, + infoframes, &infoframe_audio_fops); + if (if_types & V4L2_DEBUGFS_IF_SPD) + 
debugfs_create_file("spd", 0400, infoframes->if_dir, + infoframes, &infoframe_spd_fops); + if (if_types & V4L2_DEBUGFS_IF_HDMI) + debugfs_create_file("hdmi", 0400, infoframes->if_dir, + infoframes, &infoframe_hdmi_fops); + return infoframes; +} +EXPORT_SYMBOL_GPL(v4l2_debugfs_if_alloc); + +void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes) +{ + if (infoframes) { + debugfs_remove_recursive(infoframes->if_dir); + kfree(infoframes); + } +} +EXPORT_SYMBOL_GPL(v4l2_debugfs_if_free); + +#endif diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-event.c b/6.17.0/drivers/media/v4l2-core/v4l2-event.c new file mode 100644 index 0000000..3898ff7 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-event.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-event.c + * + * V4L2 events. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#include +#include +#include + +#include +#include +#include +#include + +static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx) +{ + idx += sev->first; + return idx >= sev->elems ? 
idx - sev->elems : idx; +} + +static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) +{ + struct v4l2_kevent *kev; + struct timespec64 ts; + unsigned long flags; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + + if (list_empty(&fh->available)) { + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + return -ENOENT; + } + + WARN_ON(fh->navailable == 0); + + kev = list_first_entry(&fh->available, struct v4l2_kevent, list); + list_del(&kev->list); + fh->navailable--; + + kev->event.pending = fh->navailable; + *event = kev->event; + ts = ns_to_timespec64(kev->ts); + event->timestamp.tv_sec = ts.tv_sec; + event->timestamp.tv_nsec = ts.tv_nsec; + kev->sev->first = sev_pos(kev->sev, 1); + kev->sev->in_use--; + + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + return 0; +} + +int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, + int nonblocking) +{ + int ret; + + if (nonblocking) + return __v4l2_event_dequeue(fh, event); + + /* Release the vdev lock while waiting */ + if (fh->vdev->lock) + mutex_unlock(fh->vdev->lock); + + do { + ret = wait_event_interruptible(fh->wait, + fh->navailable != 0); + if (ret < 0) + break; + + ret = __v4l2_event_dequeue(fh, event); + } while (ret == -ENOENT); + + if (fh->vdev->lock) + mutex_lock(fh->vdev->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_event_dequeue); + +/* Caller must hold fh->vdev->fh_lock! */ +static struct v4l2_subscribed_event *v4l2_event_subscribed( + struct v4l2_fh *fh, u32 type, u32 id) +{ + struct v4l2_subscribed_event *sev; + + assert_spin_locked(&fh->vdev->fh_lock); + + list_for_each_entry(sev, &fh->subscribed, list) + if (sev->type == type && sev->id == id) + return sev; + + return NULL; +} + +static void __v4l2_event_queue_fh(struct v4l2_fh *fh, + const struct v4l2_event *ev, u64 ts) +{ + struct v4l2_subscribed_event *sev; + struct v4l2_kevent *kev; + bool copy_payload = true; + + /* Are we subscribed? 
*/ + sev = v4l2_event_subscribed(fh, ev->type, ev->id); + if (sev == NULL) + return; + + /* Increase event sequence number on fh. */ + fh->sequence++; + + /* Do we have any free events? */ + if (sev->in_use == sev->elems) { + /* no, remove the oldest one */ + kev = sev->events + sev_pos(sev, 0); + list_del(&kev->list); + sev->in_use--; + sev->first = sev_pos(sev, 1); + fh->navailable--; + if (sev->elems == 1) { + if (sev->ops && sev->ops->replace) { + sev->ops->replace(&kev->event, ev); + copy_payload = false; + } + } else if (sev->ops && sev->ops->merge) { + struct v4l2_kevent *second_oldest = + sev->events + sev_pos(sev, 0); + sev->ops->merge(&kev->event, &second_oldest->event); + } + } + + /* Take one and fill it. */ + kev = sev->events + sev_pos(sev, sev->in_use); + kev->event.type = ev->type; + if (copy_payload) + kev->event.u = ev->u; + kev->event.id = ev->id; + kev->ts = ts; + kev->event.sequence = fh->sequence; + sev->in_use++; + list_add_tail(&kev->list, &fh->available); + + fh->navailable++; + + wake_up_all(&fh->wait); +} + +void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) +{ + struct v4l2_fh *fh; + unsigned long flags; + u64 ts; + + if (vdev == NULL) + return; + + ts = ktime_get_ns(); + + spin_lock_irqsave(&vdev->fh_lock, flags); + + list_for_each_entry(fh, &vdev->fh_list, list) + __v4l2_event_queue_fh(fh, ev, ts); + + spin_unlock_irqrestore(&vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_queue); + +void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) +{ + unsigned long flags; + u64 ts = ktime_get_ns(); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_queue_fh(fh, ev, ts); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_queue_fh); + +int v4l2_event_pending(struct v4l2_fh *fh) +{ + return fh->navailable; +} +EXPORT_SYMBOL_GPL(v4l2_event_pending); + +void v4l2_event_wake_all(struct video_device *vdev) +{ + struct v4l2_fh *fh; + unsigned long 
flags; + + if (!vdev) + return; + + spin_lock_irqsave(&vdev->fh_lock, flags); + + list_for_each_entry(fh, &vdev->fh_list, list) + wake_up_all(&fh->wait); + + spin_unlock_irqrestore(&vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_wake_all); + +static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev) +{ + struct v4l2_fh *fh = sev->fh; + unsigned int i; + + lockdep_assert_held(&fh->subscribe_lock); + assert_spin_locked(&fh->vdev->fh_lock); + + /* Remove any pending events for this subscription */ + for (i = 0; i < sev->in_use; i++) { + list_del(&sev->events[sev_pos(sev, i)].list); + fh->navailable--; + } + list_del(&sev->list); +} + +int v4l2_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub, unsigned int elems, + const struct v4l2_subscribed_event_ops *ops) +{ + struct v4l2_subscribed_event *sev, *found_ev; + unsigned long flags; + unsigned int i; + int ret = 0; + + if (sub->type == V4L2_EVENT_ALL) + return -EINVAL; + + if (elems < 1) + elems = 1; + + sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL); + if (!sev) + return -ENOMEM; + sev->elems = elems; + for (i = 0; i < elems; i++) + sev->events[i].sev = sev; + sev->type = sub->type; + sev->id = sub->id; + sev->flags = sub->flags; + sev->fh = fh; + sev->ops = ops; + + mutex_lock(&fh->subscribe_lock); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (!found_ev) + list_add(&sev->list, &fh->subscribed); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + if (found_ev) { + /* Already listening */ + kvfree(sev); + } else if (sev->ops && sev->ops->add) { + ret = sev->ops->add(sev, elems); + if (ret) { + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_unsubscribe(sev); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + kvfree(sev); + } + } + + mutex_unlock(&fh->subscribe_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_event_subscribe); + +void 
v4l2_event_unsubscribe_all(struct v4l2_fh *fh) +{ + struct v4l2_event_subscription sub; + struct v4l2_subscribed_event *sev; + unsigned long flags; + + do { + sev = NULL; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + if (!list_empty(&fh->subscribed)) { + sev = list_first_entry(&fh->subscribed, + struct v4l2_subscribed_event, list); + sub.type = sev->type; + sub.id = sev->id; + } + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + if (sev) + v4l2_event_unsubscribe(fh, &sub); + } while (sev); +} +EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all); + +int v4l2_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct v4l2_subscribed_event *sev; + unsigned long flags; + + if (sub->type == V4L2_EVENT_ALL) { + v4l2_event_unsubscribe_all(fh); + return 0; + } + + mutex_lock(&fh->subscribe_lock); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + + sev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (sev != NULL) + __v4l2_event_unsubscribe(sev); + + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + if (sev && sev->ops && sev->ops->del) + sev->ops->del(sev); + + mutex_unlock(&fh->subscribe_lock); + + kvfree(sev); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe); + +int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return v4l2_event_unsubscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe); + +static void v4l2_event_src_replace(struct v4l2_event *old, + const struct v4l2_event *new) +{ + u32 old_changes = old->u.src_change.changes; + + old->u.src_change = new->u.src_change; + old->u.src_change.changes |= old_changes; +} + +static void v4l2_event_src_merge(const struct v4l2_event *old, + struct v4l2_event *new) +{ + new->u.src_change.changes |= old->u.src_change.changes; +} + +static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = { + .replace = v4l2_event_src_replace, + .merge = v4l2_event_src_merge, +}; + +int 
v4l2_src_change_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_SOURCE_CHANGE) + return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe); + +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, struct v4l2_event_subscription *sub) +{ + return v4l2_src_change_event_subscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-fh.c b/6.17.0/drivers/media/v4l2-core/v4l2-fh.c new file mode 100644 index 0000000..90eec79 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-fh.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-fh.c + * + * V4L2 file handles. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev) +{ + fh->vdev = vdev; + /* Inherit from video_device. May be overridden by the driver. */ + fh->ctrl_handler = vdev->ctrl_handler; + INIT_LIST_HEAD(&fh->list); + set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags); + /* + * determine_valid_ioctls() does not know if struct v4l2_fh + * is used by this driver, but here we do. So enable the + * prio ioctls here. 
+ */ + set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls); + set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls); + fh->prio = V4L2_PRIORITY_UNSET; + init_waitqueue_head(&fh->wait); + INIT_LIST_HEAD(&fh->available); + INIT_LIST_HEAD(&fh->subscribed); + fh->sequence = -1; + mutex_init(&fh->subscribe_lock); +} +EXPORT_SYMBOL_GPL(v4l2_fh_init); + +void v4l2_fh_add(struct v4l2_fh *fh) +{ + unsigned long flags; + + v4l2_prio_open(fh->vdev->prio, &fh->prio); + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_add(&fh->list, &fh->vdev->fh_list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_fh_add); + +int v4l2_fh_open(struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL); + + filp->private_data = fh; + if (fh == NULL) + return -ENOMEM; + v4l2_fh_init(fh, vdev); + v4l2_fh_add(fh); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fh_open); + +void v4l2_fh_del(struct v4l2_fh *fh) +{ + unsigned long flags; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_del_init(&fh->list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + v4l2_prio_close(fh->vdev->prio, fh->prio); +} +EXPORT_SYMBOL_GPL(v4l2_fh_del); + +void v4l2_fh_exit(struct v4l2_fh *fh) +{ + if (fh->vdev == NULL) + return; + v4l_disable_media_source(fh->vdev); + v4l2_event_unsubscribe_all(fh); + mutex_destroy(&fh->subscribe_lock); + fh->vdev = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_fh_exit); + +int v4l2_fh_release(struct file *filp) +{ + struct v4l2_fh *fh = filp->private_data; + + if (fh) { + v4l2_fh_del(fh); + v4l2_fh_exit(fh); + kfree(fh); + filp->private_data = NULL; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fh_release); + +int v4l2_fh_is_singular(struct v4l2_fh *fh) +{ + unsigned long flags; + int is_singular; + + if (fh == NULL || fh->vdev == NULL) + return 0; + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + is_singular = list_is_singular(&fh->list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, 
flags); + return is_singular; +} +EXPORT_SYMBOL_GPL(v4l2_fh_is_singular); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-flash-led-class.c b/6.17.0/drivers/media/v4l2-core/v4l2-flash-led-class.c new file mode 100644 index 0000000..355595a --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 flash LED sub-device registration helpers. + * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski + */ + +#include +#include +#include +#include +#include +#include +#include + +#define has_flash_op(v4l2_flash, op) \ + (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op) + +#define call_flash_op(v4l2_flash, op, arg) \ + (has_flash_op(v4l2_flash, op) ? \ + v4l2_flash->ops->op(v4l2_flash, arg) : \ + -EINVAL) + +enum ctrl_init_data_id { + LED_MODE, + TORCH_INTENSITY, + FLASH_INTENSITY, + INDICATOR_INTENSITY, + FLASH_TIMEOUT, + STROBE_SOURCE, + /* + * Only above values are applicable to + * the 'ctrls' array in the struct v4l2_flash. + */ + FLASH_STROBE, + STROBE_STOP, + STROBE_STATUS, + FLASH_FAULT, + NUM_FLASH_CTRLS, +}; + +static enum led_brightness __intensity_to_led_brightness( + struct v4l2_ctrl *ctrl, s32 intensity) +{ + intensity -= ctrl->minimum; + intensity /= (u32) ctrl->step; + + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Therefore it must be possible to set it to 0 level which in + * the LED subsystem reflects LED_OFF state. + */ + if (ctrl->minimum) + ++intensity; + + return intensity; +} + +static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl, + enum led_brightness brightness) +{ + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Do not decrement brightness read from the LED subsystem for + * indicator LED as it may equal 0. 
For torch LEDs this function + * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the + * brightness read is guaranteed to be greater than 0. In the mode + * V4L2_FLASH_LED_MODE_NONE the cached torch intensity value is used. + */ + if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY) + --brightness; + + return (brightness * ctrl->step) + ctrl->minimum; +} + +static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + enum led_brightness brightness; + + if (has_flash_op(v4l2_flash, intensity_to_led_brightness)) + brightness = call_flash_op(v4l2_flash, + intensity_to_led_brightness, + ctrl->val); + else + brightness = __intensity_to_led_brightness(ctrl, ctrl->val); + /* + * In case a LED Flash class driver provides ops for custom + * brightness <-> intensity conversion, it also must have defined + * related v4l2 control step == 1. In such a case a backward conversion + * from led brightness to v4l2 intensity is required to find out the + * aligned intensity value. + */ + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + brightness); + + if (ctrl == ctrls[TORCH_INTENSITY]) { + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + + if (WARN_ON_ONCE(!v4l2_flash->fled_cdev)) + return -EINVAL; + + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + if (WARN_ON_ONCE(!v4l2_flash->iled_cdev)) + return -EINVAL; + + led_cdev = v4l2_flash->iled_cdev; + } + + return led_set_brightness_sync(led_cdev, brightness); +} + +static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + int ret; + + if (ctrl == ctrls[TORCH_INTENSITY]) { + /* + * Update torch brightness only if in TORCH_MODE. 
In other modes + * torch led is turned off, which would spuriously inform the + * user space that V4L2_CID_FLASH_TORCH_INTENSITY control value + * has changed to 0. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + + if (WARN_ON_ONCE(!v4l2_flash->fled_cdev)) + return -EINVAL; + + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + if (WARN_ON_ONCE(!v4l2_flash->iled_cdev)) + return -EINVAL; + + led_cdev = v4l2_flash->iled_cdev; + } + + ret = led_update_brightness(led_cdev); + if (ret < 0) + return ret; + + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + led_cdev->brightness); + else + ctrl->val = __led_brightness_to_intensity(ctrl, + led_cdev->brightness); + + return 0; +} + +static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + bool is_strobing; + int ret; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_update_led_brightness(v4l2_flash, c); + } + + if (!fled_cdev) + return -EINVAL; + + switch (c->id) { + case V4L2_CID_FLASH_INTENSITY: + ret = led_update_flash_brightness(fled_cdev); + if (ret < 0) + return ret; + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. 
+ */ + c->val = fled_cdev->brightness.val; + return 0; + case V4L2_CID_FLASH_STROBE_STATUS: + ret = led_get_flash_strobe(fled_cdev, &is_strobing); + if (ret < 0) + return ret; + c->val = is_strobing; + return 0; + case V4L2_CID_FLASH_FAULT: + /* LED faults map directly to V4L2 flash faults */ + return led_get_flash_fault(fled_cdev, &c->val); + default: + return -EINVAL; + } +} + +static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls) +{ + return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) || + (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val != + V4L2_FLASH_STROBE_SOURCE_SOFTWARE))); +} + +static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + bool external_strobe; + int ret = 0; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_set_led_brightness(v4l2_flash, c); + } + + if (!fled_cdev) + return -EINVAL; + + led_cdev = &fled_cdev->led_cdev; + + switch (c->id) { + case V4L2_CID_FLASH_LED_MODE: + switch (c->val) { + case V4L2_FLASH_LED_MODE_NONE: + led_set_brightness_sync(led_cdev, LED_OFF); + return led_set_flash_strobe(fled_cdev, false); + case V4L2_FLASH_LED_MODE_FLASH: + /* Turn the torch LED off */ + led_set_brightness_sync(led_cdev, LED_OFF); + if (ctrls[STROBE_SOURCE]) { + external_strobe = (ctrls[STROBE_SOURCE]->val == + V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + + ret = call_flash_op(v4l2_flash, + external_strobe_set, + external_strobe); + } + return ret; + case V4L2_FLASH_LED_MODE_TORCH: + if (ctrls[STROBE_SOURCE]) { + ret = call_flash_op(v4l2_flash, + external_strobe_set, + false); + if (ret < 0) + return ret; + } + /* Stop flash strobing */ + ret = led_set_flash_strobe(fled_cdev, false); + if (ret < 0) + return ret; + + return 
v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + } + break; + case V4L2_CID_FLASH_STROBE_SOURCE: + external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + /* + * For some hardware arrangements setting strobe source may + * affect torch mode. Therefore, if not in the flash mode, + * cache only this setting. It will be applied upon switching + * to flash mode. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) + return 0; + + return call_flash_op(v4l2_flash, external_strobe_set, + external_strobe); + case V4L2_CID_FLASH_STROBE: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, true); + case V4L2_CID_FLASH_STROBE_STOP: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, false); + case V4L2_CID_FLASH_TIMEOUT: + /* + * No conversion is needed as LED Flash class also uses + * microseconds for flash timeout units. + */ + return led_set_flash_timeout(fled_cdev, c->val); + case V4L2_CID_FLASH_INTENSITY: + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. + */ + return led_set_flash_brightness(fled_cdev, c->val); + } + + return -EINVAL; +} + +static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = { + .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl, + .s_ctrl = v4l2_flash_s_ctrl, +}; + +static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s, + struct v4l2_ctrl_config *c) +{ + c->min = s->min; + c->max = s->max; + c->step = s->step; + c->def = s->val; +} + +static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg, + struct v4l2_flash_ctrl_data *ctrl_init_data) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct v4l2_ctrl_config *ctrl_cfg; + u32 mask; + + /* Init INDICATOR_INTENSITY ctrl data */ + if (v4l2_flash->iled_cdev) { + ctrl_init_data[INDICATOR_INTENSITY].cid = + V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, + ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg->min = 0; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } + + if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH))) + return; + + /* Init FLASH_FAULT ctrl data */ + if (flash_cfg->flash_faults) { + ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT; + ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config; + ctrl_cfg->id = V4L2_CID_FLASH_FAULT; + ctrl_cfg->max = flash_cfg->flash_faults; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_LED_MODE ctrl data */ + mask = 1 << V4L2_FLASH_LED_MODE_NONE | + 1 << V4L2_FLASH_LED_MODE_TORCH; + if (led_cdev->flags & LED_DEV_CAP_FLASH) + mask |= 1 << V4L2_FLASH_LED_MODE_FLASH; + + ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg = &ctrl_init_data[LED_MODE].config; + ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE; + ctrl_cfg->flags = 0; + + /* Init TORCH_INTENSITY ctrl data */ + ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + + /* Init FLASH_STROBE ctrl data */ + ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE; + ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE; + + /* Init 
STROBE_STOP ctrl data */ + ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP; + ctrl_cfg = &ctrl_init_data[STROBE_STOP].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP; + + /* Init FLASH_STROBE_SOURCE ctrl data */ + if (flash_cfg->has_external_strobe) { + mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) | + (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + ctrl_init_data[STROBE_SOURCE].cid = + V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE; + } + + /* Init STROBE_STATUS ctrl data */ + if (has_flash_op(fled_cdev, strobe_get)) { + ctrl_init_data[STROBE_STATUS].cid = + V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_TIMEOUT ctrl data */ + if (has_flash_op(fled_cdev, timeout_set)) { + ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT; + ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT; + } + + /* Init FLASH_INTENSITY ctrl data */ + if (has_flash_op(fled_cdev, flash_brightness_set)) { + ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } +} + +static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg) + +{ + struct v4l2_flash_ctrl_data *ctrl_init_data; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_config *ctrl_cfg; + int i, ret, num_ctrls = 0; + + v4l2_flash->ctrls 
= devm_kcalloc(v4l2_flash->sd.dev, + STROBE_SOURCE + 1, + sizeof(*v4l2_flash->ctrls), + GFP_KERNEL); + if (!v4l2_flash->ctrls) + return -ENOMEM; + + /* allocate memory dynamically so as not to exceed stack frame size */ + ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data), + GFP_KERNEL); + if (!ctrl_init_data) + return -ENOMEM; + + __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) + if (ctrl_init_data[i].cid) + ++num_ctrls; + + v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) { + ctrl_cfg = &ctrl_init_data[i].config; + if (!ctrl_init_data[i].cid) + continue; + + if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE || + ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE) + ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->max, + ctrl_cfg->menu_skip_mask, + ctrl_cfg->def); + else + ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->min, + ctrl_cfg->max, + ctrl_cfg->step, + ctrl_cfg->def); + + if (ctrl) + ctrl->flags |= ctrl_cfg->flags; + + if (i <= STROBE_SOURCE) + v4l2_flash->ctrls[i] = ctrl; + } + + kfree(ctrl_init_data); + + if (v4l2_flash->hdl.error) { + ret = v4l2_flash->hdl.error; + goto error_free_handler; + } + + v4l2_ctrl_handler_setup(&v4l2_flash->hdl); + + v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl; + + return 0; + +error_free_handler: + v4l2_ctrl_handler_free(&v4l2_flash->hdl); + return ret; +} + +static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + int ret = 0; + + if (ctrls[TORCH_INTENSITY]) { + ret = v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + if (ret < 0) + return ret; + } + + if (ctrls[INDICATOR_INTENSITY]) { + ret = v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[INDICATOR_INTENSITY]); + if 
(ret < 0) + return ret; + } + + if (ctrls[FLASH_TIMEOUT]) { + if (WARN_ON_ONCE(!fled_cdev)) + return -EINVAL; + + ret = led_set_flash_timeout(fled_cdev, + ctrls[FLASH_TIMEOUT]->val); + if (ret < 0) + return ret; + } + + if (ctrls[FLASH_INTENSITY]) { + if (WARN_ON_ONCE(!fled_cdev)) + return -EINVAL; + + ret = led_set_flash_brightness(fled_cdev, + ctrls[FLASH_INTENSITY]->val); + if (ret < 0) + return ret; + } + + /* + * For some hardware arrangements setting strobe source may affect + * torch mode. Synchronize strobe source setting only if not in torch + * mode. For torch mode case it will get synchronized upon switching + * to flash mode. + */ + if (ctrls[STROBE_SOURCE] && + ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + ret = call_flash_op(v4l2_flash, external_strobe_set, + ctrls[STROBE_SOURCE]->val); + + return ret; +} + +/* + * V4L2 subdev internal operations + */ + +static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + + led_sysfs_disable(led_cdev); + led_trigger_remove(led_cdev); + + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + + led_sysfs_disable(led_cdev_ind); + led_trigger_remove(led_cdev_ind); + + mutex_unlock(&led_cdev_ind->led_access); + } + + ret = __sync_device_with_v4l2_controls(v4l2_flash); + if (ret < 0) + goto out_sync_device; + + return 0; +out_sync_device: + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + + if (v4l2_flash->ctrls[STROBE_SOURCE]) + ret = v4l2_ctrl_s_ctrl( + v4l2_flash->ctrls[STROBE_SOURCE], + V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + led_sysfs_enable(led_cdev); + + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = { + .open = v4l2_flash_open, + .close = v4l2_flash_close, +}; + +static const struct v4l2_subdev_ops v4l2_flash_subdev_ops; + +static struct v4l2_flash *__v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) +{ + struct v4l2_flash *v4l2_flash; + struct v4l2_subdev *sd; + int ret; + + if (!config) + return ERR_PTR(-EINVAL); + + v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL); + if (!v4l2_flash) + return ERR_PTR(-ENOMEM); + + sd = &v4l2_flash->sd; + v4l2_flash->fled_cdev = fled_cdev; + v4l2_flash->iled_cdev = iled_cdev; + v4l2_flash->ops = ops; + sd->dev = dev; + sd->fwnode = fwn ? 
fwn : dev_fwnode(dev); + v4l2_subdev_init(sd, &v4l2_flash_subdev_ops); + sd->internal_ops = &v4l2_flash_subdev_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + strscpy(sd->name, config->dev_name, sizeof(sd->name)); + + ret = media_entity_pads_init(&sd->entity, 0, NULL); + if (ret < 0) + return ERR_PTR(ret); + + sd->entity.function = MEDIA_ENT_F_FLASH; + + ret = v4l2_flash_init_controls(v4l2_flash, config); + if (ret < 0) + goto err_init_controls; + + fwnode_handle_get(sd->fwnode); + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto err_async_register_sd; + + return v4l2_flash; + +err_async_register_sd: + fwnode_handle_put(sd->fwnode); + v4l2_ctrl_handler_free(sd->ctrl_handler); +err_init_controls: + media_entity_cleanup(&sd->entity); + + return ERR_PTR(ret); +} + +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_init); + +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init); + +void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ + struct v4l2_subdev *sd; + + if (IS_ERR_OR_NULL(v4l2_flash)) + return; + + sd = &v4l2_flash->sd; + + v4l2_async_unregister_subdev(sd); + + fwnode_handle_put(sd->fwnode); + + v4l2_ctrl_handler_free(sd->ctrl_handler); + media_entity_cleanup(&sd->entity); +} +EXPORT_SYMBOL_GPL(v4l2_flash_release); + +MODULE_AUTHOR("Jacek Anaszewski "); +MODULE_DESCRIPTION("V4L2 Flash sub-device helpers"); +MODULE_LICENSE("GPL v2"); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-fwnode.c b/6.17.0/drivers/media/v4l2-core/v4l2-fwnode.c new file mode 100644 
index 0000000..cb153ce --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-fwnode.c @@ -0,0 +1,1299 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 fwnode binding parsing library + * + * The origins of the V4L2 fwnode library are in V4L2 OF library that + * formerly was located in v4l2-of.c. + * + * Copyright (c) 2016 Intel Corporation. + * Author: Sakari Ailus + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "v4l2-subdev-priv.h" + +static const struct v4l2_fwnode_bus_conv { + enum v4l2_fwnode_bus_type fwnode_bus_type; + enum v4l2_mbus_type mbus_type; + const char *name; +} buses[] = { + { + V4L2_FWNODE_BUS_TYPE_GUESS, + V4L2_MBUS_UNKNOWN, + "not specified", + }, { + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_MBUS_CSI2_CPHY, + "MIPI CSI-2 C-PHY", + }, { + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_MBUS_CSI1, + "MIPI CSI-1", + }, { + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_MBUS_CCP2, + "compact camera port 2", + }, { + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_MBUS_CSI2_DPHY, + "MIPI CSI-2 D-PHY", + }, { + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_MBUS_PARALLEL, + "parallel", + }, { + V4L2_FWNODE_BUS_TYPE_BT656, + V4L2_MBUS_BT656, + "Bt.656", + }, { + V4L2_FWNODE_BUS_TYPE_DPI, + V4L2_MBUS_DPI, + "DPI", + } +}; + +static const struct v4l2_fwnode_bus_conv * +get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].fwnode_bus_type == type) + return &buses[i]; + + return NULL; +} + +static enum v4l2_mbus_type +v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + + return conv ? 
conv->mbus_type : V4L2_MBUS_INVALID; +} + +static const char * +v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + + return conv ? conv->name : "not found"; +} + +static const struct v4l2_fwnode_bus_conv * +get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].mbus_type == type) + return &buses[i]; + + return NULL; +} + +static const char * +v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_mbus(type); + + return conv ? conv->name : "not found"; +} + +static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2; + bool have_clk_lane = false, have_data_lanes = false, + have_lane_polarities = false, have_line_orders = false; + unsigned int flags = 0, lanes_used = 0; + u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; + u32 clock_lane = 0; + unsigned int num_data_lanes = 0; + bool use_default_lane_mapping = false; + unsigned int i; + u32 v; + int rval; + + if (bus_type == V4L2_MBUS_CSI2_DPHY || + bus_type == V4L2_MBUS_CSI2_CPHY) { + use_default_lane_mapping = true; + + num_data_lanes = min_t(u32, bus->num_data_lanes, + V4L2_MBUS_CSI2_MAX_DATA_LANES); + + clock_lane = bus->clock_lane; + if (clock_lane) + use_default_lane_mapping = false; + + for (i = 0; i < num_data_lanes; i++) { + array[i] = bus->data_lanes[i]; + if (array[i]) + use_default_lane_mapping = false; + } + + if (use_default_lane_mapping) + pr_debug("no lane mapping given, using defaults\n"); + } + + rval = fwnode_property_count_u32(fwnode, "data-lanes"); + if (rval > 0) { + num_data_lanes = + min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval); + + fwnode_property_read_u32_array(fwnode, 
"data-lanes", array, + num_data_lanes); + + have_data_lanes = true; + if (use_default_lane_mapping) { + pr_debug("data-lanes property exists; disabling default mapping\n"); + use_default_lane_mapping = false; + } + } + + for (i = 0; i < num_data_lanes; i++) { + if (lanes_used & BIT(array[i])) { + if (have_data_lanes || !use_default_lane_mapping) + pr_warn("duplicated lane %u in data-lanes, using defaults\n", + array[i]); + use_default_lane_mapping = true; + } + lanes_used |= BIT(array[i]); + + if (have_data_lanes) + pr_debug("lane %u position %u\n", i, array[i]); + } + + rval = fwnode_property_count_u32(fwnode, "lane-polarities"); + if (rval > 0) { + if (rval != 1 + num_data_lanes /* clock+data */) { + pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n", + 1 + num_data_lanes, rval); + return -EINVAL; + } + + have_lane_polarities = true; + } + + rval = fwnode_property_count_u32(fwnode, "line-orders"); + if (rval > 0) { + if (rval != num_data_lanes) { + pr_warn("invalid number of line-orders entries (need %u, got %u)\n", + num_data_lanes, rval); + return -EINVAL; + } + + have_line_orders = true; + } + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { + clock_lane = v; + pr_debug("clock lane position %u\n", v); + have_clk_lane = true; + } + + if (have_clk_lane && lanes_used & BIT(clock_lane) && + !use_default_lane_mapping) { + pr_warn("duplicated lane %u in clock-lanes, using defaults\n", + v); + use_default_lane_mapping = true; + } + + if (fwnode_property_present(fwnode, "clock-noncontinuous")) { + flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK; + pr_debug("non-continuous clock\n"); + } + + if (bus_type == V4L2_MBUS_CSI2_DPHY || + bus_type == V4L2_MBUS_CSI2_CPHY || + lanes_used || have_clk_lane || flags) { + /* Only D-PHY has a clock lane. 
*/ + unsigned int dfl_data_lane_index = + bus_type == V4L2_MBUS_CSI2_DPHY; + + bus->flags = flags; + if (bus_type == V4L2_MBUS_UNKNOWN) + vep->bus_type = V4L2_MBUS_CSI2_DPHY; + bus->num_data_lanes = num_data_lanes; + + if (use_default_lane_mapping) { + bus->clock_lane = 0; + for (i = 0; i < num_data_lanes; i++) + bus->data_lanes[i] = dfl_data_lane_index + i; + } else { + bus->clock_lane = clock_lane; + for (i = 0; i < num_data_lanes; i++) + bus->data_lanes[i] = array[i]; + } + + if (have_lane_polarities) { + fwnode_property_read_u32_array(fwnode, + "lane-polarities", array, + 1 + num_data_lanes); + + for (i = 0; i < 1 + num_data_lanes; i++) { + bus->lane_polarities[i] = array[i]; + pr_debug("lane %u polarity %sinverted", + i, array[i] ? "" : "not "); + } + } else { + pr_debug("no lane polarities defined, assuming not inverted\n"); + } + + if (have_line_orders) { + fwnode_property_read_u32_array(fwnode, + "line-orders", array, + num_data_lanes); + + for (i = 0; i < num_data_lanes; i++) { + static const char * const orders[] = { + "ABC", "ACB", "BAC", "BCA", "CAB", "CBA" + }; + + if (array[i] >= ARRAY_SIZE(orders)) { + pr_warn("lane %u invalid line-order assuming ABC (got %u)\n", + i, array[i]); + bus->line_orders[i] = + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC; + continue; + } + + bus->line_orders[i] = array[i]; + pr_debug("lane %u line order %s", i, + orders[array[i]]); + } + } else { + for (i = 0; i < num_data_lanes; i++) + bus->line_orders[i] = + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC; + + pr_debug("no line orders defined, assuming ABC\n"); + } + } + + return 0; +} + +#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ + V4L2_MBUS_HSYNC_ACTIVE_LOW | \ + V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ + V4L2_MBUS_VSYNC_ACTIVE_LOW | \ + V4L2_MBUS_FIELD_EVEN_HIGH | \ + V4L2_MBUS_FIELD_EVEN_LOW) + +static void +v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_parallel 
*bus = &vep->bus.parallel; + unsigned int flags = 0; + u32 v; + + if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656) + flags = bus->flags; + + if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) { + flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH | + V4L2_MBUS_HSYNC_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH : + V4L2_MBUS_HSYNC_ACTIVE_LOW; + pr_debug("hsync-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) { + flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH | + V4L2_MBUS_VSYNC_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH : + V4L2_MBUS_VSYNC_ACTIVE_LOW; + pr_debug("vsync-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) { + flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH | + V4L2_MBUS_FIELD_EVEN_LOW); + flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH : + V4L2_MBUS_FIELD_EVEN_LOW; + pr_debug("field-even-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) { + flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING | + V4L2_MBUS_PCLK_SAMPLE_FALLING | + V4L2_MBUS_PCLK_SAMPLE_DUALEDGE); + switch (v) { + case 0: + flags |= V4L2_MBUS_PCLK_SAMPLE_FALLING; + pr_debug("pclk-sample low\n"); + break; + case 1: + flags |= V4L2_MBUS_PCLK_SAMPLE_RISING; + pr_debug("pclk-sample high\n"); + break; + case 2: + flags |= V4L2_MBUS_PCLK_SAMPLE_DUALEDGE; + pr_debug("pclk-sample dual edge\n"); + break; + default: + pr_warn("invalid argument for pclk-sample"); + break; + } + } + + if (!fwnode_property_read_u32(fwnode, "data-active", &v)) { + flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH | + V4L2_MBUS_DATA_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH : + V4L2_MBUS_DATA_ACTIVE_LOW; + pr_debug("data-active %s\n", v ? 
"high" : "low"); + } + + if (fwnode_property_present(fwnode, "slave-mode")) { + pr_debug("slave mode\n"); + flags &= ~V4L2_MBUS_MASTER; + flags |= V4L2_MBUS_SLAVE; + } else { + flags &= ~V4L2_MBUS_SLAVE; + flags |= V4L2_MBUS_MASTER; + } + + if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) { + bus->bus_width = v; + pr_debug("bus-width %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) { + bus->data_shift = v; + pr_debug("data-shift %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) { + flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH | + V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH : + V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW; + pr_debug("sync-on-green-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) { + flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH | + V4L2_MBUS_DATA_ENABLE_LOW); + flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH : + V4L2_MBUS_DATA_ENABLE_LOW; + pr_debug("data-enable-active %s\n", v ? 
"high" : "low"); + } + + switch (bus_type) { + default: + bus->flags = flags; + if (flags & PARALLEL_MBUS_FLAGS) + vep->bus_type = V4L2_MBUS_PARALLEL; + else + vep->bus_type = V4L2_MBUS_BT656; + break; + case V4L2_MBUS_PARALLEL: + vep->bus_type = V4L2_MBUS_PARALLEL; + bus->flags = flags; + break; + case V4L2_MBUS_BT656: + vep->bus_type = V4L2_MBUS_BT656; + bus->flags = flags & ~PARALLEL_MBUS_FLAGS; + break; + } +} + +static void +v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1; + u32 v; + + if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) { + bus->clock_inv = v; + pr_debug("clock-inv %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "strobe", &v)) { + bus->strobe = v; + pr_debug("strobe %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) { + bus->data_lane = v; + pr_debug("data-lanes %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { + bus->clock_lane = v; + pr_debug("clock-lanes %u\n", v); + } + + if (bus_type == V4L2_MBUS_CCP2) + vep->bus_type = V4L2_MBUS_CCP2; + else + vep->bus_type = V4L2_MBUS_CSI1; +} + +static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS; + enum v4l2_mbus_type mbus_type; + int rval; + + pr_debug("===== begin parsing endpoint %pfw\n", fwnode); + + fwnode_property_read_u32(fwnode, "bus-type", &bus_type); + pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n", + v4l2_fwnode_bus_type_to_string(bus_type), bus_type, + v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type); + if (mbus_type == V4L2_MBUS_INVALID) { + pr_debug("unsupported bus type %u\n", bus_type); + return -EINVAL; + } + + if (vep->bus_type != V4L2_MBUS_UNKNOWN) { + if (mbus_type != 
V4L2_MBUS_UNKNOWN && + vep->bus_type != mbus_type) { + pr_debug("expecting bus type %s\n", + v4l2_fwnode_mbus_type_to_string(vep->bus_type)); + return -ENXIO; + } + } else { + vep->bus_type = mbus_type; + } + + switch (vep->bus_type) { + case V4L2_MBUS_UNKNOWN: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep, + V4L2_MBUS_UNKNOWN); + if (rval) + return rval; + + if (vep->bus_type == V4L2_MBUS_UNKNOWN) + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep, + V4L2_MBUS_UNKNOWN); + + pr_debug("assuming media bus type %s (%u)\n", + v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + + break; + case V4L2_MBUS_CCP2: + case V4L2_MBUS_CSI1: + v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type); + + break; + case V4L2_MBUS_CSI2_DPHY: + case V4L2_MBUS_CSI2_CPHY: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep, + vep->bus_type); + if (rval) + return rval; + + break; + case V4L2_MBUS_PARALLEL: + case V4L2_MBUS_BT656: + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep, + vep->bus_type); + + break; + default: + pr_warn("unsupported bus type %u\n", mbus_type); + return -EINVAL; + } + + fwnode_graph_parse_endpoint(fwnode, &vep->base); + + return 0; +} + +int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + int ret; + + ret = __v4l2_fwnode_endpoint_parse(fwnode, vep); + + pr_debug("===== end parsing endpoint %pfw\n", fwnode); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse); + +void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep) +{ + if (IS_ERR_OR_NULL(vep)) + return; + + kfree(vep->link_frequencies); + vep->link_frequencies = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free); + +int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + int rval; + + rval = __v4l2_fwnode_endpoint_parse(fwnode, vep); + if (rval < 0) + return rval; + + rval = fwnode_property_count_u64(fwnode, "link-frequencies"); + if 
(rval > 0) { + unsigned int i; + + vep->link_frequencies = + kmalloc_array(rval, sizeof(*vep->link_frequencies), + GFP_KERNEL); + if (!vep->link_frequencies) + return -ENOMEM; + + vep->nr_of_link_frequencies = rval; + + rval = fwnode_property_read_u64_array(fwnode, + "link-frequencies", + vep->link_frequencies, + vep->nr_of_link_frequencies); + if (rval < 0) { + v4l2_fwnode_endpoint_free(vep); + return rval; + } + + for (i = 0; i < vep->nr_of_link_frequencies; i++) + pr_debug("link-frequencies %u value %llu\n", i, + vep->link_frequencies[i]); + } + + pr_debug("===== end parsing endpoint %pfw\n", fwnode); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse); + +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_link *link) +{ + struct fwnode_endpoint fwep; + + memset(link, 0, sizeof(*link)); + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->local_id = fwep.id; + link->local_port = fwep.port; + link->local_node = fwnode_graph_get_port_parent(fwnode); + if (!link->local_node) + return -ENOLINK; + + fwnode = fwnode_graph_get_remote_endpoint(fwnode); + if (!fwnode) + goto err_put_local_node; + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->remote_id = fwep.id; + link->remote_port = fwep.port; + link->remote_node = fwnode_graph_get_port_parent(fwnode); + if (!link->remote_node) + goto err_put_remote_endpoint; + + return 0; + +err_put_remote_endpoint: + fwnode_handle_put(fwnode); + +err_put_local_node: + fwnode_handle_put(link->local_node); + + return -ENOLINK; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link); + +void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link) +{ + fwnode_handle_put(link->local_node); + fwnode_handle_put(link->remote_node); +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link); + +static const struct v4l2_fwnode_connector_conv { + enum v4l2_connector_type type; + const char *compatible; +} connectors[] = { + { + .type = V4L2_CONN_COMPOSITE, + .compatible = "composite-video-connector", + }, { + .type = 
V4L2_CONN_SVIDEO, + .compatible = "svideo-connector", + }, +}; + +static enum v4l2_connector_type +v4l2_fwnode_string_to_connector_type(const char *con_str) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(connectors); i++) + if (!strcmp(con_str, connectors[i].compatible)) + return connectors[i].type; + + return V4L2_CONN_UNKNOWN; +} + +static void +v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *vc) +{ + u32 stds; + int ret; + + ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds); + + /* The property is optional. */ + vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds; +} + +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector) +{ + struct v4l2_connector_link *link, *tmp; + + if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN) + return; + + list_for_each_entry_safe(link, tmp, &connector->links, head) { + v4l2_fwnode_put_link(&link->fwnode_link); + list_del(&link->head); + kfree(link); + } + + kfree(connector->label); + connector->label = NULL; + connector->type = V4L2_CONN_UNKNOWN; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free); + +static enum v4l2_connector_type +v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode) +{ + const char *type_name; + int err; + + if (!fwnode) + return V4L2_CONN_UNKNOWN; + + /* The connector-type is stored within the compatible string. 
*/ + err = fwnode_property_read_string(fwnode, "compatible", &type_name); + if (err) + return V4L2_CONN_UNKNOWN; + + return v4l2_fwnode_string_to_connector_type(type_name); +} + +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_node; + enum v4l2_connector_type connector_type; + const char *label; + int err; + + if (!fwnode) + return -EINVAL; + + memset(connector, 0, sizeof(*connector)); + + INIT_LIST_HEAD(&connector->links); + + connector_node = fwnode_graph_get_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + if (connector_type == V4L2_CONN_UNKNOWN) { + fwnode_handle_put(connector_node); + connector_node = fwnode_graph_get_remote_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + } + + if (connector_type == V4L2_CONN_UNKNOWN) { + pr_err("Unknown connector type\n"); + err = -ENOTCONN; + goto out; + } + + connector->type = connector_type; + connector->name = fwnode_get_name(connector_node); + err = fwnode_property_read_string(connector_node, "label", &label); + connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL); + + /* Parse the connector specific properties. 
*/ + switch (connector->type) { + case V4L2_CONN_COMPOSITE: + case V4L2_CONN_SVIDEO: + v4l2_fwnode_connector_parse_analog(connector_node, connector); + break; + /* Avoid compiler warnings */ + case V4L2_CONN_UNKNOWN: + break; + } + +out: + fwnode_handle_put(connector_node); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse); + +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_ep; + struct v4l2_connector_link *link; + int err; + + if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN) + return -EINVAL; + + connector_ep = fwnode_graph_get_remote_endpoint(fwnode); + if (!connector_ep) + return -ENOTCONN; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto err; + } + + err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link); + if (err) + goto err; + + fwnode_handle_put(connector_ep); + + list_add(&link->head, &connector->links); + connector->nr_of_links++; + + return 0; + +err: + kfree(link); + fwnode_handle_put(connector_ep); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link); + +int v4l2_fwnode_device_parse(struct device *dev, + struct v4l2_fwnode_device_properties *props) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + u32 val; + int ret; + + memset(props, 0, sizeof(*props)); + + props->orientation = V4L2_FWNODE_PROPERTY_UNSET; + ret = fwnode_property_read_u32(fwnode, "orientation", &val); + if (!ret) { + switch (val) { + case V4L2_FWNODE_ORIENTATION_FRONT: + case V4L2_FWNODE_ORIENTATION_BACK: + case V4L2_FWNODE_ORIENTATION_EXTERNAL: + break; + default: + dev_warn(dev, "Unsupported device orientation: %u\n", val); + return -EINVAL; + } + + props->orientation = val; + dev_dbg(dev, "device orientation: %u\n", val); + } + + props->rotation = V4L2_FWNODE_PROPERTY_UNSET; + ret = fwnode_property_read_u32(fwnode, "rotation", &val); + if (!ret) { + if (val >= 360) { + dev_warn(dev, 
"Unsupported device rotation: %u\n", val); + return -EINVAL; + } + + props->rotation = val; + dev_dbg(dev, "device rotation: %u\n", val); + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse); + +/* + * v4l2_fwnode_reference_parse - parse references for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * @prop: the name of the property + * + * Return: 0 on success + * -ENOENT if no entries were found + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int v4l2_fwnode_reference_parse(struct device *dev, + struct v4l2_async_notifier *notifier, + const char *prop) +{ + struct fwnode_reference_args args; + unsigned int index; + int ret; + + for (index = 0; + !(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop, + NULL, 0, index, &args)); + index++) { + struct v4l2_async_connection *asd; + + asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode, + struct v4l2_async_connection); + fwnode_handle_put(args.fwnode); + if (IS_ERR(asd)) { + /* not an error if asd already exists */ + if (PTR_ERR(asd) == -EEXIST) + continue; + + return PTR_ERR(asd); + } + } + + /* -ENOENT here means successful parsing */ + if (ret != -ENOENT) + return ret; + + /* Return -ENOENT if no references were found */ + return index ? 0 : -ENOENT; +} + +/* + * v4l2_fwnode_reference_get_int_prop - parse a reference with integer + * arguments + * @fwnode: fwnode to read @prop from + * @notifier: notifier for @dev + * @prop: the name of the property + * @index: the index of the reference to get + * @props: the array of integer property names + * @nprops: the number of integer property names in @nprops + * + * First find an fwnode referred to by the reference at @index in @prop. 
+ * + * Then under that fwnode, @nprops times, for each property in @props, + * iteratively follow child nodes starting from fwnode such that they have the + * property in @props array at the index of the child node distance from the + * root node and the value of that property matching with the integer argument + * of the reference, at the same index. + * + * The child fwnode reached at the end of the iteration is then returned to the + * caller. + * + * The core reason for this is that you cannot refer to just any node in ACPI. + * So to refer to an endpoint (easy in DT) you need to refer to a device, then + * provide a list of (property name, property value) tuples where each tuple + * uniquely identifies a child node. The first tuple identifies a child directly + * underneath the device fwnode, the next tuple identifies a child node + * underneath the fwnode identified by the previous tuple, etc. until you + * reached the fwnode you need. + * + * THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A + * REFERENCE IN HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under + * Documentation/firmware-guide/acpi/dsd/ instead and especially graph.txt, + * data-node-references.txt and leds.txt . 
+ * + * Scope (\_SB.PCI0.I2C2) + * { + * Device (CAM0) + * { + * Name (_DSD, Package () { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { + * "compatible", + * Package () { "nokia,smia" } + * }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port0", "PRT0" }, + * } + * }) + * Name (PRT0, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 0 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP00" }, + * } + * }) + * Name (EP00, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package() { + * \_SB.PCI0.ISP, 4, 0 + * } + * }, + * } + * }) + * } + * } + * + * Scope (\_SB.PCI0) + * { + * Device (ISP) + * { + * Name (_DSD, Package () { + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port4", "PRT4" }, + * } + * }) + * + * Name (PRT4, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 4 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP40" }, + * } + * }) + * + * Name (EP40, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package () { + * \_SB.PCI0.I2C2.CAM0, + * 0, 0 + * } + * }, + * } + * }) + * } + * } + * + * From the EP40 node under ISP device, you could parse the graph remote + * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments: + * + * @fwnode: fwnode referring to EP40 under ISP. + * @prop: "remote-endpoint" + * @index: 0 + * @props: "port", "endpoint" + * @nprops: 2 + * + * And you'd get back fwnode referring to EP00 under CAM0. 
+ * + * The same works the other way around: if you use EP00 under CAM0 as the + * fwnode, you'll get fwnode referring to EP40 under ISP. + * + * The same example in DT syntax would look like this: + * + * cam: cam0 { + * compatible = "nokia,smia"; + * + * port { + * port = <0>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&isp 4 0>; + * }; + * }; + * }; + * + * isp: isp { + * ports { + * port@4 { + * port = <4>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&cam 0 0>; + * }; + * }; + * }; + * }; + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwise failed + * -ENOMEM if memory allocation failed + */ +static struct fwnode_handle * +v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode, + const char *prop, + unsigned int index, + const char * const *props, + unsigned int nprops) +{ + struct fwnode_reference_args fwnode_args; + u64 *args = fwnode_args.args; + struct fwnode_handle *child; + int ret; + + /* + * Obtain remote fwnode as well as the integer arguments. + * + * Note that right now both -ENODATA and -ENOENT may signal + * out-of-bounds access. Return -ENOENT in that case. + */ + ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops, + index, &fwnode_args); + if (ret) + return ERR_PTR(ret == -ENODATA ? -ENOENT : ret); + + /* + * Find a node in the tree under the referred fwnode corresponding to + * the integer arguments. + */ + fwnode = fwnode_args.fwnode; + while (nprops--) { + u32 val; + + /* Loop over all child nodes under fwnode. */ + fwnode_for_each_child_node(fwnode, child) { + if (fwnode_property_read_u32(child, *props, &val)) + continue; + + /* Found property, see if its value matches. */ + if (val == *args) + break; + } + + fwnode_handle_put(fwnode); + + /* No property found; return an error here. 
*/ + if (!child) { + fwnode = ERR_PTR(-ENOENT); + break; + } + + props++; + args++; + fwnode = child; + } + + return fwnode; +} + +struct v4l2_fwnode_int_props { + const char *name; + const char * const *props; + unsigned int nprops; +}; + +/* + * v4l2_fwnode_reference_parse_int_props - parse references for async + * sub-devices + * @dev: struct device pointer + * @notifier: notifier for @dev + * @prop: the name of the property + * @props: the array of integer property names + * @nprops: the number of integer properties + * + * Use v4l2_fwnode_reference_get_int_prop to find fwnodes through reference in + * property @prop with integer arguments with child nodes matching in properties + * @props. Then, set up V4L2 async sub-devices for those fwnodes in the notifier + * accordingly. + * + * While it is technically possible to use this function on DT, it is only + * meaningful on ACPI. On Device tree you can refer to any node in the tree but + * on ACPI the references are limited to devices. + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwisefailed + * -ENOMEM if memory allocation failed + */ +static int +v4l2_fwnode_reference_parse_int_props(struct device *dev, + struct v4l2_async_notifier *notifier, + const struct v4l2_fwnode_int_props *p) +{ + struct fwnode_handle *fwnode; + unsigned int index; + int ret; + const char *prop = p->name; + const char * const *props = p->props; + unsigned int nprops = p->nprops; + + index = 0; + do { + fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev), + prop, index, + props, nprops); + if (IS_ERR(fwnode)) { + /* + * Note that right now both -ENODATA and -ENOENT may + * signal out-of-bounds access. Return the error in + * cases other than that. 
+ */ + if (PTR_ERR(fwnode) != -ENOENT && + PTR_ERR(fwnode) != -ENODATA) + return PTR_ERR(fwnode); + break; + } + fwnode_handle_put(fwnode); + index++; + } while (1); + + for (index = 0; + !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev), + prop, index, + props, + nprops))); + index++) { + struct v4l2_async_connection *asd; + + asd = v4l2_async_nf_add_fwnode(notifier, fwnode, + struct v4l2_async_connection); + fwnode_handle_put(fwnode); + if (IS_ERR(asd)) { + ret = PTR_ERR(asd); + /* not an error if asd already exists */ + if (ret == -EEXIST) + continue; + + return PTR_ERR(asd); + } + } + + return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode); +} + +/** + * v4l2_async_nf_parse_fwnode_sensor - parse common references on + * sensors for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * + * Parse common sensor properties for remote devices related to the + * sensor and set up async sub-devices for them. + * + * Any notifier populated using this function must be released with a call to + * v4l2_async_nf_release() after it has been unregistered and the async + * sub-devices are no longer in use, even in the case the function returned an + * error. 
+ * + * Return: 0 on success + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int +v4l2_async_nf_parse_fwnode_sensor(struct device *dev, + struct v4l2_async_notifier *notifier) +{ + static const char * const led_props[] = { "led" }; + static const struct v4l2_fwnode_int_props props[] = { + { "flash-leds", led_props, ARRAY_SIZE(led_props) }, + { "mipi-img-flash-leds", }, + { "lens-focus", }, + { "mipi-img-lens-focus", }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + if (props[i].props && is_acpi_node(dev_fwnode(dev))) + ret = v4l2_fwnode_reference_parse_int_props(dev, + notifier, + &props[i]); + else + ret = v4l2_fwnode_reference_parse(dev, notifier, + props[i].name); + if (ret && ret != -ENOENT) { + dev_warn(dev, "parsing property \"%s\" failed (%d)\n", + props[i].name, ret); + return ret; + } + } + + return 0; +} + +int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd) +{ + struct v4l2_async_notifier *notifier; + int ret; + + if (WARN_ON(!sd->dev)) + return -ENODEV; + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) + return -ENOMEM; + + v4l2_async_subdev_nf_init(notifier, sd); + + ret = v4l2_subdev_get_privacy_led(sd); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_nf_register(notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto out_unregister; + + sd->subdev_notifier = notifier; + + return 0; + +out_unregister: + v4l2_async_nf_unregister(notifier); + +out_cleanup: + v4l2_subdev_put_privacy_led(sd); + v4l2_async_nf_cleanup(notifier); + kfree(notifier); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor); + +MODULE_DESCRIPTION("V4L2 fwnode binding parsing library"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Sylwester Nawrocki "); 
+MODULE_AUTHOR("Guennadi Liakhovetski "); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-h264.c b/6.17.0/drivers/media/v4l2-core/v4l2-h264.c new file mode 100644 index 0000000..c00197d --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-h264.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * V4L2 H264 helpers. + * + * Copyright (C) 2019 Collabora, Ltd. + * + * Author: Boris Brezillon + */ + +#include +#include + +#include + +/* + * Size of the tempory buffer allocated when printing reference lists. The + * output will be truncated if the size is too small. + */ +static const int tmp_str_size = 1024; + +/** + * v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list + * builder + * + * @b: the builder context to initialize + * @dec_params: decode parameters control + * @sps: SPS control + * @dpb: DPB to use when creating the reference list + */ +void +v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b, + const struct v4l2_ctrl_h264_decode_params *dec_params, + const struct v4l2_ctrl_h264_sps *sps, + const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]) +{ + int cur_frame_num, max_frame_num; + unsigned int i; + + max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4); + cur_frame_num = dec_params->frame_num; + + memset(b, 0, sizeof(*b)); + if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) { + b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt, + dec_params->top_field_order_cnt); + b->cur_pic_fields = V4L2_H264_FRAME_REF; + } else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD) { + b->cur_pic_order_count = dec_params->bottom_field_order_cnt; + b->cur_pic_fields = V4L2_H264_BOTTOM_FIELD_REF; + } else { + b->cur_pic_order_count = dec_params->top_field_order_cnt; + b->cur_pic_fields = V4L2_H264_TOP_FIELD_REF; + } + + for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { + if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) + continue; + + if (dpb[i].flags & 
V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) + b->refs[i].longterm = true; + + /* + * Handle frame_num wraparound as described in section + * '8.2.4.1 Decoding process for picture numbers' of the spec. + * For long term references, frame_num is set to + * long_term_frame_idx which requires no wrapping. + */ + if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num) + b->refs[i].frame_num = (int)dpb[i].frame_num - + max_frame_num; + else + b->refs[i].frame_num = dpb[i].frame_num; + + b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt; + b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt; + + if (b->cur_pic_fields == V4L2_H264_FRAME_REF) { + u8 fields = V4L2_H264_FRAME_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + continue; + } + + if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF) { + u8 fields = V4L2_H264_TOP_FIELD_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + } + + if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF) { + u8 fields = V4L2_H264_BOTTOM_FIELD_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + } + } + + for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++) + b->unordered_reflist[i].index = i; +} +EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder); + +static s32 v4l2_h264_get_poc(const struct v4l2_h264_reflist_builder *b, + const struct v4l2_h264_reference *ref) +{ + switch (ref->fields) { + case V4L2_H264_FRAME_REF: + return min(b->refs[ref->index].top_field_order_cnt, + b->refs[ref->index].bottom_field_order_cnt); + case V4L2_H264_TOP_FIELD_REF: + return b->refs[ref->index].top_field_order_cnt; + case V4L2_H264_BOTTOM_FIELD_REF: + return b->refs[ref->index].bottom_field_order_cnt; + } + + /* not reached */ + return 0; +} + +static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void 
*ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* + * For frames, short term pics are in descending pic num order and long + * term ones in ascending order. For fields, the same direction is used + * but with frame_num (wrapped). For frames, the value of pic_num and + * frame_num are the same (see formula (8-28) and (8-29)). For this + * reason we can use frame_num only and share this function between + * frames and fields reflist. + */ + if (!builder->refs[idxa].longterm) + return builder->refs[idxb].frame_num < + builder->refs[idxa].frame_num ? + -1 : 1; + + return builder->refs[idxa].frame_num < builder->refs[idxb].frame_num ? + -1 : 1; +} + +static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + s32 poca, pocb; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* Long term pics in ascending frame num order. */ + if (builder->refs[idxa].longterm) + return builder->refs[idxa].frame_num < + builder->refs[idxb].frame_num ? 
+ -1 : 1; + + poca = v4l2_h264_get_poc(builder, ptra); + pocb = v4l2_h264_get_poc(builder, ptrb); + + /* + * Short term pics with POC < cur POC first in POC descending order + * followed by short term pics with POC > cur POC in POC ascending + * order. + */ + if ((poca < builder->cur_pic_order_count) != + (pocb < builder->cur_pic_order_count)) + return poca < pocb ? -1 : 1; + else if (poca < builder->cur_pic_order_count) + return pocb < poca ? -1 : 1; + + return poca < pocb ? -1 : 1; +} + +static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + s32 poca, pocb; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* Long term pics in ascending frame num order. */ + if (builder->refs[idxa].longterm) + return builder->refs[idxa].frame_num < + builder->refs[idxb].frame_num ? + -1 : 1; + + poca = v4l2_h264_get_poc(builder, ptra); + pocb = v4l2_h264_get_poc(builder, ptrb); + + /* + * Short term pics with POC > cur POC first in POC ascending order + * followed by short term pics with POC < cur POC in POC descending + * order. + */ + if ((poca < builder->cur_pic_order_count) != + (pocb < builder->cur_pic_order_count)) + return pocb < poca ? -1 : 1; + else if (poca < builder->cur_pic_order_count) + return pocb < poca ? -1 : 1; + + return poca < pocb ? -1 : 1; +} + +/* + * The references need to be reordered so that references are alternating + * between top and bottom field references starting with the current picture + * parity. This has to be done for short term and long term references + * separately. 
+ */ +static void reorder_field_reflist(const struct v4l2_h264_reflist_builder *b, + struct v4l2_h264_reference *reflist) +{ + struct v4l2_h264_reference tmplist[V4L2_H264_REF_LIST_LEN]; + u8 lt, i = 0, j = 0, k = 0; + + memcpy(tmplist, reflist, sizeof(tmplist[0]) * b->num_valid); + + for (lt = 0; lt <= 1; lt++) { + do { + for (; i < b->num_valid && b->refs[tmplist[i].index].longterm == lt; i++) { + if (tmplist[i].fields == b->cur_pic_fields) { + reflist[k++] = tmplist[i++]; + break; + } + } + + for (; j < b->num_valid && b->refs[tmplist[j].index].longterm == lt; j++) { + if (tmplist[j].fields != b->cur_pic_fields) { + reflist[k++] = tmplist[j++]; + break; + } + } + } while ((i < b->num_valid && b->refs[tmplist[i].index].longterm == lt) || + (j < b->num_valid && b->refs[tmplist[j].index].longterm == lt)); + } +} + +static char ref_type_to_char(u8 ref_type) +{ + switch (ref_type) { + case V4L2_H264_FRAME_REF: + return 'f'; + case V4L2_H264_TOP_FIELD_REF: + return 't'; + case V4L2_H264_BOTTOM_FIELD_REF: + return 'b'; + } + + return '?'; +} + +static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, + char **out_str) +{ + int n = 0, i; + + *out_str = kmalloc(tmp_str_size, GFP_KERNEL); + if (!(*out_str)) + return NULL; + + n += snprintf(*out_str + n, tmp_str_size - n, "|"); + + for (i = 0; i < builder->num_valid; i++) { + /* this is pic_num for frame and frame_num (wrapped) for field, + * but for frame pic_num is equal to frame_num (wrapped). + */ + int frame_num = builder->refs[reflist[i].index].frame_num; + bool longterm = builder->refs[reflist[i].index].longterm; + + n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|", + frame_num, longterm ? 
'l' : 's',
+			       ref_type_to_char(reflist[i].fields));
+	}
+
+	return *out_str;
+}
+
+static void print_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
+			     struct v4l2_h264_reference *reflist)
+{
+	char *buf = NULL;
+
+	pr_debug("ref_pic_list_p (cur_poc %u%c) %s\n",
+		 builder->cur_pic_order_count,
+		 ref_type_to_char(builder->cur_pic_fields),
+		 format_ref_list_p(builder, reflist, &buf));
+
+	kfree(buf);
+}
+
+static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
+				     struct v4l2_h264_reference *reflist,
+				     char **out_str)
+{
+	int n = 0, i;
+
+	*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
+	if (!(*out_str))
+		return NULL;
+
+	n += snprintf(*out_str + n, tmp_str_size - n, "|");
+
+	for (i = 0; i < builder->num_valid; i++) {
+		int frame_num = builder->refs[reflist[i].index].frame_num;
+		u32 poc = v4l2_h264_get_poc(builder, reflist + i);
+		bool longterm = builder->refs[reflist[i].index].longterm;
+
+		n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
+			       longterm ? frame_num : poc,
+			       longterm ? 'l' : 's',
+			       ref_type_to_char(reflist[i].fields));
+	}
+
+	return *out_str;
+}
+
+static void print_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
+			     struct v4l2_h264_reference *reflist, u8 list_num)
+{
+	char *buf = NULL;
+
+	pr_debug("ref_pic_list_b%u (cur_poc %u%c) %s",
+		 list_num, builder->cur_pic_order_count,
+		 ref_type_to_char(builder->cur_pic_fields),
+		 format_ref_list_b(builder, reflist, &buf));
+
+	kfree(buf);
+}
+
+/**
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
+ *
+ * @builder: reference list builder context
+ * @reflist: 32 sized array used to store the P reference list. Each entry
+ *	     is a v4l2_h264_reference structure
+ *
+ * This function builds the P reference lists. This procedure is described in
+ * section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. 
This function can be used by H264 decoder drivers that
+ * need to pass a P reference list to the hardware.
+ */
+void
+v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
+			   struct v4l2_h264_reference *reflist)
+{
+	memcpy(reflist, builder->unordered_reflist,
+	       sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+	sort_r(reflist, builder->num_valid, sizeof(*reflist),
+	       v4l2_h264_p_ref_list_cmp, NULL, builder);
+
+	if (builder->cur_pic_fields != V4L2_H264_FRAME_REF)
+		reorder_field_reflist(builder, reflist);
+
+	print_ref_list_p(builder, reflist);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list);
+
+/**
+ * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
+ *
+ * @builder: reference list builder context
+ * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry
+ *		is a v4l2_h264_reference structure
+ * @b1_reflist: 32 sized array used to store the B1 reference list. Each entry
+ *		is a v4l2_h264_reference structure
+ *
+ * This function builds the B0/B1 reference lists. This procedure is described
+ * in section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass B0/B1 reference lists to the hardware. 
+ */ +void +v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *b0_reflist, + struct v4l2_h264_reference *b1_reflist) +{ + memcpy(b0_reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist), + v4l2_h264_b0_ref_list_cmp, NULL, builder); + + memcpy(b1_reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist), + v4l2_h264_b1_ref_list_cmp, NULL, builder); + + if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) { + reorder_field_reflist(builder, b0_reflist); + reorder_field_reflist(builder, b1_reflist); + } + + if (builder->num_valid > 1 && + !memcmp(b1_reflist, b0_reflist, builder->num_valid)) + swap(b1_reflist[0], b1_reflist[1]); + + print_ref_list_b(builder, b0_reflist, 0); + print_ref_list_b(builder, b1_reflist, 1); +} +EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V4L2 H264 Helpers"); +MODULE_AUTHOR("Boris Brezillon "); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-i2c.c b/6.17.0/drivers/media/v4l2-core/v4l2-i2c.c new file mode 100644 index 0000000..ffc64e1 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-i2c.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * v4l2-i2c - I2C helpers for Video4Linux2 + */ + +#include +#include +#include +#include +#include + +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + /* + * We need to unregister the i2c client + * explicitly. We cannot rely on + * i2c_del_adapter to always unregister + * clients for us, since if the i2c bus is a + * platform bus, then it is never deleted. 
+ * + * Device tree or ACPI based devices must not + * be unregistered as they have not been + * registered by us, and would not be + * re-created by just probing the V4L2 driver. + */ + if (client && !dev_fwnode(&client->dev)) + i2c_unregister_device(client); +} + +void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, + struct i2c_client *client, + const char *devname, const char *postfix) +{ + if (!devname) + devname = client->dev.driver->name; + if (!postfix) + postfix = ""; + + snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix, + i2c_adapter_id(client->adapter), client->addr); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name); + +void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_IS_I2C; + /* the owner is the same as the i2c_client's driver owner */ + sd->owner = client->dev.driver->owner; + sd->dev = &client->dev; + /* i2c_client and v4l2_subdev point to one another */ + v4l2_set_subdevdata(sd, client); + i2c_set_clientdata(client, sd); + v4l2_i2c_subdev_set_name(sd, client, NULL, NULL); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); + +/* Load an i2c sub-device. */ +struct v4l2_subdev +*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + struct v4l2_subdev *sd = NULL; + struct i2c_client *client; + + if (!v4l2_dev) + return NULL; + + request_module(I2C_MODULE_PREFIX "%s", info->type); + + /* Create the i2c client */ + if (info->addr == 0 && probe_addrs) + client = i2c_new_scanned_device(adapter, info, probe_addrs, + NULL); + else + client = i2c_new_client_device(adapter, info); + + /* + * Note: by loading the module first we are certain that c->driver + * will be set if the driver was found. 
If the module was not loaded + * first, then the i2c core tries to delay-load the module for us, + * and then c->driver is still NULL until the module is finally + * loaded. This delay-load mechanism doesn't work if other drivers + * want to use the i2c device, so explicitly loading the module + * is the best alternative. + */ + if (!i2c_client_has_driver(client)) + goto error; + + /* Lock the module so we can safely get the v4l2_subdev pointer */ + if (!try_module_get(client->dev.driver->owner)) + goto error; + sd = i2c_get_clientdata(client); + + /* + * Register with the v4l2_device which increases the module's + * use count as well. + */ + if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) + sd = NULL; + /* Decrease the module use count to match the first try_module_get. */ + module_put(client->dev.driver->owner); + +error: + /* + * If we have a client but no subdev, then something went wrong and + * we must unregister the client. + */ + if (!IS_ERR(client) && !sd) + i2c_unregister_device(client); + return sd; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); + +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + const char *client_type, + u8 addr, + const unsigned short *probe_addrs) +{ + struct i2c_board_info info; + + /* + * Setup the i2c board info with the device type and + * the device address. + */ + memset(&info, 0, sizeof(info)); + strscpy(info.type, client_type, sizeof(info.type)); + info.addr = addr; + + return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, + probe_addrs); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev); + +/* Return i2c client address of v4l2_subdev. */ +unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + return client ? client->addr : I2C_CLIENT_END; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr); + +/* + * Return a list of I2C tuner addresses to probe. Use only if the tuner + * addresses are unknown. 
+ */ +const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + static const unsigned short radio_addrs[] = { +#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761) + 0x10, +#endif + 0x60, + I2C_CLIENT_END + }; + static const unsigned short demod_addrs[] = { + 0x42, 0x43, 0x4a, 0x4b, + I2C_CLIENT_END + }; + static const unsigned short tv_addrs[] = { + 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */ + 0x60, 0x61, 0x62, 0x63, 0x64, + I2C_CLIENT_END + }; + + switch (type) { + case ADDRS_RADIO: + return radio_addrs; + case ADDRS_DEMOD: + return demod_addrs; + case ADDRS_TV: + return tv_addrs; + case ADDRS_TV_WITH_DEMOD: + return tv_addrs + 4; + } + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-ioctl.c b/6.17.0/drivers/media/v4l2-core/v4l2-ioctl.c new file mode 100644 index 0000000..46da373 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-ioctl.c @@ -0,0 +1,3538 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video capture interface for Linux version 2 + * + * A generic framework to process V4L2 ioctl commands. 
+ * + * Authors: Alan Cox, (version 1) + * Mauro Carvalho Chehab (version 2) + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include /* for media_set_bus_info() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls) + +struct std_descr { + v4l2_std_id std; + const char *descr; +}; + +static const struct std_descr standards[] = { + { V4L2_STD_NTSC, "NTSC" }, + { V4L2_STD_NTSC_M, "NTSC-M" }, + { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" }, + { V4L2_STD_NTSC_M_KR, "NTSC-M-KR" }, + { V4L2_STD_NTSC_443, "NTSC-443" }, + { V4L2_STD_PAL, "PAL" }, + { V4L2_STD_PAL_BG, "PAL-BG" }, + { V4L2_STD_PAL_B, "PAL-B" }, + { V4L2_STD_PAL_B1, "PAL-B1" }, + { V4L2_STD_PAL_G, "PAL-G" }, + { V4L2_STD_PAL_H, "PAL-H" }, + { V4L2_STD_PAL_I, "PAL-I" }, + { V4L2_STD_PAL_DK, "PAL-DK" }, + { V4L2_STD_PAL_D, "PAL-D" }, + { V4L2_STD_PAL_D1, "PAL-D1" }, + { V4L2_STD_PAL_K, "PAL-K" }, + { V4L2_STD_PAL_M, "PAL-M" }, + { V4L2_STD_PAL_N, "PAL-N" }, + { V4L2_STD_PAL_Nc, "PAL-Nc" }, + { V4L2_STD_PAL_60, "PAL-60" }, + { V4L2_STD_SECAM, "SECAM" }, + { V4L2_STD_SECAM_B, "SECAM-B" }, + { V4L2_STD_SECAM_G, "SECAM-G" }, + { V4L2_STD_SECAM_H, "SECAM-H" }, + { V4L2_STD_SECAM_DK, "SECAM-DK" }, + { V4L2_STD_SECAM_D, "SECAM-D" }, + { V4L2_STD_SECAM_K, "SECAM-K" }, + { V4L2_STD_SECAM_K1, "SECAM-K1" }, + { V4L2_STD_SECAM_L, "SECAM-L" }, + { V4L2_STD_SECAM_LC, "SECAM-Lc" }, + { 0, "Unknown" } +}; + +/* video4linux standard ID conversion to standard name + */ +const char *v4l2_norm_to_name(v4l2_std_id id) +{ + u32 myid = id; + int i; + + /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle + 64 bit comparisons. So, on that architecture, with some gcc + variants, compilation fails. Currently, the max value is 30bit wide. 
+ */ + BUG_ON(myid != id); + + for (i = 0; standards[i].std; i++) + if (myid == standards[i].std) + break; + return standards[i].descr; +} +EXPORT_SYMBOL(v4l2_norm_to_name); + +/* Returns frame period for the given standard */ +void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod) +{ + if (id & V4L2_STD_525_60) { + frameperiod->numerator = 1001; + frameperiod->denominator = 30000; + } else { + frameperiod->numerator = 1; + frameperiod->denominator = 25; + } +} +EXPORT_SYMBOL(v4l2_video_std_frame_period); + +/* Fill in the fields of a v4l2_standard structure according to the + 'id' and 'transmission' parameters. Returns negative on error. */ +int v4l2_video_std_construct(struct v4l2_standard *vs, + int id, const char *name) +{ + vs->id = id; + v4l2_video_std_frame_period(id, &vs->frameperiod); + vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625; + strscpy(vs->name, name, sizeof(vs->name)); + return 0; +} +EXPORT_SYMBOL(v4l2_video_std_construct); + +/* Fill in the fields of a v4l2_standard structure according to the + * 'id' and 'vs->index' parameters. Returns negative on error. */ +int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id) +{ + v4l2_std_id curr_id = 0; + unsigned int index = vs->index, i, j = 0; + const char *descr = ""; + + /* Return -ENODATA if the id for the current input + or output is 0, meaning that it doesn't support this API. */ + if (id == 0) + return -ENODATA; + + /* Return norm array in a canonical way */ + for (i = 0; i <= index && id; i++) { + /* last std value in the standards array is 0, so this + while always ends there since (id & 0) == 0. 
*/ + while ((id & standards[j].std) != standards[j].std) + j++; + curr_id = standards[j].std; + descr = standards[j].descr; + j++; + if (curr_id == 0) + break; + if (curr_id != V4L2_STD_PAL && + curr_id != V4L2_STD_SECAM && + curr_id != V4L2_STD_NTSC) + id &= ~curr_id; + } + if (i <= index) + return -EINVAL; + + v4l2_video_std_construct(vs, curr_id, descr); + return 0; +} + +/* ----------------------------------------------------------------- */ +/* some arrays for pretty-printing debug messages of enum types */ + +const char *v4l2_field_names[] = { + [V4L2_FIELD_ANY] = "any", + [V4L2_FIELD_NONE] = "none", + [V4L2_FIELD_TOP] = "top", + [V4L2_FIELD_BOTTOM] = "bottom", + [V4L2_FIELD_INTERLACED] = "interlaced", + [V4L2_FIELD_SEQ_TB] = "seq-tb", + [V4L2_FIELD_SEQ_BT] = "seq-bt", + [V4L2_FIELD_ALTERNATE] = "alternate", + [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb", + [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt", +}; +EXPORT_SYMBOL(v4l2_field_names); + +const char *v4l2_type_names[] = { + [0] = "0", + [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap", + [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay", + [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out", + [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", + [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", + [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap", + [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay", + [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane", + [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap", + [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out", + [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap", + [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out", +}; +EXPORT_SYMBOL(v4l2_type_names); + +static const char *v4l2_memory_names[] = { + [V4L2_MEMORY_MMAP] = "mmap", + [V4L2_MEMORY_USERPTR] = "userptr", + [V4L2_MEMORY_OVERLAY] = "overlay", + [V4L2_MEMORY_DMABUF] = "dmabuf", +}; + +#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? 
arr[a] : "unknown") + +/* ------------------------------------------------------------------ */ +/* debug help functions */ + +static void v4l_print_querycap(const void *arg, bool write_only) +{ + const struct v4l2_capability *p = arg; + + pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n", + (int)sizeof(p->driver), p->driver, + (int)sizeof(p->card), p->card, + (int)sizeof(p->bus_info), p->bus_info, + p->version, p->capabilities, p->device_caps); +} + +static void v4l_print_enuminput(const void *arg, bool write_only) +{ + const struct v4l2_input *p = arg; + + pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, + p->tuner, (unsigned long long)p->std, p->status, + p->capabilities); +} + +static void v4l_print_enumoutput(const void *arg, bool write_only) +{ + const struct v4l2_output *p = arg; + + pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, + p->modulator, (unsigned long long)p->std, p->capabilities); +} + +static void v4l_print_audio(const void *arg, bool write_only) +{ + const struct v4l2_audio *p = arg; + + if (write_only) + pr_cont("index=%u, mode=0x%x\n", p->index, p->mode); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + p->capability, p->mode); +} + +static void v4l_print_audioout(const void *arg, bool write_only) +{ + const struct v4l2_audioout *p = arg; + + if (write_only) + pr_cont("index=%u\n", p->index); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + p->capability, p->mode); +} + +static void v4l_print_fmtdesc(const void *arg, bool write_only) +{ + const struct v4l2_fmtdesc *p = arg; + + pr_cont("index=%u, type=%s, flags=0x%x, 
pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n", + p->index, prt_names(p->type, v4l2_type_names), + p->flags, &p->pixelformat, p->mbus_code, + (int)sizeof(p->description), p->description); +} + +static void v4l_print_format(const void *arg, bool write_only) +{ + const struct v4l2_format *p = arg; + const struct v4l2_pix_format *pix; + const struct v4l2_pix_format_mplane *mp; + const struct v4l2_vbi_format *vbi; + const struct v4l2_sliced_vbi_format *sliced; + const struct v4l2_window *win; + const struct v4l2_meta_format *meta; + u32 pixelformat; + u32 planes; + unsigned i; + + pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + pix = &p->fmt.pix; + pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", + pix->width, pix->height, &pix->pixelformat, + prt_names(pix->field, v4l2_field_names), + pix->bytesperline, pix->sizeimage, + pix->colorspace, pix->flags, pix->ycbcr_enc, + pix->quantization, pix->xfer_func); + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + mp = &p->fmt.pix_mp; + pixelformat = mp->pixelformat; + pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", + mp->width, mp->height, &pixelformat, + prt_names(mp->field, v4l2_field_names), + mp->colorspace, mp->num_planes, mp->flags, + mp->ycbcr_enc, mp->quantization, mp->xfer_func); + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) + printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, + mp->plane_fmt[i].bytesperline, + mp->plane_fmt[i].sizeimage); + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + win = &p->fmt.win; + pr_cont(", (%d,%d)/%ux%u, field=%s, 
chromakey=0x%08x, global_alpha=0x%02x\n", + win->w.left, win->w.top, win->w.width, win->w.height, + prt_names(win->field, v4l2_field_names), + win->chromakey, win->global_alpha); + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + vbi = &p->fmt.vbi; + pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n", + vbi->sampling_rate, vbi->offset, + vbi->samples_per_line, &vbi->sample_format, + vbi->start[0], vbi->start[1], + vbi->count[0], vbi->count[1]); + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + sliced = &p->fmt.sliced; + pr_cont(", service_set=0x%08x, io_size=%d\n", + sliced->service_set, sliced->io_size); + for (i = 0; i < 24; i++) + printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, + sliced->service_lines[0][i], + sliced->service_lines[1][i]); + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + pixelformat = p->fmt.sdr.pixelformat; + pr_cont(", pixelformat=%p4cc\n", &pixelformat); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + meta = &p->fmt.meta; + pixelformat = meta->dataformat; + pr_cont(", dataformat=%p4cc, buffersize=%u, width=%u, height=%u, bytesperline=%u\n", + &pixelformat, meta->buffersize, meta->width, + meta->height, meta->bytesperline); + break; + } +} + +static void v4l_print_framebuffer(const void *arg, bool write_only) +{ + const struct v4l2_framebuffer *p = arg; + + pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n", + p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, + &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage, + p->fmt.colorspace); +} + +static void v4l_print_buftype(const void *arg, bool write_only) +{ + pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names)); +} + +static void v4l_print_modulator(const void *arg, bool write_only) +{ + 
const struct v4l2_modulator *p = arg; + + if (write_only) + pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->capability, + p->rangelow, p->rangehigh, p->txsubchans); +} + +static void v4l_print_tuner(const void *arg, bool write_only) +{ + const struct v4l2_tuner *p = arg; + + if (write_only) + pr_cont("index=%u, audmode=%u\n", p->index, p->audmode); + else + pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n", + p->index, (int)sizeof(p->name), p->name, p->type, + p->capability, p->rangelow, + p->rangehigh, p->signal, p->afc, + p->rxsubchans, p->audmode); +} + +static void v4l_print_frequency(const void *arg, bool write_only) +{ + const struct v4l2_frequency *p = arg; + + pr_cont("tuner=%u, type=%u, frequency=%u\n", + p->tuner, p->type, p->frequency); +} + +static void v4l_print_standard(const void *arg, bool write_only) +{ + const struct v4l2_standard *p = arg; + + pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n", + p->index, + (unsigned long long)p->id, (int)sizeof(p->name), p->name, + p->frameperiod.numerator, + p->frameperiod.denominator, + p->framelines); +} + +static void v4l_print_std(const void *arg, bool write_only) +{ + pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg); +} + +static void v4l_print_hw_freq_seek(const void *arg, bool write_only) +{ + const struct v4l2_hw_freq_seek *p = arg; + + pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n", + p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, + p->rangelow, p->rangehigh); +} + +static void v4l_print_requestbuffers(const void *arg, bool write_only) +{ + const struct v4l2_requestbuffers *p = arg; + + pr_cont("count=%d, type=%s, memory=%s\n", + p->count, + prt_names(p->type, 
v4l2_type_names), + prt_names(p->memory, v4l2_memory_names)); +} + +static void v4l_print_buffer(const void *arg, bool write_only) +{ + const struct v4l2_buffer *p = arg; + const struct v4l2_timecode *tc = &p->timecode; + const struct v4l2_plane *plane; + int i; + + pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s", + (int)p->timestamp.tv_sec / 3600, + ((int)p->timestamp.tv_sec / 60) % 60, + ((int)p->timestamp.tv_sec % 60), + (long)p->timestamp.tv_usec, + p->index, + prt_names(p->type, v4l2_type_names), p->request_fd, + p->flags, prt_names(p->field, v4l2_field_names), + p->sequence, prt_names(p->memory, v4l2_memory_names)); + + if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) { + pr_cont("\n"); + for (i = 0; i < p->length; ++i) { + plane = &p->m.planes[i]; + printk(KERN_DEBUG + "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n", + i, plane->bytesused, plane->data_offset, + plane->m.userptr, plane->length); + } + } else { + pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", + p->bytesused, p->m.userptr, p->length); + } + + printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n", + tc->hours, tc->minutes, tc->seconds, + tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits); +} + +static void v4l_print_exportbuffer(const void *arg, bool write_only) +{ + const struct v4l2_exportbuffer *p = arg; + + pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n", + p->fd, prt_names(p->type, v4l2_type_names), + p->index, p->plane, p->flags); +} + +static void v4l_print_create_buffers(const void *arg, bool write_only) +{ + const struct v4l2_create_buffers *p = arg; + + pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ", + p->index, p->count, prt_names(p->memory, v4l2_memory_names), + p->capabilities, p->max_num_buffers); + v4l_print_format(&p->format, write_only); +} + +static void 
v4l_print_remove_buffers(const void *arg, bool write_only) +{ + const struct v4l2_remove_buffers *p = arg; + + pr_cont("type=%s, index=%u, count=%u\n", + prt_names(p->type, v4l2_type_names), p->index, p->count); +} + +static void v4l_print_streamparm(const void *arg, bool write_only) +{ + const struct v4l2_streamparm *p = arg; + + pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + const struct v4l2_captureparm *c = &p->parm.capture; + + pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n", + c->capability, c->capturemode, + c->timeperframe.numerator, c->timeperframe.denominator, + c->extendedmode, c->readbuffers); + } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + const struct v4l2_outputparm *c = &p->parm.output; + + pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n", + c->capability, c->outputmode, + c->timeperframe.numerator, c->timeperframe.denominator, + c->extendedmode, c->writebuffers); + } else { + pr_cont("\n"); + } +} + +static void v4l_print_queryctrl(const void *arg, bool write_only) +{ + const struct v4l2_queryctrl *p = arg; + + pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n", + p->id, p->type, (int)sizeof(p->name), p->name, + p->minimum, p->maximum, + p->step, p->default_value, p->flags); +} + +static void v4l_print_query_ext_ctrl(const void *arg, bool write_only) +{ + const struct v4l2_query_ext_ctrl *p = arg; + + pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n", + p->id, p->type, (int)sizeof(p->name), p->name, + p->minimum, p->maximum, + p->step, p->default_value, p->flags, + p->elem_size, p->elems, p->nr_of_dims, + p->dims[0], p->dims[1], 
p->dims[2], p->dims[3]); +} + +static void v4l_print_querymenu(const void *arg, bool write_only) +{ + const struct v4l2_querymenu *p = arg; + + pr_cont("id=0x%x, index=%d\n", p->id, p->index); +} + +static void v4l_print_control(const void *arg, bool write_only) +{ + const struct v4l2_control *p = arg; + const char *name = v4l2_ctrl_get_name(p->id); + + if (name) + pr_cont("name=%s, ", name); + pr_cont("id=0x%x, value=%d\n", p->id, p->value); +} + +static void v4l_print_ext_controls(const void *arg, bool write_only) +{ + const struct v4l2_ext_controls *p = arg; + int i; + + pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d", + p->which, p->count, p->error_idx, p->request_fd); + for (i = 0; i < p->count; i++) { + unsigned int id = p->controls[i].id; + const char *name = v4l2_ctrl_get_name(id); + + if (name) + pr_cont(", name=%s", name); + if (!p->controls[i].size) + pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value); + else + pr_cont(", id/size=0x%x/%u", id, p->controls[i].size); + } + pr_cont("\n"); +} + +static void v4l_print_cropcap(const void *arg, bool write_only) +{ + const struct v4l2_cropcap *p = arg; + + pr_cont("type=%s, bounds (%d,%d)/%ux%u, defrect (%d,%d)/%ux%u, pixelaspect %d/%d\n", + prt_names(p->type, v4l2_type_names), + p->bounds.left, p->bounds.top, + p->bounds.width, p->bounds.height, + p->defrect.left, p->defrect.top, + p->defrect.width, p->defrect.height, + p->pixelaspect.numerator, p->pixelaspect.denominator); +} + +static void v4l_print_crop(const void *arg, bool write_only) +{ + const struct v4l2_crop *p = arg; + + pr_cont("type=%s, crop=(%d,%d)/%ux%u\n", + prt_names(p->type, v4l2_type_names), + p->c.left, p->c.top, + p->c.width, p->c.height); +} + +static void v4l_print_selection(const void *arg, bool write_only) +{ + const struct v4l2_selection *p = arg; + + pr_cont("type=%s, target=%d, flags=0x%x, rect=(%d,%d)/%ux%u\n", + prt_names(p->type, v4l2_type_names), + p->target, p->flags, + p->r.left, p->r.top, p->r.width, 
		p->r.height);
}

/*
 * NOTE(review): the helpers below all follow the same shape: they format one
 * ioctl payload struct to the kernel log via pr_cont()/printk(KERN_DEBUG).
 * The write_only flag is unused in these particular printers.
 */

/* Print a struct v4l2_jpegcompression (JPEG quality/marker settings). */
static void v4l_print_jpegcompression(const void *arg, bool write_only)
{
	const struct v4l2_jpegcompression *p = arg;

	pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
		p->quality, p->APPn, p->APP_len,
		p->COM_len, p->jpeg_markers);
}

/* Print a struct v4l2_enc_idx (encoder index entry counts). */
static void v4l_print_enc_idx(const void *arg, bool write_only)
{
	const struct v4l2_enc_idx *p = arg;

	pr_cont("entries=%d, entries_cap=%d\n",
		p->entries, p->entries_cap);
}

/* Print a struct v4l2_encoder_cmd (command id and flags only). */
static void v4l_print_encoder_cmd(const void *arg, bool write_only)
{
	const struct v4l2_encoder_cmd *p = arg;

	pr_cont("cmd=%d, flags=0x%x\n",
		p->cmd, p->flags);
}

/*
 * Print a struct v4l2_decoder_cmd; START and STOP commands carry extra
 * union members (speed/format resp. pts) which are printed separately.
 */
static void v4l_print_decoder_cmd(const void *arg, bool write_only)
{
	const struct v4l2_decoder_cmd *p = arg;

	pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);

	if (p->cmd == V4L2_DEC_CMD_START)
		pr_info("speed=%d, format=%u\n",
			p->start.speed, p->start.format);
	else if (p->cmd == V4L2_DEC_CMD_STOP)
		pr_info("pts=%llu\n", p->stop.pts);
}

/*
 * Print a struct v4l2_dbg_chip_info. The match union holds either a driver
 * name (I2C-driver match) or a numeric address, so pick the right field.
 */
static void v4l_print_dbg_chip_info(const void *arg, bool write_only)
{
	const struct v4l2_dbg_chip_info *p = arg;

	pr_cont("type=%u, ", p->match.type);
	if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
		pr_cont("name=%.*s, ",
			(int)sizeof(p->match.name), p->match.name);
	else
		pr_cont("addr=%u, ", p->match.addr);
	/* %.*s bounds the print since match.name need not be NUL-terminated */
	pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name);
}

/* Print a struct v4l2_dbg_register (same match-union handling as above). */
static void v4l_print_dbg_register(const void *arg, bool write_only)
{
	const struct v4l2_dbg_register *p = arg;

	pr_cont("type=%u, ", p->match.type);
	if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
		pr_cont("name=%.*s, ",
			(int)sizeof(p->match.name), p->match.name);
	else
		pr_cont("addr=%u, ", p->match.addr);
	pr_cont("reg=0x%llx, val=0x%llx\n",
		p->reg, p->val);
}

/*
 * Print a struct v4l2_dv_timings. Only the BT.656/BT.1120 timing type has a
 * detailed dump; any other type just prints its numeric id.
 */
static void v4l_print_dv_timings(const void *arg, bool write_only)
{
	const struct v4l2_dv_timings *p = arg;

	switch (p->type) {
	case V4L2_DV_BT_656_1120:
		pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
			p->bt.interlaced, p->bt.pixelclock,
			p->bt.width, p->bt.height,
			p->bt.polarities, p->bt.hfrontporch,
			p->bt.hsync, p->bt.hbackporch,
			p->bt.vfrontporch, p->bt.vsync,
			p->bt.vbackporch, p->bt.il_vfrontporch,
			p->bt.il_vsync, p->bt.il_vbackporch,
			p->bt.standards, p->bt.flags);
		break;
	default:
		pr_cont("type=%d\n", p->type);
		break;
	}
}

/* Print a struct v4l2_enum_dv_timings: index plus the embedded timings. */
static void v4l_print_enum_dv_timings(const void *arg, bool write_only)
{
	const struct v4l2_enum_dv_timings *p = arg;

	pr_cont("index=%u, ", p->index);
	v4l_print_dv_timings(&p->timings, write_only);
}

/* Print a struct v4l2_dv_timings_cap (min/max ranges for BT.656/1120). */
static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
{
	const struct v4l2_dv_timings_cap *p = arg;

	switch (p->type) {
	case V4L2_DV_BT_656_1120:
		pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
			p->bt.min_width, p->bt.max_width,
			p->bt.min_height, p->bt.max_height,
			p->bt.min_pixelclock, p->bt.max_pixelclock,
			p->bt.standards, p->bt.capabilities);
		break;
	default:
		pr_cont("type=%u\n", p->type);
		break;
	}
}

/*
 * Print a struct v4l2_frmsizeenum; the union payload depends on p->type
 * (discrete size, stepwise range, or nothing for continuous).
 */
static void v4l_print_frmsizeenum(const void *arg, bool write_only)
{
	const struct v4l2_frmsizeenum *p = arg;

	pr_cont("index=%u, pixelformat=%p4cc, type=%u",
		p->index, &p->pixel_format, p->type);
	switch (p->type) {
	case V4L2_FRMSIZE_TYPE_DISCRETE:
		pr_cont(", wxh=%ux%u\n",
			p->discrete.width, p->discrete.height);
		break;
	case V4L2_FRMSIZE_TYPE_STEPWISE:
		pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
			p->stepwise.min_width,
			p->stepwise.min_height,
			p->stepwise.max_width,
			p->stepwise.max_height,
			p->stepwise.step_width,
			p->stepwise.step_height);
		break;
	case V4L2_FRMSIZE_TYPE_CONTINUOUS:
	default:
		pr_cont("\n");
		break;
	}
}

/* Print a struct v4l2_frmivalenum (frame intervals; union keyed on type). */
static void v4l_print_frmivalenum(const void *arg, bool write_only)
{
	const struct v4l2_frmivalenum *p = arg;

	pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u",
		p->index, &p->pixel_format, p->width, p->height, p->type);
	switch (p->type) {
	case V4L2_FRMIVAL_TYPE_DISCRETE:
		pr_cont(", fps=%d/%d\n",
			p->discrete.numerator,
			p->discrete.denominator);
		break;
	case V4L2_FRMIVAL_TYPE_STEPWISE:
		pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
			p->stepwise.min.numerator,
			p->stepwise.min.denominator,
			p->stepwise.max.numerator,
			p->stepwise.max.denominator,
			p->stepwise.step.numerator,
			p->stepwise.step.denominator);
		break;
	case V4L2_FRMIVAL_TYPE_CONTINUOUS:
	default:
		pr_cont("\n");
		break;
	}
}

/*
 * Print a struct v4l2_event: common header first, then per-type payload
 * (vsync field, control change details, or frame-sync sequence).
 */
static void v4l_print_event(const void *arg, bool write_only)
{
	const struct v4l2_event *p = arg;
	const struct v4l2_event_ctrl *c;

	pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n",
		p->type, p->pending, p->sequence, p->id,
		p->timestamp.tv_sec, p->timestamp.tv_nsec);
	switch (p->type) {
	case V4L2_EVENT_VSYNC:
		printk(KERN_DEBUG "field=%s\n",
		       prt_names(p->u.vsync.field, v4l2_field_names));
		break;
	case V4L2_EVENT_CTRL:
		c = &p->u.ctrl;
		printk(KERN_DEBUG "changes=0x%x, type=%u, ",
		       c->changes, c->type);
		/* 64-bit controls carry their value in value64 instead */
		if (c->type == V4L2_CTRL_TYPE_INTEGER64)
			pr_cont("value64=%lld, ", c->value64);
		else
			pr_cont("value=%d, ", c->value);
		pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
			c->flags, c->minimum, c->maximum,
			c->step, c->default_value);
		break;
	case V4L2_EVENT_FRAME_SYNC:
		pr_cont("frame_sequence=%u\n",
			p->u.frame_sync.frame_sequence);
		break;
	}
}

/* Print a struct v4l2_event_subscription. */
static void v4l_print_event_subscription(const void *arg, bool write_only)
{
	const struct v4l2_event_subscription *p = arg;

	pr_cont("type=0x%x, id=0x%x, flags=0x%x\n",
		p->type, p->id, p->flags);
}

/*
 * Print a struct v4l2_sliced_vbi_cap: the summary line, then one debug line
 * per VBI line (0..23) with the service masks of both fields.
 */
static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only)
{
	const struct v4l2_sliced_vbi_cap *p = arg;
	int i;

	pr_cont("type=%s, service_set=0x%08x\n",
		prt_names(p->type, v4l2_type_names), p->service_set);
	for (i = 0; i < 24; i++)
		printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
		       p->service_lines[0][i],
		       p->service_lines[1][i]);
}

/* Print a struct v4l2_frequency_band. */
static void v4l_print_freq_band(const void *arg, bool write_only)
{
	const struct v4l2_frequency_band *p = arg;

	pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
		p->tuner, p->type, p->index,
		p->capability, p->rangelow,
		p->rangehigh, p->modulation);
}

/* Print a struct v4l2_edid (header fields only, not the EDID blob). */
static void v4l_print_edid(const void *arg, bool write_only)
{
	const struct v4l2_edid *p = arg;

	pr_cont("pad=%u, start_block=%u, blocks=%u\n",
		p->pad, p->start_block, p->blocks);
}

/* Print a bare u32 argument. */
static void v4l_print_u32(const void *arg, bool write_only)
{
	pr_cont("value=%u\n", *(const u32 *)arg);
}

/* Terminate the log line for ioctls with no printable payload. */
static void v4l_print_newline(const void *arg, bool write_only)
{
	pr_cont("\n");
}

/* Fallback printer for driver-private ioctls. */
static void v4l_print_default(const void *arg, bool write_only)
{
	pr_cont("driver-specific ioctl\n");
}

/*
 * Validate a v4l2_ext_controls request for the given ioctl: zero the
 * reserved fields and reject invalid 'which' values or mixed control
 * classes. Returns true when the request may proceed.
 */
static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
{
	__u32 i;

	/* zero the reserved fields */
	c->reserved[0] = 0;
	for (i = 0; i < c->count; i++)
		c->controls[i].reserved2[0] = 0;

	switch (c->which) {
	case V4L2_CID_PRIVATE_BASE:
		/*
		 * V4L2_CID_PRIVATE_BASE cannot be used as control class
		 * when using extended controls.
		 * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
		 * is it allowed for backwards compatibility.
		 */
		if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
			return false;
		break;
	case V4L2_CTRL_WHICH_DEF_VAL:
	case V4L2_CTRL_WHICH_MIN_VAL:
	case V4L2_CTRL_WHICH_MAX_VAL:
		/* Default, minimum or maximum value cannot be changed */
		if (ioctl == VIDIOC_S_EXT_CTRLS ||
		    ioctl == VIDIOC_TRY_EXT_CTRLS) {
			c->error_idx = c->count;
			return false;
		}
		return true;
	case V4L2_CTRL_WHICH_CUR_VAL:
		return true;
	case V4L2_CTRL_WHICH_REQUEST_VAL:
		/* Request values are not handled through this path */
		c->error_idx = c->count;
		return false;
	}

	/* Check that all controls are from the same control class. */
	for (i = 0; i < c->count; i++) {
		if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
			/*
			 * TRY reports the offending index; other ioctls
			 * report count to signal a pre-validation failure.
			 */
			c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
								       c->count;
			return false;
		}
	}
	return true;
}

/*
 * Check that the device node supports format ioctls for the given buffer
 * type: the node type, direction (rx/tx) and the presence of the matching
 * vidioc_g_fmt_* handler must all agree. Returns 0 if supported, -EINVAL
 * otherwise.
 */
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
	const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
			     V4L2_CAP_VIDEO_CAPTURE_MPLANE |
			     V4L2_CAP_VIDEO_OUTPUT |
			     V4L2_CAP_VIDEO_OUTPUT_MPLANE |
			     V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
	const u32 meta_caps = V4L2_CAP_META_CAPTURE |
			      V4L2_CAP_META_OUTPUT;
	struct video_device *vfd = video_devdata(file);
	const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
	bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO &&
		      (vfd->device_caps & vid_caps);
	bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
	bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
	bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO &&
		       (vfd->device_caps & meta_caps);
	/* vfl_dir != TX means the node can receive; != RX means it can send */
	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
	bool is_tx = vfd->vfl_dir != VFL_DIR_RX;

	if (ops == NULL)
		return -EINVAL;

	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		/* single-planar capture may be emulated on an mplane handler */
		if ((is_vid || is_tch) && is_rx &&
		    (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (is_vid && is_tx &&
		    (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane))
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay)
			return 0;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
		if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap)
			return 0;
		break;
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out)
			return 0;
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
		if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap)
			return 0;
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
			return 0;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
		if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
			return 0;
		break;
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out)
			return 0;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
		if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
			return 0;
		break;
	case V4L2_BUF_TYPE_META_OUTPUT:
		if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
			return 0;
		break;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Reset colorspace/encoding/quantization/xfer_func to their defaults when
 * the supplied values are invalid. HSV pixel formats use the HSV-encoding
 * validity check instead of the YCbCr one.
 */
static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace,
				    u32 *encoding, u32 *quantization,
				    u32 *xfer_func)
{
	bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 ||
		      pixelformat == V4L2_PIX_FMT_HSV32;

	/* an invalid colorspace invalidates all the derived fields too */
	if (!v4l2_is_colorspace_valid(*colorspace)) {
		*colorspace = V4L2_COLORSPACE_DEFAULT;
		*encoding = V4L2_YCBCR_ENC_DEFAULT;
		*quantization = V4L2_QUANTIZATION_DEFAULT;
		*xfer_func = V4L2_XFER_FUNC_DEFAULT;
	}

	if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) ||
	    (is_hsv && !v4l2_is_hsv_enc_valid(*encoding)))
		*encoding =
V4L2_YCBCR_ENC_DEFAULT;

	if (!v4l2_is_quant_valid(*quantization))
		*quantization = V4L2_QUANTIZATION_DEFAULT;

	if (!v4l2_is_xfer_func_valid(*xfer_func))
		*xfer_func = V4L2_XFER_FUNC_DEFAULT;
}

/*
 * Normalise a struct v4l2_format coming from user space: clamp the plane
 * count, zero the extended pix fields when the priv magic is absent, and
 * replace invalid colorspace-related values with defaults.
 */
static void v4l_sanitize_format(struct v4l2_format *fmt)
{
	unsigned int offset;

	/* Make sure num_planes is not bogus */
	if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
	    fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes,
						   VIDEO_MAX_PLANES);

	/*
	 * The v4l2_pix_format structure has been extended with fields that were
	 * not previously required to be set to zero by applications. The priv
	 * field, when set to a magic value, indicates that the extended fields
	 * are valid. Otherwise they will contain undefined values. To simplify
	 * the API towards drivers zero the extended fields and set the priv
	 * field to the magic value when the extended pixel format structure
	 * isn't used by applications.
	 */
	if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) {
			fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;

			/* zero everything after (and excluding) priv */
			offset = offsetof(struct v4l2_pix_format, priv) +
				 sizeof(fmt->fmt.pix.priv);
			memset(((void *)&fmt->fmt.pix) + offset, 0,
			       sizeof(fmt->fmt.pix) - offset);
		}
	}

	/* Replace invalid colorspace values with defaults.
	 */
	if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat,
					&fmt->fmt.pix.colorspace,
					&fmt->fmt.pix.ycbcr_enc,
					&fmt->fmt.pix.quantization,
					&fmt->fmt.pix.xfer_func);
	} else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
		   fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/*
		 * Copy through locals: the mplane fields cannot be passed
		 * by address directly here.
		 */
		u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
		u32 quantization = fmt->fmt.pix_mp.quantization;
		u32 xfer_func = fmt->fmt.pix_mp.xfer_func;

		v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat,
					&fmt->fmt.pix_mp.colorspace, &ycbcr_enc,
					&quantization, &xfer_func);

		fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc;
		fmt->fmt.pix_mp.quantization = quantization;
		fmt->fmt.pix_mp.xfer_func = xfer_func;
	}
}

/*
 * VIDIOC_QUERYCAP handler: pre-fill the capability struct from the video
 * device, delegate to the driver, then sanity-check that the driver did not
 * alter device_caps or shrink capabilities.
 */
static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
			struct file *file, void *fh, void *arg)
{
	struct v4l2_capability *cap = (struct v4l2_capability *)arg;
	struct video_device *vfd = video_devdata(file);
	int ret;

	cap->version = LINUX_VERSION_CODE;
	cap->device_caps = vfd->device_caps;
	cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS;

	media_set_bus_info(cap->bus_info, sizeof(cap->bus_info),
			   vfd->dev_parent);

	ret = ops->vidioc_querycap(file, fh, cap);

	/*
	 * Drivers must not change device_caps, so check for this and
	 * warn if this happened.
	 */
	WARN_ON(cap->device_caps != vfd->device_caps);
	/*
	 * Check that capabilities is a superset of
	 * vfd->device_caps | V4L2_CAP_DEVICE_CAPS
	 */
	WARN_ON((cap->capabilities &
		 (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) !=
		(vfd->device_caps | V4L2_CAP_DEVICE_CAPS));
	/* the ioctl core always supports the extended pix format */
	cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
	cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;

	return ret;
}

/*
 * VIDIOC_G_INPUT: media-controller-centric devices expose exactly one
 * input (index 0); otherwise delegate to the driver.
 */
static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
		       struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);

	if (vfd->device_caps & V4L2_CAP_IO_MC) {
		*(int *)arg = 0;
		return 0;
	}

	return ops->vidioc_g_input(file, fh, arg);
}

/* VIDIOC_G_OUTPUT: same single-output shortcut for V4L2_CAP_IO_MC devices. */
static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
			struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);

	if (vfd->device_caps & V4L2_CAP_IO_MC) {
		*(int *)arg = 0;
		return 0;
	}

	return ops->vidioc_g_output(file, fh, arg);
}

/*
 * VIDIOC_S_INPUT: enable the media source first; for V4L2_CAP_IO_MC devices
 * only input 0 is accepted, otherwise the driver handles the switch.
 */
static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
		       struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	int ret;

	ret = v4l_enable_media_source(vfd);
	if (ret)
		return ret;

	if (vfd->device_caps & V4L2_CAP_IO_MC)
		return *(int *)arg ? -EINVAL : 0;

	return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
}

/* VIDIOC_S_OUTPUT: only output 0 is valid for V4L2_CAP_IO_MC devices. */
static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
			struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);

	if (vfd->device_caps & V4L2_CAP_IO_MC)
		return *(int *)arg ? -EINVAL : 0;

	return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
}

/* VIDIOC_G_PRIORITY: report the highest priority among open handles. */
static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
			  struct file *file, void *fh, void *arg)
{
	struct video_device *vfd;
	u32 *p = arg;

	vfd = video_devdata(file);
	*p = v4l2_prio_max(vfd->prio);
	return 0;
}

/*
 * VIDIOC_S_PRIORITY: change this file handle's priority. Requires the
 * driver to use struct v4l2_fh, otherwise there is no per-handle state.
 */
static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
			  struct file *file, void *fh, void *arg)
{
	struct video_device *vfd;
	struct v4l2_fh *vfh;
	u32 *p = arg;

	vfd = video_devdata(file);
	if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
		return -ENOTTY;
	vfh = file->private_data;
	return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}

/*
 * VIDIOC_ENUMINPUT: set the core-derived capability flags, synthesize a
 * single camera input for V4L2_CAP_IO_MC devices, else ask the driver.
 */
static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
			 struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_input *p = arg;

	/*
	 * We set the flags for CAP_DV_TIMINGS &
	 * CAP_STD here based on ioctl handler provided by the
	 * driver. If the driver doesn't support these
	 * for a specific input, it must override these flags.
	 */
	if (is_valid_ioctl(vfd, VIDIOC_S_STD))
		p->capabilities |= V4L2_IN_CAP_STD;

	if (vfd->device_caps & V4L2_CAP_IO_MC) {
		if (p->index)
			return -EINVAL;
		strscpy(p->name, vfd->name, sizeof(p->name));
		p->type = V4L2_INPUT_TYPE_CAMERA;
		return 0;
	}

	return ops->vidioc_enum_input(file, fh, p);
}

/* VIDIOC_ENUMOUTPUT: mirror of v4l_enuminput() for the output side. */
static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
			  struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_output *p = arg;

	/*
	 * We set the flags for CAP_DV_TIMINGS &
	 * CAP_STD here based on ioctl handler provided by the
	 * driver. If the driver doesn't support these
	 * for a specific output, it must override these flags.
+ */ + if (is_valid_ioctl(vfd, VIDIOC_S_STD)) + p->capabilities |= V4L2_OUT_CAP_STD; + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + if (p->index) + return -EINVAL; + strscpy(p->name, vfd->name, sizeof(p->name)); + p->type = V4L2_OUTPUT_TYPE_ANALOG; + return 0; + } + + return ops->vidioc_enum_output(file, fh, p); +} + +static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) +{ + const unsigned sz = sizeof(fmt->description); + const char *descr = NULL; + u32 flags = 0; + + /* + * We depart from the normal coding style here since the descriptions + * should be aligned so it is easy to see which descriptions will be + * longer than 31 characters (the max length for a description). + * And frankly, this is easier to read anyway. + * + * Note that gcc will use O(log N) comparisons to find the right case. + */ + switch (fmt->pixelformat) { + /* Max description length mask: descr = "0123456789012345678901234567890" */ + case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break; + case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break; + case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break; + case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break; + case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break; + case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break; + case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break; + case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break; + case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break; + case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break; + case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break; + case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break; + case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break; + case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break; + case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break; + case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break; + case 
V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break; + case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break; + case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break; + case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break; + case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break; + case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break; + case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break; + case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break; + case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break; + case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break; + case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break; + case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break; + case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break; + case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break; + case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break; + case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break; + case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break; + case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break; + case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break; + case V4L2_PIX_FMT_RGB48: descr = "48-bit RGB 16-16-16"; break; + case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break; + case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break; + case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break; + case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break; + case 
V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; + case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break; + case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break; + case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; + case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break; + case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break; + case V4L2_PIX_FMT_Y12P: descr = "12-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_Y14P: descr = "14-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break; + case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16I: descr = "Interleaved 16-bit Greyscale"; break; + case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break; + case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break; + case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break; + case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break; + case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break; + case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break; + case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break; + case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break; + case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break; + case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break; + case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break; + case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break; + case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break; + case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break; + case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break; + case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 
4-4-4-4"; break; + case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break; + case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break; + case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break; + case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break; + case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break; + case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break; + case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break; + case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break; + case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break; + case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break; + case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break; + case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break; + case V4L2_PIX_FMT_NV15: descr = "10-bit Y/UV 4:2:0 (Packed)"; break; + case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break; + case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break; + case V4L2_PIX_FMT_NV20: descr = "10-bit Y/UV 4:2:2 (Packed)"; break; + case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break; + case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break; + case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break; + case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break; + case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; + case 
V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break; + case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break; + case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_IPU3_SBGGR10: descr = "10-bit bayer BGGR IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SGBRG10: descr = "10-bit bayer GBRG IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SGRBG10: descr = "10-bit bayer GRBG IPU3 Packed"; break; + case 
V4L2_PIX_FMT_IPU3_SRGGB10: descr = "10-bit bayer RGGB IPU3 Packed"; break; + case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break; + case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer GBGB/RGRG (A-law)"; break; + case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break; + case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break; + case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break; + case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; + case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; + case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; + case V4L2_PIX_FMT_RAW_CRU10: descr = "10-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_RAW_CRU12: descr = "12-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break; + case 
V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_RAW_CRU14: descr = "14-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_RAW_CRU20: descr = "14-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break; + case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break; + case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break; + case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break; + case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break; + case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break; + case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break; + case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break; + case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break; + case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break; + case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break; + case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break; + case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break; + case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break; + case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break; + case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break; + case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break; + case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break; + case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break; + case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break; + case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break; + case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break; + case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break; + case 
V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break; + case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break; + case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break; + case V4L2_META_FMT_UVC_MSXU_1_5: descr = "UVC MSXU Metadata"; break; + case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break; + case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break; + case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break; + case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break; + case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break; + case V4L2_META_FMT_C3ISP_PARAMS: descr = "Amlogic C3 ISP Parameters"; break; + case V4L2_META_FMT_C3ISP_STATS: descr = "Amlogic C3 ISP Statistics"; break; + case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break; + case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break; + case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break; + case V4L2_META_FMT_RPI_BE_CFG: descr = "RPi PiSP BE Config format"; break; + case V4L2_META_FMT_RPI_FE_CFG: descr = "RPi PiSP FE Config format"; break; + case V4L2_META_FMT_RPI_FE_STATS: descr = "RPi PiSP FE Statistics format"; break; + case V4L2_META_FMT_GENERIC_8: descr = "8-bit Generic Metadata"; break; + case V4L2_META_FMT_GENERIC_CSI2_10: descr = "8-bit Generic Meta, 10b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_12: descr = "8-bit Generic Meta, 12b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_14: descr = "8-bit Generic Meta, 14b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_16: descr = "8-bit Generic Meta, 16b CSI-2"; break; + case 
V4L2_META_FMT_GENERIC_CSI2_20: descr = "8-bit Generic Meta, 20b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_24: descr = "8-bit Generic Meta, 24b CSI-2"; break; + + default: + /* Compressed formats */ + flags = V4L2_FMT_FLAG_COMPRESSED; + switch (fmt->pixelformat) { + /* Max description length mask: descr = "0123456789012345678901234567890" */ + case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break; + case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break; + case V4L2_PIX_FMT_DV: descr = "1394"; break; + case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break; + case V4L2_PIX_FMT_H264: descr = "H.264"; break; + case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break; + case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break; + case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break; + case V4L2_PIX_FMT_H263: descr = "H.263"; break; + case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break; + case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break; + case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break; + case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break; + case V4L2_PIX_FMT_XVID: descr = "Xvid"; break; + case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; + case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break; + case V4L2_PIX_FMT_VP8: descr = "VP8"; break; + case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break; + case V4L2_PIX_FMT_VP9: descr = "VP9"; break; + case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break; + case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */ + case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break; + case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */ + case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */ + case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break; + case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break; + case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; 
break; + case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break; + case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break; + case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break; + case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break; + case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break; + case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break; + case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break; + case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break; + case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break; + case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break; + case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break; + case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break; + case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break; + case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break; + case V4L2_PIX_FMT_OV518: descr = "GSPCA OV518"; break; + case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break; + case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; + case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; + case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; + case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break; + case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break; + case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break; + case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break; + case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break; + case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break; + case V4L2_PIX_FMT_HEXTILE: descr = "Hextile Compressed Format"; break; + case V4L2_PIX_FMT_PISP_COMP1_RGGB: descr = "PiSP 8b RGRG/GBGB mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_GRBG: descr = "PiSP 8b GRGR/BGBG mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_GBRG: descr = "PiSP 8b GBGB/RGRG mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_BGGR: descr = "PiSP 8b BGBG/GRGR mode1 
compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_MONO: descr = "PiSP 8b monochrome mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_RGGB: descr = "PiSP 8b RGRG/GBGB mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_GRBG: descr = "PiSP 8b GRGR/BGBG mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_GBRG: descr = "PiSP 8b GBGB/RGRG mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_BGGR: descr = "PiSP 8b BGBG/GRGR mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_MONO: descr = "PiSP 8b monochrome mode2 compr"; break; + default: + if (fmt->description[0]) + return; + WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat); + flags = 0; + snprintf(fmt->description, sz, "%p4cc", + &fmt->pixelformat); + break; + } + } + + if (fmt->type == V4L2_BUF_TYPE_META_CAPTURE) { + switch (fmt->pixelformat) { + case V4L2_META_FMT_GENERIC_8: + case V4L2_META_FMT_GENERIC_CSI2_10: + case V4L2_META_FMT_GENERIC_CSI2_12: + case V4L2_META_FMT_GENERIC_CSI2_14: + case V4L2_META_FMT_GENERIC_CSI2_16: + case V4L2_META_FMT_GENERIC_CSI2_20: + case V4L2_META_FMT_GENERIC_CSI2_24: + fmt->flags |= V4L2_FMT_FLAG_META_LINE_BASED; + break; + default: + fmt->flags &= ~V4L2_FMT_FLAG_META_LINE_BASED; + } + } + + if (descr) + WARN_ON(strscpy(fmt->description, descr, sz) < 0); + fmt->flags |= flags; +} + +static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_fmtdesc *p = arg; + int ret = check_fmt(file, p->type); + u32 mbus_code; + u32 cap_mask; + + if (ret) + return ret; + ret = -EINVAL; + + if (!(vdev->device_caps & V4L2_CAP_IO_MC)) + p->mbus_code = 0; + + mbus_code = p->mbus_code; + memset_after(p, 0, type); + p->mbus_code = mbus_code; + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE; + if (!!(vdev->device_caps & cap_mask) != + (p->type == 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) + break; + + if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) + break; + ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) + break; + ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE; + if (!!(vdev->device_caps & cap_mask) != + (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) + break; + + if (unlikely(!ops->vidioc_enum_fmt_vid_out)) + break; + ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) + break; + ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); + break; + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) + break; + ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) + break; + ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); + break; + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_enum_fmt_meta_out)) + break; + ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg); + break; + } + if (ret == 0) + v4l_fill_fmtdesc(p); + return ret; +} + +static void v4l_pix_format_touch(struct v4l2_pix_format *p) +{ + /* + * The v4l2_pix_format structure contains fields that make no sense for + * touch. Set them to default values in this case. 
+ */ + + p->field = V4L2_FIELD_NONE; + p->colorspace = V4L2_COLORSPACE_RAW; + p->flags = 0; + p->ycbcr_enc = 0; + p->quantization = 0; + p->xfer_func = 0; +} + +static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + memset(&p->fmt, 0, sizeof(p->fmt)); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_g_fmt_vid_cap)) + break; + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_g_fmt_vid_out)) + break; + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + return ops->vidioc_g_fmt_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); + case 
V4L2_BUF_TYPE_SDR_OUTPUT: + return ops->vidioc_g_fmt_sdr_out(file, fh, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + return ops->vidioc_g_fmt_meta_cap(file, fh, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + return ops->vidioc_g_fmt_meta_out(file, fh, arg); + } + return -EINVAL; +} + +static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + unsigned int i; + + if (ret) + return ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + v4l_sanitize_format(p); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_vid_cap)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: 
+ if (unlikely(!ops->vidioc_s_fmt_vid_out)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_vbi_out)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_s_fmt_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_sdr_out)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_s_fmt_sdr_out(file, fh, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_meta_cap)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_s_fmt_meta_cap(file, fh, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_meta_out)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_s_fmt_meta_out(file, fh, arg); + } + return -EINVAL; +} + +static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, + 
struct file *file, void *fh, void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + unsigned int i; + + if (ret) + return ret; + + v4l_sanitize_format(p); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_vid_cap)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_vid_out)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) + break; + memset_after(p, 0, 
fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_vbi_out)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_try_fmt_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_sdr_out)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_try_fmt_sdr_out(file, fh, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_meta_cap)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_try_fmt_meta_cap(file, fh, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_meta_out)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_try_fmt_meta_out(file, fh, arg); + } + return -EINVAL; +} + +static int v4l_streamon(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_streamon(file, fh, *(unsigned int *)arg); +} + +static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg); +} + +static int v4l_g_tuner(const struct 
v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_tuner *p = arg; + int err; + + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + err = ops->vidioc_g_tuner(file, fh, p); + if (!err) + p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; + return err; +} + +static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_tuner *p = arg; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + return ops->vidioc_s_tuner(file, fh, p); +} + +static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_modulator *p = arg; + int err; + + if (vfd->vfl_type == VFL_TYPE_RADIO) + p->type = V4L2_TUNER_RADIO; + + err = ops->vidioc_g_modulator(file, fh, p); + if (!err) + p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; + return err; +} + +static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_modulator *p = arg; + + if (vfd->vfl_type == VFL_TYPE_RADIO) + p->type = V4L2_TUNER_RADIO; + + return ops->vidioc_s_modulator(file, fh, p); +} + +static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_frequency *p = arg; + + if (vfd->vfl_type == VFL_TYPE_SDR) + p->type = V4L2_TUNER_SDR; + else + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + return ops->vidioc_g_frequency(file, fh, p); +} + +static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + const struct v4l2_frequency *p = arg; + enum v4l2_tuner_type type; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + if (vfd->vfl_type == VFL_TYPE_SDR) { + if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) + return -EINVAL; + } else { + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (type != p->type) + return -EINVAL; + } + return ops->vidioc_s_frequency(file, fh, p); +} + +static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_standard *p = arg; + + return v4l_video_std_enumstd(p, vfd->tvnorms); +} + +static int v4l_s_std(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + v4l2_std_id id = *(v4l2_std_id *)arg, norm; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + norm = id & vfd->tvnorms; + if (vfd->tvnorms && !norm) /* Check if std is supported */ + return -EINVAL; + + /* Calls the specific handler */ + return ops->vidioc_s_std(file, fh, norm); +} + +static int v4l_querystd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + v4l2_std_id *p = arg; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + /* + * If no signal is detected, then the driver should return + * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with + * any standards that do not apply removed. + * + * This means that tuners, audio and video decoders can join + * their efforts to improve the standards detection. 
+ */ + *p = vfd->tvnorms; + return ops->vidioc_querystd(file, fh, arg); +} + +static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_hw_freq_seek *p = arg; + enum v4l2_tuner_type type; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + /* s_hw_freq_seek is not supported for SDR for now */ + if (vfd->vfl_type == VFL_TYPE_SDR) + return -EINVAL; + + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (p->type != type) + return -EINVAL; + return ops->vidioc_s_hw_freq_seek(file, fh, p); +} + +static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_framebuffer *p = arg; + + p->base = NULL; + return ops->vidioc_s_fbuf(file, fh, p); +} + +static int v4l_overlay(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_overlay(file, fh, *(unsigned int *)arg); +} + +static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_requestbuffers *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + memset_after(p, 0, flags); + + p->capabilities = 0; + if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS)) + p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS; + + return ops->vidioc_reqbufs(file, fh, p); +} + +static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? ret : ops->vidioc_querybuf(file, fh, p); +} + +static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? 
ret : ops->vidioc_qbuf(file, fh, p); +} + +static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? ret : ops->vidioc_dqbuf(file, fh, p); +} + +static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_create_buffers *create = arg; + int ret = check_fmt(file, create->format.type); + + if (ret) + return ret; + + memset_after(create, 0, flags); + + v4l_sanitize_format(&create->format); + + create->capabilities = 0; + if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS)) + create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS; + + ret = ops->vidioc_create_bufs(file, fh, create); + + if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) + create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + + return ret; +} + +static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *b = arg; + int ret = check_fmt(file, b->type); + + return ret ? 
ret : ops->vidioc_prepare_buf(file, fh, b); +} + +static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_remove_buffers *remove = arg; + + if (ops->vidioc_remove_bufs) + return ops->vidioc_remove_bufs(file, fh, remove); + + return -ENOTTY; +} + +static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_streamparm *p = arg; + v4l2_std_id std; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + if (ops->vidioc_g_parm) + return ops->vidioc_g_parm(file, fh, p); + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + if (vfd->device_caps & V4L2_CAP_READWRITE) + p->parm.capture.readbuffers = 2; + ret = ops->vidioc_g_std(file, fh, &std); + if (ret == 0) + v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe); + return ret; +} + +static int v4l_s_parm(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_streamparm *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + /* Note: extendedmode is never used in drivers */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) { + memset(p->parm.output.reserved, 0, + sizeof(p->parm.output.reserved)); + p->parm.output.extendedmode = 0; + p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY; + } else { + memset(p->parm.capture.reserved, 0, + sizeof(p->parm.capture.reserved)); + p->parm.capture.extendedmode = 0; + p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY; + } + return ops->vidioc_s_parm(file, fh, p); +} + +static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_query_ext_ctrl qec = {}; + struct v4l2_queryctrl *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? 
fh : NULL; + int ret; + + if (vfh && vfh->ctrl_handler) + return v4l2_queryctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_queryctrl(vfd->ctrl_handler, p); + if (!ops->vidioc_query_ext_ctrl) + return -ENOTTY; + + /* Simulate query_ext_ctr using query_ctrl. */ + qec.id = p->id; + ret = ops->vidioc_query_ext_ctrl(file, fh, &qec); + if (ret) + return ret; + v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec); + return ret; +} + +static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_query_ext_ctrl *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + if (vfh && vfh->ctrl_handler) + return v4l2_query_ext_ctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_query_ext_ctrl(vfd->ctrl_handler, p); + if (ops->vidioc_query_ext_ctrl) + return ops->vidioc_query_ext_ctrl(file, fh, p); + return -ENOTTY; +} + +static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_querymenu *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + if (vfh && vfh->ctrl_handler) + return v4l2_querymenu(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_querymenu(vfd->ctrl_handler, p); + if (ops->vidioc_querymenu) + return ops->vidioc_querymenu(file, fh, p); + return -ENOTTY; +} + +static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_control *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? 
fh : NULL; + struct v4l2_ext_controls ctrls; + struct v4l2_ext_control ctrl; + + if (vfh && vfh->ctrl_handler) + return v4l2_g_ctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_g_ctrl(vfd->ctrl_handler, p); + if (ops->vidioc_g_ext_ctrls == NULL) + return -ENOTTY; + + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); + ctrls.count = 1; + ctrls.controls = &ctrl; + ctrl.id = p->id; + ctrl.value = p->value; + if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) { + int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls); + + if (ret == 0) + p->value = ctrl.value; + return ret; + } + return -EINVAL; +} + +static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_control *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + struct v4l2_ext_controls ctrls; + struct v4l2_ext_control ctrl; + int ret; + + if (vfh && vfh->ctrl_handler) + return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p); + if (ops->vidioc_s_ext_ctrls == NULL) + return -ENOTTY; + + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); + ctrls.count = 1; + ctrls.controls = &ctrl; + ctrl.id = p->id; + ctrl.value = p->value; + if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL)) + return -EINVAL; + ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls); + p->value = ctrl.value; + return ret; +} + +static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? 
fh : NULL; + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_g_ext_ctrls(vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_g_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ? + ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL; +} + +static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_s_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ? + ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL; +} + +static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_try_ext_ctrls(vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_try_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ? + ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL; +} + +/* + * The selection API specified originally that the _MPLANE buffer types + * shouldn't be used. The reasons for this are lost in the mists of time + * (or just really crappy memories). 
Regardless, this is really annoying + * for userspace. So to keep things simple we map _MPLANE buffer types + * to their 'regular' counterparts before calling the driver. And we + * restore it afterwards. This way applications can use either buffer + * type and drivers don't need to check for both. + */ +static int v4l_g_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_g_selection(file, fh, p); + p->type = old_type; + return ret; +} + +static int v4l_s_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_s_selection(file, fh, p); + p->type = old_type; + return ret; +} + +static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_crop *p = arg; + struct v4l2_selection s = { + .type = p->type, + }; + int ret; + + /* simulate capture crop using selection api */ + + /* crop means compose for output devices */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE; + else + s.target = V4L2_SEL_TGT_CROP; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE ? 
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; + + ret = v4l_g_selection(ops, file, fh, &s); + + /* copying results to old structure on success */ + if (!ret) + p->c = s.r; + return ret; +} + +static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_crop *p = arg; + struct v4l2_selection s = { + .type = p->type, + .r = p->c, + }; + + /* simulate capture crop using selection api */ + + /* crop means compose for output devices */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE; + else + s.target = V4L2_SEL_TGT_CROP; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE ? + V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; + + return v4l_s_selection(ops, file, fh, &s); +} + +static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_cropcap *p = arg; + struct v4l2_selection s = { .type = p->type }; + int ret = 0; + + /* setting trivial pixelaspect */ + p->pixelaspect.numerator = 1; + p->pixelaspect.denominator = 1; + + if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + /* + * The determine_valid_ioctls() call already should ensure + * that this can never happen, but just in case... + */ + if (WARN_ON(!ops->vidioc_g_selection)) + return -ENOTTY; + + if (ops->vidioc_g_pixelaspect) + ret = ops->vidioc_g_pixelaspect(file, fh, s.type, + &p->pixelaspect); + + /* + * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the + * square pixel aspect ratio in that case. 
+ */ + if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD) + return ret; + + /* Use g_selection() to fill in the bounds and defrect rectangles */ + + /* obtaining bounds */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS; + else + s.target = V4L2_SEL_TGT_CROP_BOUNDS; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ? + V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS; + + ret = v4l_g_selection(ops, file, fh, &s); + if (ret) + return ret; + p->bounds = s.r; + + /* obtaining defrect */ + if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS) + s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT; + else + s.target = V4L2_SEL_TGT_CROP_DEFAULT; + + ret = v4l_g_selection(ops, file, fh, &s); + if (ret) + return ret; + p->defrect = s.r; + + return 0; +} + +static int v4l_log_status(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + int ret; + + if (vfd->v4l2_dev) + pr_info("%s: ================= START STATUS =================\n", + vfd->v4l2_dev->name); + ret = ops->vidioc_log_status(file, fh); + if (vfd->v4l2_dev) + pr_info("%s: ================== END STATUS ==================\n", + vfd->v4l2_dev->name); + return ret; +} + +static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + struct v4l2_dbg_register *p = arg; + struct video_device *vfd = video_devdata(file); + struct v4l2_subdev *sd; + int idx = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { + if (vfd->v4l2_dev == NULL) + return -EINVAL; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) + if (p->match.addr == idx++) + return v4l2_subdev_call(sd, core, g_register, p); + return -EINVAL; + } + if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && + (ops->vidioc_g_chip_info || p->match.addr == 0)) + return 
ops->vidioc_g_register(file, fh, p); + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + const struct v4l2_dbg_register *p = arg; + struct video_device *vfd = video_devdata(file); + struct v4l2_subdev *sd; + int idx = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { + if (vfd->v4l2_dev == NULL) + return -EINVAL; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) + if (p->match.addr == idx++) + return v4l2_subdev_call(sd, core, s_register, p); + return -EINVAL; + } + if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && + (ops->vidioc_g_chip_info || p->match.addr == 0)) + return ops->vidioc_s_register(file, fh, p); + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + struct video_device *vfd = video_devdata(file); + struct v4l2_dbg_chip_info *p = arg; + struct v4l2_subdev *sd; + int idx = 0; + + switch (p->match.type) { + case V4L2_CHIP_MATCH_BRIDGE: + if (ops->vidioc_s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (ops->vidioc_g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name)); + if (ops->vidioc_g_chip_info) + return ops->vidioc_g_chip_info(file, fh, arg); + if (p->match.addr) + return -EINVAL; + return 0; + + case V4L2_CHIP_MATCH_SUBDEV: + if (vfd->v4l2_dev == NULL) + break; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) { + if (p->match.addr != idx++) + continue; + if (sd->ops->core && sd->ops->core->s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (sd->ops->core && sd->ops->core->g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, sd->name, sizeof(p->name)); + return 0; + } + break; + } + return -EINVAL; +#else + return 
-ENOTTY; +#endif +} + +static int v4l_dqevent(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK); +} + +static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_subscribe_event(fh, arg); +} + +static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_unsubscribe_event(fh, arg); +} + +static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_sliced_vbi_cap *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + /* Clear up to type, everything after type is zeroed already */ + memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type)); + + return ops->vidioc_g_sliced_vbi_cap(file, fh, p); +} + +static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_frequency_band *p = arg; + enum v4l2_tuner_type type; + int err; + + if (vfd->vfl_type == VFL_TYPE_SDR) { + if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) + return -EINVAL; + type = p->type; + } else { + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (type != p->type) + return -EINVAL; + } + if (ops->vidioc_enum_freq_bands) { + err = ops->vidioc_enum_freq_bands(file, fh, p); + if (err != -ENOTTY) + return err; + } + if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) { + struct v4l2_tuner t = { + .index = p->tuner, + .type = type, + }; + + if (p->index) + return -EINVAL; + err = ops->vidioc_g_tuner(file, fh, &t); + if (err) + return err; + p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS; + p->rangelow = t.rangelow; + p->rangehigh = t.rangehigh; + p->modulation = (type == V4L2_TUNER_RADIO) ? 
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB; + return 0; + } + if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) { + struct v4l2_modulator m = { + .index = p->tuner, + }; + + if (type != V4L2_TUNER_RADIO) + return -EINVAL; + if (p->index) + return -EINVAL; + err = ops->vidioc_g_modulator(file, fh, &m); + if (err) + return err; + p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS; + p->rangelow = m.rangelow; + p->rangehigh = m.rangehigh; + p->modulation = V4L2_BAND_MODULATION_FM; + return 0; + } + return -ENOTTY; +} + +struct v4l2_ioctl_info { + unsigned int ioctl; + u32 flags; + const char * const name; + int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file, + void *fh, void *p); + void (*debug)(const void *arg, bool write_only); +}; + +/* This control needs a priority check */ +#define INFO_FL_PRIO (1 << 0) +/* This control can be valid if the filehandle passes a control handler. */ +#define INFO_FL_CTRL (1 << 1) +/* Queuing ioctl */ +#define INFO_FL_QUEUE (1 << 2) +/* Always copy back result, even on error */ +#define INFO_FL_ALWAYS_COPY (1 << 3) +/* Zero struct from after the field to the end */ +#define INFO_FL_CLEAR(v4l2_struct, field) \ + ((offsetof(struct v4l2_struct, field) + \ + sizeof_field(struct v4l2_struct, field)) << 16) +#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) + +#define DEFINE_V4L_STUB_FUNC(_vidioc) \ + static int v4l_stub_ ## _vidioc( \ + const struct v4l2_ioctl_ops *ops, \ + struct file *file, void *fh, void *p) \ + { \ + return ops->vidioc_ ## _vidioc(file, fh, p); \ + } + +#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \ + [_IOC_NR(_ioctl)] = { \ + .ioctl = _ioctl, \ + .flags = _flags, \ + .name = #_ioctl, \ + .func = _func, \ + .debug = _debug, \ + } + +DEFINE_V4L_STUB_FUNC(g_fbuf) +DEFINE_V4L_STUB_FUNC(expbuf) +DEFINE_V4L_STUB_FUNC(g_std) +DEFINE_V4L_STUB_FUNC(g_audio) +DEFINE_V4L_STUB_FUNC(s_audio) +DEFINE_V4L_STUB_FUNC(g_edid) +DEFINE_V4L_STUB_FUNC(s_edid) +DEFINE_V4L_STUB_FUNC(g_audout) 
+DEFINE_V4L_STUB_FUNC(s_audout) +DEFINE_V4L_STUB_FUNC(g_jpegcomp) +DEFINE_V4L_STUB_FUNC(s_jpegcomp) +DEFINE_V4L_STUB_FUNC(enumaudio) +DEFINE_V4L_STUB_FUNC(enumaudout) +DEFINE_V4L_STUB_FUNC(enum_framesizes) +DEFINE_V4L_STUB_FUNC(enum_frameintervals) +DEFINE_V4L_STUB_FUNC(g_enc_index) +DEFINE_V4L_STUB_FUNC(encoder_cmd) +DEFINE_V4L_STUB_FUNC(try_encoder_cmd) +DEFINE_V4L_STUB_FUNC(decoder_cmd) +DEFINE_V4L_STUB_FUNC(try_decoder_cmd) +DEFINE_V4L_STUB_FUNC(s_dv_timings) +DEFINE_V4L_STUB_FUNC(g_dv_timings) +DEFINE_V4L_STUB_FUNC(enum_dv_timings) +DEFINE_V4L_STUB_FUNC(query_dv_timings) +DEFINE_V4L_STUB_FUNC(dv_timings_cap) + +static const struct v4l2_ioctl_info v4l2_ioctls[] = { + IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0), + IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0), + IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0), + IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)), + IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0), + IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)), + IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)), + IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, 
v4l_print_streamparm, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0), + IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)), + IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)), + IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)), + IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL), + IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)), + IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0), + IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)), + IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)), + IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)), + IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0), + IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)), + IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, 
v4l_print_modulator, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)), + IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)), + IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)), + IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0), + IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0), + IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0), + IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)), + IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)), + IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)), + IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0), + IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, 
v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), + IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), + IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0), + IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0), + IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0), + IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0), + IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)), + IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0), + IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0), + IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0), + IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0), + IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)), + IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, 
INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)), + IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0), + IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)), + IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)), + IOCTL_INFO(VIDIOC_REMOVE_BUFS, v4l_remove_bufs, v4l_print_remove_buffers, INFO_FL_PRIO | INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_remove_buffers, type)), +}; +#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) + +static bool v4l2_is_known_ioctl(unsigned int cmd) +{ + if (_IOC_NR(cmd) >= V4L2_IOCTLS) + return false; + return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd; +} + +static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, + struct v4l2_fh *vfh, unsigned int cmd, + void *arg) +{ + if (_IOC_NR(cmd) >= V4L2_IOCTLS) + return vdev->lock; + if (vfh && vfh->m2m_ctx && + (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) { + if (vfh->m2m_ctx->q_lock) + return vfh->m2m_ctx->q_lock; + } + if (vdev->queue && vdev->queue->lock && + (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) + return vdev->queue->lock; + return vdev->lock; +} + +/* Common ioctl debug function. 
This function can be used by + external ioctl messages as well as internal V4L ioctl */ +void v4l_printk_ioctl(const char *prefix, unsigned int cmd) +{ + const char *dir, *type; + + if (prefix) + printk(KERN_DEBUG "%s: ", prefix); + + switch (_IOC_TYPE(cmd)) { + case 'd': + type = "v4l2_int"; + break; + case 'V': + if (!v4l2_is_known_ioctl(cmd)) { + type = "v4l2"; + break; + } + pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name); + return; + default: + type = "unknown"; + break; + } + + switch (_IOC_DIR(cmd)) { + case _IOC_NONE: dir = "--"; break; + case _IOC_READ: dir = "r-"; break; + case _IOC_WRITE: dir = "-w"; break; + case _IOC_READ | _IOC_WRITE: dir = "rw"; break; + default: dir = "*ERR*"; break; + } + pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)", + type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd); +} +EXPORT_SYMBOL(v4l_printk_ioctl); + +static long __video_do_ioctl(struct file *file, + unsigned int cmd, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct mutex *req_queue_lock = NULL; + struct mutex *lock; /* ioctl serialization mutex */ + const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; + bool write_only = false; + struct v4l2_ioctl_info default_info; + const struct v4l2_ioctl_info *info; + void *fh = file->private_data; + struct v4l2_fh *vfh = NULL; + int dev_debug = vfd->dev_debug; + long ret = -ENOTTY; + + if (ops == NULL) { + pr_warn("%s: has no ioctl_ops.\n", + video_device_node_name(vfd)); + return ret; + } + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) + vfh = file->private_data; + + /* + * We need to serialize streamon/off with queueing new requests. + * These ioctls may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. 
+ */ + if (v4l2_device_supports_requests(vfd->v4l2_dev) && + (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) { + req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex; + + if (mutex_lock_interruptible(req_queue_lock)) + return -ERESTARTSYS; + } + + lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg); + + if (lock && mutex_lock_interruptible(lock)) { + if (req_queue_lock) + mutex_unlock(req_queue_lock); + return -ERESTARTSYS; + } + + if (!video_is_registered(vfd)) { + ret = -ENODEV; + goto unlock; + } + + if (v4l2_is_known_ioctl(cmd)) { + info = &v4l2_ioctls[_IOC_NR(cmd)]; + + if (!is_valid_ioctl(vfd, cmd) && + !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler)) + goto done; + + if (vfh && (info->flags & INFO_FL_PRIO)) { + ret = v4l2_prio_check(vfd->prio, vfh->prio); + if (ret) + goto done; + } + } else { + default_info.ioctl = cmd; + default_info.flags = 0; + default_info.debug = v4l_print_default; + info = &default_info; + } + + write_only = _IOC_DIR(cmd) == _IOC_WRITE; + if (info != &default_info) { + ret = info->func(ops, file, fh, arg); + } else if (!ops->vidioc_default) { + ret = -ENOTTY; + } else { + ret = ops->vidioc_default(file, fh, + vfh ? 
v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0, + cmd, arg); + } + +done: + if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) { + if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) && + (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF)) + goto unlock; + + v4l_printk_ioctl(video_device_node_name(vfd), cmd); + if (ret < 0) + pr_cont(": error %ld", ret); + if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG)) + pr_cont("\n"); + else if (_IOC_DIR(cmd) == _IOC_NONE) + info->debug(arg, write_only); + else { + pr_cont(": "); + info->debug(arg, write_only); + } + } + +unlock: + if (lock) + mutex_unlock(lock); + if (req_queue_lock) + mutex_unlock(req_queue_lock); + return ret; +} + +static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, + void __user **user_ptr, void ***kernel_ptr) +{ + int ret = 0; + + switch (cmd) { + case VIDIOC_PREPARE_BUF: + case VIDIOC_QUERYBUF: + case VIDIOC_QBUF: + case VIDIOC_DQBUF: { + struct v4l2_buffer *buf = parg; + + if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) { + if (buf->length > VIDEO_MAX_PLANES) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)buf->m.planes; + *kernel_ptr = (void **)&buf->m.planes; + *array_size = sizeof(struct v4l2_plane) * buf->length; + ret = 1; + } + break; + } + + case VIDIOC_G_EDID: + case VIDIOC_S_EDID: { + struct v4l2_edid *edid = parg; + + if (edid->blocks) { + if (edid->blocks > 256) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)edid->edid; + *kernel_ptr = (void **)&edid->edid; + *array_size = edid->blocks * 128; + ret = 1; + } + break; + } + + case VIDIOC_S_EXT_CTRLS: + case VIDIOC_G_EXT_CTRLS: + case VIDIOC_TRY_EXT_CTRLS: { + struct v4l2_ext_controls *ctrls = parg; + + if (ctrls->count != 0) { + if (ctrls->count > V4L2_CID_MAX_CTRLS) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)ctrls->controls; + *kernel_ptr = (void **)&ctrls->controls; + *array_size = sizeof(struct v4l2_ext_control) + * ctrls->count; + ret = 1; + } + break; + } + + 
case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *routing = parg; + + if (routing->len_routes > 256) + return -E2BIG; + + *user_ptr = u64_to_user_ptr(routing->routes); + *kernel_ptr = (void **)&routing->routes; + *array_size = sizeof(struct v4l2_subdev_route) + * routing->len_routes; + ret = 1; + break; + } + } + + return ret; +} + +unsigned int v4l2_translate_cmd(unsigned int cmd) +{ +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_DQEVENT_TIME32: + return VIDIOC_DQEVENT; + case VIDIOC_QUERYBUF_TIME32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF_TIME32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF_TIME32: + return VIDIOC_DQBUF; + case VIDIOC_PREPARE_BUF_TIME32: + return VIDIOC_PREPARE_BUF; + } +#endif + if (in_compat_syscall()) + return v4l2_compat_translate_cmd(cmd); + + return cmd; +} +EXPORT_SYMBOL_GPL(v4l2_translate_cmd); + +static int video_get_user(void __user *arg, void *parg, + unsigned int real_cmd, unsigned int cmd, + bool *always_copy) +{ + unsigned int n = _IOC_SIZE(real_cmd); + int err = 0; + + if (!(_IOC_DIR(cmd) & _IOC_WRITE)) { + /* read-only ioctl */ + memset(parg, 0, n); + return 0; + } + + /* + * In some cases, only a few fields are used as input, + * i.e. when the app sets "index" and then the driver + * fills in the rest of the structure for the thing + * with that index. We only need to copy up the first + * non-input field. 
+ */ + if (v4l2_is_known_ioctl(real_cmd)) { + u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags; + + if (flags & INFO_FL_CLEAR_MASK) + n = (flags & INFO_FL_CLEAR_MASK) >> 16; + *always_copy = flags & INFO_FL_ALWAYS_COPY; + } + + if (cmd == real_cmd) { + if (copy_from_user(parg, (void __user *)arg, n)) + err = -EFAULT; + } else if (in_compat_syscall()) { + memset(parg, 0, n); + err = v4l2_compat_get_user(arg, parg, cmd); + } else { + memset(parg, 0, n); +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_QUERYBUF_TIME32: + case VIDIOC_QBUF_TIME32: + case VIDIOC_DQBUF_TIME32: + case VIDIOC_PREPARE_BUF_TIME32: { + struct v4l2_buffer_time32 vb32; + struct v4l2_buffer *vb = parg; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.userptr = vb32.m.userptr, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + break; + } + } +#endif + } + + /* zero out anything we don't copy from userspace */ + if (!err && n < _IOC_SIZE(real_cmd)) + memset((u8 *)parg + n, 0, _IOC_SIZE(real_cmd) - n); + return err; +} + +static int video_put_user(void __user *arg, void *parg, + unsigned int real_cmd, unsigned int cmd) +{ + if (!(_IOC_DIR(cmd) & _IOC_READ)) + return 0; + + if (cmd == real_cmd) { + /* Copy results into user buffer */ + if (copy_to_user(arg, parg, _IOC_SIZE(cmd))) + return -EFAULT; + return 0; + } + + if (in_compat_syscall()) + return v4l2_compat_put_user(arg, parg, cmd); + +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_DQEVENT_TIME32: { + struct v4l2_event *ev = parg; + struct v4l2_event_time32 ev32; + + memset(&ev32, 0, 
sizeof(ev32)); + + ev32.type = ev->type; + ev32.pending = ev->pending; + ev32.sequence = ev->sequence; + ev32.timestamp.tv_sec = ev->timestamp.tv_sec; + ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec; + ev32.id = ev->id; + + memcpy(&ev32.u, &ev->u, sizeof(ev->u)); + memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved)); + + if (copy_to_user(arg, &ev32, sizeof(ev32))) + return -EFAULT; + break; + } + case VIDIOC_QUERYBUF_TIME32: + case VIDIOC_QBUF_TIME32: + case VIDIOC_DQBUF_TIME32: + case VIDIOC_PREPARE_BUF_TIME32: { + struct v4l2_buffer *vb = parg; + struct v4l2_buffer_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + + vb32.index = vb->index; + vb32.type = vb->type; + vb32.bytesused = vb->bytesused; + vb32.flags = vb->flags; + vb32.field = vb->field; + vb32.timestamp.tv_sec = vb->timestamp.tv_sec; + vb32.timestamp.tv_usec = vb->timestamp.tv_usec; + vb32.timecode = vb->timecode; + vb32.sequence = vb->sequence; + vb32.memory = vb->memory; + vb32.m.userptr = vb->m.userptr; + vb32.length = vb->length; + vb32.request_fd = vb->request_fd; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + break; + } + } +#endif + + return 0; +} + +long +video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, + v4l2_kioctl func) +{ + char sbuf[128]; + void *mbuf = NULL, *array_buf = NULL; + void *parg = (void *)arg; + long err = -EINVAL; + bool has_array_args; + bool always_copy = false; + size_t array_size = 0; + void __user *user_ptr = NULL; + void **kernel_ptr = NULL; + unsigned int cmd = v4l2_translate_cmd(orig_cmd); + const size_t ioc_size = _IOC_SIZE(cmd); + + /* Copy arguments into temp kernel buffer */ + if (_IOC_DIR(cmd) != _IOC_NONE) { + if (ioc_size <= sizeof(sbuf)) { + parg = sbuf; + } else { + /* too big to allocate from stack */ + mbuf = kmalloc(ioc_size, GFP_KERNEL); + if (NULL == mbuf) + return -ENOMEM; + parg = mbuf; + } + + err = video_get_user((void __user *)arg, parg, cmd, + orig_cmd, &always_copy); + if (err) + goto out; + 
} + + err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr); + if (err < 0) + goto out; + has_array_args = err; + + if (has_array_args) { + array_buf = kvmalloc(array_size, GFP_KERNEL); + err = -ENOMEM; + if (array_buf == NULL) + goto out; + if (in_compat_syscall()) + err = v4l2_compat_get_array_args(file, array_buf, + user_ptr, array_size, + orig_cmd, parg); + else + err = copy_from_user(array_buf, user_ptr, array_size) ? + -EFAULT : 0; + if (err) + goto out; + *kernel_ptr = array_buf; + } + + /* Handles IOCTL */ + err = func(file, cmd, parg); + if (err == -ENOTTY || err == -ENOIOCTLCMD) { + err = -ENOTTY; + goto out; + } + + if (err == 0) { + if (cmd == VIDIOC_DQBUF) + trace_v4l2_dqbuf(video_devdata(file)->minor, parg); + else if (cmd == VIDIOC_QBUF) + trace_v4l2_qbuf(video_devdata(file)->minor, parg); + } + + /* + * Some ioctls can return an error, but still have valid + * results that must be returned. + * + * FIXME: subdev IOCTLS are partially handled here and partially in + * v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS + * defined here as part of the 'v4l2_ioctls' array. As + * VIDIOC_SUBDEV_[GS]_ROUTING needs to return results to applications + * even in case of failure, but it is not defined here as part of the + * 'v4l2_ioctls' array, insert an ad-hoc check to address that. 
+ */ + if (cmd == VIDIOC_SUBDEV_G_ROUTING || cmd == VIDIOC_SUBDEV_S_ROUTING) + always_copy = true; + + if (err < 0 && !always_copy) + goto out; + + if (has_array_args) { + *kernel_ptr = (void __force *)user_ptr; + if (in_compat_syscall()) { + int put_err; + + put_err = v4l2_compat_put_array_args(file, user_ptr, + array_buf, + array_size, + orig_cmd, parg); + if (put_err) + err = put_err; + } else if (copy_to_user(user_ptr, array_buf, array_size)) { + err = -EFAULT; + } + } + + if (video_put_user((void __user *)arg, parg, cmd, orig_cmd)) + err = -EFAULT; +out: + kvfree(array_buf); + kfree(mbuf); + return err; +} + +long video_ioctl2(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return video_usercopy(file, cmd, arg, __video_do_ioctl); +} +EXPORT_SYMBOL(video_ioctl2); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-jpeg.c b/6.17.0/drivers/media/v4l2-core/v4l2-jpeg.c new file mode 100644 index 0000000..36a0f1a --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-jpeg.c @@ -0,0 +1,713 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 JPEG header parser helpers. 
+ * + * Copyright (C) 2019 Pengutronix, Philipp Zabel + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1] + * + * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf + */ + +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("V4L2 JPEG header parser helpers"); +MODULE_AUTHOR("Philipp Zabel "); +MODULE_LICENSE("GPL"); + +/* Table B.1 - Marker code assignments */ +#define SOF0 0xffc0 /* start of frame */ +#define SOF1 0xffc1 +#define SOF2 0xffc2 +#define SOF3 0xffc3 +#define SOF5 0xffc5 +#define SOF7 0xffc7 +#define JPG 0xffc8 /* extensions */ +#define SOF9 0xffc9 +#define SOF11 0xffcb +#define SOF13 0xffcd +#define SOF15 0xffcf +#define DHT 0xffc4 /* huffman table */ +#define DAC 0xffcc /* arithmetic coding conditioning */ +#define RST0 0xffd0 /* restart */ +#define RST7 0xffd7 +#define SOI 0xffd8 /* start of image */ +#define EOI 0xffd9 /* end of image */ +#define SOS 0xffda /* start of stream */ +#define DQT 0xffdb /* quantization table */ +#define DNL 0xffdc /* number of lines */ +#define DRI 0xffdd /* restart interval */ +#define DHP 0xffde /* hierarchical progression */ +#define EXP 0xffdf /* expand reference */ +#define APP0 0xffe0 /* application data */ +#define APP14 0xffee /* application data for colour encoding */ +#define APP15 0xffef +#define JPG0 0xfff0 /* extensions */ +#define JPG13 0xfffd +#define COM 0xfffe /* comment */ +#define TEM 0xff01 /* temporary */ + +/* Luma and chroma qp tables to achieve 50% compression quality + * This is as per example in Annex K.1 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_qt); + +const u8 
v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_qt); + +/* Zigzag scan pattern indexes */ +const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_zigzag_scan_index); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the luminance DC + * coefficient differences. The table represents Table K.3 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = { + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_dc_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the luminance AC + * coefficients. 
The table represents Table K.5 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = { + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, + 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, + 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, + 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, + 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, + 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, + 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, + 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, + 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, + 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_ac_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an interchange format JPEG + * stream or an abbreviated format table specification data stream. + * Specifies the huffman table used for encoding the chrominance DC coefficient differences. 
+ * The table represents Table K.4 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = { + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_dc_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the chrominance + * AC coefficients. The table represents Table K.6 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = { + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, + 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, + 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, + 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, + 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, + 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, + 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, + 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, + 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, + 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, + 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_ac_ht); + +/** + * struct jpeg_stream - JPEG byte stream + * @curr: current position in stream + * @end: end position, 
after last byte + */ +struct jpeg_stream { + u8 *curr; + u8 *end; +}; + +/* returns a value that fits into u8, or negative error */ +static int jpeg_get_byte(struct jpeg_stream *stream) +{ + if (stream->curr >= stream->end) + return -EINVAL; + + return *stream->curr++; +} + +/* returns a value that fits into u16, or negative error */ +static int jpeg_get_word_be(struct jpeg_stream *stream) +{ + u16 word; + + if (stream->curr + sizeof(__be16) > stream->end) + return -EINVAL; + + word = get_unaligned_be16(stream->curr); + stream->curr += sizeof(__be16); + + return word; +} + +static int jpeg_skip(struct jpeg_stream *stream, size_t len) +{ + if (stream->curr + len > stream->end) + return -EINVAL; + + stream->curr += len; + + return 0; +} + +static int jpeg_next_marker(struct jpeg_stream *stream) +{ + int byte; + u16 marker = 0; + + while ((byte = jpeg_get_byte(stream)) >= 0) { + marker = (marker << 8) | byte; + /* skip stuffing bytes and REServed markers */ + if (marker == TEM || (marker > 0xffbf && marker < 0xffff)) + return marker; + } + + return byte; +} + +/* this does not advance the current position in the stream */ +static int jpeg_reference_segment(struct jpeg_stream *stream, + struct v4l2_jpeg_reference *segment) +{ + u16 len; + + if (stream->curr + sizeof(__be16) > stream->end) + return -EINVAL; + + len = get_unaligned_be16(stream->curr); + if (stream->curr + len > stream->end) + return -EINVAL; + + segment->start = stream->curr; + segment->length = len; + + return 0; +} + +static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v) +{ + if (nf == 1) + return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; + + /* no chroma subsampling for 4-component images */ + if (nf == 4 && h_v != 0x11) + return -EINVAL; + + switch (h_v) { + case 0x11: + return V4L2_JPEG_CHROMA_SUBSAMPLING_444; + case 0x21: + return V4L2_JPEG_CHROMA_SUBSAMPLING_422; + case 0x22: + return V4L2_JPEG_CHROMA_SUBSAMPLING_420; + case 0x41: + return V4L2_JPEG_CHROMA_SUBSAMPLING_411; + default: + return -EINVAL; + 
} +} + +static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker, + struct v4l2_jpeg_frame_header *frame_header) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Lf = 8 + 3 * Nf, Nf >= 1 */ + if (len < 8 + 3) + return -EINVAL; + + if (frame_header) { + /* Table B.2 - Frame header parameter sizes and values */ + int p, y, x, nf; + int i; + + p = jpeg_get_byte(stream); + if (p < 0) + return p; + /* + * Baseline DCT only supports 8-bit precision. + * Extended sequential DCT also supports 12-bit precision. + */ + if (p != 8 && (p != 12 || sof_marker != SOF1)) + return -EINVAL; + + y = jpeg_get_word_be(stream); + if (y < 0) + return y; + if (y == 0) + return -EINVAL; + + x = jpeg_get_word_be(stream); + if (x < 0) + return x; + if (x == 0) + return -EINVAL; + + nf = jpeg_get_byte(stream); + if (nf < 0) + return nf; + /* + * The spec allows 1 <= Nf <= 255, but we only support up to 4 + * components. + */ + if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS) + return -EINVAL; + if (len != 8 + 3 * nf) + return -EINVAL; + + frame_header->precision = p; + frame_header->height = y; + frame_header->width = x; + frame_header->num_components = nf; + + for (i = 0; i < nf; i++) { + struct v4l2_jpeg_frame_component_spec *component; + int c, h_v, tq; + + c = jpeg_get_byte(stream); + if (c < 0) + return c; + + h_v = jpeg_get_byte(stream); + if (h_v < 0) + return h_v; + if (i == 0) { + int subs; + + subs = v4l2_jpeg_decode_subsampling(nf, h_v); + if (subs < 0) + return subs; + frame_header->subsampling = subs; + } else if (h_v != 0x11) { + /* all chroma sampling factors must be 1 */ + return -EINVAL; + } + + tq = jpeg_get_byte(stream); + if (tq < 0) + return tq; + + component = &frame_header->component[i]; + component->component_identifier = c; + component->horizontal_sampling_factor = + (h_v >> 4) & 0xf; + component->vertical_sampling_factor = h_v & 0xf; + component->quantization_table_selector = tq; + } + } else { + return jpeg_skip(stream, 
len - 2); + } + + return 0; +} + +static int jpeg_parse_scan_header(struct jpeg_stream *stream, + struct v4l2_jpeg_scan_header *scan_header) +{ + size_t skip; + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Ls = 8 + 3 * Ns, Ns >= 1 */ + if (len < 6 + 2) + return -EINVAL; + + if (scan_header) { + int ns; + int i; + + ns = jpeg_get_byte(stream); + if (ns < 0) + return ns; + if (ns < 1 || ns > 4 || len != 6 + 2 * ns) + return -EINVAL; + + scan_header->num_components = ns; + + for (i = 0; i < ns; i++) { + struct v4l2_jpeg_scan_component_spec *component; + int cs, td_ta; + + cs = jpeg_get_byte(stream); + if (cs < 0) + return cs; + + td_ta = jpeg_get_byte(stream); + if (td_ta < 0) + return td_ta; + + component = &scan_header->component[i]; + component->component_selector = cs; + component->dc_entropy_coding_table_selector = + (td_ta >> 4) & 0xf; + component->ac_entropy_coding_table_selector = + td_ta & 0xf; + } + + skip = 3; /* skip Ss, Se, Ah, and Al */ + } else { + skip = len - 2; + } + + return jpeg_skip(stream, skip); +} + +/* B.2.4.1 Quantization table-specification syntax */ +static int jpeg_parse_quantization_tables(struct jpeg_stream *stream, + u8 precision, + struct v4l2_jpeg_reference *tables) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */ + if (len < 2 + 65) + return -EINVAL; + + len -= 2; + while (len >= 65) { + u8 pq, tq, *qk; + int ret; + int pq_tq = jpeg_get_byte(stream); + + if (pq_tq < 0) + return pq_tq; + + /* quantization table element precision */ + pq = (pq_tq >> 4) & 0xf; + /* + * Only 8-bit Qk values for 8-bit sample precision. Extended + * sequential DCT with 12-bit sample precision also supports + * 16-bit Qk values. 
+ */ + if (pq != 0 && (pq != 1 || precision != 12)) + return -EINVAL; + + /* quantization table destination identifier */ + tq = pq_tq & 0xf; + if (tq > 3) + return -EINVAL; + + /* quantization table element */ + qk = stream->curr; + ret = jpeg_skip(stream, pq ? 128 : 64); + if (ret < 0) + return -EINVAL; + + if (tables) { + tables[tq].start = qk; + tables[tq].length = pq ? 128 : 64; + } + + len -= pq ? 129 : 65; + } + + return 0; +} + +/* B.2.4.2 Huffman table-specification syntax */ +static int jpeg_parse_huffman_tables(struct jpeg_stream *stream, + struct v4l2_jpeg_reference *tables) +{ + int mt; + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Table B.5 - Huffman table specification parameter sizes and values */ + if (len < 2 + 17) + return -EINVAL; + + for (len -= 2; len >= 17; len -= 17 + mt) { + u8 tc, th, *table; + int tc_th = jpeg_get_byte(stream); + int i, ret; + + if (tc_th < 0) + return tc_th; + + /* table class - 0 = DC, 1 = AC */ + tc = (tc_th >> 4) & 0xf; + if (tc > 1) + return -EINVAL; + + /* huffman table destination identifier */ + th = tc_th & 0xf; + /* only two Huffman tables for baseline DCT */ + if (th > 1) + return -EINVAL; + + /* BITS - number of Huffman codes with length i */ + table = stream->curr; + mt = 0; + for (i = 0; i < 16; i++) { + int li; + + li = jpeg_get_byte(stream); + if (li < 0) + return li; + + mt += li; + } + /* HUFFVAL - values associated with each Huffman code */ + ret = jpeg_skip(stream, mt); + if (ret < 0) + return ret; + + if (tables) { + tables[(tc << 1) | th].start = table; + tables[(tc << 1) | th].length = stream->curr - table; + } + } + + return jpeg_skip(stream, len - 2); +} + +/* B.2.4.4 Restart interval definition syntax */ +static int jpeg_parse_restart_interval(struct jpeg_stream *stream, + u16 *restart_interval) +{ + int len = jpeg_get_word_be(stream); + int ri; + + if (len < 0) + return len; + if (len != 4) + return -EINVAL; + + ri = jpeg_get_word_be(stream); + if (ri < 0) + return 
ri; + + *restart_interval = ri; + + return 0; +} + +static int jpeg_skip_segment(struct jpeg_stream *stream) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + if (len < 2) + return -EINVAL; + + return jpeg_skip(stream, len - 2); +} + +/* Rec. ITU-T T.872 (06/2012) 6.5.3 */ +static int jpeg_parse_app14_data(struct jpeg_stream *stream, + enum v4l2_jpeg_app14_tf *tf) +{ + int ret; + int lp; + int skip; + + lp = jpeg_get_word_be(stream); + if (lp < 0) + return lp; + + /* Check for "Adobe\0" in Ap1..6 */ + if (stream->curr + 6 > stream->end || + strncmp(stream->curr, "Adobe\0", 6)) + return jpeg_skip(stream, lp - 2); + + /* get to Ap12 */ + ret = jpeg_skip(stream, 11); + if (ret < 0) + return ret; + + ret = jpeg_get_byte(stream); + if (ret < 0) + return ret; + + *tf = ret; + + /* skip the rest of the segment, this ensures at least it is complete */ + skip = lp - 2 - 11 - 1; + return jpeg_skip(stream, skip); +} + +/** + * v4l2_jpeg_parse_header - locate marker segments and optionally parse headers + * @buf: address of the JPEG buffer, should start with a SOI marker + * @len: length of the JPEG buffer + * @out: returns marker segment positions and optionally parsed headers + * + * The out->scan_header pointer must be initialized to NULL or point to a valid + * v4l2_jpeg_scan_header structure. The out->huffman_tables and + * out->quantization_tables pointers must be initialized to NULL or point to a + * valid array of 4 v4l2_jpeg_reference structures each. + * + * Returns 0 or negative error if parsing failed. 
+ */ +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out) +{ + struct jpeg_stream stream; + int marker; + int ret = 0; + + stream.curr = buf; + stream.end = stream.curr + len; + + out->num_dht = 0; + out->num_dqt = 0; + + /* the first bytes must be SOI, B.2.1 High-level syntax */ + if (jpeg_get_word_be(&stream) != SOI) + return -EINVAL; + + /* init value to signal if this marker is not present */ + out->app14_tf = V4L2_JPEG_APP14_TF_UNKNOWN; + + /* loop through marker segments */ + while ((marker = jpeg_next_marker(&stream)) >= 0) { + switch (marker) { + /* baseline DCT, extended sequential DCT */ + case SOF0 ... SOF1: + ret = jpeg_reference_segment(&stream, &out->sof); + if (ret < 0) + return ret; + ret = jpeg_parse_frame_header(&stream, marker, + &out->frame); + break; + /* progressive, lossless */ + case SOF2 ... SOF3: + /* differential coding */ + case SOF5 ... SOF7: + /* arithmetic coding */ + case SOF9 ... SOF11: + case SOF13 ... SOF15: + case DAC: + case TEM: + return -EINVAL; + + case DHT: + ret = jpeg_reference_segment(&stream, + &out->dht[out->num_dht++ % 4]); + if (ret < 0) + return ret; + if (!out->huffman_tables) { + ret = jpeg_skip_segment(&stream); + break; + } + ret = jpeg_parse_huffman_tables(&stream, + out->huffman_tables); + break; + case DQT: + ret = jpeg_reference_segment(&stream, + &out->dqt[out->num_dqt++ % 4]); + if (ret < 0) + return ret; + if (!out->quantization_tables) { + ret = jpeg_skip_segment(&stream); + break; + } + ret = jpeg_parse_quantization_tables(&stream, + out->frame.precision, + out->quantization_tables); + break; + case DRI: + ret = jpeg_parse_restart_interval(&stream, + &out->restart_interval); + break; + case APP14: + ret = jpeg_parse_app14_data(&stream, + &out->app14_tf); + break; + case SOS: + ret = jpeg_reference_segment(&stream, &out->sos); + if (ret < 0) + return ret; + ret = jpeg_parse_scan_header(&stream, out->scan); + /* + * stop parsing, the scan header marks the beginning of + * the 
entropy coded segment + */ + out->ecs_offset = stream.curr - (u8 *)buf; + return ret; + + /* markers without parameters */ + case RST0 ... RST7: /* restart */ + case SOI: /* start of image */ + case EOI: /* end of image */ + break; + + /* skip unknown or unsupported marker segments */ + default: + ret = jpeg_skip_segment(&stream); + break; + } + if (ret < 0) + return ret; + } + + return marker; +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-mc.c b/6.17.0/drivers/media/v4l2-core/v4l2-mc.c new file mode 100644 index 0000000..937d358 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-mc.c @@ -0,0 +1,614 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * Media Controller ancillary functions + * + * Copyright (c) 2016 Mauro Carvalho Chehab + * Copyright (C) 2016 Shuah Khan + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int v4l2_mc_create_media_graph(struct media_device *mdev) + +{ + struct media_entity *entity; + struct media_entity *if_vid = NULL, *if_aud = NULL; + struct media_entity *tuner = NULL, *decoder = NULL; + struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; + bool is_webcam = false; + u32 flags; + int ret, pad_sink, pad_source; + + if (!mdev) + return 0; + + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_IF_VID_DECODER: + if_vid = entity; + break; + case MEDIA_ENT_F_IF_AUD_DECODER: + if_aud = entity; + break; + case MEDIA_ENT_F_TUNER: + tuner = entity; + break; + case MEDIA_ENT_F_ATV_DECODER: + decoder = entity; + break; + case MEDIA_ENT_F_IO_V4L: + io_v4l = entity; + break; + case MEDIA_ENT_F_IO_VBI: + io_vbi = entity; + break; + case MEDIA_ENT_F_IO_SWRADIO: + io_swradio = entity; + break; + case MEDIA_ENT_F_CAM_SENSOR: + is_webcam = true; + break; + } + } + + /* It should have at least one 
I/O entity */ + if (!io_v4l && !io_vbi && !io_swradio) { + dev_warn(mdev->dev, "Didn't find any I/O entity\n"); + return -EINVAL; + } + + /* + * Here, webcams are modelled on a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW are hidden. While such mapping is not enough + * for mc-centric hardware, it is enough for v4l2 interface centric + * PC-consumer's hardware. + */ + if (is_webcam) { + if (!io_v4l) { + dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n"); + return -EINVAL; + } + + media_device_for_each_entity(entity, mdev) { + if (entity->function != MEDIA_ENT_F_CAM_SENSOR) + continue; + ret = media_create_pad_link(entity, 0, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "Failed to create a sensor link\n"); + return ret; + } + } + if (!decoder) + return 0; + } + + /* The device isn't a webcam. So, it should have a decoder */ + if (!decoder) { + dev_warn(mdev->dev, "Decoder not found\n"); + return -EINVAL; + } + + /* Link the tuner and IF video output pads */ + if (tuner) { + if (if_vid) { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(if_vid, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + if_vid, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "Couldn't create tuner->PLL link)\n"); + return ret; + } + + pad_source = media_get_pad_index(if_vid, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "get decoder and/or PLL pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(if_vid, 
pad_source, + decoder, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link PLL to decoder\n"); + return ret; + } + } else { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + decoder, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + if (if_aud) { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_AUDIO); + pad_sink = media_get_pad_index(if_aud, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_AUDIO); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + if_aud, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n"); + return ret; + } + } else { + if_aud = tuner; + } + + } + + /* Create demod to V4L, VBI and SDR radio links */ + if (io_v4l) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n"); + return ret; + } + } + + if (io_swradio) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_swradio, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + 
dev_warn(mdev->dev, "couldn't link decoder output to SDR\n"); + return ret; + } + } + + if (io_vbi) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_vbi, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to VBI\n"); + return ret; + } + } + + /* Create links for the media connectors */ + flags = MEDIA_LNK_FL_ENABLED; + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_CONN_RF: + if (!tuner) + continue; + pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n"); + return -EINVAL; + } + ret = media_create_pad_link(entity, 0, tuner, + pad_sink, + flags); + break; + case MEDIA_ENT_F_CONN_SVIDEO: + case MEDIA_ENT_F_CONN_COMPOSITE: + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n"); + return -EINVAL; + } + ret = media_create_pad_link(entity, 0, decoder, + pad_sink, + flags); + break; + default: + continue; + } + if (ret) + return ret; + + flags = 0; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph); + +int v4l_enable_media_source(struct video_device *vdev) +{ + struct media_device *mdev = vdev->entity.graph_obj.mdev; + int ret = 0, err; + + if (!mdev) + return 0; + + mutex_lock(&mdev->graph_mutex); + if (!mdev->enable_source) + goto end; + err = mdev->enable_source(&vdev->entity, &vdev->pipe); + if (err) + ret = -EBUSY; +end: + mutex_unlock(&mdev->graph_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(v4l_enable_media_source); + +void v4l_disable_media_source(struct video_device *vdev) +{ + struct media_device *mdev = vdev->entity.graph_obj.mdev; + + 
if (mdev) { + mutex_lock(&mdev->graph_mutex); + if (mdev->disable_source) + mdev->disable_source(&vdev->entity); + mutex_unlock(&mdev->graph_mutex); + } +} +EXPORT_SYMBOL_GPL(v4l_disable_media_source); + +int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + struct v4l2_fh *fh = q->owner; + + if (fh && fh->vdev) + return v4l_enable_media_source(fh->vdev); + return 0; +} +EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source); + +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink, u32 flags) +{ + struct fwnode_handle *endpoint; + + if (!(sink->flags & MEDIA_PAD_FL_SINK)) + return -EINVAL; + + fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) { + struct fwnode_handle *remote_ep; + int src_idx, sink_idx, ret; + struct media_pad *src; + + src_idx = media_entity_get_fwnode_pad(&src_sd->entity, + endpoint, + MEDIA_PAD_FL_SOURCE); + if (src_idx < 0) { + dev_dbg(src_sd->dev, "no source pad found for %pfw\n", + endpoint); + continue; + } + + remote_ep = fwnode_graph_get_remote_endpoint(endpoint); + if (!remote_ep) { + dev_dbg(src_sd->dev, "no remote ep found for %pfw\n", + endpoint); + continue; + } + + /* + * ask the sink to verify it owns the remote endpoint, + * and translate to a sink pad. + */ + sink_idx = media_entity_get_fwnode_pad(sink->entity, + remote_ep, + MEDIA_PAD_FL_SINK); + fwnode_handle_put(remote_ep); + + if (sink_idx < 0 || sink_idx != sink->index) { + dev_dbg(src_sd->dev, + "sink pad index mismatch or error (is %d, expected %u)\n", + sink_idx, sink->index); + continue; + } + + /* + * the source endpoint corresponds to one of its source pads, + * the source endpoint connects to an endpoint at the sink + * entity, and the sink endpoint corresponds to the sink + * pad requested, so we have found an endpoint connection + * that works, create the media link for it. 
+ */ + + src = &src_sd->entity.pads[src_idx]; + + /* skip if link already exists */ + if (media_entity_find_link(src, sink)) { + dev_dbg(src_sd->dev, + "link %s:%d -> %s:%d already exists\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx); + continue; + } + + dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx); + + ret = media_create_pad_link(&src_sd->entity, src_idx, + sink->entity, sink_idx, flags); + if (ret) { + dev_err(src_sd->dev, + "link %s:%d -> %s:%d failed with %d\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx, ret); + + fwnode_handle_put(endpoint); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad); + +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd) +{ + unsigned int i; + + for (i = 0; i < sink_sd->entity.num_pads; i++) { + struct media_pad *pad = &sink_sd->entity.pads[i]; + int ret; + + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + continue; + + ret = v4l2_create_fwnode_links_to_pad(src_sd, pad, 0); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links); + +/* ----------------------------------------------------------------------------- + * Pipeline power management + * + * Entities must be powered up when part of a pipeline that contains at least + * one open video device node. + * + * To achieve this use the entity use_count field to track the number of users. + * For entities corresponding to video device nodes the use_count field stores + * the users count of the node. For entities corresponding to subdevs the + * use_count field stores the total number of users of all video device nodes + * in the pipeline. + * + * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and + * close() handlers of video device nodes. It increments or decrements the use + * count of all subdev entities in the pipeline. 
+ * + * To react to link management on powered pipelines, the link setup notification + * callback updates the use count of all entities in the source and sink sides + * of the link. + */ + +/* + * pipeline_pm_use_count - Count the number of users of a pipeline + * @entity: The entity + * + * Return the total number of users of all video device nodes in the pipeline. + */ +static int pipeline_pm_use_count(struct media_entity *entity, + struct media_graph *graph) +{ + int use = 0; + + media_graph_walk_start(graph, entity); + + while ((entity = media_graph_walk_next(graph))) { + if (is_media_entity_v4l2_video_device(entity)) + use += entity->use_count; + } + + return use; +} + +/* + * pipeline_pm_power_one - Apply power change to an entity + * @entity: The entity + * @change: Use count change + * + * Change the entity use count by @change. If the entity is a subdev update its + * power state by calling the core::s_power operation when the use count goes + * from 0 to != 0 or from != 0 to 0. + * + * Return 0 on success or a negative error code on failure. + */ +static int pipeline_pm_power_one(struct media_entity *entity, int change) +{ + struct v4l2_subdev *subdev; + int ret; + + subdev = is_media_entity_v4l2_subdev(entity) + ? media_entity_to_v4l2_subdev(entity) : NULL; + + if (entity->use_count == 0 && change > 0 && subdev != NULL) { + ret = v4l2_subdev_call(subdev, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + } + + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + if (entity->use_count == 0 && change < 0 && subdev != NULL) + v4l2_subdev_call(subdev, core, s_power, 0); + + return 0; +} + +/* + * pipeline_pm_power - Apply power change to all entities in a pipeline + * @entity: The entity + * @change: Use count change + * + * Walk the pipeline to update the use count and the power state of all non-node + * entities. + * + * Return 0 on success or a negative error code on failure. 
+ */ +static int pipeline_pm_power(struct media_entity *entity, int change, + struct media_graph *graph) +{ + struct media_entity *first = entity; + int ret = 0; + + if (!change) + return 0; + + media_graph_walk_start(graph, entity); + + while (!ret && (entity = media_graph_walk_next(graph))) + if (is_media_entity_v4l2_subdev(entity)) + ret = pipeline_pm_power_one(entity, change); + + if (!ret) + return ret; + + media_graph_walk_start(graph, first); + + while ((first = media_graph_walk_next(graph)) + && first != entity) + if (is_media_entity_v4l2_subdev(first)) + pipeline_pm_power_one(first, -change); + + return ret; +} + +static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use) +{ + struct media_device *mdev = entity->graph_obj.mdev; + int change = use ? 1 : -1; + int ret; + + mutex_lock(&mdev->graph_mutex); + + /* Apply use count to node. */ + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + /* Apply power change to connected non-nodes. */ + ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk); + if (ret < 0) + entity->use_count -= change; + + mutex_unlock(&mdev->graph_mutex); + + return ret; +} + +int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return v4l2_pipeline_pm_use(entity, 1); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get); + +void v4l2_pipeline_pm_put(struct media_entity *entity) +{ + /* Powering off entities shouldn't fail. 
*/ + WARN_ON(v4l2_pipeline_pm_use(entity, 0)); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put); + +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk; + struct media_entity *source = link->source->entity; + struct media_entity *sink = link->sink->entity; + int source_use; + int sink_use; + int ret = 0; + + source_use = pipeline_pm_use_count(source, graph); + sink_use = pipeline_pm_use_count(sink, graph); + + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && + !(flags & MEDIA_LNK_FL_ENABLED)) { + /* Powering off entities is assumed to never fail. */ + pipeline_pm_power(source, -sink_use, graph); + pipeline_pm_power(sink, -source_use, graph); + return 0; + } + + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && + (flags & MEDIA_LNK_FL_ENABLED)) { + + ret = pipeline_pm_power(source, sink_use, graph); + if (ret < 0) + return ret; + + ret = pipeline_pm_power(sink, source_use, graph); + if (ret < 0) + pipeline_pm_power(source, -sink_use, graph); + } + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-mem2mem.c b/6.17.0/drivers/media/v4l2-core/v4l2-mem2mem.c new file mode 100644 index 0000000..eb22d61 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -0,0 +1,1643 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Memory-to-memory device framework for Video for Linux 2 and vb2. + * + * Helper functions for devices that use vb2 buffers for both their + * source and destination. + * + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 
+ * Pawel Osciak, + * Marek Szyprowski, + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("Mem to mem device framework for vb2"); +MODULE_AUTHOR("Pawel Osciak, "); +MODULE_LICENSE("GPL"); + +static bool debug; +module_param(debug, bool, 0644); + +#define dprintk(fmt, arg...) \ + do { \ + if (debug) \ + printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ + } while (0) + + +/* Instance is already queued on the job_queue */ +#define TRANS_QUEUED (1 << 0) +/* Instance is currently running in hardware */ +#define TRANS_RUNNING (1 << 1) +/* Instance is currently aborting */ +#define TRANS_ABORT (1 << 2) + + +/* The job queue is not running new jobs */ +#define QUEUE_PAUSED (1 << 0) + + +/* Offset base for buffers on the destination queue - used to distinguish + * between source and destination buffers when mmapping - they receive the same + * offsets but for different queues */ +#define DST_QUEUE_OFF_BASE (1 << 30) + +enum v4l2_m2m_entity_type { + MEM2MEM_ENT_TYPE_SOURCE, + MEM2MEM_ENT_TYPE_SINK, + MEM2MEM_ENT_TYPE_PROC +}; + +static const char * const m2m_entity_name[] = { + "source", + "sink", + "proc" +}; + +/** + * struct v4l2_m2m_dev - per-device context + * @source: &struct media_entity pointer with the source entity + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @source_pad: &struct media_pad with the source pad. + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @sink: &struct media_entity pointer with the sink entity + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @sink_pad: &struct media_pad with the sink pad. + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @proc: &struct media_entity pointer with the M2M device itself. + * @proc_pads: &struct media_pad with the @proc pads. 
+ * Used only when the M2M device is registered via + * v4l2_m2m_unregister_media_controller(). + * @intf_devnode: &struct media_intf devnode pointer with the interface + * with controls the M2M device. + * @curr_ctx: currently running instance + * @job_queue: instances queued to run + * @job_spinlock: protects job_queue + * @job_work: worker to run queued jobs. + * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED. + * @m2m_ops: driver callbacks + */ +struct v4l2_m2m_dev { + struct v4l2_m2m_ctx *curr_ctx; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_entity *source; + struct media_pad source_pad; + struct media_entity sink; + struct media_pad sink_pad; + struct media_entity proc; + struct media_pad proc_pads[2]; + struct media_intf_devnode *intf_devnode; +#endif + + struct list_head job_queue; + spinlock_t job_spinlock; + struct work_struct job_work; + unsigned long job_queue_flags; + + const struct v4l2_m2m_ops *m2m_ops; +}; + +static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + if (V4L2_TYPE_IS_OUTPUT(type)) + return &m2m_ctx->out_q_ctx; + else + return &m2m_ctx->cap_q_ctx; +} + +struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct v4l2_m2m_queue_ctx *q_ctx; + + q_ctx = get_queue_ctx(m2m_ctx, type); + if (!q_ctx) + return NULL; + + return &q_ctx->q; +} +EXPORT_SYMBOL(v4l2_m2m_get_vq); + +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + + b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); + +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) +{ 
+ struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + + b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); + +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); + +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + b = container_of(vbuf, struct v4l2_m2m_buffer, vb); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf); + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx) + +{ + struct v4l2_m2m_buffer *b, *tmp; + struct vb2_v4l2_buffer *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { + if (b->vb.vb2_buf.index == idx) { + list_del(&b->list); + q_ctx->num_rdy--; + ret = &b->vb; + break; + } + } + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx); + +/* + * Scheduling handlers + */ + +void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev 
*m2m_dev) +{ + unsigned long flags; + void *ret = NULL; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + if (m2m_dev->curr_ctx) + ret = m2m_dev->curr_ctx->priv; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); + +/** + * v4l2_m2m_try_run() - select next job to perform and run it if possible + * @m2m_dev: per-device context + * + * Get next transaction (if present) from the waiting jobs list and run it. + * + * Note that this function can run on a given v4l2_m2m_ctx context, + * but call .device_run for another context. + */ +static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + if (NULL != m2m_dev->curr_ctx) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("Another instance is running, won't run now\n"); + return; + } + + if (list_empty(&m2m_dev->job_queue)) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("No job pending\n"); + return; + } + + if (m2m_dev->job_queue_flags & QUEUE_PAUSED) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("Running new jobs is paused\n"); + return; + } + + m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, + struct v4l2_m2m_ctx, queue); + m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx); + m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); +} + +/* + * __v4l2_m2m_try_queue() - queue a job + * @m2m_dev: m2m device + * @m2m_ctx: m2m context + * + * Check if this context is ready to queue a job. + * + * This function can run in interrupt context. 
+ */ +static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned long flags_job; + struct vb2_v4l2_buffer *dst, *src; + + dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); + + if (!m2m_ctx->out_q_ctx.q.streaming || + (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) { + if (!m2m_ctx->ignore_cap_streaming) + dprintk("Streaming needs to be on for both queues\n"); + else + dprintk("Streaming needs to be on for the OUTPUT queue\n"); + return; + } + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + + /* If the context is aborted then don't schedule it */ + if (m2m_ctx->job_flags & TRANS_ABORT) { + dprintk("Aborted context\n"); + goto job_unlock; + } + + if (m2m_ctx->job_flags & TRANS_QUEUED) { + dprintk("On job queue already\n"); + goto job_unlock; + } + + src = v4l2_m2m_next_src_buf(m2m_ctx); + dst = v4l2_m2m_next_dst_buf(m2m_ctx); + if (!src && !m2m_ctx->out_q_ctx.buffered) { + dprintk("No input buffers available\n"); + goto job_unlock; + } + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { + dprintk("No output buffers available\n"); + goto job_unlock; + } + + m2m_ctx->new_frame = true; + + if (src && dst && dst->is_held && + dst->vb2_buf.copied_timestamp && + dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { + dprintk("Timestamp mismatch, returning held capture buffer\n"); + dst->is_held = false; + v4l2_m2m_dst_buf_remove(m2m_ctx); + v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); + dst = v4l2_m2m_next_dst_buf(m2m_ctx); + + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { + dprintk("No output buffers available after returning held buffer\n"); + goto job_unlock; + } + } + + if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) + m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || + dst->vb2_buf.timestamp != src->vb2_buf.timestamp; + + if (m2m_ctx->has_stopped) { + dprintk("Device has stopped\n"); + goto job_unlock; + } + + if 
(m2m_dev->m2m_ops->job_ready + && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { + dprintk("Driver not ready\n"); + goto job_unlock; + } + + list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); + m2m_ctx->job_flags |= TRANS_QUEUED; + +job_unlock: + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); +} + +/** + * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context + * @m2m_ctx: m2m context + * + * Check if this context is ready to queue a job. If suitable, + * run the next queued job on the mem2mem device. + * + * This function shouldn't run in interrupt context. + * + * Note that v4l2_m2m_try_schedule() can schedule one job for this context, + * and then run another job for another context. + */ +void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; + + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); + v4l2_m2m_try_run(m2m_dev); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule); + +/** + * v4l2_m2m_device_run_work() - run pending jobs for the context + * @work: Work structure used for scheduling the execution of this function. 
+ */ +static void v4l2_m2m_device_run_work(struct work_struct *work) +{ + struct v4l2_m2m_dev *m2m_dev = + container_of(work, struct v4l2_m2m_dev, job_work); + + v4l2_m2m_try_run(m2m_dev); +} + +/** + * v4l2_m2m_cancel_job() - cancel pending jobs for the context + * @m2m_ctx: m2m context with jobs to be canceled + * + * In case of streamoff or release called on any context, + * 1] If the context is currently running, then abort job will be called + * 2] If the context is queued, then the context will be removed from + * the job_queue + */ +static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct v4l2_m2m_dev *m2m_dev; + unsigned long flags; + + m2m_dev = m2m_ctx->m2m_dev; + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + + m2m_ctx->job_flags |= TRANS_ABORT; + if (m2m_ctx->job_flags & TRANS_RUNNING) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + if (m2m_dev->m2m_ops->job_abort) + m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); + dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); + wait_event(m2m_ctx->finished, + !(m2m_ctx->job_flags & TRANS_RUNNING)); + } else if (m2m_ctx->job_flags & TRANS_QUEUED) { + list_del(&m2m_ctx->queue); + m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("m2m_ctx: %p had been on queue and was removed\n", + m2m_ctx); + } else { + /* Do nothing, was not on queue/running */ + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + } +} + +/* + * Schedule the next job, called from v4l2_m2m_job_finish() or + * v4l2_m2m_buf_done_and_job_finish(). + */ +static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + /* + * This instance might have more buffers ready, but since we do not + * allow more than one job on the job_queue per instance, each has + * to be scheduled separately after the previous one finishes. 
+ */ + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); + + /* + * We might be running in atomic context, + * but the job must be run in non-atomic context. + */ + schedule_work(&m2m_dev->job_work); +} + +/* + * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or + * v4l2_m2m_buf_done_and_job_finish(). + */ +static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { + dprintk("Called by an instance not currently running\n"); + return false; + } + + list_del(&m2m_dev->curr_ctx->queue); + m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); + wake_up(&m2m_dev->curr_ctx->finished); + m2m_dev->curr_ctx = NULL; + return true; +} + +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned long flags; + bool schedule_next; + + /* + * This function should not be used for drivers that support + * holding capture buffers. Those should use + * v4l2_m2m_buf_done_and_job_finish() instead. 
+ */ + WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF); + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (schedule_next) + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); +} +EXPORT_SYMBOL(v4l2_m2m_job_finish); + +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx, + enum vb2_buffer_state state) +{ + struct vb2_v4l2_buffer *src_buf, *dst_buf; + bool schedule_next = false; + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + src_buf = v4l2_m2m_src_buf_remove(m2m_ctx); + dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx); + + if (WARN_ON(!src_buf || !dst_buf)) + goto unlock; + dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + if (!dst_buf->is_held) { + v4l2_m2m_dst_buf_remove(m2m_ctx); + v4l2_m2m_buf_done(dst_buf, state); + } + /* + * If the request API is being used, returning the OUTPUT + * (src) buffer will wake-up any process waiting on the + * request file descriptor. + * + * Therefore, return the CAPTURE (dst) buffer first, + * to avoid signalling the request file descriptor + * before the CAPTURE buffer is done. 
+ */ + v4l2_m2m_buf_done(src_buf, state); + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); +unlock: + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (schedule_next) + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); +} +EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); + +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + struct v4l2_m2m_ctx *curr_ctx; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + m2m_dev->job_queue_flags |= QUEUE_PAUSED; + curr_ctx = m2m_dev->curr_ctx; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (curr_ctx) + wait_event(curr_ctx->finished, + !(curr_ctx->job_flags & TRANS_RUNNING)); +} +EXPORT_SYMBOL(v4l2_m2m_suspend); + +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + v4l2_m2m_try_run(m2m_dev); +} +EXPORT_SYMBOL(v4l2_m2m_resume); + +int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_requestbuffers *reqbufs) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); + ret = vb2_reqbufs(vq, reqbufs); + /* If count == 0, then the owner has released all buffers and he + is no longer owner of the queue. Otherwise we have an owner. */ + if (ret == 0) + vq->owner = reqbufs->count ? 
file->private_data : NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); + +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) +{ + /* Adjust MMAP memory offsets for the CAPTURE queue */ + if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { + if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + + for (i = 0; i < buf->length; ++i) + buf->m.planes[i].m.mem_offset + += DST_QUEUE_OFF_BASE; + } else { + buf->m.offset += DST_QUEUE_OFF_BASE; + } + } +} + +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); + +/* + * This will add the LAST flag and mark the buffer management + * state as stopped. + * This is called when the last capture buffer must be flagged as LAST + * in draining mode from the encoder/decoder driver buf_queue() callback + * or from v4l2_update_last_buf_state() when a capture buffer is available. 
+ */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + vbuf->flags |= V4L2_BUF_FLAG_LAST; + vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); + + v4l2_m2m_mark_stopped(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); + +/* When stop command is issued, update buffer management state */ +static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct vb2_v4l2_buffer *next_dst_buf; + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + return 0; + + m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); + m2m_ctx->is_draining = true; + + /* + * The processing of the last output buffer queued before + * the STOP command is expected to mark the buffer management + * state as stopped with v4l2_m2m_mark_stopped(). + */ + if (m2m_ctx->last_src_buf) + return 0; + + /* + * In case the output queue is empty, try to mark the last capture + * buffer as LAST. + */ + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) { + /* + * Wait for the next queued one in encoder/decoder driver + * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() + * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet + * streaming. 
+ */ + m2m_ctx->next_buf_last = true; + return 0; + } + + v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); + + return 0; +} + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder drivers start_streaming() + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + /* If start streaming again, untag the last output buffer */ + if (V4L2_TYPE_IS_OUTPUT(q->type)) + m2m_ctx->last_src_buf = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder driver stop_streaming() + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + if (V4L2_TYPE_IS_OUTPUT(q->type)) { + /* + * If in draining state, either mark next dst buffer as + * done or flag next one to be marked as done either + * in encoder/decoder driver buf_queue() callback using + * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() + * if encoder/decoder is not yet streaming + */ + if (m2m_ctx->is_draining) { + struct vb2_v4l2_buffer *next_dst_buf; + + m2m_ctx->last_src_buf = NULL; + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) + m2m_ctx->next_buf_last = true; + else + v4l2_m2m_last_buffer_done(m2m_ctx, + next_dst_buf); + } + } else { + v4l2_m2m_clear_state(m2m_ctx); + } +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); + +static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vbuf; + unsigned int i; + + if (WARN_ON(q->is_output)) + return; + if (list_empty(&q->queued_list)) + return; + + vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); + for (i = 0; i < vb->num_planes; i++) + vb2_set_plane_payload(vb, i, 0); + + /* + * Since the buffer hasn't been queued to the ready queue, + * mark is active and 
owned before marking it LAST and DONE + */ + vb->state = VB2_BUF_STATE_ACTIVE; + atomic_inc(&q->owned_by_drv_count); + + vbuf = to_vb2_v4l2_buffer(vb); + vbuf->field = V4L2_FIELD_NONE; + + v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); +} + +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + if (V4L2_TYPE_IS_CAPTURE(vq->type) && + (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { + dprintk("%s: requests cannot be used with capture buffers\n", + __func__); + return -EPERM; + } + + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + /* + * If the capture queue is streaming, but streaming hasn't started + * on the device, but was asked to stop, mark the previously queued + * buffer as DONE with LAST flag since it won't be queued on the + * device. 
+ */ + if (V4L2_TYPE_IS_CAPTURE(vq->type) && + vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && + (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) + v4l2_m2m_force_last_buf_done(m2m_ctx, vq); + else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) + v4l2_m2m_try_schedule(m2m_ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); + +int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); + +int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); + +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); + return vb2_create_bufs(vq, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); + +int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_exportbuffer *eb) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); + return vb2_expbuf(vq, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); + +int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, type); + ret = vb2_streamon(vq, type); + if 
(!ret) + v4l2_m2m_try_schedule(m2m_ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); + +int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct v4l2_m2m_dev *m2m_dev; + struct v4l2_m2m_queue_ctx *q_ctx; + unsigned long flags_job, flags; + int ret; + + /* wait until the current context is dequeued from job_queue */ + v4l2_m2m_cancel_job(m2m_ctx); + + q_ctx = get_queue_ctx(m2m_ctx, type); + ret = vb2_streamoff(&q_ctx->q, type); + if (ret) + return ret; + + m2m_dev = m2m_ctx->m2m_dev; + spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + /* We should not be scheduled anymore, since we're dropping a queue. */ + if (m2m_ctx->job_flags & TRANS_QUEUED) + list_del(&m2m_ctx->queue); + m2m_ctx->job_flags = 0; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + /* Drop queue, since streamoff returns device to the same state as after + * calling reqbufs. */ + INIT_LIST_HEAD(&q_ctx->rdy_queue); + q_ctx->num_rdy = 0; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + if (m2m_dev->curr_ctx == m2m_ctx) { + m2m_dev->curr_ctx = NULL; + wake_up(&m2m_ctx->finished); + } + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); + +static __poll_t v4l2_m2m_poll_for_data(struct file *file, + struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait) +{ + struct vb2_queue *src_q, *dst_q; + __poll_t rc = 0; + unsigned long flags; + + src_q = v4l2_m2m_get_src_vq(m2m_ctx); + dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + + /* + * There has to be at least one buffer queued on each queued_list, which + * means either in driver already or waiting for driver to claim it + * and start processing. 
+ */ + if ((!vb2_is_streaming(src_q) || src_q->error || + list_empty(&src_q->queued_list)) && + (!vb2_is_streaming(dst_q) || dst_q->error || + (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) + return EPOLLERR; + + spin_lock_irqsave(&src_q->done_lock, flags); + if (!list_empty(&src_q->done_list)) + rc |= EPOLLOUT | EPOLLWRNORM; + spin_unlock_irqrestore(&src_q->done_lock, flags); + + spin_lock_irqsave(&dst_q->done_lock, flags); + /* + * If the last buffer was dequeued from the capture queue, signal + * userspace. DQBUF(CAPTURE) will return -EPIPE. + */ + if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued) + rc |= EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&dst_q->done_lock, flags); + + return rc; +} + +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait) +{ + struct video_device *vfd = video_devdata(file); + struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx); + struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + __poll_t req_events = poll_requested_events(wait); + __poll_t rc = 0; + + /* + * poll_wait() MUST be called on the first invocation on all the + * potential queues of interest, even if we are not interested in their + * events during this first call. Failure to do so will result in + * queue's events to be ignored because the poll_table won't be capable + * of adding new wait queues thereafter. 
+ */ + poll_wait(file, &src_q->done_wq, wait); + poll_wait(file, &dst_q->done_wq, wait); + + if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)) + rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { + struct v4l2_fh *fh = file->private_data; + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + rc |= EPOLLPRI; + } + + return rc; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_poll); + +int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct vm_area_struct *vma) +{ + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + struct vb2_queue *vq; + + if (offset < DST_QUEUE_OFF_BASE) { + vq = v4l2_m2m_get_src_vq(m2m_ctx); + } else { + vq = v4l2_m2m_get_dst_vq(m2m_ctx); + vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } + + return vb2_mmap(vq, vma); +} +EXPORT_SYMBOL(v4l2_m2m_mmap); + +#ifndef CONFIG_MMU +unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct v4l2_fh *fh = file->private_data; + unsigned long offset = pgoff << PAGE_SHIFT; + struct vb2_queue *vq; + + if (offset < DST_QUEUE_OFF_BASE) { + vq = v4l2_m2m_get_src_vq(fh->m2m_ctx); + } else { + vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx); + pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } + + return vb2_get_unmapped_area(vq, addr, len, pgoff, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area); +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) +void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) +{ + media_remove_intf_links(&m2m_dev->intf_devnode->intf); + media_devnode_remove(m2m_dev->intf_devnode); + + media_entity_remove_links(m2m_dev->source); + media_entity_remove_links(&m2m_dev->sink); + media_entity_remove_links(&m2m_dev->proc); + media_device_unregister_entity(m2m_dev->source); + media_device_unregister_entity(&m2m_dev->sink); + media_device_unregister_entity(&m2m_dev->proc); + 
kfree(m2m_dev->source->name); + kfree(m2m_dev->sink.name); + kfree(m2m_dev->proc.name); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller); + +static int v4l2_m2m_register_entity(struct media_device *mdev, + struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type, + struct video_device *vdev, int function) +{ + struct media_entity *entity; + struct media_pad *pads; + char *name; + unsigned int len; + int num_pads; + int ret; + + switch (type) { + case MEM2MEM_ENT_TYPE_SOURCE: + entity = m2m_dev->source; + pads = &m2m_dev->source_pad; + pads[0].flags = MEDIA_PAD_FL_SOURCE; + num_pads = 1; + break; + case MEM2MEM_ENT_TYPE_SINK: + entity = &m2m_dev->sink; + pads = &m2m_dev->sink_pad; + pads[0].flags = MEDIA_PAD_FL_SINK; + num_pads = 1; + break; + case MEM2MEM_ENT_TYPE_PROC: + entity = &m2m_dev->proc; + pads = m2m_dev->proc_pads; + pads[0].flags = MEDIA_PAD_FL_SINK; + pads[1].flags = MEDIA_PAD_FL_SOURCE; + num_pads = 2; + break; + default: + return -EINVAL; + } + + entity->obj_type = MEDIA_ENTITY_TYPE_BASE; + if (type != MEM2MEM_ENT_TYPE_PROC) { + entity->info.dev.major = VIDEO_MAJOR; + entity->info.dev.minor = vdev->minor; + } + len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]); + name = kmalloc(len, GFP_KERNEL); + if (!name) + return -ENOMEM; + snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]); + entity->name = name; + entity->function = function; + + ret = media_entity_pads_init(entity, num_pads, pads); + if (ret) { + kfree(entity->name); + entity->name = NULL; + return ret; + } + ret = media_device_register_entity(mdev, entity); + if (ret) { + kfree(entity->name); + entity->name = NULL; + return ret; + } + + return 0; +} + +int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function) +{ + struct media_device *mdev = vdev->v4l2_dev->mdev; + struct media_link *link; + int ret; + + if (!mdev) + return 0; + + /* A memory-to-memory device consists in two + * DMA engine and one 
video processing entities. + * The DMA engine entities are linked to a V4L interface + */ + + /* Create the three entities with their pads */ + m2m_dev->source = &vdev->entity; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L); + if (ret) + return ret; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_PROC, vdev, function); + if (ret) + goto err_rel_entity0; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L); + if (ret) + goto err_rel_entity1; + + /* Connect the three entities */ + ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret) + goto err_rel_entity2; + + ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret) + goto err_rm_links0; + + /* Create video interface */ + m2m_dev->intf_devnode = media_devnode_create(mdev, + MEDIA_INTF_T_V4L_VIDEO, 0, + VIDEO_MAJOR, vdev->minor); + if (!m2m_dev->intf_devnode) { + ret = -ENOMEM; + goto err_rm_links1; + } + + /* Connect the two DMA engines to the interface */ + link = media_create_intf_link(m2m_dev->source, + &m2m_dev->intf_devnode->intf, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (!link) { + ret = -ENOMEM; + goto err_rm_devnode; + } + + link = media_create_intf_link(&m2m_dev->sink, + &m2m_dev->intf_devnode->intf, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (!link) { + ret = -ENOMEM; + goto err_rm_intf_link; + } + return 0; + +err_rm_intf_link: + media_remove_intf_links(&m2m_dev->intf_devnode->intf); +err_rm_devnode: + media_devnode_remove(m2m_dev->intf_devnode); +err_rm_links1: + media_entity_remove_links(&m2m_dev->sink); +err_rm_links0: + media_entity_remove_links(&m2m_dev->proc); + media_entity_remove_links(m2m_dev->source); +err_rel_entity2: + media_device_unregister_entity(&m2m_dev->proc); + kfree(m2m_dev->proc.name); 
+err_rel_entity1: + media_device_unregister_entity(&m2m_dev->sink); + kfree(m2m_dev->sink.name); +err_rel_entity0: + media_device_unregister_entity(m2m_dev->source); + kfree(m2m_dev->source->name); + return ret; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller); +#endif + +struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) +{ + struct v4l2_m2m_dev *m2m_dev; + + if (!m2m_ops || WARN_ON(!m2m_ops->device_run)) + return ERR_PTR(-EINVAL); + + m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); + if (!m2m_dev) + return ERR_PTR(-ENOMEM); + + m2m_dev->curr_ctx = NULL; + m2m_dev->m2m_ops = m2m_ops; + INIT_LIST_HEAD(&m2m_dev->job_queue); + spin_lock_init(&m2m_dev->job_spinlock); + INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); + + return m2m_dev; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_init); + +void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) +{ + kfree(m2m_dev); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_release); + +struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, + void *drv_priv, + int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) +{ + struct v4l2_m2m_ctx *m2m_ctx; + struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; + int ret; + + m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); + if (!m2m_ctx) + return ERR_PTR(-ENOMEM); + + m2m_ctx->priv = drv_priv; + m2m_ctx->m2m_dev = m2m_dev; + init_waitqueue_head(&m2m_ctx->finished); + + out_q_ctx = &m2m_ctx->out_q_ctx; + cap_q_ctx = &m2m_ctx->cap_q_ctx; + + INIT_LIST_HEAD(&out_q_ctx->rdy_queue); + INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); + spin_lock_init(&out_q_ctx->rdy_spinlock); + spin_lock_init(&cap_q_ctx->rdy_spinlock); + + INIT_LIST_HEAD(&m2m_ctx->queue); + + ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); + + if (ret) + goto err; + /* + * Both queues should use same the mutex to lock the m2m context. + * This lock is used in some v4l2_m2m_* helpers. 
+ */ + if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { + ret = -EINVAL; + goto err; + } + m2m_ctx->q_lock = out_q_ctx->q.lock; + + return m2m_ctx; +err: + kfree(m2m_ctx); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); + +void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) +{ + /* wait until the current context is dequeued from job_queue */ + v4l2_m2m_cancel_job(m2m_ctx); + + vb2_queue_release(&m2m_ctx->cap_q_ctx.q); + vb2_queue_release(&m2m_ctx->out_q_ctx.q); + + kfree(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); + +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b = container_of(vbuf, + struct v4l2_m2m_buffer, vb); + struct v4l2_m2m_queue_ctx *q_ctx; + unsigned long flags; + + q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); + if (!q_ctx) + return; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_add_tail(&b->list, &q_ctx->rdy_queue); + q_ctx->num_rdy++; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); + +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags) +{ + u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + + if (copy_frame_flags) + mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | + V4L2_BUF_FLAG_BFRAME; + + cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; + + if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) + cap_vb->timecode = out_vb->timecode; + cap_vb->field = out_vb->field; + cap_vb->flags &= ~mask; + cap_vb->flags |= out_vb->flags & mask; + cap_vb->vb2_buf.copied_timestamp = 1; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); + +void v4l2_m2m_request_queue(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + struct v4l2_m2m_ctx *m2m_ctx = NULL; + + /* + * Queue all objects. 
Note that buffer objects are at the end of the + * objects list, after all other object types. Once buffer objects + * are queued, the driver might delete them immediately (if the driver + * processes the buffer at once), so we have to use + * list_for_each_entry_safe() to handle the case where the object we + * queue is deleted. + */ + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { + struct v4l2_m2m_ctx *m2m_ctx_obj; + struct vb2_buffer *vb; + + if (!obj->ops->queue) + continue; + + if (vb2_request_object_is_buffer(obj)) { + /* Sanity checks */ + vb = container_of(obj, struct vb2_buffer, req_obj); + WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); + m2m_ctx_obj = container_of(vb->vb2_queue, + struct v4l2_m2m_ctx, + out_q_ctx.q); + WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); + m2m_ctx = m2m_ctx_obj; + } + + /* + * The buffer we queue here can in theory be immediately + * unbound, hence the use of list_for_each_entry_safe() + * above and why we call the queue op last. + */ + obj->ops->queue(obj); + } + + WARN_ON(!m2m_ctx); + + if (m2m_ctx) + v4l2_m2m_try_schedule(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); + +/* Videobuf2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs); + +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs); + +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *remove) +{ + struct v4l2_fh *fh = file->private_data; + struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type); + + if (!q) + return -EINVAL; + if (q->type != remove->type) + return -EINVAL; + + return 
vb2_core_remove_bufs(q, remove->index, remove->count); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs); + +int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf); + +int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf); + +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf); + +int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf); + +int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf); + +int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, + enum v4l2_buf_type type) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_streamon(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon); + +int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, + enum v4l2_buf_type type) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); + +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + ec->flags = 0; + return 0; +} 
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd); + +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + dc->flags = 0; + + if (dc->cmd == V4L2_DEC_CMD_STOP) { + dc->stop.pts = 0; + } else if (dc->cmd == V4L2_DEC_CMD_START) { + dc->start.speed = 0; + dc->start.format = V4L2_DEC_START_FMT_NONE; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); + +/* + * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START + * Should be called from the encoder driver encoder_cmd() callback + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + if (ec->cmd == V4L2_ENC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); + +/* + * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START + * Should be called from the decoder driver decoder_cmd() callback + */ +int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + if (dc->cmd == V4L2_DEC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); + +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); + +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, + 
struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); + +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_FLUSH) + return -EINVAL; + + dc->flags = 0; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd); + +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file->private_data; + struct vb2_v4l2_buffer *out_vb, *cap_vb; + struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; + unsigned long flags; + int ret; + + ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc); + if (ret < 0) + return ret; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx); + cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx); + + /* + * If there is an out buffer pending, then clear any HOLD flag. + * + * By clearing this flag we ensure that when this output + * buffer is processed any held capture buffer will be released. + */ + if (out_vb) { + out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + } else if (cap_vb && cap_vb->is_held) { + /* + * If there were no output buffers, but there is a + * capture buffer that is held, then release that + * buffer. + */ + cap_vb->is_held = false; + v4l2_m2m_dst_buf_remove(fh->m2m_ctx); + v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE); + } + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd); + +/* + * v4l2_file_operations helpers. It is assumed here same lock is used + * for the output and the capture buffer queue. 
+ */ + +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap); + +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait) +{ + struct v4l2_fh *fh = file->private_data; + struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; + __poll_t ret; + + if (m2m_ctx->q_lock) + mutex_lock(m2m_ctx->q_lock); + + ret = v4l2_m2m_poll(file, m2m_ctx, wait); + + if (m2m_ctx->q_lock) + mutex_unlock(m2m_ctx->q_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll); + diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-spi.c b/6.17.0/drivers/media/v4l2-core/v4l2-spi.c new file mode 100644 index 0000000..1baf8e6 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-spi.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * v4l2-spi - SPI helpers for Video4Linux2 + */ + +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <media/v4l2-common.h> +#include <media/v4l2-device.h> + +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{ + struct spi_device *spi = v4l2_get_subdevdata(sd); + + if (spi && !spi->dev.of_node && !spi->dev.fwnode) + spi_unregister_device(spi); +} + +void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_IS_SPI; + /* the owner is the same as the spi_device's driver owner */ + sd->owner = spi->dev.driver->owner; + sd->dev = &spi->dev; + /* spi_device and v4l2_subdev point to one another */ + v4l2_set_subdevdata(sd, spi); + spi_set_drvdata(spi, sd); + /* initialize name */ + snprintf(sd->name, sizeof(sd->name), "%s %s", + spi->dev.driver->name, dev_name(&spi->dev)); +} +EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init); + +struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, + struct spi_board_info *info) +{ + struct v4l2_subdev *sd = NULL; + struct spi_device *spi = NULL; + + if (!v4l2_dev) + return NULL;
+ if (info->modalias[0]) + request_module(info->modalias); + + spi = spi_new_device(ctlr, info); + + if (!spi || !spi->dev.driver) + goto error; + + if (!try_module_get(spi->dev.driver->owner)) + goto error; + + sd = spi_get_drvdata(spi); + + /* + * Register with the v4l2_device which increases the module's + * use count as well. + */ + if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) + sd = NULL; + + /* Decrease the module use count to match the first try_module_get. */ + module_put(spi->dev.driver->owner); + +error: + /* + * If we have a client but no subdev, then something went wrong and + * we must unregister the client. + */ + if (!sd) + spi_unregister_device(spi); + + return sd; +} +EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-subdev-priv.h b/6.17.0/drivers/media/v4l2-core/v4l2-subdev-priv.h new file mode 100644 index 0000000..52391d6 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-subdev-priv.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 sub-device private header. + * + * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef _V4L2_SUBDEV_PRIV_H_ +#define _V4L2_SUBDEV_PRIV_H_ + +int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd); +void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd); + +#endif diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c new file mode 100644 index 0000000..4fd25fe --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c @@ -0,0 +1,2617 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 sub-device + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * Sakari Ailus <sakari.ailus@iki.fi> + */ + +#include <linux/export.h> +#include <linux/ioctl.h> +#include <linux/leds.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/overflow.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/version.h> +#include <linux/videodev2.h> + +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-ioctl.h> + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) +/* + * The Streams API is an experimental feature.
To use the Streams API, set + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +static bool v4l2_subdev_enable_streams_api; +#endif + +/* + * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set + * of streams. + * + * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX + * restricts the total number of streams in a pad, although the stream ID is + * not restricted. + */ +#define V4L2_SUBDEV_MAX_STREAM_ID 63 + +#include "v4l2-subdev-priv.h" + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) +static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd) +{ + struct v4l2_subdev_state *state; + static struct lock_class_key key; + + state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key); + if (IS_ERR(state)) + return PTR_ERR(state); + + fh->state = state; + + return 0; +} + +static void subdev_fh_free(struct v4l2_subdev_fh *fh) +{ + __v4l2_subdev_state_free(fh->state); + fh->state = NULL; +} + +static int subdev_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_subdev_fh *subdev_fh; + int ret; + + subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL); + if (subdev_fh == NULL) + return -ENOMEM; + + ret = subdev_fh_init(subdev_fh, sd); + if (ret) { + kfree(subdev_fh); + return ret; + } + + v4l2_fh_init(&subdev_fh->vfh, vdev); + v4l2_fh_add(&subdev_fh->vfh); + file->private_data = &subdev_fh->vfh; + + if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) { + struct module *owner; + + owner = sd->entity.graph_obj.mdev->dev->driver->owner; + if (!try_module_get(owner)) { + ret = -EBUSY; + goto err; + } + subdev_fh->owner = owner; + } + + if (sd->internal_ops && sd->internal_ops->open) { + ret = sd->internal_ops->open(sd, subdev_fh); + if (ret < 0) + goto err; + } + + return 0; + +err: + module_put(subdev_fh->owner); + v4l2_fh_del(&subdev_fh->vfh); + v4l2_fh_exit(&subdev_fh->vfh); + subdev_fh_free(subdev_fh); + 
kfree(subdev_fh); + + return ret; +} + +static int subdev_close(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + + if (sd->internal_ops && sd->internal_ops->close) + sd->internal_ops->close(sd, subdev_fh); + module_put(subdev_fh->owner); + v4l2_fh_del(vfh); + v4l2_fh_exit(vfh); + subdev_fh_free(subdev_fh); + kfree(subdev_fh); + file->private_data = NULL; + + return 0; +} +#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ +static int subdev_open(struct file *file) +{ + return -ENODEV; +} + +static int subdev_close(struct file *file) +{ + return -ENODEV; +} +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) + led_set_brightness(sd->privacy_led, + sd->privacy_led->max_brightness); +#endif +} + +static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) + led_set_brightness(sd->privacy_led, 0); +#endif +} + +static inline int check_which(u32 which) +{ + if (which != V4L2_SUBDEV_FORMAT_TRY && + which != V4L2_SUBDEV_FORMAT_ACTIVE) + return -EINVAL; + + return 0; +} + +static inline int check_pad(struct v4l2_subdev *sd, u32 pad) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + if (sd->entity.num_pads) { + if (pad >= sd->entity.num_pads) + return -EINVAL; + return 0; + } +#endif + /* allow pad 0 on subdevices not registered as media entities */ + if (pad > 0) + return -EINVAL; + return 0; +} + +static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + u32 which, u32 pad, u32 stream) +{ + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + if (!v4l2_subdev_state_get_format(state, pad, stream)) + return -EINVAL; + return 0; 
+#else + return -EINVAL; +#endif + } + + if (stream != 0) + return -EINVAL; + + if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads)) + return -EINVAL; + + return 0; +} + +static inline int check_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + if (!format) + return -EINVAL; + + return check_which(format->which) ? : check_pad(sd, format->pad) ? : + check_state(sd, state, format->which, format->pad, format->stream); +} + +static int call_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return check_format(sd, state, format) ? : + sd->ops->pad->get_fmt(sd, state, format); +} + +static int call_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return check_format(sd, state, format) ? : + sd->ops->pad->set_fmt(sd, state, format); +} + +static int call_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (!code) + return -EINVAL; + + return check_which(code->which) ? : check_pad(sd, code->pad) ? : + check_state(sd, state, code->which, code->pad, code->stream) ? : + sd->ops->pad->enum_mbus_code(sd, state, code); +} + +static int call_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (!fse) + return -EINVAL; + + return check_which(fse->which) ? : check_pad(sd, fse->pad) ? : + check_state(sd, state, fse->which, fse->pad, fse->stream) ? : + sd->ops->pad->enum_frame_size(sd, state, fse); +} + +static int call_enum_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie) +{ + if (!fie) + return -EINVAL; + + return check_which(fie->which) ? : check_pad(sd, fie->pad) ? : + check_state(sd, state, fie->which, fie->pad, fie->stream) ? 
: + sd->ops->pad->enum_frame_interval(sd, state, fie); +} + +static inline int check_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + if (!sel) + return -EINVAL; + + return check_which(sel->which) ? : check_pad(sd, sel->pad) ? : + check_state(sd, state, sel->which, sel->pad, sel->stream); +} + +static int call_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + return check_selection(sd, state, sel) ? : + sd->ops->pad->get_selection(sd, state, sel); +} + +static int call_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + return check_selection(sd, state, sel) ? : + sd->ops->pad->set_selection(sd, state, sel); +} + +static inline int check_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + if (!fi) + return -EINVAL; + + return check_which(fi->which) ? : check_pad(sd, fi->pad) ? : + check_state(sd, state, fi->which, fi->pad, fi->stream); +} + +static int call_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + return check_frame_interval(sd, state, fi) ? : + sd->ops->pad->get_frame_interval(sd, state, fi); +} + +static int call_set_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + return check_frame_interval(sd, state, fi) ? 
: + sd->ops->pad->set_frame_interval(sd, state, fi); +} + +static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd) +{ + unsigned int i; + int ret; + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; +#endif + + memset(fd, 0, sizeof(*fd)); + + ret = sd->ops->pad->get_frame_desc(sd, pad, fd); + if (ret) + return ret; + + dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad, + fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" : + fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" : + "unknown"); + + for (i = 0; i < fd->num_entries; i++) { + struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i]; + char buf[20] = ""; + + if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) + WARN_ON(snprintf(buf, sizeof(buf), + ", vc %u, dt 0x%02x", + entry->bus.csi2.vc, + entry->bus.csi2.dt) >= sizeof(buf)); + + dev_dbg(sd->dev, + "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n", + entry->stream, entry->pixelcode, entry->length, + entry->flags, buf); + } + + return 0; +} + +static inline int check_edid(struct v4l2_subdev *sd, + struct v4l2_subdev_edid *edid) +{ + if (!edid) + return -EINVAL; + + if (edid->blocks && edid->edid == NULL) + return -EINVAL; + + return check_pad(sd, edid->pad); +} + +static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) +{ + return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid); +} + +static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) +{ + return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid); +} + +static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? 
: + sd->ops->pad->s_dv_timings(sd, pad, timings); +} + +static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->g_dv_timings(sd, pad, timings); +} + +static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->query_dv_timings(sd, pad, timings); +} + +static int call_dv_timings_cap(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap) +{ + if (!cap) + return -EINVAL; + + return check_pad(sd, cap->pad) ? : + sd->ops->pad->dv_timings_cap(sd, cap); +} + +static int call_enum_dv_timings(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *dvt) +{ + if (!dvt) + return -EINVAL; + + return check_pad(sd, dvt->pad) ? : + sd->ops->pad->enum_dv_timings(sd, dvt); +} + +static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config) +{ + memset(config, 0, sizeof(*config)); + + return check_pad(sd, pad) ? : + sd->ops->pad->get_mbus_config(sd, pad, config); +} + +static int call_s_stream(struct v4l2_subdev *sd, int enable) +{ + int ret; + + /* + * The .s_stream() operation must never be called to start or stop an + * already started or stopped subdev. Catch offenders but don't return + * an error yet to avoid regressions. + */ + if (WARN_ON(sd->s_stream_enabled == !!enable)) + return 0; + + ret = sd->ops->video->s_stream(sd, enable); + + if (!enable && ret < 0) { + dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret); + ret = 0; + } + + if (!ret) { + sd->s_stream_enabled = enable; + + if (enable) + v4l2_subdev_enable_privacy_led(sd); + else + v4l2_subdev_disable_privacy_led(sd); + } + + return ret; +} + +#ifdef CONFIG_MEDIA_CONTROLLER +/* + * Create state-management wrapper for pad ops dealing with subdev state. 
The + * wrapper handles the case where the caller does not provide the called + * subdev's state. This should be removed when all the callers are fixed. + */ +#define DEFINE_STATE_WRAPPER(f, arg_type) \ + static int call_##f##_state(struct v4l2_subdev *sd, \ + struct v4l2_subdev_state *_state, \ + arg_type *arg) \ + { \ + struct v4l2_subdev_state *state = _state; \ + int ret; \ + if (!_state) \ + state = v4l2_subdev_lock_and_get_active_state(sd); \ + ret = call_##f(sd, state, arg); \ + if (!_state && state) \ + v4l2_subdev_unlock_state(state); \ + return ret; \ + } + +#else /* CONFIG_MEDIA_CONTROLLER */ + +#define DEFINE_STATE_WRAPPER(f, arg_type) \ + static int call_##f##_state(struct v4l2_subdev *sd, \ + struct v4l2_subdev_state *state, \ + arg_type *arg) \ + { \ + return call_##f(sd, state, arg); \ + } + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format); +DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format); +DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum); +DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum); +DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum); +DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection); +DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection); + +static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = { + .get_fmt = call_get_fmt_state, + .set_fmt = call_set_fmt_state, + .enum_mbus_code = call_enum_mbus_code_state, + .enum_frame_size = call_enum_frame_size_state, + .enum_frame_interval = call_enum_frame_interval_state, + .get_selection = call_get_selection_state, + .set_selection = call_set_selection_state, + .get_frame_interval = call_get_frame_interval, + .set_frame_interval = call_set_frame_interval, + .get_edid = call_get_edid, + .set_edid = call_set_edid, + .s_dv_timings = call_s_dv_timings, + .g_dv_timings = call_g_dv_timings, + .query_dv_timings = 
call_query_dv_timings, + .dv_timings_cap = call_dv_timings_cap, + .enum_dv_timings = call_enum_dv_timings, + .get_frame_desc = call_get_frame_desc, + .get_mbus_config = call_get_mbus_config, +}; + +static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = { + .s_stream = call_s_stream, +}; + +const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = { + .pad = &v4l2_subdev_call_pad_wrappers, + .video = &v4l2_subdev_call_video_wrappers, +}; +EXPORT_SYMBOL(v4l2_subdev_call_wrappers); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static struct v4l2_subdev_state * +subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh, + unsigned int cmd, void *arg) +{ + u32 which; + + switch (cmd) { + default: + return NULL; + case VIDIOC_SUBDEV_G_FMT: + case VIDIOC_SUBDEV_S_FMT: + which = ((struct v4l2_subdev_format *)arg)->which; + break; + case VIDIOC_SUBDEV_G_CROP: + case VIDIOC_SUBDEV_S_CROP: + which = ((struct v4l2_subdev_crop *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_MBUS_CODE: + which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: + which = ((struct v4l2_subdev_frame_size_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: + which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_G_SELECTION: + case VIDIOC_SUBDEV_S_SELECTION: + which = ((struct v4l2_subdev_selection *)arg)->which; + break; + case VIDIOC_SUBDEV_G_FRAME_INTERVAL: + case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!(subdev_fh->client_caps & + V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH)) + fi->which = V4L2_SUBDEV_FORMAT_ACTIVE; + + which = fi->which; + break; + } + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + which = ((struct v4l2_subdev_routing *)arg)->which; + break; + } + + return which == V4L2_SUBDEV_FORMAT_TRY ? 
+ subdev_fh->state : + v4l2_subdev_get_unlocked_active_state(sd); +} + +static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, + struct v4l2_subdev_state *state) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); + bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS; + bool client_supports_streams = subdev_fh->client_caps & + V4L2_SUBDEV_CLIENT_CAP_STREAMS; + int rval; + + /* + * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS. + * Remove this when the API is no longer experimental. + */ + if (!v4l2_subdev_enable_streams_api) + streams_subdev = false; + + switch (cmd) { + case VIDIOC_SUBDEV_QUERYCAP: { + struct v4l2_subdev_capability *cap = arg; + + memset(cap->reserved, 0, sizeof(cap->reserved)); + cap->version = LINUX_VERSION_CODE; + cap->capabilities = + (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) | + (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0); + + return 0; + } + + case VIDIOC_QUERYCTRL: + /* + * TODO: this really should be folded into v4l2_queryctrl (this + * currently returns -EINVAL for NULL control handlers). + * However, v4l2_queryctrl() is still called directly by + * drivers as well and until that has been addressed I believe + * it is safer to do the check here. The same is true for the + * other control ioctls below. 
+ */ + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_queryctrl(vfh->ctrl_handler, arg); + + case VIDIOC_QUERY_EXT_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg); + + case VIDIOC_QUERYMENU: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_querymenu(vfh->ctrl_handler, arg); + + case VIDIOC_G_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_g_ctrl(vfh->ctrl_handler, arg); + + case VIDIOC_S_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg); + + case VIDIOC_G_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_S_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_TRY_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_DQEVENT: + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return -ENOIOCTLCMD; + + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); + + case VIDIOC_SUBSCRIBE_EVENT: + if (v4l2_subdev_has_op(sd, core, subscribe_event)) + return v4l2_subdev_call(sd, core, subscribe_event, + vfh, arg); + + if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) && + vfh->ctrl_handler) + return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg); + + return -ENOIOCTLCMD; + + case VIDIOC_UNSUBSCRIBE_EVENT: + if (v4l2_subdev_has_op(sd, core, unsubscribe_event)) + return v4l2_subdev_call(sd, core, unsubscribe_event, + vfh, arg); + + if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) + return v4l2_event_subdev_unsubscribe(sd, vfh, arg); + + return -ENOIOCTLCMD; + +#ifdef CONFIG_VIDEO_ADV_DEBUG + case VIDIOC_DBG_G_REGISTER: + { + struct v4l2_dbg_register *p = arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return v4l2_subdev_call(sd, core, 
g_register, p); + } + case VIDIOC_DBG_S_REGISTER: + { + struct v4l2_dbg_register *p = arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return v4l2_subdev_call(sd, core, s_register, p); + } + case VIDIOC_DBG_G_CHIP_INFO: + { + struct v4l2_dbg_chip_info *p = arg; + + if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr) + return -EINVAL; + if (sd->ops->core && sd->ops->core->s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (sd->ops->core && sd->ops->core->g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, sd->name, sizeof(p->name)); + return 0; + } +#endif + + case VIDIOC_LOG_STATUS: { + int ret; + + pr_info("%s: ================= START STATUS =================\n", + sd->name); + ret = v4l2_subdev_call(sd, core, log_status); + pr_info("%s: ================== END STATUS ==================\n", + sd->name); + return ret; + } + + case VIDIOC_SUBDEV_G_FMT: { + struct v4l2_subdev_format *format = arg; + + if (!client_supports_streams) + format->stream = 0; + + memset(format->reserved, 0, sizeof(format->reserved)); + memset(format->format.reserved, 0, sizeof(format->format.reserved)); + return v4l2_subdev_call(sd, pad, get_fmt, state, format); + } + + case VIDIOC_SUBDEV_S_FMT: { + struct v4l2_subdev_format *format = arg; + + if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + format->stream = 0; + + memset(format->reserved, 0, sizeof(format->reserved)); + memset(format->format.reserved, 0, sizeof(format->format.reserved)); + return v4l2_subdev_call(sd, pad, set_fmt, state, format); + } + + case VIDIOC_SUBDEV_G_CROP: { + struct v4l2_subdev_crop *crop = arg; + struct v4l2_subdev_selection sel; + + if (!client_supports_streams) + crop->stream = 0; + + memset(crop->reserved, 0, sizeof(crop->reserved)); + memset(&sel, 0, sizeof(sel)); + sel.which = crop->which; + sel.pad = crop->pad; + sel.stream = crop->stream; + sel.target = V4L2_SEL_TGT_CROP; + + rval = v4l2_subdev_call( + sd, 
pad, get_selection, state, &sel); + + crop->rect = sel.r; + + return rval; + } + + case VIDIOC_SUBDEV_S_CROP: { + struct v4l2_subdev_crop *crop = arg; + struct v4l2_subdev_selection sel; + + if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + crop->stream = 0; + + memset(crop->reserved, 0, sizeof(crop->reserved)); + memset(&sel, 0, sizeof(sel)); + sel.which = crop->which; + sel.pad = crop->pad; + sel.stream = crop->stream; + sel.target = V4L2_SEL_TGT_CROP; + sel.r = crop->rect; + + rval = v4l2_subdev_call( + sd, pad, set_selection, state, &sel); + + crop->rect = sel.r; + + return rval; + } + + case VIDIOC_SUBDEV_ENUM_MBUS_CODE: { + struct v4l2_subdev_mbus_code_enum *code = arg; + + if (!client_supports_streams) + code->stream = 0; + + memset(code->reserved, 0, sizeof(code->reserved)); + return v4l2_subdev_call(sd, pad, enum_mbus_code, state, + code); + } + + case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: { + struct v4l2_subdev_frame_size_enum *fse = arg; + + if (!client_supports_streams) + fse->stream = 0; + + memset(fse->reserved, 0, sizeof(fse->reserved)); + return v4l2_subdev_call(sd, pad, enum_frame_size, state, + fse); + } + + case VIDIOC_SUBDEV_G_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!client_supports_streams) + fi->stream = 0; + + memset(fi->reserved, 0, sizeof(fi->reserved)); + return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi); + } + + case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!client_supports_streams) + fi->stream = 0; + + if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + memset(fi->reserved, 0, sizeof(fi->reserved)); + return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi); + } + + case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval_enum *fie = arg; + + if (!client_supports_streams) + fie->stream = 0; + + memset(fie->reserved, 0, sizeof(fie->reserved)); 
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, state, + fie); + } + + case VIDIOC_SUBDEV_G_SELECTION: { + struct v4l2_subdev_selection *sel = arg; + + if (!client_supports_streams) + sel->stream = 0; + + memset(sel->reserved, 0, sizeof(sel->reserved)); + return v4l2_subdev_call( + sd, pad, get_selection, state, sel); + } + + case VIDIOC_SUBDEV_S_SELECTION: { + struct v4l2_subdev_selection *sel = arg; + + if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + sel->stream = 0; + + memset(sel->reserved, 0, sizeof(sel->reserved)); + return v4l2_subdev_call( + sd, pad, set_selection, state, sel); + } + + case VIDIOC_G_EDID: { + struct v4l2_subdev_edid *edid = arg; + + return v4l2_subdev_call(sd, pad, get_edid, edid); + } + + case VIDIOC_S_EDID: { + struct v4l2_subdev_edid *edid = arg; + + return v4l2_subdev_call(sd, pad, set_edid, edid); + } + + case VIDIOC_SUBDEV_DV_TIMINGS_CAP: { + struct v4l2_dv_timings_cap *cap = arg; + + return v4l2_subdev_call(sd, pad, dv_timings_cap, cap); + } + + case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: { + struct v4l2_enum_dv_timings *dvt = arg; + + return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt); + } + + case VIDIOC_SUBDEV_QUERY_DV_TIMINGS: + return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_G_DV_TIMINGS: + return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_S_DV_TIMINGS: + if (ro_subdev) + return -EPERM; + + return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_G_STD: + return v4l2_subdev_call(sd, video, g_std, arg); + + case VIDIOC_SUBDEV_S_STD: { + v4l2_std_id *std = arg; + + if (ro_subdev) + return -EPERM; + + return v4l2_subdev_call(sd, video, s_std, *std); + } + + case VIDIOC_SUBDEV_ENUMSTD: { + struct v4l2_standard *p = arg; + v4l2_std_id id; + + if (v4l2_subdev_call(sd, video, g_tvnorms, &id)) + return -EINVAL; + + return v4l_video_std_enumstd(p, id); + } + + case VIDIOC_SUBDEV_QUERYSTD: + 
return v4l2_subdev_call(sd, video, querystd, arg); + + case VIDIOC_SUBDEV_G_ROUTING: { + struct v4l2_subdev_routing *routing = arg; + struct v4l2_subdev_krouting *krouting; + + if (!v4l2_subdev_enable_streams_api) + return -ENOIOCTLCMD; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return -ENOIOCTLCMD; + + memset(routing->reserved, 0, sizeof(routing->reserved)); + + krouting = &state->routing; + + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + krouting->routes, + min(krouting->num_routes, routing->len_routes) * + sizeof(*krouting->routes)); + routing->num_routes = krouting->num_routes; + + return 0; + } + + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *routing = arg; + struct v4l2_subdev_route *routes = + (struct v4l2_subdev_route *)(uintptr_t)routing->routes; + struct v4l2_subdev_krouting krouting = {}; + unsigned int num_active_routes = 0; + unsigned int i; + + if (!v4l2_subdev_enable_streams_api) + return -ENOIOCTLCMD; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return -ENOIOCTLCMD; + + if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (routing->num_routes > routing->len_routes) + return -EINVAL; + + memset(routing->reserved, 0, sizeof(routing->reserved)); + + for (i = 0; i < routing->num_routes; ++i) { + const struct v4l2_subdev_route *route = &routes[i]; + const struct media_pad *pads = sd->entity.pads; + + if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID || + route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID) + return -EINVAL; + + if (route->sink_pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(pads[route->sink_pad].flags & + MEDIA_PAD_FL_SINK)) + return -EINVAL; + + if (route->source_pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(pads[route->source_pad].flags & + MEDIA_PAD_FL_SOURCE)) + return -EINVAL; + + if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + num_active_routes++; + } + + /* + * Drivers that implement routing need to report a frame + * descriptor 
accordingly, with up to one entry per route. Until + * the frame descriptors entries get allocated dynamically, + * limit the number of active routes to + * V4L2_FRAME_DESC_ENTRY_MAX. + */ + if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX) + return -E2BIG; + + /* + * If the driver doesn't support setting routing, just return + * the routing table. + */ + if (!v4l2_subdev_has_op(sd, pad, set_routing)) { + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + state->routing.routes, + min(state->routing.num_routes, routing->len_routes) * + sizeof(*state->routing.routes)); + routing->num_routes = state->routing.num_routes; + + return 0; + } + + krouting.num_routes = routing->num_routes; + krouting.len_routes = routing->len_routes; + krouting.routes = routes; + + rval = v4l2_subdev_call(sd, pad, set_routing, state, + routing->which, &krouting); + if (rval < 0) + return rval; + + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + state->routing.routes, + min(state->routing.num_routes, routing->len_routes) * + sizeof(*state->routing.routes)); + routing->num_routes = state->routing.num_routes; + + return 0; + } + + case VIDIOC_SUBDEV_G_CLIENT_CAP: { + struct v4l2_subdev_client_capability *client_cap = arg; + + client_cap->capabilities = subdev_fh->client_caps; + + return 0; + } + + case VIDIOC_SUBDEV_S_CLIENT_CAP: { + struct v4l2_subdev_client_capability *client_cap = arg; + + /* + * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not + * enabled. Remove this when streams API is no longer + * experimental. 
+ */ + if (!v4l2_subdev_enable_streams_api) + client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS; + + /* Filter out unsupported capabilities */ + client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS | + V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH); + + subdev_fh->client_caps = client_cap->capabilities; + + return 0; + } + + default: + return v4l2_subdev_call(sd, core, ioctl, cmd, arg); + } + + return 0; +} + +static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct mutex *lock = vdev->lock; + long ret = -ENODEV; + + if (lock && mutex_lock_interruptible(lock)) + return -ERESTARTSYS; + + if (video_is_registered(vdev)) { + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + struct v4l2_subdev_state *state; + + state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg); + + if (state) + v4l2_subdev_lock_state(state); + + ret = subdev_do_ioctl(file, cmd, arg, state); + + if (state) + v4l2_subdev_unlock_state(state); + } + + if (lock) + mutex_unlock(lock); + return ret; +} + +static long subdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock); +} + +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + + return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg); +} +#endif + +#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ +static long subdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return -ENODEV; +} + +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return -ENODEV; +} +#endif +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static 
__poll_t subdev_poll(struct file *file, poll_table *wait) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *fh = file->private_data; + + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return EPOLLERR; + + poll_wait(file, &fh->wait, wait); + + if (v4l2_event_pending(fh)) + return EPOLLPRI; + + return 0; +} + +const struct v4l2_file_operations v4l2_subdev_fops = { + .owner = THIS_MODULE, + .open = subdev_open, + .unlocked_ioctl = subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = subdev_compat_ioctl32, +#endif + .release = subdev_close, + .poll = subdev_poll, +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint) +{ + struct fwnode_handle *fwnode; + struct v4l2_subdev *sd; + + if (!is_media_entity_v4l2_subdev(entity)) + return -EINVAL; + + sd = media_entity_to_v4l2_subdev(entity); + + fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode); + fwnode_handle_put(fwnode); + + if (device_match_fwnode(sd->dev, fwnode)) + return endpoint->port; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1); + +int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + bool pass = true; + + /* The width, height and code must match. 
*/ + if (source_fmt->format.width != sink_fmt->format.width) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: width does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.width, sink_fmt->format.width); + pass = false; + } + + if (source_fmt->format.height != sink_fmt->format.height) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: height does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.height, sink_fmt->format.height); + pass = false; + } + + if (source_fmt->format.code != sink_fmt->format.code) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n", + __func__, + source_fmt->format.code, sink_fmt->format.code); + pass = false; + } + + /* The field order must match, or the sink field order must be NONE + * to support interlaced hardware connected to bridges that support + * progressive formats only. + */ + if (source_fmt->format.field != sink_fmt->format.field && + sink_fmt->format.field != V4L2_FIELD_NONE) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: field does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.field, sink_fmt->format.field); + pass = false; + } + + if (pass) + return 0; + + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__, + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + return -EPIPE; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); + +static int +v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream, + struct v4l2_subdev_format *fmt, + bool states_locked) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev *sd; + int ret; + + sd = media_entity_to_v4l2_subdev(pad->entity); + + fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt->pad = pad->index; + fmt->stream = stream; + + if (states_locked) + state = v4l2_subdev_get_locked_active_state(sd); + else + state = 
v4l2_subdev_lock_and_get_active_state(sd); + + ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt); + + if (!states_locked && state) + v4l2_subdev_unlock_state(state); + + return ret; +} + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static void __v4l2_link_validate_get_streams(struct media_pad *pad, + u64 *streams_mask, + bool states_locked) +{ + struct v4l2_subdev_route *route; + struct v4l2_subdev_state *state; + struct v4l2_subdev *subdev; + + subdev = media_entity_to_v4l2_subdev(pad->entity); + + *streams_mask = 0; + + if (states_locked) + state = v4l2_subdev_get_locked_active_state(subdev); + else + state = v4l2_subdev_lock_and_get_active_state(subdev); + + if (WARN_ON(!state)) + return; + + for_each_active_route(&state->routing, route) { + u32 route_pad; + u32 route_stream; + + if (pad->flags & MEDIA_PAD_FL_SOURCE) { + route_pad = route->source_pad; + route_stream = route->source_stream; + } else { + route_pad = route->sink_pad; + route_stream = route->sink_stream; + } + + if (route_pad != pad->index) + continue; + + *streams_mask |= BIT_ULL(route_stream); + } + + if (!states_locked) + v4l2_subdev_unlock_state(state); +} + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static void v4l2_link_validate_get_streams(struct media_pad *pad, + u64 *streams_mask, + bool states_locked) +{ + struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity); + + if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) { + /* Non-streams subdevs have an implicit stream 0 */ + *streams_mask = BIT_ULL(0); + return; + } + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + __v4l2_link_validate_get_streams(pad, streams_mask, states_locked); +#else + /* This shouldn't happen */ + *streams_mask = 0; +#endif +} + +static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked) +{ + struct v4l2_subdev *sink_subdev = + media_entity_to_v4l2_subdev(link->sink->entity); + struct device *dev = sink_subdev->entity.graph_obj.mdev->dev; + u64 source_streams_mask; + u64 
sink_streams_mask; + u64 dangling_sink_streams; + u32 stream; + int ret; + + dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n", + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked); + v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked); + + /* + * It is ok to have more source streams than sink streams as extra + * source streams can just be ignored by the receiver, but having extra + * sink streams is an error as streams must have a source. + */ + dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) & + sink_streams_mask; + if (dangling_sink_streams) { + dev_err(dev, "Dangling sink streams: mask %#llx\n", + dangling_sink_streams); + return -EINVAL; + } + + /* Validate source and sink stream formats */ + + for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) { + struct v4l2_subdev_format sink_fmt, source_fmt; + + if (!(sink_streams_mask & BIT_ULL(stream))) + continue; + + dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n", + link->source->entity->name, link->source->index, stream, + link->sink->entity->name, link->sink->index, stream); + + ret = v4l2_subdev_link_validate_get_format(link->source, stream, + &source_fmt, states_locked); + if (ret < 0) { + dev_dbg(dev, + "Failed to get format for \"%s\":%u:%u (but that's ok)\n", + link->source->entity->name, link->source->index, + stream); + continue; + } + + ret = v4l2_subdev_link_validate_get_format(link->sink, stream, + &sink_fmt, states_locked); + if (ret < 0) { + dev_dbg(dev, + "Failed to get format for \"%s\":%u:%u (but that's ok)\n", + link->sink->entity->name, link->sink->index, + stream); + continue; + } + + /* TODO: add stream number to link_validate() */ + ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link, + &source_fmt, &sink_fmt); + if (!ret) + continue; + + if (ret != -ENOIOCTLCMD) + return ret; 
+ + ret = v4l2_subdev_link_validate_default(sink_subdev, link, + &source_fmt, &sink_fmt); + + if (ret) + return ret; + } + + return 0; +} + +int v4l2_subdev_link_validate(struct media_link *link) +{ + struct v4l2_subdev *source_sd, *sink_sd; + struct v4l2_subdev_state *source_state, *sink_state; + bool states_locked; + int ret; + + /* + * Links are validated in the context of the sink entity. Usage of this + * helper on a sink that is not a subdev is a clear driver bug. + */ + if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) + return -EINVAL; + + /* + * If the source is a video device, delegate link validation to it. This + * allows usage of this helper for subdev connected to a video output + * device, provided that the driver implement the video output device's + * .link_validate() operation. + */ + if (is_media_entity_v4l2_video_device(link->source->entity)) { + struct media_entity *source = link->source->entity; + + if (!source->ops || !source->ops->link_validate) { + /* + * Many existing drivers do not implement the required + * .link_validate() operation for their video devices. + * Print a warning to get the drivers fixed, and return + * 0 to avoid breaking userspace. This should + * eventually be turned into a WARN_ON() when all + * drivers will have been fixed. + */ + pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n", + source->name); + return 0; + } + + /* + * Avoid infinite loops in case a video device incorrectly uses + * this helper function as its .link_validate() handler. + */ + if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) + return -EINVAL; + + return source->ops->link_validate(link); + } + + /* + * If the source is still not a subdev, usage of this helper is a clear + * driver bug. 
+ */ + if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) + return -EINVAL; + + sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); + source_sd = media_entity_to_v4l2_subdev(link->source->entity); + + sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd); + source_state = v4l2_subdev_get_unlocked_active_state(source_sd); + + states_locked = sink_state && source_state; + + if (states_locked) + v4l2_subdev_lock_states(sink_state, source_state); + + ret = v4l2_subdev_link_validate_locked(link, states_locked); + + if (states_locked) + v4l2_subdev_unlock_states(sink_state, source_state); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); + +bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct v4l2_subdev_krouting *routing; + struct v4l2_subdev_state *state; + unsigned int i; + + state = v4l2_subdev_lock_and_get_active_state(sd); + + routing = &state->routing; + + for (i = 0; i < routing->num_routes; ++i) { + struct v4l2_subdev_route *route = &routing->routes[i]; + + if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + if ((route->sink_pad == pad0 && route->source_pad == pad1) || + (route->source_pad == pad0 && route->sink_pad == pad1)) { + v4l2_subdev_unlock_state(state); + return true; + } + } + + v4l2_subdev_unlock_state(state); + + return false; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep); + +struct v4l2_subdev_state * +__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name, + struct lock_class_key *lock_key) +{ + struct v4l2_subdev_state *state; + int ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + __mutex_init(&state->_lock, lock_name, lock_key); + if (sd->state_lock) + state->lock = sd->state_lock; + else + state->lock = &state->_lock; + + state->sd = sd; + + /* Drivers that support streams do not need the 
legacy pad config */ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { + state->pads = kvcalloc(sd->entity.num_pads, + sizeof(*state->pads), GFP_KERNEL); + if (!state->pads) { + ret = -ENOMEM; + goto err; + } + } + + if (sd->internal_ops && sd->internal_ops->init_state) { + /* + * There can be no race at this point, but we lock the state + * anyway to satisfy lockdep checks. + */ + v4l2_subdev_lock_state(state); + ret = sd->internal_ops->init_state(sd, state); + v4l2_subdev_unlock_state(state); + + if (ret) + goto err; + } + + return state; + +err: + if (state && state->pads) + kvfree(state->pads); + + kfree(state); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc); + +void __v4l2_subdev_state_free(struct v4l2_subdev_state *state) +{ + if (!state) + return; + + mutex_destroy(&state->_lock); + + kfree(state->routing.routes); + kvfree(state->stream_configs.configs); + kvfree(state->pads); + kfree(state); +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free); + +int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, + struct lock_class_key *key) +{ + struct v4l2_subdev_state *state; + struct device *dev = sd->dev; + bool has_disable_streams; + bool has_enable_streams; + bool has_s_stream; + + /* Check that the subdevice implements the required features */ + + has_s_stream = v4l2_subdev_has_op(sd, video, s_stream); + has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams); + has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams); + + if (has_enable_streams != has_disable_streams) { + dev_err(dev, + "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n", + sd->name); + return -EINVAL; + } + + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { + if (has_s_stream && !has_enable_streams) { + dev_err(dev, + "subdev '%s' must implement .enable/disable_streams()\n", + sd->name); + + return -EINVAL; + } + } + + if (sd->ctrl_handler) + sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; 
+ + state = __v4l2_subdev_state_alloc(sd, name, key); + if (IS_ERR(state)) + return PTR_ERR(state); + + sd->active_state = state; + + return 0; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize); + +void v4l2_subdev_cleanup(struct v4l2_subdev *sd) +{ + struct v4l2_async_subdev_endpoint *ase, *ase_tmp; + + __v4l2_subdev_state_free(sd->active_state); + sd->active_state = NULL; + + /* Uninitialised sub-device, bail out here. */ + if (!sd->async_subdev_endpoint_list.next) + return; + + list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list, + async_subdev_endpoint_entry) { + list_del(&ase->async_subdev_endpoint_entry); + + kfree(ase); + } +} +EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup); + +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].format; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format); + +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].crop; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if 
(stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].crop; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop); + +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].compose; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].compose; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose); + +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON(!state)) + return NULL; + + lockdep_assert_held(state->lock); + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].interval; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].interval; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static int +v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs, + const struct v4l2_subdev_krouting *routing) +{ + struct v4l2_subdev_stream_configs new_configs = { 0 }; + struct 
v4l2_subdev_route *route; + u32 idx; + + /* Count number of formats needed */ + for_each_active_route(routing, route) { + /* + * Each route needs a format on both ends of the route. + */ + new_configs.num_configs += 2; + } + + if (new_configs.num_configs) { + new_configs.configs = kvcalloc(new_configs.num_configs, + sizeof(*new_configs.configs), + GFP_KERNEL); + + if (!new_configs.configs) + return -ENOMEM; + } + + /* + * Fill in the 'pad' and stream' value for each item in the array from + * the routing table + */ + idx = 0; + + for_each_active_route(routing, route) { + new_configs.configs[idx].pad = route->sink_pad; + new_configs.configs[idx].stream = route->sink_stream; + + idx++; + + new_configs.configs[idx].pad = route->source_pad; + new_configs.configs[idx].stream = route->source_stream; + + idx++; + } + + kvfree(stream_configs->configs); + *stream_configs = new_configs; + + return 0; +} + +int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); + if (!fmt) + return -EINVAL; + + format->format = *fmt; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt); + +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + struct v4l2_fract *interval; + + interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream); + if (!interval) + return -EINVAL; + + fi->interval = *interval; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval); + +int v4l2_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing) +{ + struct v4l2_subdev_krouting *dst = &state->routing; + const struct v4l2_subdev_krouting *src = routing; + struct v4l2_subdev_krouting new_routing = { 0 }; + size_t bytes; + int r; + + if 
(unlikely(check_mul_overflow((size_t)src->num_routes, + sizeof(*src->routes), &bytes))) + return -EOVERFLOW; + + lockdep_assert_held(state->lock); + + if (src->num_routes > 0) { + new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL); + if (!new_routing.routes) + return -ENOMEM; + } + + new_routing.num_routes = src->num_routes; + + r = v4l2_subdev_init_stream_configs(&state->stream_configs, + &new_routing); + if (r) { + kfree(new_routing.routes); + return r; + } + + kfree(dst->routes); + *dst = new_routing; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing); + +struct v4l2_subdev_route * +__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, + struct v4l2_subdev_route *route) +{ + if (route) + ++route; + else + route = &routing->routes[0]; + + for (; route < routing->routes + routing->num_routes; ++route) { + if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + return route; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route); + +int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing, + const struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + int ret; + + ret = v4l2_subdev_set_routing(sd, state, routing); + if (ret) + return ret; + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) + stream_configs->configs[i].fmt = *fmt; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt); + +int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, + u32 pad, u32 stream, u32 *other_pad, + u32 *other_stream) +{ + unsigned int i; + + for (i = 0; i < routing->num_routes; ++i) { + struct v4l2_subdev_route *route = &routing->routes[i]; + + if (route->source_pad == pad && + route->source_stream == stream) { + if (other_pad) + *other_pad = route->sink_pad; + if (other_stream) + 
*other_stream = route->sink_stream; + return 0; + } + + if (route->sink_pad == pad && route->sink_stream == stream) { + if (other_pad) + *other_pad = route->source_pad; + if (other_stream) + *other_stream = route->source_stream; + return 0; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end); + +struct v4l2_mbus_framefmt * +v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, + u32 pad, u32 stream) +{ + u32 other_pad, other_stream; + int ret; + + ret = v4l2_subdev_routing_find_opposite_end(&state->routing, + pad, stream, + &other_pad, &other_stream); + if (ret) + return NULL; + + return v4l2_subdev_state_get_format(state, other_pad, other_stream); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format); + +u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, + u32 pad0, u32 pad1, u64 *streams) +{ + const struct v4l2_subdev_krouting *routing = &state->routing; + struct v4l2_subdev_route *route; + u64 streams0 = 0; + u64 streams1 = 0; + + for_each_active_route(routing, route) { + if (route->sink_pad == pad0 && route->source_pad == pad1 && + (*streams & BIT_ULL(route->sink_stream))) { + streams0 |= BIT_ULL(route->sink_stream); + streams1 |= BIT_ULL(route->source_stream); + } + if (route->source_pad == pad0 && route->sink_pad == pad1 && + (*streams & BIT_ULL(route->source_stream))) { + streams0 |= BIT_ULL(route->source_stream); + streams1 |= BIT_ULL(route->sink_stream); + } + } + + *streams = streams0; + return streams1; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams); + +int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, + const struct v4l2_subdev_krouting *routing, + enum v4l2_subdev_routing_restriction disallow) +{ + u32 *remote_pads = NULL; + unsigned int i, j; + int ret = -EINVAL; + + if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX | + V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) { + remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), + GFP_KERNEL); 
+ if (!remote_pads) + return -ENOMEM; + + for (i = 0; i < sd->entity.num_pads; ++i) + remote_pads[i] = U32_MAX; + } + + for (i = 0; i < routing->num_routes; ++i) { + const struct v4l2_subdev_route *route = &routing->routes[i]; + + /* Validate the sink and source pad numbers. */ + if (route->sink_pad >= sd->entity.num_pads || + !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { + dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", + i, route->sink_pad); + goto out; + } + + if (route->source_pad >= sd->entity.num_pads || + !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { + dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", + i, route->source_pad); + goto out; + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a + * sink pad must be routed to a single source pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { + if (remote_pads[route->sink_pad] != U32_MAX && + remote_pads[route->sink_pad] != route->source_pad) { + dev_dbg(sd->dev, + "route %u attempts to mix %s streams\n", + i, "sink"); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a + * source pad must originate from a single sink pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) { + if (remote_pads[route->source_pad] != U32_MAX && + remote_pads[route->source_pad] != route->sink_pad) { + dev_dbg(sd->dev, + "route %u attempts to mix %s streams\n", + i, "source"); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink + * side can not do stream multiplexing, i.e. there can be only + * a single stream in a sink pad. 
+ */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) { + if (remote_pads[route->sink_pad] != U32_MAX) { + dev_dbg(sd->dev, + "route %u attempts to multiplex on %s pad %u\n", + i, "sink", route->sink_pad); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the + * source side can not do stream multiplexing, i.e. there can + * be only a single stream in a source pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) { + if (remote_pads[route->source_pad] != U32_MAX) { + dev_dbg(sd->dev, + "route %u attempts to multiplex on %s pad %u\n", + i, "source", route->source_pad); + goto out; + } + } + + if (remote_pads) { + remote_pads[route->sink_pad] = route->source_pad; + remote_pads[route->source_pad] = route->sink_pad; + } + + for (j = i + 1; j < routing->num_routes; ++j) { + const struct v4l2_subdev_route *r = &routing->routes[j]; + + /* + * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can + * originate from the same (sink) stream. + */ + if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) && + route->sink_pad == r->sink_pad && + route->sink_stream == r->sink_stream) { + dev_dbg(sd->dev, + "routes %u and %u originate from same sink (%u/%u)\n", + i, j, route->sink_pad, + route->sink_stream); + goto out; + } + + /* + * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end + * at the same (source) stream. 
+ */ + if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) && + route->source_pad == r->source_pad && + route->source_stream == r->source_stream) { + dev_dbg(sd->dev, + "routes %u and %u end at same source (%u/%u)\n", + i, j, route->source_pad, + route->source_stream); + goto out; + } + } + } + + ret = 0; + +out: + kfree(remote_pads); + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate); + +static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask, + u64 *found_streams, + u64 *enabled_streams) +{ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { + *found_streams = BIT_ULL(0); + *enabled_streams = + (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0; + dev_dbg(sd->dev, + "collect_streams: sub-device \"%s\" does not support streams\n", + sd->entity.name); + return; + } + + *found_streams = 0; + *enabled_streams = 0; + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + const struct v4l2_subdev_stream_config *cfg = + &state->stream_configs.configs[i]; + + if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream))) + continue; + + *found_streams |= BIT_ULL(cfg->stream); + if (cfg->enabled) + *enabled_streams |= BIT_ULL(cfg->stream); + } + + dev_dbg(sd->dev, + "collect_streams: \"%s\":%u: found %#llx enabled %#llx\n", + sd->entity.name, pad, *found_streams, *enabled_streams); +} + +static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask, + bool enabled) +{ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { + if (enabled) + sd->enabled_pads |= BIT_ULL(pad); + else + sd->enabled_pads &= ~BIT_ULL(pad); + return; + } + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + struct v4l2_subdev_stream_config *cfg = + &state->stream_configs.configs[i]; + + if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream))) + cfg->enabled = enabled; + } +} + +int v4l2_subdev_enable_streams(struct 
v4l2_subdev *sd, u32 pad, + u64 streams_mask) +{ + struct device *dev = sd->entity.graph_obj.mdev->dev; + struct v4l2_subdev_state *state; + bool already_streaming; + u64 enabled_streams; + u64 found_streams; + bool use_s_stream; + int ret; + + dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, + streams_mask); + + /* A few basic sanity checks first. */ + if (pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + + /* + * We use a 64-bit bitmask for tracking enabled pads, so only subdevices + * with 64 pads or less can be supported. + */ + if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) + return -EOPNOTSUPP; + + if (!streams_mask) + return 0; + + /* Fallback on .s_stream() if .enable_streams() isn't available. */ + use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams); + + if (!use_s_stream) + state = v4l2_subdev_lock_and_get_active_state(sd); + else + state = NULL; + + /* + * Verify that the requested streams exist and that they are not + * already enabled. + */ + + v4l2_subdev_collect_streams(sd, state, pad, streams_mask, + &found_streams, &enabled_streams); + + if (found_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", + streams_mask & ~found_streams, sd->entity.name, pad); + ret = -EINVAL; + goto done; + } + + if (enabled_streams) { + dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n", + enabled_streams, sd->entity.name, pad); + ret = -EALREADY; + goto done; + } + + already_streaming = v4l2_subdev_is_streaming(sd); + + if (!use_s_stream) { + /* Call the .enable_streams() operation. */ + ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad, + streams_mask); + } else { + /* Start streaming when the first pad is enabled. 
*/ + if (!already_streaming) + ret = v4l2_subdev_call(sd, video, s_stream, 1); + else + ret = 0; + } + + if (ret) { + dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad, + streams_mask, ret); + goto done; + } + + /* Mark the streams as enabled. */ + v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true); + + /* + * TODO: When all the drivers have been changed to use + * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(), + * instead of calling .s_stream() operation directly, we can remove + * the privacy LED handling from call_s_stream() and do it here + * for all cases. + */ + if (!use_s_stream && !already_streaming) + v4l2_subdev_enable_privacy_led(sd); + +done: + if (!use_s_stream) + v4l2_subdev_unlock_state(state); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams); + +int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask) +{ + struct device *dev = sd->entity.graph_obj.mdev->dev; + struct v4l2_subdev_state *state; + u64 enabled_streams; + u64 found_streams; + bool use_s_stream; + int ret; + + dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, + streams_mask); + + /* A few basic sanity checks first. */ + if (pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + + /* + * We use a 64-bit bitmask for tracking enabled pads, so only subdevices + * with 64 pads or less can be supported. + */ + if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) + return -EOPNOTSUPP; + + if (!streams_mask) + return 0; + + /* Fallback on .s_stream() if .disable_streams() isn't available. */ + use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams); + + if (!use_s_stream) + state = v4l2_subdev_lock_and_get_active_state(sd); + else + state = NULL; + + /* + * Verify that the requested streams exist and that they are not + * already disabled. 
+ */ + + v4l2_subdev_collect_streams(sd, state, pad, streams_mask, + &found_streams, &enabled_streams); + + if (found_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", + streams_mask & ~found_streams, sd->entity.name, pad); + ret = -EINVAL; + goto done; + } + + if (enabled_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n", + streams_mask & ~enabled_streams, sd->entity.name, pad); + ret = -EALREADY; + goto done; + } + + if (!use_s_stream) { + /* Call the .disable_streams() operation. */ + ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad, + streams_mask); + } else { + /* Stop streaming when the last streams are disabled. */ + + if (!(sd->enabled_pads & ~BIT_ULL(pad))) + ret = v4l2_subdev_call(sd, video, s_stream, 0); + else + ret = 0; + } + + if (ret) { + dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad, + streams_mask, ret); + goto done; + } + + v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false); + +done: + if (!use_s_stream) { + if (!v4l2_subdev_is_streaming(sd)) + v4l2_subdev_disable_privacy_led(sd); + + v4l2_subdev_unlock_state(state); + } + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams); + +int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev_route *route; + struct media_pad *pad; + u64 source_mask = 0; + int pad_index = -1; + + /* + * Find the source pad. This helper is meant for subdevs that have a + * single source pad, so failures shouldn't happen, but catch them + * loudly nonetheless as they indicate a driver bug. + */ + media_entity_for_each_pad(&sd->entity, pad) { + if (pad->flags & MEDIA_PAD_FL_SOURCE) { + pad_index = pad->index; + break; + } + } + + if (WARN_ON(pad_index == -1)) + return -EINVAL; + + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { + /* + * As there's a single source pad, just collect all the source + * streams. 
+ */ + state = v4l2_subdev_lock_and_get_active_state(sd); + + for_each_active_route(&state->routing, route) + source_mask |= BIT_ULL(route->source_stream); + + v4l2_subdev_unlock_state(state); + } else { + /* + * For non-streams subdevices, there's a single implicit stream + * per pad. + */ + source_mask = BIT_ULL(0); + } + + if (enable) + return v4l2_subdev_enable_streams(sd, pad_index, source_mask); + else + return v4l2_subdev_disable_streams(sd, pad_index, source_mask); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper); + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) +{ + INIT_LIST_HEAD(&sd->list); + BUG_ON(!ops); + sd->ops = ops; + sd->v4l2_dev = NULL; + sd->flags = 0; + sd->name[0] = '\0'; + sd->grp_id = 0; + sd->dev_priv = NULL; + sd->host_priv = NULL; + sd->privacy_led = NULL; + INIT_LIST_HEAD(&sd->async_subdev_endpoint_list); +#if defined(CONFIG_MEDIA_CONTROLLER) + sd->entity.name = sd->name; + sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; + sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; +#endif +} +EXPORT_SYMBOL(v4l2_subdev_init); + +void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev) +{ + v4l2_event_queue(sd->devnode, ev); + v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event); + +bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd) +{ + struct v4l2_subdev_state *state; + + if (!v4l2_subdev_has_op(sd, pad, enable_streams)) + return sd->s_stream_enabled; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return !!sd->enabled_pads; + + state = v4l2_subdev_get_locked_active_state(sd); + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + const struct v4l2_subdev_stream_config *cfg; + + cfg = &state->stream_configs.configs[i]; + + if (cfg->enabled) + return true; + } + + return false; +} 
+EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); + +int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + sd->privacy_led = led_get(sd->dev, "privacy-led"); + if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) + return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), + "getting privacy LED\n"); + + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + mutex_lock(&sd->privacy_led->led_access); + led_sysfs_disable(sd->privacy_led); + led_trigger_remove(sd->privacy_led); + led_set_brightness(sd->privacy_led, 0); + mutex_unlock(&sd->privacy_led->led_access); + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led); + +void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + mutex_lock(&sd->privacy_led->led_access); + led_sysfs_enable(sd->privacy_led); + mutex_unlock(&sd->privacy_led->led_access); + led_put(sd->privacy_led); + } +#endif +} +EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-trace.c b/6.17.0/drivers/media/v4l2-core/v4l2-trace.c new file mode 100644 index 0000000..95f3b02 --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf); diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-vp9.c b/6.17.0/drivers/media/v4l2-core/v4l2-vp9.c new file mode 100644 index 0000000..859589f --- /dev/null +++ b/6.17.0/drivers/media/v4l2-core/v4l2-vp9.c @@ -0,0 +1,1850 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * V4L2 VP9 helpers. + * + * Copyright (C) 2021 Collabora, Ltd. 
+ * + * Author: Andrzej Pietrasiewicz + */ + +#include + +#include + +const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = { + { + /* above = dc */ + { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */ + { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */ + { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */ + { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */ + { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/ + { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/ + { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/ + { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/ + { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */ + { 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */ + }, { /* above = v */ + { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */ + { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */ + { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */ + { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */ + { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/ + { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/ + { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/ + { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/ + { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */ + { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */ + }, { /* above = h */ + { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */ + { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */ + { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */ + { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */ + { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/ + { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/ + { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/ + { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/ + { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */ + { 38, 72, 19, 168, 203, 212, 50, 50, 
107 }, /*left = tm */ + }, { /* above = d45 */ + { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */ + { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */ + { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */ + { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */ + { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/ + { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/ + { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/ + { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/ + { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */ + { 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */ + }, { /* above = d135 */ + { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */ + { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */ + { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */ + { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */ + { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/ + { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/ + { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/ + { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/ + { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */ + { 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */ + }, { /* above = d117 */ + { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */ + { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */ + { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */ + { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */ + { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/ + { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/ + { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/ + { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/ + { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */ + { 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */ + }, { /* above = d153 */ + { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc 
*/ + { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */ + { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */ + { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */ + { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/ + { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/ + { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/ + { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/ + { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */ + { 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */ + }, { /* above = d207 */ + { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */ + { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */ + { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */ + { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */ + { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/ + { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/ + { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/ + { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/ + { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */ + { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */ + }, { /* above = d63 */ + { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */ + { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */ + { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */ + { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */ + { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/ + { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/ + { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/ + { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/ + { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */ + { 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */ + }, { /* above = tm */ + { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */ + { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */ + { 39, 38, 21, 184, 227, 206, 42, 32, 
64 }, /*left = h */ + { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */ + { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/ + { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/ + { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/ + { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/ + { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */ + { 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */ + } +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob); + +const u8 v4l2_vp9_kf_partition_probs[16][3] = { + /* 8x8 -> 4x4 */ + { 158, 97, 94 }, /* a/l both not split */ + { 93, 24, 99 }, /* a split, l not split */ + { 85, 119, 44 }, /* l split, a not split */ + { 62, 59, 67 }, /* a/l both split */ + /* 16x16 -> 8x8 */ + { 149, 53, 53 }, /* a/l both not split */ + { 94, 20, 48 }, /* a split, l not split */ + { 83, 53, 24 }, /* l split, a not split */ + { 52, 18, 18 }, /* a/l both split */ + /* 32x32 -> 16x16 */ + { 150, 40, 39 }, /* a/l both not split */ + { 78, 12, 26 }, /* a split, l not split */ + { 67, 33, 11 }, /* l split, a not split */ + { 24, 7, 5 }, /* a/l both split */ + /* 64x64 -> 32x32 */ + { 174, 35, 49 }, /* a/l both not split */ + { 68, 11, 27 }, /* a split, l not split */ + { 57, 15, 9 }, /* l split, a not split */ + { 12, 3, 3 }, /* a/l both split */ +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs); + +const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = { + { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */ + { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */ + { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */ + { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */ + { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */ + { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */ + { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */ + { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */ + { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */ + { 102, 19, 66, 162, 182, 
122, 35, 59, 128 } /* y = tm */ +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob); + +const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = { + .tx8 = { + { 100 }, + { 66 }, + }, + .tx16 = { + { 20, 152 }, + { 15, 101 }, + }, + .tx32 = { + { 3, 136, 37 }, + { 5, 52, 13 }, + }, + .coef = { + { /* tx = 4x4 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 195, 29, 183 }, + { 84, 49, 136 }, + { 8, 42, 71 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 31, 107, 169 }, + { 35, 99, 159 }, + { 17, 82, 140 }, + { 8, 66, 114 }, + { 2, 44, 76 }, + { 1, 19, 32 }, + }, + { /* Coeff Band 2 */ + { 40, 132, 201 }, + { 29, 114, 187 }, + { 13, 91, 157 }, + { 7, 75, 127 }, + { 3, 58, 95 }, + { 1, 28, 47 }, + }, + { /* Coeff Band 3 */ + { 69, 142, 221 }, + { 42, 122, 201 }, + { 15, 91, 159 }, + { 6, 67, 121 }, + { 1, 42, 77 }, + { 1, 17, 31 }, + }, + { /* Coeff Band 4 */ + { 102, 148, 228 }, + { 67, 117, 204 }, + { 17, 82, 154 }, + { 6, 59, 114 }, + { 2, 39, 75 }, + { 1, 15, 29 }, + }, + { /* Coeff Band 5 */ + { 156, 57, 233 }, + { 119, 57, 212 }, + { 58, 48, 163 }, + { 29, 40, 124 }, + { 12, 30, 81 }, + { 3, 12, 31 } + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 191, 107, 226 }, + { 124, 117, 204 }, + { 25, 99, 155 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 29, 148, 210 }, + { 37, 126, 194 }, + { 8, 93, 157 }, + { 2, 68, 118 }, + { 1, 39, 69 }, + { 1, 17, 33 }, + }, + { /* Coeff Band 2 */ + { 41, 151, 213 }, + { 27, 123, 193 }, + { 3, 82, 144 }, + { 1, 58, 105 }, + { 1, 32, 60 }, + { 1, 13, 26 }, + }, + { /* Coeff Band 3 */ + { 59, 159, 220 }, + { 23, 126, 198 }, + { 4, 88, 151 }, + { 1, 66, 114 }, + { 1, 38, 71 }, + { 1, 18, 34 }, + }, + { /* Coeff Band 4 */ + { 114, 136, 232 }, + { 51, 114, 207 }, + { 11, 83, 155 }, + { 3, 56, 105 }, + { 1, 33, 65 }, + { 1, 17, 34 }, + }, + { /* Coeff Band 5 */ + { 149, 65, 234 }, + { 121, 57, 215 }, + { 61, 49, 166 }, + { 28, 36, 114 }, + { 12, 
25, 76 }, + { 3, 16, 42 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 214, 49, 220 }, + { 132, 63, 188 }, + { 42, 65, 137 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 137, 221 }, + { 104, 131, 216 }, + { 49, 111, 192 }, + { 21, 87, 155 }, + { 2, 49, 87 }, + { 1, 16, 28 }, + }, + { /* Coeff Band 2 */ + { 89, 163, 230 }, + { 90, 137, 220 }, + { 29, 100, 183 }, + { 10, 70, 135 }, + { 2, 42, 81 }, + { 1, 17, 33 }, + }, + { /* Coeff Band 3 */ + { 108, 167, 237 }, + { 55, 133, 222 }, + { 15, 97, 179 }, + { 4, 72, 135 }, + { 1, 45, 85 }, + { 1, 19, 38 }, + }, + { /* Coeff Band 4 */ + { 124, 146, 240 }, + { 66, 124, 224 }, + { 17, 88, 175 }, + { 4, 58, 122 }, + { 1, 36, 75 }, + { 1, 18, 37 }, + }, + { /* Coeff Band 5 */ + { 141, 79, 241 }, + { 126, 70, 227 }, + { 66, 58, 182 }, + { 30, 44, 136 }, + { 12, 34, 96 }, + { 2, 20, 47 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 229, 99, 249 }, + { 143, 111, 235 }, + { 46, 109, 192 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 82, 158, 236 }, + { 94, 146, 224 }, + { 25, 117, 191 }, + { 9, 87, 149 }, + { 3, 56, 99 }, + { 1, 33, 57 }, + }, + { /* Coeff Band 2 */ + { 83, 167, 237 }, + { 68, 145, 222 }, + { 10, 103, 177 }, + { 2, 72, 131 }, + { 1, 41, 79 }, + { 1, 20, 39 }, + }, + { /* Coeff Band 3 */ + { 99, 167, 239 }, + { 47, 141, 224 }, + { 10, 104, 178 }, + { 2, 73, 133 }, + { 1, 44, 85 }, + { 1, 22, 47 }, + }, + { /* Coeff Band 4 */ + { 127, 145, 243 }, + { 71, 129, 228 }, + { 17, 93, 177 }, + { 3, 61, 124 }, + { 1, 41, 84 }, + { 1, 21, 52 }, + }, + { /* Coeff Band 5 */ + { 157, 78, 244 }, + { 140, 72, 231 }, + { 69, 58, 184 }, + { 31, 44, 137 }, + { 14, 38, 105 }, + { 8, 23, 61 }, + }, + }, + }, + }, + { /* tx = 8x8 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 125, 34, 187 }, + { 52, 41, 133 }, + { 6, 31, 56 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ 
+ { 37, 109, 153 }, + { 51, 102, 147 }, + { 23, 87, 128 }, + { 8, 67, 101 }, + { 1, 41, 63 }, + { 1, 19, 29 }, + }, + { /* Coeff Band 2 */ + { 31, 154, 185 }, + { 17, 127, 175 }, + { 6, 96, 145 }, + { 2, 73, 114 }, + { 1, 51, 82 }, + { 1, 28, 45 }, + }, + { /* Coeff Band 3 */ + { 23, 163, 200 }, + { 10, 131, 185 }, + { 2, 93, 148 }, + { 1, 67, 111 }, + { 1, 41, 69 }, + { 1, 14, 24 }, + }, + { /* Coeff Band 4 */ + { 29, 176, 217 }, + { 12, 145, 201 }, + { 3, 101, 156 }, + { 1, 69, 111 }, + { 1, 39, 63 }, + { 1, 14, 23 }, + }, + { /* Coeff Band 5 */ + { 57, 192, 233 }, + { 25, 154, 215 }, + { 6, 109, 167 }, + { 3, 78, 118 }, + { 1, 48, 69 }, + { 1, 21, 29 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 202, 105, 245 }, + { 108, 106, 216 }, + { 18, 90, 144 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 33, 172, 219 }, + { 64, 149, 206 }, + { 14, 117, 177 }, + { 5, 90, 141 }, + { 2, 61, 95 }, + { 1, 37, 57 }, + }, + { /* Coeff Band 2 */ + { 33, 179, 220 }, + { 11, 140, 198 }, + { 1, 89, 148 }, + { 1, 60, 104 }, + { 1, 33, 57 }, + { 1, 12, 21 }, + }, + { /* Coeff Band 3 */ + { 30, 181, 221 }, + { 8, 141, 198 }, + { 1, 87, 145 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 20 }, + }, + { /* Coeff Band 4 */ + { 32, 186, 224 }, + { 7, 142, 198 }, + { 1, 86, 143 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 22 }, + }, + { /* Coeff Band 5 */ + { 57, 192, 227 }, + { 20, 143, 204 }, + { 3, 96, 154 }, + { 1, 68, 112 }, + { 1, 42, 69 }, + { 1, 19, 32 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 212, 35, 215 }, + { 113, 47, 169 }, + { 29, 48, 105 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 74, 129, 203 }, + { 106, 120, 203 }, + { 49, 107, 178 }, + { 19, 84, 144 }, + { 4, 50, 84 }, + { 1, 15, 25 }, + }, + { /* Coeff Band 2 */ + { 71, 172, 217 }, + { 44, 141, 209 }, + { 15, 102, 173 }, + { 6, 76, 133 }, + { 2, 51, 89 }, + { 1, 24, 42 }, + }, + { /* Coeff Band 3 
*/ + { 64, 185, 231 }, + { 31, 148, 216 }, + { 8, 103, 175 }, + { 3, 74, 131 }, + { 1, 46, 81 }, + { 1, 18, 30 }, + }, + { /* Coeff Band 4 */ + { 65, 196, 235 }, + { 25, 157, 221 }, + { 5, 105, 174 }, + { 1, 67, 120 }, + { 1, 38, 69 }, + { 1, 15, 30 }, + }, + { /* Coeff Band 5 */ + { 65, 204, 238 }, + { 30, 156, 224 }, + { 7, 107, 177 }, + { 2, 70, 124 }, + { 1, 42, 73 }, + { 1, 18, 34 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 225, 86, 251 }, + { 144, 104, 235 }, + { 42, 99, 181 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 175, 239 }, + { 112, 165, 229 }, + { 29, 136, 200 }, + { 12, 103, 162 }, + { 6, 77, 123 }, + { 2, 53, 84 }, + }, + { /* Coeff Band 2 */ + { 75, 183, 239 }, + { 30, 155, 221 }, + { 3, 106, 171 }, + { 1, 74, 128 }, + { 1, 44, 76 }, + { 1, 17, 28 }, + }, + { /* Coeff Band 3 */ + { 73, 185, 240 }, + { 27, 159, 222 }, + { 2, 107, 172 }, + { 1, 75, 127 }, + { 1, 42, 73 }, + { 1, 17, 29 }, + }, + { /* Coeff Band 4 */ + { 62, 190, 238 }, + { 21, 159, 222 }, + { 2, 107, 172 }, + { 1, 72, 122 }, + { 1, 40, 71 }, + { 1, 18, 32 }, + }, + { /* Coeff Band 5 */ + { 61, 199, 240 }, + { 27, 161, 226 }, + { 4, 113, 180 }, + { 1, 76, 129 }, + { 1, 46, 80 }, + { 1, 23, 41 }, + }, + }, + }, + }, + { /* tx = 16x16 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 7, 27, 153 }, + { 5, 30, 95 }, + { 1, 16, 30 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 50, 75, 127 }, + { 57, 75, 124 }, + { 27, 67, 108 }, + { 10, 54, 86 }, + { 1, 33, 52 }, + { 1, 12, 18 }, + }, + { /* Coeff Band 2 */ + { 43, 125, 151 }, + { 26, 108, 148 }, + { 7, 83, 122 }, + { 2, 59, 89 }, + { 1, 38, 60 }, + { 1, 17, 27 }, + }, + { /* Coeff Band 3 */ + { 23, 144, 163 }, + { 13, 112, 154 }, + { 2, 75, 117 }, + { 1, 50, 81 }, + { 1, 31, 51 }, + { 1, 14, 23 }, + }, + { /* Coeff Band 4 */ + { 18, 162, 185 }, + { 6, 123, 171 }, + { 1, 78, 125 }, + { 1, 51, 86 }, + { 1, 31, 54 }, + { 1, 14, 23 }, + }, + 
{ /* Coeff Band 5 */ + { 15, 199, 227 }, + { 3, 150, 204 }, + { 1, 91, 146 }, + { 1, 55, 95 }, + { 1, 30, 53 }, + { 1, 11, 20 }, + } + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 19, 55, 240 }, + { 19, 59, 196 }, + { 3, 52, 105 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 41, 166, 207 }, + { 104, 153, 199 }, + { 31, 123, 181 }, + { 14, 101, 152 }, + { 5, 72, 106 }, + { 1, 36, 52 }, + }, + { /* Coeff Band 2 */ + { 35, 176, 211 }, + { 12, 131, 190 }, + { 2, 88, 144 }, + { 1, 60, 101 }, + { 1, 36, 60 }, + { 1, 16, 28 }, + }, + { /* Coeff Band 3 */ + { 28, 183, 213 }, + { 8, 134, 191 }, + { 1, 86, 142 }, + { 1, 56, 96 }, + { 1, 30, 53 }, + { 1, 12, 20 }, + }, + { /* Coeff Band 4 */ + { 20, 190, 215 }, + { 4, 135, 192 }, + { 1, 84, 139 }, + { 1, 53, 91 }, + { 1, 28, 49 }, + { 1, 11, 20 }, + }, + { /* Coeff Band 5 */ + { 13, 196, 216 }, + { 2, 137, 192 }, + { 1, 86, 143 }, + { 1, 57, 99 }, + { 1, 32, 56 }, + { 1, 13, 24 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 211, 29, 217 }, + { 96, 47, 156 }, + { 22, 43, 87 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 78, 120, 193 }, + { 111, 116, 186 }, + { 46, 102, 164 }, + { 15, 80, 128 }, + { 2, 49, 76 }, + { 1, 18, 28 }, + }, + { /* Coeff Band 2 */ + { 71, 161, 203 }, + { 42, 132, 192 }, + { 10, 98, 150 }, + { 3, 69, 109 }, + { 1, 44, 70 }, + { 1, 18, 29 }, + }, + { /* Coeff Band 3 */ + { 57, 186, 211 }, + { 30, 140, 196 }, + { 4, 93, 146 }, + { 1, 62, 102 }, + { 1, 38, 65 }, + { 1, 16, 27 }, + }, + { /* Coeff Band 4 */ + { 47, 199, 217 }, + { 14, 145, 196 }, + { 1, 88, 142 }, + { 1, 57, 98 }, + { 1, 36, 62 }, + { 1, 15, 26 }, + }, + { /* Coeff Band 5 */ + { 26, 219, 229 }, + { 5, 155, 207 }, + { 1, 94, 151 }, + { 1, 60, 104 }, + { 1, 36, 62 }, + { 1, 16, 28 }, + } + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 233, 29, 248 }, + { 146, 47, 220 }, + { 43, 52, 140 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + 
{ /* Coeff Band 1 */ + { 100, 163, 232 }, + { 179, 161, 222 }, + { 63, 142, 204 }, + { 37, 113, 174 }, + { 26, 89, 137 }, + { 18, 68, 97 }, + }, + { /* Coeff Band 2 */ + { 85, 181, 230 }, + { 32, 146, 209 }, + { 7, 100, 164 }, + { 3, 71, 121 }, + { 1, 45, 77 }, + { 1, 18, 30 }, + }, + { /* Coeff Band 3 */ + { 65, 187, 230 }, + { 20, 148, 207 }, + { 2, 97, 159 }, + { 1, 68, 116 }, + { 1, 40, 70 }, + { 1, 14, 29 }, + }, + { /* Coeff Band 4 */ + { 40, 194, 227 }, + { 8, 147, 204 }, + { 1, 94, 155 }, + { 1, 65, 112 }, + { 1, 39, 66 }, + { 1, 14, 26 }, + }, + { /* Coeff Band 5 */ + { 16, 208, 228 }, + { 3, 151, 207 }, + { 1, 98, 160 }, + { 1, 67, 117 }, + { 1, 41, 74 }, + { 1, 17, 31 }, + }, + }, + }, + }, + { /* tx = 32x32 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 17, 38, 140 }, + { 7, 34, 80 }, + { 1, 17, 29 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 37, 75, 128 }, + { 41, 76, 128 }, + { 26, 66, 116 }, + { 12, 52, 94 }, + { 2, 32, 55 }, + { 1, 10, 16 }, + }, + { /* Coeff Band 2 */ + { 50, 127, 154 }, + { 37, 109, 152 }, + { 16, 82, 121 }, + { 5, 59, 85 }, + { 1, 35, 54 }, + { 1, 13, 20 }, + }, + { /* Coeff Band 3 */ + { 40, 142, 167 }, + { 17, 110, 157 }, + { 2, 71, 112 }, + { 1, 44, 72 }, + { 1, 27, 45 }, + { 1, 11, 17 }, + }, + { /* Coeff Band 4 */ + { 30, 175, 188 }, + { 9, 124, 169 }, + { 1, 74, 116 }, + { 1, 48, 78 }, + { 1, 30, 49 }, + { 1, 11, 18 }, + }, + { /* Coeff Band 5 */ + { 10, 222, 223 }, + { 2, 150, 194 }, + { 1, 83, 128 }, + { 1, 48, 79 }, + { 1, 27, 45 }, + { 1, 11, 17 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 36, 41, 235 }, + { 29, 36, 193 }, + { 10, 27, 111 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 165, 222 }, + { 177, 162, 215 }, + { 110, 135, 195 }, + { 57, 113, 168 }, + { 23, 83, 120 }, + { 10, 49, 61 }, + }, + { /* Coeff Band 2 */ + { 85, 190, 223 }, + { 36, 139, 200 }, + { 5, 90, 146 }, + { 1, 60, 103 }, + { 1, 38, 65 }, + { 
1, 18, 30 }, + }, + { /* Coeff Band 3 */ + { 72, 202, 223 }, + { 23, 141, 199 }, + { 2, 86, 140 }, + { 1, 56, 97 }, + { 1, 36, 61 }, + { 1, 16, 27 }, + }, + { /* Coeff Band 4 */ + { 55, 218, 225 }, + { 13, 145, 200 }, + { 1, 86, 141 }, + { 1, 57, 99 }, + { 1, 35, 61 }, + { 1, 13, 22 }, + }, + { /* Coeff Band 5 */ + { 15, 235, 212 }, + { 1, 132, 184 }, + { 1, 84, 139 }, + { 1, 57, 97 }, + { 1, 34, 56 }, + { 1, 14, 23 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 181, 21, 201 }, + { 61, 37, 123 }, + { 10, 38, 71 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 47, 106, 172 }, + { 95, 104, 173 }, + { 42, 93, 159 }, + { 18, 77, 131 }, + { 4, 50, 81 }, + { 1, 17, 23 }, + }, + { /* Coeff Band 2 */ + { 62, 147, 199 }, + { 44, 130, 189 }, + { 28, 102, 154 }, + { 18, 75, 115 }, + { 2, 44, 65 }, + { 1, 12, 19 }, + }, + { /* Coeff Band 3 */ + { 55, 153, 210 }, + { 24, 130, 194 }, + { 3, 93, 146 }, + { 1, 61, 97 }, + { 1, 31, 50 }, + { 1, 10, 16 }, + }, + { /* Coeff Band 4 */ + { 49, 186, 223 }, + { 17, 148, 204 }, + { 1, 96, 142 }, + { 1, 53, 83 }, + { 1, 26, 44 }, + { 1, 11, 17 }, + }, + { /* Coeff Band 5 */ + { 13, 217, 212 }, + { 2, 136, 180 }, + { 1, 78, 124 }, + { 1, 50, 83 }, + { 1, 29, 49 }, + { 1, 14, 23 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 197, 13, 247 }, + { 82, 17, 222 }, + { 25, 17, 162 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 126, 186, 247 }, + { 234, 191, 243 }, + { 176, 177, 234 }, + { 104, 158, 220 }, + { 66, 128, 186 }, + { 55, 90, 137 }, + }, + { /* Coeff Band 2 */ + { 111, 197, 242 }, + { 46, 158, 219 }, + { 9, 104, 171 }, + { 2, 65, 125 }, + { 1, 44, 80 }, + { 1, 17, 91 }, + }, + { /* Coeff Band 3 */ + { 104, 208, 245 }, + { 39, 168, 224 }, + { 3, 109, 162 }, + { 1, 79, 124 }, + { 1, 50, 102 }, + { 1, 43, 102 }, + }, + { /* Coeff Band 4 */ + { 84, 220, 246 }, + { 31, 177, 231 }, + { 2, 115, 180 }, + { 1, 79, 134 }, + { 1, 55, 77 }, 
+ { 1, 60, 79 }, + }, + { /* Coeff Band 5 */ + { 43, 243, 240 }, + { 8, 180, 217 }, + { 1, 115, 166 }, + { 1, 84, 121 }, + { 1, 51, 67 }, + { 1, 16, 6 }, + }, + }, + }, + }, + }, + + .skip = { 192, 128, 64 }, + .inter_mode = { + { 2, 173, 34 }, + { 7, 145, 85 }, + { 7, 166, 63 }, + { 7, 94, 66 }, + { 8, 64, 46 }, + { 17, 81, 31 }, + { 25, 29, 30 }, + }, + .interp_filter = { + { 235, 162 }, + { 36, 255 }, + { 34, 3 }, + { 149, 144 }, + }, + .is_inter = { 9, 102, 187, 225 }, + .comp_mode = { 239, 183, 119, 96, 41 }, + .single_ref = { + { 33, 16 }, + { 77, 74 }, + { 142, 142 }, + { 172, 170 }, + { 238, 247 }, + }, + .comp_ref = { 50, 126, 123, 221, 226 }, + .y_mode = { + { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, + { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, + { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, + { 221, 135, 38, 194, 248, 121, 96, 85, 29 }, + }, + .uv_mode = { + { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */, + { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */, + { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */, + { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */, + { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */, + { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */, + { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */, + { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */, + { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */, + { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */ + }, + .partition = { + /* 8x8 -> 4x4 */ + { 199, 122, 141 } /* a/l both not split */, + { 147, 63, 159 } /* a split, l not split */, + { 148, 133, 118 } /* l split, a not split */, + { 121, 104, 114 } /* a/l both split */, + /* 16x16 -> 8x8 */ + { 174, 73, 87 } /* a/l both not split */, + { 92, 41, 83 } /* a split, l not split */, + { 82, 99, 50 } /* l split, a not split */, + { 53, 39, 39 } /* a/l both split */, + /* 32x32 -> 16x16 */ + { 177, 58, 59 } /* a/l both not split */, + { 68, 26, 63 } /* a split, l not split */, + 
{ 52, 79, 25 } /* l split, a not split */, + { 17, 14, 12 } /* a/l both split */, + /* 64x64 -> 32x32 */ + { 222, 34, 30 } /* a/l both not split */, + { 72, 16, 44 } /* a split, l not split */, + { 58, 32, 12 } /* l split, a not split */, + { 10, 7, 6 } /* a/l both split */, + }, + + .mv = { + .joint = { 32, 64, 96 }, + .sign = { 128, 128 }, + .classes = { + { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, + { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 }, + }, + .class0_bit = { 216, 208 }, + .bits = { + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, + }, + .class0_fr = { + { + { 128, 128, 64 }, + { 96, 112, 64 }, + }, + { + { 128, 128, 64 }, + { 96, 112, 64 }, + }, + }, + .fr = { + { 64, 96, 64 }, + { 64, 96, 64 }, + }, + .class0_hp = { 160, 160 }, + .hp = { 128, 128 }, + }, +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs); + +static u32 fastdiv(u32 dividend, u16 divisor) +{ +#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d))) +#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \ + DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \ + DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \ + DIV_INV(d8), DIV_INV(d9) + + static const u32 inv[] = { + DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5), + DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9), + DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19), + DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29), + DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39), + DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49), + DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59), + DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69), + DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79), + DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89), + DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99), + DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109), + DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119), + DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 
129), + DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139), + DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149), + DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159), + DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169), + DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179), + DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189), + DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199), + DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209), + DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219), + DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229), + DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239), + DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249), + DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253), + DIV_INV(254), DIV_INV(255), DIV_INV(256), + }; + + if (divisor == 0) + return 0; + else if (divisor == 1) + return dividend; + + if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv))) + return dividend; + + return ((u64)dividend * inv[divisor - 2]) >> 32; +} + +/* 6.3.6 inv_recenter_nonneg(v, m) */ +static int inv_recenter_nonneg(int v, int m) +{ + if (v > 2 * m) + return v; + + if (v & 1) + return m - ((v + 1) >> 1); + + return m + (v >> 1); +} + +/* + * part of 6.3.5 inv_remap_prob(deltaProb, prob) + * delta = inv_map_table[deltaProb] done by userspace + */ +static int update_prob(int delta, int prob) +{ + if (!delta) + return prob; + + return prob <= 128 ? 
+ 1 + inv_recenter_nonneg(delta, prob - 1) : + 255 - inv_recenter_nonneg(delta, 255 - prob); +} + +/* Counterpart to 6.3.2 tx_mode_probs() */ +static void update_tx_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { + u8 *p8x8 = probs->tx8[i]; + u8 *p16x16 = probs->tx16[i]; + u8 *p32x32 = probs->tx32[i]; + const u8 *d8x8 = deltas->tx8[i]; + const u8 *d16x16 = deltas->tx16[i]; + const u8 *d32x32 = deltas->tx32[i]; + + p8x8[0] = update_prob(d8x8[0], p8x8[0]); + p16x16[0] = update_prob(d16x16[0], p16x16[0]); + p16x16[1] = update_prob(d16x16[1], p16x16[1]); + p32x32[0] = update_prob(d32x32[0], p32x32[0]); + p32x32[1] = update_prob(d32x32[1], p32x32[1]); + p32x32[2] = update_prob(d32x32[2], p32x32[2]); + } +} + +#define BAND_6(band) ((band) == 0 ? 3 : 6) + +static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3]) +{ + int l, m, n; + + for (l = 0; l < 6; l++) + for (m = 0; m < BAND_6(l); m++) { + u8 *p = probs[l][m]; + const u8 *d = deltas[l][m]; + + for (n = 0; n < 3; n++) + p[n] = update_prob(d[n], p[n]); + } +} + +/* Counterpart to 6.3.7 read_coef_probs() */ +static void update_coef_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + int i, j, k; + + for (i = 0; i < ARRAY_SIZE(probs->coef); i++) { + for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) + for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) + update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]); + + if (deltas->tx_mode == i) + break; + } +} + +/* Counterpart to 6.3.8 read_skip_prob() */ +static void update_skip_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->skip); i++) + probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]); +} + +/* Counterpart to 6.3.9 read_inter_mode_probs() */ 
+static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) { + u8 *p = probs->inter_mode[i]; + const u8 *d = deltas->inter_mode[i]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + p[2] = update_prob(d[2], p[2]); + } +} + +/* Counterpart to 6.3.10 read_interp_filter_probs() */ +static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) { + u8 *p = probs->interp_filter[i]; + const u8 *d = deltas->interp_filter[i]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + } +} + +/* Counterpart to 6.3.11 read_is_inter_probs() */ +static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) + probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]); +} + +/* 6.3.12 frame_reference_mode() done entirely in userspace */ + +/* Counterpart to 6.3.13 frame_reference_mode_probs() */ +static void +update_frame_reference_mode_probs(unsigned int reference_mode, + struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT) + for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) + probs->comp_mode[i] = update_prob(deltas->comp_mode[i], + probs->comp_mode[i]); + + if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE) + for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) { + u8 *p = probs->single_ref[i]; + const u8 *d = deltas->single_ref[i]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + } + + if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE) + for (i = 0; i < 
ARRAY_SIZE(probs->comp_ref); i++) + probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]); +} + +/* Counterpart to 6.3.14 read_y_mode_probs() */ +static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) + for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j) + probs->y_mode[i][j] = + update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]); +} + +/* Counterpart to 6.3.15 read_partition_probs() */ +static void update_partition_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i, j; + + for (i = 0; i < 4; i++) + for (j = 0; j < 4; j++) { + u8 *p = probs->partition[i * 4 + j]; + const u8 *d = deltas->partition[i * 4 + j]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + p[2] = update_prob(d[2], p[2]); + } +} + +static inline int update_mv_prob(int delta, int prob) +{ + if (!delta) + return prob; + + return delta; +} + +/* Counterpart to 6.3.16 mv_probs() */ +static void update_mv_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + u8 *p = probs->mv.joint; + const u8 *d = deltas->mv.joint; + unsigned int i, j; + + p[0] = update_mv_prob(d[0], p[0]); + p[1] = update_mv_prob(d[1], p[1]); + p[2] = update_mv_prob(d[2], p[2]); + + for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { + p = probs->mv.sign; + d = deltas->mv.sign; + p[i] = update_mv_prob(d[i], p[i]); + + p = probs->mv.classes[i]; + d = deltas->mv.classes[i]; + for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++) + p[j] = update_mv_prob(d[j], p[j]); + + p = probs->mv.class0_bit; + d = deltas->mv.class0_bit; + p[i] = update_mv_prob(d[i], p[i]); + + p = probs->mv.bits[i]; + d = deltas->mv.bits[i]; + for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) + p[j] = update_mv_prob(d[j], p[j]); + + for 
(j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) { + p = probs->mv.class0_fr[i][j]; + d = deltas->mv.class0_fr[i][j]; + + p[0] = update_mv_prob(d[0], p[0]); + p[1] = update_mv_prob(d[1], p[1]); + p[2] = update_mv_prob(d[2], p[2]); + } + + p = probs->mv.fr[i]; + d = deltas->mv.fr[i]; + for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++) + p[j] = update_mv_prob(d[j], p[j]); + + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) { + p = probs->mv.class0_hp; + d = deltas->mv.class0_hp; + p[i] = update_mv_prob(d[i], p[i]); + + p = probs->mv.hp; + d = deltas->mv.hp; + p[i] = update_mv_prob(d[i], p[i]); + } + } +} + +/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */ +void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT) + update_tx_probs(probs, deltas); + + update_coef_probs(probs, deltas, dec_params); + + update_skip_probs(probs, deltas); + + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || + dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) + return; + + update_inter_mode_probs(probs, deltas); + + if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE) + update_interp_filter_probs(probs, deltas); + + update_is_inter_probs(probs, deltas); + + update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas); + + update_y_mode_probs(probs, deltas); + + update_partition_probs(probs, deltas); + + update_mv_probs(probs, deltas, dec_params); +} +EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs); + +u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params, + struct v4l2_vp9_frame_context *frame_context) +{ + int i; + + u8 fctx_idx = dec_params->frame_context_idx; + + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || + dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY || + dec_params->flags & 
V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) { + /* + * setup_past_independence() + * We do nothing here. Instead of storing default probs in some intermediate + * location and then copying from that location to appropriate contexts + * in save_probs() below, we skip that step and save default probs directly + * to appropriate contexts. + */ + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || + dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT || + dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL) + for (i = 0; i < 4; ++i) + /* save_probs(i) */ + memcpy(&frame_context[i], &v4l2_vp9_default_probs, + sizeof(v4l2_vp9_default_probs)); + else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC) + /* save_probs(fctx_idx) */ + memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs, + sizeof(v4l2_vp9_default_probs)); + fctx_idx = 0; + } + + return fctx_idx; +} +EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx); + +/* 8.4.1 Merge prob process */ +static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor) +{ + u32 den, prob, count, factor; + + den = ct0 + ct1; + if (!den) { + /* + * prob = 128, count = 0, update_factor = 0 + * Round2's argument: pre_prob * 256 + * (pre_prob * 256 + 128) >> 8 == pre_prob + */ + return pre_prob; + } + + prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255); + count = min_t(u32, den, count_sat); + factor = fastdiv(max_update_factor * count, count_sat); + + /* + * Round2(pre_prob * (256 - factor) + prob * factor, 8) + * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8) + * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8) + */ + return pre_prob + (((prob - pre_prob) * factor + 128) >> 8); +} + +static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1) +{ + return merge_prob(pre_prob, ct0, ct1, 20, 128); +} + +/* 8.4.2 Merge probs process */ +/* + * merge_probs() is a recursive function in the spec. We avoid recursion in the kernel. 
+ * That said, the "tree" parameter of merge_probs() controls how deep the recursion goes. + * It turns out that in all cases the recursive calls boil down to a short-ish series + * of merge_prob() invocations (note no "s"). + * + * Variant A + * --------- + * merge_probs(small_token_tree, 2): + * merge_prob(p[1], c[0], c[1] + c[2]) + * merge_prob(p[2], c[1], c[2]) + * + * Variant B + * --------- + * merge_probs(binary_tree, 0) or + * merge_probs(tx_size_8_tree, 0): + * merge_prob(p[0], c[0], c[1]) + * + * Variant C + * --------- + * merge_probs(inter_mode_tree, 0): + * merge_prob(p[0], c[2], c[1] + c[0] + c[3]) + * merge_prob(p[1], c[0], c[1] + c[3]) + * merge_prob(p[2], c[1], c[3]) + * + * Variant D + * --------- + * merge_probs(intra_mode_tree, 0): + * merge_prob(p[0], c[0], c[1] + ... + c[9]) + * merge_prob(p[1], c[9], c[1] + ... + c[8]) + * merge_prob(p[2], c[1], c[2] + ... + c[8]) + * merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7]) + * merge_prob(p[4], c[2], c[4] + c[5]) + * merge_prob(p[5], c[4], c[5]) + * merge_prob(p[6], c[3], c[8] + c[6] + c[7]) + * merge_prob(p[7], c[8], c[6] + c[7]) + * merge_prob(p[8], c[6], c[7]) + * + * Variant E + * --------- + * merge_probs(partition_tree, 0) or + * merge_probs(tx_size_32_tree, 0) or + * merge_probs(mv_joint_tree, 0) or + * merge_probs(mv_fr_tree, 0): + * merge_prob(p[0], c[0], c[1] + c[2] + c[3]) + * merge_prob(p[1], c[1], c[2] + c[3]) + * merge_prob(p[2], c[2], c[3]) + * + * Variant F + * --------- + * merge_probs(interp_filter_tree, 0) or + * merge_probs(tx_size_16_tree, 0): + * merge_prob(p[0], c[0], c[1] + c[2]) + * merge_prob(p[1], c[1], c[2]) + * + * Variant G + * --------- + * merge_probs(mv_class_tree, 0): + * merge_prob(p[0], c[0], c[1] + ... + c[10]) + * merge_prob(p[1], c[1], c[2] + ... + c[10]) + * merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10]) + * merge_prob(p[3], c[2], c[3]) + * merge_prob(p[4], c[4] + c[5], c[6] + ... 
+ c[10]) + * merge_prob(p[5], c[4], c[5]) + * merge_prob(p[6], c[6], c[7] + ... + c[10]) + * merge_prob(p[7], c[7] + c[8], c[9] + c[10]) + * merge_prob(p[8], c[7], c[8]) + * merge_prob(p[9], c[9], [10]) + */ + +static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor) +{ + p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor); + p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor); +} + +static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor) +{ + p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor); +} + +static inline void merge_probs_variant_c(u8 *p, const u32 *c) +{ + p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]); + p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]); + p[2] = noncoef_merge_prob(p[2], c[1], c[3]); +} + +static void merge_probs_variant_d(u8 *p, const u32 *c) +{ + u32 sum = 0, s2; + + sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9]; + + p[0] = noncoef_merge_prob(p[0], c[0], sum); + sum -= c[9]; + p[1] = noncoef_merge_prob(p[1], c[9], sum); + sum -= c[1]; + p[2] = noncoef_merge_prob(p[2], c[1], sum); + s2 = c[2] + c[4] + c[5]; + sum -= s2; + p[3] = noncoef_merge_prob(p[3], s2, sum); + s2 -= c[2]; + p[4] = noncoef_merge_prob(p[4], c[2], s2); + p[5] = noncoef_merge_prob(p[5], c[4], c[5]); + sum -= c[3]; + p[6] = noncoef_merge_prob(p[6], c[3], sum); + sum -= c[8]; + p[7] = noncoef_merge_prob(p[7], c[8], sum); + p[8] = noncoef_merge_prob(p[8], c[6], c[7]); +} + +static inline void merge_probs_variant_e(u8 *p, const u32 *c) +{ + p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]); + p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]); + p[2] = noncoef_merge_prob(p[2], c[2], c[3]); +} + +static inline void merge_probs_variant_f(u8 *p, const u32 *c) +{ + p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]); + p[1] = noncoef_merge_prob(p[1], c[1], c[2]); +} + +static void merge_probs_variant_g(u8 *p, const u32 *c) 
+{ + u32 sum; + + sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10]; + p[0] = noncoef_merge_prob(p[0], c[0], sum); + sum -= c[1]; + p[1] = noncoef_merge_prob(p[1], c[1], sum); + sum -= c[2] + c[3]; + p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum); + p[3] = noncoef_merge_prob(p[3], c[2], c[3]); + sum -= c[4] + c[5]; + p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum); + p[5] = noncoef_merge_prob(p[5], c[4], c[5]); + sum -= c[6]; + p[6] = noncoef_merge_prob(p[6], c[6], sum); + p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]); + p[8] = noncoef_merge_prob(p[8], c[7], c[8]); + p[9] = noncoef_merge_prob(p[9], c[9], c[10]); +} + +/* 8.4.3 Coefficient probability adaptation process */ +static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor) +{ + merge_probs_variant_a(p, c, 24, update_factor); +} + +static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor) +{ + merge_probs_variant_b(p, c, 24, update_factor); +} + +static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k, + struct v4l2_vp9_frame_context *probs, + const struct v4l2_vp9_frame_symbol_counts *counts, + u32 uf) +{ + s32 l, m; + + for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) { + for (m = 0; m < BAND_6(l); m++) { + u8 *p = probs->coef[i][j][k][l][m]; + const u32 counts_more_coefs[2] = { + *counts->eob[i][j][k][l][m][1], + *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1], + }; + + adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf); + adapt_probs_variant_b_coef(p, counts_more_coefs, uf); + } + } +} + +static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_vp9_frame_symbol_counts *counts, + unsigned int uf) +{ + unsigned int i, j, k; + + for (i = 0; i < ARRAY_SIZE(probs->coef); i++) + for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) + for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) + _adapt_coeff(i, j, k, probs, counts, uf); +} + 
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + bool use_128, + bool frame_is_intra) +{ + if (frame_is_intra) { + _adapt_coef_probs(probs, counts, 112); + } else { + if (use_128) + _adapt_coef_probs(probs, counts, 128); + else + _adapt_coef_probs(probs, counts, 112); + } +} +EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs); + +/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */ +static inline void adapt_probs_variant_b(u8 *p, const u32 *c) +{ + merge_probs_variant_b(p, c, 20, 128); +} + +static inline void adapt_probs_variant_c(u8 *p, const u32 *c) +{ + merge_probs_variant_c(p, c); +} + +static inline void adapt_probs_variant_d(u8 *p, const u32 *c) +{ + merge_probs_variant_d(p, c); +} + +static inline void adapt_probs_variant_e(u8 *p, const u32 *c) +{ + merge_probs_variant_e(p, c); +} + +static inline void adapt_probs_variant_f(u8 *p, const u32 *c) +{ + merge_probs_variant_f(p, c); +} + +static inline void adapt_probs_variant_g(u8 *p, const u32 *c) +{ + merge_probs_variant_g(p, c); +} + +/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */ +static inline u8 adapt_prob(u8 prob, const u32 counts[2]) +{ + return noncoef_merge_prob(prob, counts[0], counts[1]); +} + +/* 8.4.4 Non coefficient probability adaptation process */ +void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + u8 reference_mode, u8 interpolation_filter, u8 tx_mode, + u32 flags) +{ + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) + probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) + probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++) + probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]); + + if (reference_mode != 
V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE) + for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) + for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++) + probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j], + (*counts->single_ref)[i][j]); + + for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) + adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) + adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++) + adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->partition); i++) + adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->skip); i++) + probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]); + + if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE) + for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) + adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]); + + if (tx_mode == V4L2_VP9_TX_MODE_SELECT) + for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { + adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]); + adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]); + adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]); + } + + adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint); + + for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { + probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]); + + adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]); + + probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]); + + for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) + probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j], + (*counts->bits)[i][j]); + + for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) + adapt_probs_variant_e(probs->mv.class0_fr[i][j], + (*counts->class0_fp)[i][j]); + + 
adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]); + + if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV)) + continue; + + probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i], + (*counts->class0_hp)[i]); + + probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]); + } +} +EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs); + +bool +v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled, + unsigned int feature, + unsigned int segid) +{ + u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature); + + return !!(feature_enabled[segid] & mask); +} +EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V4L2 VP9 Helpers"); +MODULE_AUTHOR("Andrzej Pietrasiewicz "); diff --git a/6.17.0/include-overrides/media/cec-notifier.h b/6.17.0/include-overrides/media/cec-notifier.h new file mode 100644 index 0000000..b1c8397 --- /dev/null +++ b/6.17.0/include-overrides/media/cec-notifier.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-notifier.h - notify CEC drivers of physical address changes + * + * Copyright 2016 Russell King. + * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef LINUX_CEC_NOTIFIER_H +#define LINUX_CEC_NOTIFIER_H + +#include +#include + +struct device; +struct edid; +struct cec_adapter; +struct cec_notifier; + +#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) + +/** + * cec_notifier_conn_register - find or create a new cec_notifier for the given + * HDMI device and connector tuple. + * @hdmi_dev: HDMI device that sends the events. + * @port_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @conn_info: the connector info from which the event occurs (may be NULL) + * + * If a notifier for device @dev and connector @port_name already exists, then + * increase the refcount and return that notifier. 
+ * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info); + +/** + * cec_notifier_conn_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + */ +void cec_notifier_conn_unregister(struct cec_notifier *n); + +/** + * cec_notifier_cec_adap_register - find or create a new cec_notifier for the + * given device. + * @hdmi_dev: HDMI device that sends the events. + * @port_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @adap: the cec adapter that registered this notifier. + * + * If a notifier for device @dev and connector @port_name already exists, then + * increase the refcount and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap); + +/** + * cec_notifier_cec_adap_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + * @adap: the cec adapter that registered this notifier. + */ +void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap); + +/** + * cec_notifier_set_phys_addr - set a new physical address. + * @n: the CEC notifier + * @pa: the CEC physical address + * + * Set a new CEC physical address. + * Does nothing if @n == NULL. 
+ */ +void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa); + +/** + * cec_notifier_set_phys_addr_from_edid - set parse the PA from the EDID. + * @n: the CEC notifier + * @edid: the struct edid pointer + * + * Parses the EDID to obtain the new CEC physical address and set it. + * Does nothing if @n == NULL. + */ +void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid); + +/** + * cec_notifier_parse_hdmi_phandle - find the hdmi device from "hdmi-phandle" + * @dev: the device with the "hdmi-phandle" device tree property + * + * Returns the device pointer referenced by the "hdmi-phandle" property. + * Note that the refcount of the returned device is not incremented. + * This device pointer is only used as a key value in the notifier + * list, but it is never accessed by the CEC driver. + */ +struct device *cec_notifier_parse_hdmi_phandle(struct device *dev); + +#else + +static inline struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info) +{ + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; +} + +static inline void cec_notifier_conn_unregister(struct cec_notifier *n) +{ +} + +static inline struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap) +{ + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; +} + +static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap) +{ +} + +static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) +{ +} + +static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid) +{ +} + +static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +#endif + +/** + * 
cec_notifier_phys_addr_invalidate() - set the physical address to INVALID + * + * @n: the CEC notifier + * + * This is a simple helper function to invalidate the physical + * address. Does nothing if @n == NULL. + */ +static inline void cec_notifier_phys_addr_invalidate(struct cec_notifier *n) +{ + cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID); +} + +#endif diff --git a/6.17.0/include-overrides/media/cec-pin.h b/6.17.0/include-overrides/media/cec-pin.h new file mode 100644 index 0000000..483bc47 --- /dev/null +++ b/6.17.0/include-overrides/media/cec-pin.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-pin.h - low-level CEC pin control + * + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef LINUX_CEC_PIN_H +#define LINUX_CEC_PIN_H + +#include +#include + +/** + * struct cec_pin_ops - low-level CEC pin operations + * @read: read the CEC pin. Returns > 0 if high, 0 if low, or an error + * if negative. + * @low: drive the CEC pin low. + * @high: stop driving the CEC pin. The pull-up will drive the pin + * high, unless someone else is driving the pin low. + * @enable_irq: optional, enable the interrupt to detect pin voltage changes. + * @disable_irq: optional, disable the interrupt. + * @free: optional. Free any allocated resources. Called when the + * adapter is deleted. + * @status: optional, log status information. + * @read_hpd: optional. Read the HPD pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @read_5v: optional. Read the 5V pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @received: optional. High-level CEC message callback. Allows the driver + * to process CEC messages. + * + * These operations (except for the @received op) are used by the + * cec pin framework to manipulate the CEC pin. 
+ */ +struct cec_pin_ops { + int (*read)(struct cec_adapter *adap); + void (*low)(struct cec_adapter *adap); + void (*high)(struct cec_adapter *adap); + bool (*enable_irq)(struct cec_adapter *adap); + void (*disable_irq)(struct cec_adapter *adap); + void (*free)(struct cec_adapter *adap); + void (*status)(struct cec_adapter *adap, struct seq_file *file); + int (*read_hpd)(struct cec_adapter *adap); + int (*read_5v)(struct cec_adapter *adap); + + /* High-level CEC message callback */ + int (*received)(struct cec_adapter *adap, struct cec_msg *msg); +}; + +/** + * cec_pin_changed() - update pin state from interrupt + * + * @adap: pointer to the cec adapter + * @value: when true the pin is high, otherwise it is low + * + * If changes of the CEC voltage are detected via an interrupt, then + * cec_pin_changed is called from the interrupt with the new value. + */ +void cec_pin_changed(struct cec_adapter *adap, bool value); + +/** + * cec_pin_allocate_adapter() - allocate a pin-based cec adapter + * + * @pin_ops: low-level pin operations + * @priv: will be stored in adap->priv and can be used by the adapter ops. + * Use cec_get_drvdata(adap) to get the priv pointer. + * @name: the name of the CEC adapter. Note: this name will be copied. + * @caps: capabilities of the CEC adapter. This will be ORed with + * CEC_CAP_MONITOR_ALL and CEC_CAP_MONITOR_PIN. + * + * Allocate a cec adapter using the cec pin framework. + * + * Return: a pointer to the cec adapter or an error pointer + */ +struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops, + void *priv, const char *name, u32 caps); + +#endif diff --git a/6.17.0/include-overrides/media/cec.h b/6.17.0/include-overrides/media/cec.h new file mode 100644 index 0000000..0c8e861 --- /dev/null +++ b/6.17.0/include-overrides/media/cec.h @@ -0,0 +1,597 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec - HDMI Consumer Electronics Control support header + * + * Copyright 2016 Cisco Systems, Inc. 
and/or its affiliates. All rights reserved. + */ + +#ifndef _MEDIA_CEC_H +#define _MEDIA_CEC_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \ + CEC_CAP_PASSTHROUGH | CEC_CAP_RC) + +/** + * struct cec_devnode - cec device node + * @dev: cec device + * @cdev: cec character device + * @minor: device node minor number + * @lock: lock to serialize open/release and registration + * @registered: the device was correctly registered + * @unregistered: the device was unregistered + * @lock_fhs: lock to control access to @fhs + * @fhs: the list of open filehandles (cec_fh) + * + * This structure represents a cec-related device node. + * + * To add or remove filehandles from @fhs the @lock must be taken first, + * followed by @lock_fhs. It is safe to access @fhs if either lock is held. + * + * The @parent is a physical device. It must be set by core or device drivers + * before registering the node. 
+ */ +struct cec_devnode { + /* sysfs */ + struct device dev; + struct cdev cdev; + + /* device info */ + int minor; + /* serialize open/release and registration */ + struct mutex lock; + bool registered; + bool unregistered; + /* protect access to fhs */ + struct mutex lock_fhs; + struct list_head fhs; +}; + +struct cec_adapter; +struct cec_data; +struct cec_pin; +struct cec_notifier; + +struct cec_data { + struct list_head list; + struct list_head xfer_list; + struct cec_adapter *adap; + struct cec_msg msg; + u8 match_len; + u8 match_reply[5]; + struct cec_fh *fh; + struct delayed_work work; + struct completion c; + u8 attempts; + bool blocking; + bool completed; +}; + +struct cec_msg_entry { + struct list_head list; + struct cec_msg msg; +}; + +struct cec_event_entry { + struct list_head list; + struct cec_event ev; +}; + +#define CEC_NUM_CORE_EVENTS 2 +#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH + +struct cec_fh { + struct list_head list; + struct list_head xfer_list; + struct cec_adapter *adap; + u8 mode_initiator; + u8 mode_follower; + + /* Events */ + wait_queue_head_t wait; + struct mutex lock; + struct list_head events[CEC_NUM_EVENTS]; /* queued events */ + u16 queued_events[CEC_NUM_EVENTS]; + unsigned int total_queued_events; + struct cec_event_entry core_events[CEC_NUM_CORE_EVENTS]; + struct list_head msgs; /* queued messages */ + unsigned int queued_msgs; +}; + +#define CEC_SIGNAL_FREE_TIME_RETRY 3 +#define CEC_SIGNAL_FREE_TIME_NEW_INITIATOR 5 +#define CEC_SIGNAL_FREE_TIME_NEXT_XFER 7 + +/* The nominal data bit period is 2.4 ms */ +#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400) + +struct cec_adap_ops { + /* Low-level callbacks, called with adap->lock held */ + int (*adap_enable)(struct cec_adapter *adap, bool enable); + int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable); + int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable); + int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr); + void 
(*adap_unconfigured)(struct cec_adapter *adap); + int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg); + void (*adap_nb_transmit_canceled)(struct cec_adapter *adap, + const struct cec_msg *msg); + void (*adap_status)(struct cec_adapter *adap, struct seq_file *file); + void (*adap_free)(struct cec_adapter *adap); + + /* Error injection callbacks, called without adap->lock held */ + int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf); + bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line); + + /* High-level CEC message callback, called without adap->lock held */ + void (*configured)(struct cec_adapter *adap); + int (*received)(struct cec_adapter *adap, struct cec_msg *msg); +}; + +/* + * The minimum message length you can receive (excepting poll messages) is 2. + * With a transfer rate of at most 36 bytes per second this makes 18 messages + * per second worst case. + * + * We queue at most 3 seconds worth of received messages. The CEC specification + * requires that messages are replied to within a second, so 3 seconds should + * give more than enough margin. Since most messages are actually more than 2 + * bytes, this is in practice a lot more than 3 seconds. + */ +#define CEC_MAX_MSG_RX_QUEUE_SZ (18 * 3) + +/* + * The transmit queue is limited to 1 second worth of messages (worst case). + * Messages can be transmitted by userspace and kernel space. But for both it + * makes no sense to have a lot of messages queued up. One second seems + * reasonable. 
+ */ +#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) + +/** + * struct cec_adapter - cec adapter structure + * @owner: module owner + * @name: name of the CEC adapter + * @devnode: device node for the /dev/cecX device + * @lock: mutex controlling access to this structure + * @rc: remote control device + * @transmit_queue: queue of pending transmits + * @transmit_queue_sz: number of pending transmits + * @wait_queue: queue of transmits waiting for a reply + * @transmitting: CEC messages currently being transmitted + * @transmit_in_progress: true if a transmit is in progress + * @transmit_in_progress_aborted: true if a transmit is in progress is to be + * aborted. This happens if the logical address is + * invalidated while the transmit is ongoing. In that + * case the transmit will finish, but will not retransmit + * and be marked as ABORTED. + * @xfer_timeout_ms: the transfer timeout in ms. + * If 0, then timeout after 2100 ms. + * @kthread_config: kthread used to configure a CEC adapter + * @config_completion: used to signal completion of the config kthread + * @kthread: main CEC processing thread + * @kthread_waitq: main CEC processing wait_queue + * @ops: cec adapter ops + * @priv: cec driver's private data + * @capabilities: cec adapter capabilities + * @available_log_addrs: maximum number of available logical addresses + * @phys_addr: the current physical address + * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high + * in order to transmit or receive CEC messages. This is usually a HW + * limitation. + * @is_enabled: the CEC adapter is enabled + * @is_claiming_log_addrs: true if cec_claim_log_addrs() is running + * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) + * @must_reconfigure: while configuring, the PA changed, so reclaim LAs + * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) + * @cec_pin_is_high: if true then the CEC pin is high. Only used with the + * CEC pin framework. 
+ * @adap_controls_phys_addr: if true, then the CEC adapter controls the + * physical address, i.e. the CEC hardware can detect HPD changes and + * read the EDID and is not dependent on an external HDMI driver. + * Drivers that need this can set this field to true after the + * cec_allocate_adapter() call. + * @last_initiator: the initiator of the last transmitted message. + * @monitor_all_cnt: number of filehandles monitoring all msgs + * @monitor_pin_cnt: number of filehandles monitoring pin changes + * @follower_cnt: number of filehandles in follower mode + * @cec_follower: filehandle of the exclusive follower + * @cec_initiator: filehandle of the exclusive initiator + * @passthrough: if true, then the exclusive follower is in + * passthrough mode. + * @log_addrs: current logical addresses + * @conn_info: current connector info + * @tx_timeout_cnt: count the number of Timed Out transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_cnt: count the number of Low Drive transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_error_cnt: count the number of Error transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_arb_lost_cnt: count the number of Arb Lost transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the + * adapter was enabled. Used to avoid flooding the kernel + * log if this happens a lot. + * @tx_error_log_cnt: number of logged Error transmits since the adapter was + * enabled. Used to avoid flooding the kernel log if this + * happens a lot. + * @notifier: CEC notifier + * @pin: CEC pin status struct + * @cec_dir: debugfs cec directory + * @sequence: transmit sequence counter + * @input_phys: remote control input_phys name + * + * This structure represents a cec adapter. 
+ */ +struct cec_adapter { + struct module *owner; + char name[32]; + struct cec_devnode devnode; + struct mutex lock; + struct rc_dev *rc; + + struct list_head transmit_queue; + unsigned int transmit_queue_sz; + struct list_head wait_queue; + struct cec_data *transmitting; + bool transmit_in_progress; + bool transmit_in_progress_aborted; + unsigned int xfer_timeout_ms; + + struct task_struct *kthread_config; + struct completion config_completion; + + struct task_struct *kthread; + wait_queue_head_t kthread_waitq; + + const struct cec_adap_ops *ops; + void *priv; + u32 capabilities; + u8 available_log_addrs; + + u16 phys_addr; + bool needs_hpd; + bool is_enabled; + bool is_claiming_log_addrs; + bool is_configuring; + bool must_reconfigure; + bool is_configured; + bool cec_pin_is_high; + bool adap_controls_phys_addr; + u8 last_initiator; + u32 monitor_all_cnt; + u32 monitor_pin_cnt; + u32 follower_cnt; + struct cec_fh *cec_follower; + struct cec_fh *cec_initiator; + bool passthrough; + struct cec_log_addrs log_addrs; + struct cec_connector_info conn_info; + + u32 tx_timeout_cnt; + u32 tx_low_drive_cnt; + u32 tx_error_cnt; + u32 tx_arb_lost_cnt; + u32 tx_low_drive_log_cnt; + u32 tx_error_log_cnt; + +#ifdef CONFIG_CEC_NOTIFIER + struct cec_notifier *notifier; +#endif +#ifdef CONFIG_CEC_PIN + struct cec_pin *pin; +#endif + + struct dentry *cec_dir; + + u32 sequence; + + char input_phys[40]; +}; + +static inline int cec_get_device(struct cec_adapter *adap) +{ + struct cec_devnode *devnode = &adap->devnode; + + /* + * Check if the cec device is available. This needs to be done with + * the devnode->lock held to prevent an open/unregister race: + * without the lock, the device could be unregistered and freed between + * the devnode->registered check and get_device() calls, leading to + * a crash. + */ + mutex_lock(&devnode->lock); + /* + * return ENODEV if the cec device has been removed + * already or if it is not registered anymore. 
+ */ + if (!devnode->registered) { + mutex_unlock(&devnode->lock); + return -ENODEV; + } + /* and increase the device refcount */ + get_device(&devnode->dev); + mutex_unlock(&devnode->lock); + return 0; +} + +static inline void cec_put_device(struct cec_adapter *adap) +{ + put_device(&adap->devnode.dev); +} + +static inline void *cec_get_drvdata(const struct cec_adapter *adap) +{ + return adap->priv; +} + +static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr) +{ + return adap->log_addrs.log_addr_mask & (1 << log_addr); +} + +static inline bool cec_is_sink(const struct cec_adapter *adap) +{ + return adap->phys_addr == 0; +} + +/** + * cec_is_registered() - is the CEC adapter registered? + * + * @adap: the CEC adapter, may be NULL. + * + * Return: true if the adapter is registered, false otherwise. + */ +static inline bool cec_is_registered(const struct cec_adapter *adap) +{ + return adap && adap->devnode.registered; +} + +#define cec_phys_addr_exp(pa) \ + ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf + +struct edid; +struct drm_connector; + +#if IS_REACHABLE(CONFIG_CEC_CORE) +struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, + void *priv, const char *name, u32 caps, u8 available_las); +int cec_register_adapter(struct cec_adapter *adap, struct device *parent); +void cec_unregister_adapter(struct cec_adapter *adap); +void cec_delete_adapter(struct cec_adapter *adap); + +int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs, + bool block); +void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, + bool block); +void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid); +void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info); +int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg, + bool block); + +/* Called by the adapter */ +void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, + u8 
arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, + u8 error_cnt, ktime_t ts); + +static inline void cec_transmit_done(struct cec_adapter *adap, u8 status, + u8 arb_lost_cnt, u8 nack_cnt, + u8 low_drive_cnt, u8 error_cnt) +{ + cec_transmit_done_ts(adap, status, arb_lost_cnt, nack_cnt, + low_drive_cnt, error_cnt, ktime_get()); +} +/* + * Simplified version of cec_transmit_done for hardware that doesn't retry + * failed transmits. So this is always just one attempt in which case + * the status is sufficient. + */ +void cec_transmit_attempt_done_ts(struct cec_adapter *adap, + u8 status, ktime_t ts); + +static inline void cec_transmit_attempt_done(struct cec_adapter *adap, + u8 status) +{ + cec_transmit_attempt_done_ts(adap, status, ktime_get()); +} + +void cec_received_msg_ts(struct cec_adapter *adap, + struct cec_msg *msg, ktime_t ts); + +static inline void cec_received_msg(struct cec_adapter *adap, + struct cec_msg *msg) +{ + cec_received_msg_ts(adap, msg, ktime_get()); +} + +/** + * cec_queue_pin_cec_event() - queue a CEC pin event with a given timestamp. + * + * @adap: pointer to the cec adapter + * @is_high: when true the CEC pin is high, otherwise it is low + * @dropped_events: when true some events were dropped + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, + bool dropped_events, ktime_t ts); + +/** + * cec_queue_pin_hpd_event() - queue a pin event with a given timestamp. + * + * @adap: pointer to the cec adapter + * @is_high: when true the HPD pin is high, otherwise it is low + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts); + +/** + * cec_queue_pin_5v_event() - queue a pin event with a given timestamp. 
+ * + * @adap: pointer to the cec adapter + * @is_high: when true the 5V pin is high, otherwise it is low + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts); + +/** + * cec_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. + */ +u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset); + +void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector); + +#else + +static inline int cec_register_adapter(struct cec_adapter *adap, + struct device *parent) +{ + return 0; +} + +static inline void cec_unregister_adapter(struct cec_adapter *adap) +{ +} + +static inline void cec_delete_adapter(struct cec_adapter *adap) +{ +} + +static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, + bool block) +{ +} + +static inline void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid) +{ +} + +static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + if (offset) + *offset = 0; + return CEC_PHYS_ADDR_INVALID; +} + +static inline void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info) +{ +} + +static inline void +cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector) +{ + memset(conn_info, 0, sizeof(*conn_info)); +} + +#endif + +/** + * cec_phys_addr_invalidate() - set the physical address to INVALID + * + * @adap: the CEC adapter + * + * This is a simple helper function to invalidate the physical + * address. 
+ */ +static inline void cec_phys_addr_invalidate(struct cec_adapter *adap) +{ + cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); +} + +/** + * cec_get_edid_spa_location() - find location of the Source Physical Address + * + * @edid: the EDID + * @size: the size of the EDID + * + * This EDID is expected to be a CEA-861 compliant, which means that there are + * at least two blocks and one or more of the extensions blocks are CEA-861 + * blocks. + * + * The returned location is guaranteed to be <= size-2. + * + * This is an inline function since it is used by both CEC and V4L2. + * Ideally this would go in a module shared by both, but it is overkill to do + * that for just a single function. + */ +static inline unsigned int cec_get_edid_spa_location(const u8 *edid, + unsigned int size) +{ + unsigned int blocks = size / 128; + unsigned int block; + u8 d; + + /* Sanity check: at least 2 blocks and a multiple of the block size */ + if (blocks < 2 || size % 128) + return 0; + + /* + * If there are fewer extension blocks than the size, then update + * 'blocks'. It is allowed to have more extension blocks than the size, + * since some hardware can only read e.g. 256 bytes of the EDID, even + * though more blocks are present. The first CEA-861 extension block + * should normally be in block 1 anyway. 
+ */ + if (edid[0x7e] + 1 < blocks) + blocks = edid[0x7e] + 1; + + for (block = 1; block < blocks; block++) { + unsigned int offset = block * 128; + + /* Skip any non-CEA-861 extension blocks */ + if (edid[offset] != 0x02 || edid[offset + 1] != 0x03) + continue; + + /* search Vendor Specific Data Block (tag 3) */ + d = edid[offset + 2] & 0x7f; + /* Check if there are Data Blocks */ + if (d <= 4) + continue; + if (d > 4) { + unsigned int i = offset + 4; + unsigned int end = offset + d; + + /* Note: 'end' is always < 'size' */ + do { + u8 tag = edid[i] >> 5; + u8 len = edid[i] & 0x1f; + + if (tag == 3 && len >= 5 && i + len <= end && + edid[i + 1] == 0x03 && + edid[i + 2] == 0x0c && + edid[i + 3] == 0x00) + return i + 4; + i += len + 1; + } while (i < end); + } + } + return 0; +} + +#endif /* _MEDIA_CEC_H */ diff --git a/6.17.0/include-overrides/media/demux.h b/6.17.0/include-overrides/media/demux.h new file mode 100644 index 0000000..bf00a5a --- /dev/null +++ b/6.17.0/include-overrides/media/demux.h @@ -0,0 +1,600 @@ +/* + * demux.h + * + * The Kernel Digital TV Demux kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * demux layer. + * + * Copyright (c) 2002 Convergence GmbH + * + * based on code: + * Copyright (c) 2000 Nokia Research Center + * Tampere, FINLAND + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __DEMUX_H +#define __DEMUX_H + +#include +#include +#include +#include +#include + +/* + * Common definitions + */ + +/* + * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter. + */ + +#ifndef DMX_MAX_FILTER_SIZE +#define DMX_MAX_FILTER_SIZE 18 +#endif + +/* + * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed + * filter. + */ + +#ifndef DMX_MAX_SECTION_SIZE +#define DMX_MAX_SECTION_SIZE 4096 +#endif +#ifndef DMX_MAX_SECFEED_SIZE +#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188) +#endif + +/* + * TS packet reception + */ + +/** + * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\) + * + * @TS_PACKET: Send TS packets (188 bytes) to callback (default). + * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload + * (<=184 bytes per packet) to callback + * @TS_DECODER: Send stream to built-in decoder (if present). + * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux + * device, not to the dvr device + */ +enum ts_filter_type { + TS_PACKET = 1, + TS_PAYLOAD_ONLY = 2, + TS_DECODER = 4, + TS_DEMUX = 8, +}; + +/** + * struct dmx_ts_feed - Structure that contains a TS feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @set: sets the TS filter + * @start_filtering: starts TS filtering + * @stop_filtering: stops TS filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. 
+ */ +struct dmx_ts_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + int (*set)(struct dmx_ts_feed *feed, + u16 pid, + int type, + enum dmx_ts_pes pes_type, + ktime_t timeout); + int (*start_filtering)(struct dmx_ts_feed *feed); + int (*stop_filtering)(struct dmx_ts_feed *feed); +}; + +/* + * Section reception + */ + +/** + * struct dmx_section_filter - Structure that describes a section filter + * + * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header + * that will be matched by the section filter + * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits + * specified by @filter_value that will be used on the filter + * match logic. + * @filter_mode: Contains a 16 bytes (128 bits) filter mode. + * @parent: Back-pointer to struct dmx_section_feed. + * @priv: Pointer to private data of the API client. + * + * + * The @filter_mask controls which bits of @filter_value are compared with + * the section headers/payload. On a binary value of 1 in filter_mask, the + * corresponding bits are compared. The filter only accepts sections that are + * equal to filter_value in all the tested bit positions. + */ +struct dmx_section_filter { + u8 filter_value[DMX_MAX_FILTER_SIZE]; + u8 filter_mask[DMX_MAX_FILTER_SIZE]; + u8 filter_mode[DMX_MAX_FILTER_SIZE]; + struct dmx_section_feed *parent; + + void *priv; +}; + +/** + * struct dmx_section_feed - Structure that contains a section feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @check_crc: If non-zero, check the CRC values of filtered sections. + * @set: sets the section filter + * @allocate_filter: This function is used to allocate a section filter on + * the demux. It should only be called when no filtering + * is in progress on this section feed. If a filter cannot + * be allocated, the function fails with -ENOSPC. 
+ * @release_filter: This function releases all the resources of a + * previously allocated section filter. The function + * should not be called while filtering is in progress + * on this section feed. After calling this function, + * the caller should not try to dereference the filter + * pointer. + * @start_filtering: starts section filtering + * @stop_filtering: stops section filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. + */ +struct dmx_section_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + + int check_crc; + + /* private: Used internally at dvb_demux.c */ + u32 crc_val; + + u8 *secbuf; + u8 secbuf_base[DMX_MAX_SECFEED_SIZE]; + u16 secbufp, seclen, tsfeedp; + + /* public: */ + int (*set)(struct dmx_section_feed *feed, + u16 pid, + int check_crc); + int (*allocate_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter **filter); + int (*release_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter *filter); + int (*start_filtering)(struct dmx_section_feed *feed); + int (*stop_filtering)(struct dmx_section_feed *feed); +}; + +/** + * typedef dmx_ts_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered TS packets. + * @buffer1_length: Length of the TS data in buffer1. + * @buffer2: Pointer to the tail of the filtered TS packets, or NULL. + * @buffer2_length: Length of the TS data in buffer2. + * @source: Indicates which TS feed is the source of the callback. + * @buffer_flags: Address where buffer flags are stored. Those are + * used to report discontinuity users via DVB + * memory mapped API, as defined by + * &enum dmx_buffer_flags. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. 
The function is only called when filtering + * on a TS feed has been enabled using the start_filtering\(\) function at + * the &dmx_demux. + * Any TS packets that match the filter settings are copied to a circular + * buffer. The filtered TS packets are delivered to the client using this + * callback function. + * It is expected that the @buffer1 and @buffer2 callback parameters point to + * addresses within the circular buffer, but other implementations are also + * possible. Note that the called party should not try to free the memory + * the @buffer1 and @buffer2 parameters point to. + * + * When this function is called, the @buffer1 parameter typically points to + * the start of the first undelivered TS packet within a circular buffer. + * The @buffer2 buffer parameter is normally NULL, except when the received + * TS packets have crossed the last address of the circular buffer and + * "wrapped" to the beginning of the buffer. In the latter case the @buffer1 + * parameter would contain an address within the circular buffer, while the + * @buffer2 parameter would contain the first address of the circular buffer. + * The number of bytes delivered with this function (i.e. @buffer1_length + + * @buffer2_length) is usually equal to the value of callback_length parameter + * given in the set() function, with one exception: if a timeout occurs before + * receiving callback_length bytes of TS data, any undelivered packets are + * immediately delivered to the client by calling this function. The timeout + * duration is controlled by the set() function in the TS Feed API. + * + * If a TS packet is received with errors that could not be fixed by the + * TS-level forward error correction (FEC), the Transport_error_indicator + * flag of the TS packet header should be set. The TS packet should not be + * discarded, as the error can possibly be corrected by a higher layer + * protocol. 
If the called party is slow in processing the callback, it + * is possible that the circular buffer eventually fills up. If this happens, + * the demux driver should discard any TS packets received while the buffer + * is full and return -EOVERFLOW. + * + * The type of data returned to the callback can be selected by the + * &dmx_ts_feed.@set function. The type parameter decides if the raw + * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY) + * should be returned. If additionally the TS_DECODER bit is set the stream + * will also be sent to the hardware MPEG decoder. + * + * Return: + * + * - 0, on success; + * + * - -EOVERFLOW, on buffer overflow. + */ +typedef int (*dmx_ts_cb)(const u8 *buffer1, + size_t buffer1_length, + const u8 *buffer2, + size_t buffer2_length, + struct dmx_ts_feed *source, + u32 *buffer_flags); + +/** + * typedef dmx_section_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered section, e.g. + * within the circular buffer of the demux driver. + * @buffer1_len: Length of the filtered section data in @buffer1, + * including headers and CRC. + * @buffer2: Pointer to the tail of the filtered section data, + * or NULL. Useful to handle the wrapping of a + * circular buffer. + * @buffer2_len: Length of the filtered section data in @buffer2, + * including headers and CRC. + * @source: Indicates which section feed is the source of the + * callback. + * @buffer_flags: Address where buffer flags are stored. Those are + * used to report discontinuity users via DVB + * memory mapped API, as defined by + * &enum dmx_buffer_flags. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. The function is only called when + * filtering of sections has been enabled using the function + * &dmx_ts_feed.@start_filtering. 
When the demux driver has received a + * complete section that matches at least one section filter, the client + * is notified via this callback function. Normally this function is called + * for each received section; however, it is also possible to deliver + * multiple sections with one callback, for example when the system load + * is high. If an error occurs while receiving a section, this + * function should be called with the corresponding error type set in the + * success field, whether or not there is data to deliver. The Section Feed + * implementation should maintain a circular buffer for received sections. + * However, this is not necessary if the Section Feed API is implemented as + * a client of the TS Feed API, because the TS Feed implementation then + * buffers the received data. The size of the circular buffer can be + * configured using the &dmx_ts_feed.@set function in the Section Feed API. + * If there is no room in the circular buffer when a new section is received, + * the section must be discarded. If this happens, the value of the success + * parameter should be DMX_OVERRUN_ERROR on the next callback. + */ +typedef int (*dmx_section_cb)(const u8 *buffer1, + size_t buffer1_len, + const u8 *buffer2, + size_t buffer2_len, + struct dmx_section_filter *source, + u32 *buffer_flags); + +/* + * DVB Front-End + */ + +/** + * enum dmx_frontend_source - Used to identify the type of frontend + * + * @DMX_MEMORY_FE: The source of the demux is memory. It means that + * the MPEG-TS to be filtered comes from userspace, + * via write() syscall. + * + * @DMX_FRONTEND_0: The source of the demux is a frontend connected + * to the demux. + */ +enum dmx_frontend_source { + DMX_MEMORY_FE, + DMX_FRONTEND_0, +}; + +/** + * struct dmx_frontend - Structure that lists the frontends associated with + * a demux + * + * @connectivity_list: List of front-ends that can be connected to a + * particular demux; + * @source: Type of the frontend. 
+ * + * FIXME: this structure should likely be replaced soon by some + * media-controller based logic. + */ +struct dmx_frontend { + struct list_head connectivity_list; + enum dmx_frontend_source source; +}; + +/* + * MPEG-2 TS Demux + */ + +/** + * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap + * + * @DMX_TS_FILTERING: set if TS filtering is supported; + * @DMX_SECTION_FILTERING: set if section filtering is supported; + * @DMX_MEMORY_BASED_FILTERING: set if write() available. + * + * Those flags are OR'ed in the &dmx_demux.capabilities field + */ +enum dmx_demux_caps { + DMX_TS_FILTERING = 1, + DMX_SECTION_FILTERING = 4, + DMX_MEMORY_BASED_FILTERING = 8, +}; + +/* + * Demux resource type identifier. + */ + +/** + * DMX_FE_ENTRY - Casts elements in the list of registered + * front-ends from the generic type struct list_head + * to the type * struct dmx_frontend + * + * @list: list of struct dmx_frontend + */ +#define DMX_FE_ENTRY(list) \ + list_entry(list, struct dmx_frontend, connectivity_list) + +/** + * struct dmx_demux - Structure that contains the demux capabilities and + * callbacks. + * + * @capabilities: Bitfield of capability flags. + * + * @frontend: Front-end connected to the demux + * + * @priv: Pointer to private data of the API client + * + * @open: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EUSERS, if maximum usage count was reached; + * -EINVAL, on bad parameter. 
+ * + * @close: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if demux was not in use (e. g. no users); + * -EINVAL, on bad parameter. + * + * @write: This function provides the demux driver with a memory buffer + * containing TS packets. Instead of receiving TS packets from the DVB + * front-end, the demux driver software will read packets from memory. + * Any clients of this demux with active TS, PES or Section filters will + * receive filtered data via the Demux callback API (see 0). The function + * returns when all the data in the buffer has been consumed by the demux. + * Demux hardware typically cannot read TS from memory. If this is the + * case, memory-based filtering has to be implemented entirely in software. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @buf function parameter contains a pointer to the TS data in + * kernel-space memory. + * The @count function parameter contains the length of the TS data. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EINTR, if a signal handling is pending; + * -ENODEV, if demux was removed; + * -EINVAL, on bad parameter. + * + * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS + * packets carrying a certain PID. The TS feed normally corresponds to a + * hardware PID filter on the demux chip. + * The @demux function parameter contains a pointer to the demux API and + * instance data. 
+ * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed. + * Any filtering in progress on the TS feed should be stopped before + * calling this function. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource + * for filtering and receiving sections. On platforms with hardware + * support for section filtering, a section feed is directly mapped to + * the demux HW. On other platforms, TS packets are first PID filtered in + * hardware and a hardware section filter then emulated in software. The + * caller obtains an API pointer of type dmx_section_feed_t as an out + * parameter. Using this API the caller can set filtering parameters and + * start receiving sections. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_section_feed: Releases the resources allocated with + * @allocate_section_feed, including allocated filters. Any filtering in + * progress on the section feed should be stopped before calling this + * function. 
+ * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @add_frontend: Registers a connectivity between a demux and a front-end, + * i.e., indicates that the demux can be connected via a call to + * @connect_frontend to use the given front-end as a TS source. The + * client of this function has to allocate dynamic or static memory for + * the frontend structure and initialize its fields before calling this + * function. This function is normally called during the driver + * initialization. The caller must not free the memory of the frontend + * struct before successfully calling @remove_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @remove_frontend: Indicates that the given front-end, registered by a call + * to @add_frontend, can no longer be connected as a TS source by this + * demux. The function should be called when a front-end driver or a demux + * driver is removed from the system. If the front-end is in use, the + * function fails with the return value of -EBUSY. After successfully + * calling this function, the caller can free the memory of the frontend + * struct if it was dynamically allocated before the @add_frontend + * operation. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if the front-end was not found, + * -EINVAL, on bad parameter. + * + * @get_frontends: Provides the APIs of the front-ends that have been + * registered for this demux. 
Any of the front-ends obtained with this + * call can be used as a parameter for @connect_frontend. The include + * file demux.h contains the macro DMX_FE_ENTRY() for converting an + * element of the generic type struct &list_head * to the type + * struct &dmx_frontend *. The caller must not free the memory of any of + * the elements obtained via this function call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns a struct list_head pointer to the list of front-end + * interfaces, or NULL in the case of an empty list. + * + * @connect_frontend: Connects the TS output of the front-end to the input of + * the demux. A demux can only be connected to a front-end registered to + * the demux with the function @add_frontend. It may or may not be + * possible to connect multiple demuxes to the same front-end, depending + * on the capabilities of the HW platform. When not used, the front-end + * should be released by calling @disconnect_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @disconnect_frontend: Disconnects the demux and a front-end previously + * connected by a @connect_frontend call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0, + * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @pids function parameter contains an array with five u16 elements + * where the PIDs will be stored. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. 
+ */ +struct dmx_demux { + enum dmx_demux_caps capabilities; + struct dmx_frontend *frontend; + void *priv; + int (*open)(struct dmx_demux *demux); + int (*close)(struct dmx_demux *demux); + int (*write)(struct dmx_demux *demux, const char __user *buf, + size_t count); + int (*allocate_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed **feed, + dmx_ts_cb callback); + int (*release_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed *feed); + int (*allocate_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed **feed, + dmx_section_cb callback); + int (*release_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed *feed); + int (*add_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*remove_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + struct list_head *(*get_frontends)(struct dmx_demux *demux); + int (*connect_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*disconnect_frontend)(struct dmx_demux *demux); + + int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids); + + /* private: */ + + /* + * Only used at av7110, to read some data from firmware. + * As this was never documented, we have no clue about what's + * there, and its usage on other drivers aren't encouraged. 
+ */ + int (*get_stc)(struct dmx_demux *demux, unsigned int num, + u64 *stc, unsigned int *base); +}; + +#endif /* #ifndef __DEMUX_H */ diff --git a/6.17.0/include-overrides/media/dmxdev.h b/6.17.0/include-overrides/media/dmxdev.h new file mode 100644 index 0000000..63219a6 --- /dev/null +++ b/6.17.0/include-overrides/media/dmxdev.h @@ -0,0 +1,213 @@ +/* + * dmxdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DMXDEV_H_ +#define _DMXDEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/** + * enum dmxdev_type - type of demux filter type. + * + * @DMXDEV_TYPE_NONE: no filter set. + * @DMXDEV_TYPE_SEC: section filter. + * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter. + */ +enum dmxdev_type { + DMXDEV_TYPE_NONE, + DMXDEV_TYPE_SEC, + DMXDEV_TYPE_PES, +}; + +/** + * enum dmxdev_state - state machine for the dmxdev. + * + * @DMXDEV_STATE_FREE: indicates that the filter is freed. + * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMXDEV_STATE_SET: indicates that the filter parameters are set. + * @DMXDEV_STATE_GO: indicates that the filter is running. + * @DMXDEV_STATE_DONE: indicates that a packet was already filtered + * and the filter is now disabled. + * Set only if %DMX_ONESHOT. See + * &dmx_sct_filter_params. 
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition. + */ +enum dmxdev_state { + DMXDEV_STATE_FREE, + DMXDEV_STATE_ALLOCATED, + DMXDEV_STATE_SET, + DMXDEV_STATE_GO, + DMXDEV_STATE_DONE, + DMXDEV_STATE_TIMEDOUT +}; + +/** + * struct dmxdev_feed - digital TV dmxdev feed + * + * @pid: Program ID to be filtered + * @ts: pointer to &struct dmx_ts_feed + * @next: &struct list_head pointing to the next feed. + */ + +struct dmxdev_feed { + u16 pid; + struct dmx_ts_feed *ts; + struct list_head next; +}; + +/** + * struct dmxdev_filter - digital TV dmxdev filter + * + * @filter: a union describing a dmxdev filter. + * Currently used only for section filters. + * @filter.sec: a &struct dmx_section_filter pointer. + * For section filter only. + * @feed: a union describing a dmxdev feed. + * Depending on the filter type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct list_head list. + * For TS and PES feeds. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @params: a union describing dmxdev filter parameters. + * Depending on the filter type, it can be either + * @params.sec or @params.pes. + * @params.sec: a &struct dmx_sct_filter_params embedded struct. + * For section filter only. + * @params.pes: a &struct dmx_pes_filter_params embedded struct. + * For PES filter only. + * @type: type of the dmxdev filter, as defined by &enum dmxdev_type. + * @state: state of the dmxdev filter, as defined by &enum dmxdev_state. + * @dev: pointer to &struct dmxdev. + * @buffer: an embedded &struct dvb_ringbuffer buffer. + * @vb2_ctx: control struct for VB2 handler + * @mutex: protects the access to &struct dmxdev_filter. + * @timer: &struct timer_list embedded timer, used to check for + * feed timeouts. + * Only for section filter. + * @todo: index for the @secheader. + * Only for section filter. + * @secheader: buffer cache to parse the section header. + * Only for section filter. 
+ */ +struct dmxdev_filter { + union { + struct dmx_section_filter *sec; + } filter; + + union { + /* list of TS and PES feeds (struct dmxdev_feed) */ + struct list_head ts; + struct dmx_section_feed *sec; + } feed; + + union { + struct dmx_sct_filter_params sec; + struct dmx_pes_filter_params pes; + } params; + + enum dmxdev_type type; + enum dmxdev_state state; + struct dmxdev *dev; + struct dvb_ringbuffer buffer; + struct dvb_vb2_ctx vb2_ctx; + + struct mutex mutex; + + /* only for sections */ + struct timer_list timer; + int todo; + u8 secheader[3]; +}; + +/** + * struct dmxdev - Describes a digital TV demux device. + * + * @dvbdev: pointer to &struct dvb_device associated with + * the demux device node. + * @dvr_dvbdev: pointer to &struct dvb_device associated with + * the dvr device node. + * @filter: pointer to &struct dmxdev_filter. + * @demux: pointer to &struct dmx_demux. + * @filternum: number of filters. + * @capabilities: demux capabilities as defined by &enum dmx_demux_caps. + * @may_do_mmap: flag used to indicate if the device may do mmap. + * @exit: flag to indicate that the demux is being released. + * @dvr_orig_fe: pointer to &struct dmx_frontend. + * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output. + * @dvr_vb2_ctx: control struct for VB2 handler + * @mutex: protects the usage of this structure. + * @lock: protects access to &dmxdev->filter->data. + */ +struct dmxdev { + struct dvb_device *dvbdev; + struct dvb_device *dvr_dvbdev; + + struct dmxdev_filter *filter; + struct dmx_demux *demux; + + int filternum; + int capabilities; + + unsigned int may_do_mmap:1; + unsigned int exit:1; +#define DMXDEV_CAP_DUPLEX 1 + struct dmx_frontend *dvr_orig_fe; + + struct dvb_ringbuffer dvr_buffer; +#define DVR_BUFFER_SIZE (10*188*1024) + + struct dvb_vb2_ctx dvr_vb2_ctx; + + struct mutex mutex; + spinlock_t lock; +}; + +/** + * dvb_dmxdev_init - initializes a digital TV demux and registers both demux + * and DVR devices. 
+ * + * @dmxdev: pointer to &struct dmxdev. + * @adap: pointer to &struct dvb_adapter. + */ +int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap); + +/** + * dvb_dmxdev_release - releases a digital TV demux and unregisters it. + * + * @dmxdev: pointer to &struct dmxdev. + */ +void dvb_dmxdev_release(struct dmxdev *dmxdev); + +#endif /* _DMXDEV_H_ */ diff --git a/6.17.0/include-overrides/media/dvb-usb-ids.h b/6.17.0/include-overrides/media/dvb-usb-ids.h new file mode 100644 index 0000000..1b7d10f --- /dev/null +++ b/6.17.0/include-overrides/media/dvb-usb-ids.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* dvb-usb-ids.h is part of the DVB USB library. + * + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see + * dvb-usb-init.c for copyright information. + * + * a header file containing define's for the USB device supported by the + * various drivers. + */ +#ifndef _DVB_USB_IDS_H_ +#define _DVB_USB_IDS_H_ + +#include + +#define DVB_USB_DEV(pid, vid) \ + [vid] = { USB_DEVICE(USB_VID_ ## pid, USB_PID_ ## vid) } + +#define DVB_USB_DEV_VER(pid, vid, lo, hi) \ + [vid] = { USB_DEVICE_VER(USB_VID_ ## pid, USB_PID_ ## vid, lo, hi) } + +/* Vendor IDs */ + +#define USB_VID_774 0x7a69 +#define USB_VID_ADSTECH 0x06e1 +#define USB_VID_AFATECH 0x15a4 +#define USB_VID_ALCOR_MICRO 0x058f +#define USB_VID_ALINK 0x05e3 +#define USB_VID_AME 0x06be +#define USB_VID_AMT 0x1c73 +#define USB_VID_ANCHOR 0x0547 +#define USB_VID_ANSONIC 0x10b9 +#define USB_VID_ANUBIS_ELECTRONIC 0x10fd +#define USB_VID_ASUS 0x0b05 +#define USB_VID_AVERMEDIA 0x07ca +#define USB_VID_AZUREWAVE 0x13d3 +#define USB_VID_COMPRO 0x185b +#define USB_VID_COMPRO_UNK 0x145f +#define USB_VID_CONEXANT 0x0572 +#define USB_VID_CYPRESS 0x04b4 +#define USB_VID_DEXATEK 0x1d19 +#define USB_VID_DIBCOM 0x10b8 +#define USB_VID_DPOSH 0x1498 +#define USB_VID_DVICO 0x0fe9 +#define USB_VID_E3C 0x18b4 +#define USB_VID_ELGATO 0x0fd9 +#define USB_VID_EMPIA 0xeb1a +#define 
USB_VID_EVOLUTEPC 0x1e59 +#define USB_VID_GENPIX 0x09c0 +#define USB_VID_GIGABYTE 0x1044 +#define USB_VID_GOTVIEW 0x1fe1 +#define USB_VID_GRANDTEC 0x5032 +#define USB_VID_GTEK 0x1f4d +#define USB_VID_HAMA 0x147f +#define USB_VID_HANFTEK 0x15f4 +#define USB_VID_HAUPPAUGE 0x2040 +#define USB_VID_HUMAX_COEX 0x10b9 +#define USB_VID_HYPER_PALTEK 0x1025 +#define USB_VID_INTEL 0x8086 +#define USB_VID_ITETECH 0x048d +#define USB_VID_KWORLD 0xeb2a +#define USB_VID_KWORLD_2 0x1b80 +#define USB_VID_KYE 0x0458 +#define USB_VID_LEADTEK 0x0413 +#define USB_VID_LITEON 0x04ca +#define USB_VID_MEDION 0x1660 +#define USB_VID_MICROSOFT 0x045e +#define USB_VID_MIGLIA 0x18f3 +#define USB_VID_MSI 0x0db0 +#define USB_VID_MSI_2 0x1462 +#define USB_VID_OPERA1 0x695c +#define USB_VID_PCTV 0x2013 +#define USB_VID_PINNACLE 0x2304 +#define USB_VID_PIXELVIEW 0x1554 +#define USB_VID_PROF_1 0x3011 +#define USB_VID_PROF_2 0x3034 +#define USB_VID_REALTEK 0x0bda +#define USB_VID_SONY 0x1415 +#define USB_VID_TECHNISAT 0x14f7 +#define USB_VID_TECHNOTREND 0x0b48 +#define USB_VID_TELESTAR 0x10b9 +#define USB_VID_TERRATEC 0x0ccd +#define USB_VID_TERRATEC_2 0x153b +#define USB_VID_TEVII 0x9022 +#define USB_VID_TWINHAN 0x1822 +#define USB_VID_ULTIMA_ELECTRONIC 0x05d8 +#define USB_VID_UNIWILL 0x1584 +#define USB_VID_VISIONPLUS 0x13d3 +#define USB_VID_WIDEVIEW 0x14aa +#define USB_VID_XTENSIONS 0x1ae7 +#define USB_VID_YUAN 0x1164 +#define USB_VID_ZYDAS 0x0ace + +/* Product IDs */ + +#define USB_PID_ADSTECH_USB2_COLD 0xa333 +#define USB_PID_ADSTECH_USB2_WARM 0xa334 +#define USB_PID_AFATECH_AF9005 0x9020 +#define USB_PID_AFATECH_AF9015_9015 0x9015 +#define USB_PID_AFATECH_AF9015_9016 0x9016 +#define USB_PID_AFATECH_AF9035_1000 0x1000 +#define USB_PID_AFATECH_AF9035_1001 0x1001 +#define USB_PID_AFATECH_AF9035_1002 0x1002 +#define USB_PID_AFATECH_AF9035_1003 0x1003 +#define USB_PID_AFATECH_AF9035_9035 0x9035 +#define USB_PID_ALINK_DTU 0xf170 +#define USB_PID_AME_DTV5100 0xa232 +#define 
USB_PID_ANCHOR_NEBULA_DIGITV 0x0201 +#define USB_PID_ANSONIC_DVBT_USB 0x6000 +#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514 +#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513 +#define USB_PID_ANUBIS_MSI_DIGI_VOX_MINI_II 0x1513 +#define USB_PID_ANYSEE 0x861f +#define USB_PID_ASUS_U3000 0x171f +#define USB_PID_ASUS_U3000H 0x1736 +#define USB_PID_ASUS_U3100 0x173f +#define USB_PID_ASUS_U3100MINI_PLUS 0x1779 +#define USB_PID_AVERMEDIA_1867 0x1867 +#define USB_PID_AVERMEDIA_A309 0xa309 +#define USB_PID_AVERMEDIA_A310 0xa310 +#define USB_PID_AVERMEDIA_A805 0xa805 +#define USB_PID_AVERMEDIA_A815M 0x815a +#define USB_PID_AVERMEDIA_A835 0xa835 +#define USB_PID_AVERMEDIA_A835B_1835 0x1835 +#define USB_PID_AVERMEDIA_A835B_2835 0x2835 +#define USB_PID_AVERMEDIA_A835B_3835 0x3835 +#define USB_PID_AVERMEDIA_A835B_4835 0x4835 +#define USB_PID_AVERMEDIA_A850 0x850a +#define USB_PID_AVERMEDIA_A850T 0x850b +#define USB_PID_AVERMEDIA_A867 0xa867 +#define USB_PID_AVERMEDIA_B835 0xb835 +#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800 +#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801 +#define USB_PID_AVERMEDIA_EXPRESS 0xb568 +#define USB_PID_AVERMEDIA_H335 0x0335 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039 +#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228 +#define USB_PID_AVERMEDIA_TD110 0xa110 +#define USB_PID_AVERMEDIA_TD310 0x1871 +#define USB_PID_AVERMEDIA_TWINSTAR 0x0825 +#define USB_PID_AVERMEDIA_VOLAR 0xa807 +#define USB_PID_AVERMEDIA_VOLAR_2 0xb808 +#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868 +#define USB_PID_AVERMEDIA_VOLAR_X 0xa815 +#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150 +#define USB_PID_AZUREWAVE_6007 0x0ccd +#define USB_PID_AZUREWAVE_AD_TU700 0x3237 +#define USB_PID_AZUREWAVE_AZ6027 0x3275 +#define USB_PID_AZUREWAVE_TWINHAN_VP7049 0x3219 +#define USB_PID_COMPRO_DVBU2000_COLD 0xd000 +#define 
USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c +#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d +#define USB_PID_COMPRO_DVBU2000_WARM 0xd001 +#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78 +#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80 +#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397 +#define USB_PID_CONEXANT_D680_DMB 0x86d6 +#define USB_PID_CPYTO_REDI_PC50A 0xa803 +#define USB_PID_CTVDIGDUAL_V2 0xe410 +#define USB_PID_CYPRESS_DW2101 0x2101 +#define USB_PID_CYPRESS_DW2102 0x2102 +#define USB_PID_CYPRESS_DW2104 0x2104 +#define USB_PID_CYPRESS_DW3101 0x3101 +#define USB_PID_CYPRESS_OPERA1_COLD 0x2830 +#define USB_PID_DELOCK_USB2_DVBT 0xb803 +#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 +#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064 +#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065 +#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8 +#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9 +#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6 +#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7 +#define USB_PID_DIBCOM_NIM7090 0x1bb2 +#define USB_PID_DIBCOM_NIM8096MD 0x1fa8 +#define USB_PID_DIBCOM_NIM9090M 0x2383 +#define USB_PID_DIBCOM_NIM9090MD 0x2384 +#define USB_PID_DIBCOM_STK7070P 0x1ebc +#define USB_PID_DIBCOM_STK7070PD 0x1ebe +#define USB_PID_DIBCOM_STK7700D 0x1ef0 +#define USB_PID_DIBCOM_STK7700P 0x1e14 +#define USB_PID_DIBCOM_STK7700P_PC 0x1e78 +#define USB_PID_DIBCOM_STK7700_U7000 0x7001 +#define USB_PID_DIBCOM_STK7770P 0x1e80 +#define USB_PID_DIBCOM_STK807XP 0x1f90 +#define USB_PID_DIBCOM_STK807XPVR 0x1f98 +#define USB_PID_DIBCOM_STK8096GP 0x1fa0 +#define USB_PID_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4 +#define USB_PID_DIBCOM_TFE7790P 0x1e6e +#define USB_PID_DIBCOM_TFE8096P 0x1f9C +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54 +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55 +#define USB_PID_DPOSH_M9206_COLD 0x9206 +#define USB_PID_DPOSH_M9206_WARM 0xa090 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51 +#define 
USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58 +#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71 +#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500 +#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501 +#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01 +#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10 +#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11 +#define USB_PID_E3C_EC168 0x1689 +#define USB_PID_E3C_EC168_2 0xfffa +#define USB_PID_E3C_EC168_3 0xfffb +#define USB_PID_E3C_EC168_4 0x1001 +#define USB_PID_E3C_EC168_5 0x1002 +#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 +#define USB_PID_ELGATO_EYETV_DTT 0x0021 +#define USB_PID_ELGATO_EYETV_DTT_2 0x003f +#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020 +#define USB_PID_ELGATO_EYETV_SAT 0x002a +#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025 +#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036 +#define USB_PID_EMPIA_DIGIVOX_MINI_SL_COLD 0xe360 +#define USB_PID_EMPIA_DIGIVOX_MINI_SL_WARM 0xe361 +#define USB_PID_EMPIA_VSTREAM_COLD 0x17de +#define USB_PID_EMPIA_VSTREAM_WARM 0x17df +#define USB_PID_EVOLUTEPC_TVWAY_PLUS 0x0002 +#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115 +#define USB_PID_FREECOM_DVBT 0x0160 +#define USB_PID_FREECOM_DVBT_2 0x0161 +#define USB_PID_FRIIO_WHITE 0x0001 +#define USB_PID_GENIATECH_SU3000 0x3000 +#define USB_PID_GENIATECH_T220 0xd220 +#define USB_PID_GENIATECH_X3M_SPC1400HD 0x3100 +#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012 +#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200 +#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201 +#define USB_PID_GENPIX_8PSK_REV_2 0x0202 +#define USB_PID_GENPIX_SKYWALKER_1 0x0203 +#define USB_PID_GENPIX_SKYWALKER_2 0x0206 +#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204 +#define USB_PID_GIGABYTE_U7000 
0x7001 +#define USB_PID_GIGABYTE_U8000 0x7002 +#define USB_PID_GOTVIEW_SAT_HD 0x5456 +#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6 +#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7 +#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 +#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 +#define USB_PID_GRANDTEC_MOD3000_COLD 0x0bb8 +#define USB_PID_GRANDTEC_MOD3000_WARM 0x0bb9 +#define USB_PID_HAMA_DVBT_HYBRID 0x2758 +#define USB_PID_HANFTEK_UMT_010_COLD 0x0001 +#define USB_PID_HANFTEK_UMT_010_WARM 0x0015 +#define USB_PID_HAUPPAUGE_MAX_S2 0xd900 +#define USB_PID_HAUPPAUGE_MYTV_T 0x7080 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200 +#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941 +#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950 +#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070 +#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200 +#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210 +#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_COLD 0x9300 +#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_WARM 0x9301 +#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_COLD 0x5000 +#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_WARM 0x5001 +#define USB_PID_INTEL_CE9500 0x9500 +#define USB_PID_ITETECH_IT9135 0x9135 +#define USB_PID_ITETECH_IT9135_9005 0x9005 +#define USB_PID_ITETECH_IT9135_9006 0x9006 +#define USB_PID_ITETECH_IT9303 0x9306 +#define USB_PID_KWORLD_395U 0xe396 +#define USB_PID_KWORLD_395U_2 0xe39b +#define USB_PID_KWORLD_395U_3 0xe395 +#define USB_PID_KWORLD_395U_4 0xe39a +#define USB_PID_KWORLD_399U 0xe399 +#define USB_PID_KWORLD_399U_2 0xe400 +#define USB_PID_KWORLD_MC810 0xc810 +#define USB_PID_KWORLD_PC160_2T 0xc160 +#define USB_PID_KWORLD_PC160_T 0xc161 +#define USB_PID_KWORLD_UB383_T 0xe383 +#define USB_PID_KWORLD_UB499_2T_T09 0xe409 +#define USB_PID_KWORLD_VSTREAM_COLD 0x17de +#define USB_PID_KYE_DVB_T_COLD 0x701e 
+#define USB_PID_KYE_DVB_T_WARM 0x701f +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_COLD 0x6025 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_H 0x60f6 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P 0x6f00 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_WARM 0x6026 +#define USB_PID_LITEON_DVB_T_COLD 0xf000 +#define USB_PID_LITEON_DVB_T_WARM 0xf001 +#define USB_PID_MEDION_CREATIX_CTX1921 0x1921 +#define USB_PID_MEDION_MD95700 0x0932 +#define USB_PID_MICROSOFT_XBOX_ONE_TUNER 0x02d5 +#define USB_PID_MIGLIA_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_MSI_DIGIVOX_DUO 0x8801 +#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807 +#define USB_PID_MSI_MEGASKY580 0x5580 +#define USB_PID_MSI_MEGASKY580_55801 0x5581 +#define USB_PID_MYGICA_D689 0xd811 +#define USB_PID_MYGICA_T230 0xc688 +#define USB_PID_MYGICA_T230A 0x689a +#define USB_PID_MYGICA_T230C 0xc689 +#define USB_PID_MYGICA_T230C2 0xc68a +#define USB_PID_MYGICA_T230C2_LITE 0xc69a +#define USB_PID_MYGICA_T230C_LITE 0xc699 +#define USB_PID_NOXON_DAB_STICK 0x00b3 +#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0 +#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4 +#define USB_PID_OPERA1_WARM 0x3829 +#define USB_PID_PCTV_2002E 0x025c +#define USB_PID_PCTV_2002E_SE 0x025d +#define USB_PID_PCTV_200E 0x020e +#define USB_PID_PCTV_78E 0x025a +#define USB_PID_PCTV_79E 0x0262 +#define USB_PID_PCTV_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_PCTV_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PCTV_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e +#define USB_PID_PINNACLE_PCTV2000E 0x022c +#define USB_PID_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PINNACLE_PCTV340E 0x023d +#define USB_PID_PINNACLE_PCTV340E_SE 0x023e +#define USB_PID_PINNACLE_PCTV71E 0x022b +#define USB_PID_PINNACLE_PCTV72E 0x0236 +#define USB_PID_PINNACLE_PCTV73A 0x0243 +#define USB_PID_PINNACLE_PCTV73E 0x0237 +#define USB_PID_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_PCTV74E 0x0246 +#define 
USB_PID_PINNACLE_PCTV801E 0x023a +#define USB_PID_PINNACLE_PCTV801E_SE 0x023b +#define USB_PID_PINNACLE_PCTV_400E 0x020f +#define USB_PID_PINNACLE_PCTV_450E 0x0222 +#define USB_PID_PINNACLE_PCTV_452E 0x021f +#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229 +#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228 +#define USB_PID_PIXELVIEW_SBTVD 0x5010 +#define USB_PID_PROF_1100 0xb012 +#define USB_PID_PROF_7500 0x7500 +#define USB_PID_PROLECTRIX_DV107669 0xd803 +#define USB_PID_REALTEK_RTL2831U 0x2831 +#define USB_PID_REALTEK_RTL2832U 0x2832 +#define USB_PID_SIGMATEK_DVB_110 0x6610 +#define USB_PID_SONY_PLAYTV 0x0003 +#define USB_PID_SVEON_STV20 0xe39d +#define USB_PID_SVEON_STV20_RTL2832U 0xd39d +#define USB_PID_SVEON_STV21 0xd3b0 +#define USB_PID_SVEON_STV22 0xe401 +#define USB_PID_SVEON_STV22_IT9137 0xe411 +#define USB_PID_SVEON_STV27 0xd3af +#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004 +#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003 +#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500 +#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001 +#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015 +#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d +#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006 +#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009 +#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007 +#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a +#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011 +#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017 +#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014 +#define USB_PID_TELESTAR_STARSTICK_2 0x8000 +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081 +#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060 +#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058 +#define USB_PID_TERRATEC_CINERGY_S 0x0064 +#define USB_PID_TERRATEC_CINERGY_S2_1 
0x1181 +#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182 +#define USB_PID_TERRATEC_CINERGY_S2_BOX 0x0105 +#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8 +#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0 +#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102 +#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105 +#define USB_PID_TERRATEC_CINERGY_T2 0x0038 +#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2 +#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062 +#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093 +#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9 +#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099 +#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069 +#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078 +#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab +#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4 +#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac +#define USB_PID_TERRATEC_H7 0x10b4 +#define USB_PID_TERRATEC_H7_2 0x10a3 +#define USB_PID_TERRATEC_H7_3 0x10a5 +#define USB_PID_TERRATEC_T1 0x10ae +#define USB_PID_TERRATEC_T3 0x10a0 +#define USB_PID_TERRATEC_T5 0x10a1 +#define USB_PID_TEVII_S421 0xd421 +#define USB_PID_TEVII_S480_1 0xd481 +#define USB_PID_TEVII_S480_2 0xd482 +#define USB_PID_TEVII_S482_1 0xd483 +#define USB_PID_TEVII_S482_2 0xd484 +#define USB_PID_TEVII_S630 0xd630 +#define USB_PID_TEVII_S632 0xd632 +#define USB_PID_TEVII_S650 0xd650 +#define USB_PID_TEVII_S660 0xd660 +#define USB_PID_TEVII_S662 0xd662 +#define USB_PID_TINYTWIN 0x3226 +#define USB_PID_TINYTWIN_2 0xe402 +#define USB_PID_TINYTWIN_3 0x9016 +#define USB_PID_TREKSTOR_DVBT 0x901b +#define USB_PID_TREKSTOR_TERRES_2_0 0xC803 +#define USB_PID_TURBOX_DTT_2000 0xd3a4 +#define USB_PID_TWINHAN_VP7021_WARM 0x3208 +#define USB_PID_TWINHAN_VP7041_COLD 0x3201 +#define USB_PID_TWINHAN_VP7041_WARM 0x3202 +#define USB_PID_ULTIMA_ARTEC_T14BR 0x810f +#define USB_PID_ULTIMA_ARTEC_T14_COLD 0x810b +#define 
USB_PID_ULTIMA_ARTEC_T14_WARM 0x810c +#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107 +#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108 +#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235 +#define USB_PID_ULTIMA_TVBOX_COLD 0x8105 +#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002 +#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a +#define USB_PID_ULTIMA_TVBOX_WARM 0x8106 +#define USB_PID_UNIWILL_STK7700P 0x6003 +#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e +#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f +#define USB_PID_VISIONPLUS_PINNACLE_PCTV310E 0x3211 +#define USB_PID_VISIONPLUS_TINYUSB2_COLD 0x3223 +#define USB_PID_VISIONPLUS_TINYUSB2_WARM 0x3224 +#define USB_PID_VISIONPLUS_VP7020_COLD 0x3203 +#define USB_PID_VISIONPLUS_VP7020_WARM 0x3204 +#define USB_PID_VISIONPLUS_VP7021_COLD 0x3207 +#define USB_PID_VISIONPLUS_VP7041_COLD 0x3201 +#define USB_PID_VISIONPLUS_VP7041_WARM 0x3202 +#define USB_PID_VISIONPLUS_VP7045_COLD 0x3205 +#define USB_PID_VISIONPLUS_VP7045_WARM 0x3206 +#define USB_PID_WIDEVIEW_DTT200U_COLD 0x0201 +#define USB_PID_WIDEVIEW_DTT200U_WARM 0x0301 +#define USB_PID_WIDEVIEW_DVBT_USB_COLD 0x0001 +#define USB_PID_WIDEVIEW_DVBT_USB_WARM 0x0002 +#define USB_PID_WIDEVIEW_WT220U_COLD 0x0222 +#define USB_PID_WIDEVIEW_WT220U_FC_COLD 0x0225 +#define USB_PID_WIDEVIEW_WT220U_FC_WARM 0x0226 +#define USB_PID_WIDEVIEW_WT220U_WARM 0x0221 +#define USB_PID_WIDEVIEW_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_WIDEVIEW_WT220U_ZL0353_COLD 0x022a +#define USB_PID_WIDEVIEW_WT220U_ZL0353_WARM 0x022b +#define USB_PID_WINFAST_DTV2000DS 0x6a04 +#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12 +#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029 +#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f +#define USB_PID_WINTV_SOLOHD 0x0264 +#define USB_PID_WINTV_SOLOHD_2 0x8268 +#define USB_PID_XTENSIONS_XD_380 0x0381 +#define USB_PID_YUAN_EC372S 0x1edc +#define USB_PID_YUAN_MC770 0x0871 +#define 
USB_PID_YUAN_PD378S 0x2edc +#define USB_PID_YUAN_STK7700D 0x1efc +#define USB_PID_YUAN_STK7700D_2 0x1e8c +#define USB_PID_YUAN_STK7700PH 0x1f08 + +#endif diff --git a/6.17.0/include-overrides/media/dvb_ca_en50221.h b/6.17.0/include-overrides/media/dvb_ca_en50221.h new file mode 100644 index 0000000..a1c014b --- /dev/null +++ b/6.17.0/include-overrides/media/dvb_ca_en50221.h @@ -0,0 +1,142 @@ +/* + * dvb_ca.h: generic DVB functions for EN50221 CA interfaces + * + * Copyright (C) 2004 Andrew de Quincey + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DVB_CA_EN50221_H_ +#define _DVB_CA_EN50221_H_ + +#include +#include + +#include + +#define DVB_CA_EN50221_POLL_CAM_PRESENT 1 +#define DVB_CA_EN50221_POLL_CAM_CHANGED 2 +#define DVB_CA_EN50221_POLL_CAM_READY 4 + +#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1 +#define DVB_CA_EN50221_FLAG_IRQ_FR 2 +#define DVB_CA_EN50221_FLAG_IRQ_DA 4 + +#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0 +#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1 + +/** + * struct dvb_ca_en50221- Structure describing a CA interface + * + * @owner: the module owning this structure + * @read_attribute_mem: function for reading attribute memory on the CAM + * @write_attribute_mem: function for writing attribute memory on the CAM + * @read_cam_control: function for reading the control interface on the CAM + * @write_cam_control: function for reading the control interface on the CAM + * @read_data: function for reading data (block mode) + * @write_data: function for writing data (block mode) + * @slot_reset: function to reset the CAM slot + * @slot_shutdown: function to shutdown a CAM slot + * @slot_ts_enable: function to enable the Transport Stream on a CAM slot + * @poll_slot_status: function to poll slot status. Only necessary if + * DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set. + * @data: private data, used by caller. + * @private: Opaque data used by the dvb_ca core. Do not modify! + * + * NOTE: the read_*, write_* and poll_slot_status functions will be + * called for different slots concurrently and need to use locks where + * and if appropriate. There will be no concurrent access to one slot. 
+ */ +struct dvb_ca_en50221 { + struct module *owner; + + int (*read_attribute_mem)(struct dvb_ca_en50221 *ca, + int slot, int address); + int (*write_attribute_mem)(struct dvb_ca_en50221 *ca, + int slot, int address, u8 value); + + int (*read_cam_control)(struct dvb_ca_en50221 *ca, + int slot, u8 address); + int (*write_cam_control)(struct dvb_ca_en50221 *ca, + int slot, u8 address, u8 value); + + int (*read_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*write_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + + int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); + int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); + int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); + + int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open); + + void *data; + + void *private; +}; + +/* + * Functions for reporting IRQ events + */ + +/** + * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + * @change_type: One of the DVB_CA_CAMCHANGE_* values + */ +void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, + int change_type); + +/** + * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot); + +/** + * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot); + +/* + * Initialisation/shutdown functions + */ + +/** + * dvb_ca_en50221_init - Initialise a new DVB CA device. + * + * @dvb_adapter: DVB adapter to attach the new CA device to. + * @ca: The dvb_ca instance. + * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*). + * @slot_count: Number of slots supported. 
+ * + * @return 0 on success, nonzero on failure + */ +int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221 *ca, int flags, + int slot_count); + +/** + * dvb_ca_en50221_release - Release a DVB CA device. + * + * @ca: The associated dvb_ca instance. + */ +void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca); + +#endif diff --git a/6.17.0/include-overrides/media/dvb_demux.h b/6.17.0/include-overrides/media/dvb_demux.h new file mode 100644 index 0000000..3b6aeca --- /dev/null +++ b/6.17.0/include-overrides/media/dvb_demux.h @@ -0,0 +1,354 @@ +/* + * dvb_demux.h: DVB kernel demux API + * + * Copyright (C) 2000-2001 Marcus Metzler & Ralph Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_DEMUX_H_ +#define _DVB_DEMUX_H_ + +#include +#include +#include +#include + +#include + +/** + * enum dvb_dmx_filter_type - type of demux feed. + * + * @DMX_TYPE_TS: feed is in TS mode. + * @DMX_TYPE_SEC: feed is in Section mode. + */ +enum dvb_dmx_filter_type { + DMX_TYPE_TS, + DMX_TYPE_SEC, +}; + +/** + * enum dvb_dmx_state - state machine for a demux filter. + * + * @DMX_STATE_FREE: indicates that the filter is freed. + * @DMX_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMX_STATE_READY: indicates that the filter is ready + * to be used. + * @DMX_STATE_GO: indicates that the filter is running. 
+ */ +enum dvb_dmx_state { + DMX_STATE_FREE, + DMX_STATE_ALLOCATED, + DMX_STATE_READY, + DMX_STATE_GO, +}; + +#define DVB_DEMUX_MASK_MAX 18 + +#define MAX_PID 0x1fff + +#define SPEED_PKTS_INTERVAL 50000 + +/** + * struct dvb_demux_filter - Describes a DVB demux section filter. + * + * @filter: Section filter as defined by &struct dmx_section_filter. + * @maskandmode: logical ``and`` bit mask. + * @maskandnotmode: logical ``and not`` bit mask. + * @doneq: flag that indicates when a filter is ready. + * @next: pointer to the next section filter. + * @feed: &struct dvb_demux_feed pointer. + * @index: index of the used demux filter. + * @state: state of the filter as described by &enum dvb_dmx_state. + * @type: type of the filter as described + * by &enum dvb_dmx_filter_type. + */ + +struct dvb_demux_filter { + struct dmx_section_filter filter; + u8 maskandmode[DMX_MAX_FILTER_SIZE]; + u8 maskandnotmode[DMX_MAX_FILTER_SIZE]; + bool doneq; + + struct dvb_demux_filter *next; + struct dvb_demux_feed *feed; + int index; + enum dvb_dmx_state state; + enum dvb_dmx_filter_type type; + + /* private: used only by av7110 */ + u16 hw_handle; +}; + +/** + * struct dvb_demux_feed - describes a DVB field + * + * @feed: a union describing a digital TV feed. + * Depending on the feed type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct dmx_ts_feed pointer. + * For TS feed only. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @cb: a union describing digital TV callbacks. + * Depending on the feed type, it can be either + * @cb.ts or @cb.sec. + * @cb.ts: a dmx_ts_cb() calback function pointer. + * For TS feed only. + * @cb.sec: a dmx_section_cb() callback function pointer. + * For section feed only. + * @demux: pointer to &struct dvb_demux. + * @priv: private data that can optionally be used by a DVB driver. + * @type: type of the filter, as defined by &enum dvb_dmx_filter_type. 
+ * @state: state of the filter as defined by &enum dvb_dmx_state. + * @pid: PID to be filtered. + * @timeout: feed timeout. + * @filter: pointer to &struct dvb_demux_filter. + * @buffer_flags: Buffer flags used to report discontinuity users via DVB + * memory mapped API, as defined by &enum dmx_buffer_flags. + * @ts_type: type of TS, as defined by &enum ts_filter_type. + * @pes_type: type of PES, as defined by &enum dmx_ts_pes. + * @cc: MPEG-TS packet continuity counter + * @pusi_seen: if true, indicates that a discontinuity was detected. + * it is used to prevent feeding of garbage from previous section. + * @peslen: length of the PES (Packet Elementary Stream). + * @list_head: head for the list of digital TV demux feeds. + * @index: a unique index for each feed. Can be used as hardware + * pid filter index. + */ +struct dvb_demux_feed { + union { + struct dmx_ts_feed ts; + struct dmx_section_feed sec; + } feed; + + union { + dmx_ts_cb ts; + dmx_section_cb sec; + } cb; + + struct dvb_demux *demux; + void *priv; + enum dvb_dmx_filter_type type; + enum dvb_dmx_state state; + u16 pid; + + ktime_t timeout; + struct dvb_demux_filter *filter; + + u32 buffer_flags; + + enum ts_filter_type ts_type; + enum dmx_ts_pes pes_type; + + int cc; + bool pusi_seen; + + u16 peslen; + + struct list_head list_head; + unsigned int index; +}; + +/** + * struct dvb_demux - represents a digital TV demux + * @dmx: embedded &struct dmx_demux with demux capabilities + * and callbacks. + * @priv: private data that can optionally be used by + * a DVB driver. + * @filternum: maximum amount of DVB filters. + * @feednum: maximum amount of DVB feeds. + * @start_feed: callback routine to be called in order to start + * a DVB feed. + * @stop_feed: callback routine to be called in order to stop + * a DVB feed. + * @write_to_decoder: callback routine to be called if the feed is TS and + * it is routed to an A/V decoder, when a new TS packet + * is received. + * Used only on av7110-av.c. 
+ * @check_crc32: callback routine to check CRC. If not initialized, + * dvb_demux will use an internal one. + * @memcopy: callback routine to memcopy received data. + * If not initialized, dvb_demux will default to memcpy(). + * @users: counter for the number of demux opened file descriptors. + * Currently, it is limited to 10 users. + * @filter: pointer to &struct dvb_demux_filter. + * @feed: pointer to &struct dvb_demux_feed. + * @frontend_list: &struct list_head with frontends used by the demux. + * @pesfilter: array of &struct dvb_demux_feed with the PES types + * that will be filtered. + * @pids: list of filtered program IDs. + * @feed_list: &struct list_head with feeds. + * @tsbuf: temporary buffer used internally to store TS packets. + * @tsbufp: temporary buffer index used internally. + * @mutex: pointer to &struct mutex used to protect feed set + * logic. + * @lock: pointer to &spinlock_t, used to protect buffer handling. + * @cnt_storage: buffer used for TS/TEI continuity check. + * @speed_last_time: &ktime_t used for TS speed check. + * @speed_pkts_cnt: packets count used for TS speed check. 
+ */ +struct dvb_demux { + struct dmx_demux dmx; + void *priv; + int filternum; + int feednum; + int (*start_feed)(struct dvb_demux_feed *feed); + int (*stop_feed)(struct dvb_demux_feed *feed); + int (*write_to_decoder)(struct dvb_demux_feed *feed, + const u8 *buf, size_t len); + u32 (*check_crc32)(struct dvb_demux_feed *feed, + const u8 *buf, size_t len); + void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst, + const u8 *src, size_t len); + + int users; +#define MAX_DVB_DEMUX_USERS 10 + struct dvb_demux_filter *filter; + struct dvb_demux_feed *feed; + + struct list_head frontend_list; + + struct dvb_demux_feed *pesfilter[DMX_PES_OTHER]; + u16 pids[DMX_PES_OTHER]; + +#define DMX_MAX_PID 0x2000 + struct list_head feed_list; + u8 tsbuf[204]; + int tsbufp; + + struct mutex mutex; + spinlock_t lock; + + uint8_t *cnt_storage; /* for TS continuity check */ + + ktime_t speed_last_time; /* for TS speed check */ + uint32_t speed_pkts_cnt; /* for TS speed check */ + + /* private: used only on av7110 */ + int playing; + int recording; +}; + +/** + * dvb_dmx_init - initialize a digital TV demux struct. + * + * @demux: &struct dvb_demux to be initialized. + * + * Before being able to register a digital TV demux struct, drivers + * should call this routine. On its typical usage, some fields should + * be initialized at the driver before calling it. + * + * A typical usecase is:: + * + * dvb->demux.dmx.capabilities = + * DMX_TS_FILTERING | DMX_SECTION_FILTERING | + * DMX_MEMORY_BASED_FILTERING; + * dvb->demux.priv = dvb; + * dvb->demux.filternum = 256; + * dvb->demux.feednum = 256; + * dvb->demux.start_feed = driver_start_feed; + * dvb->demux.stop_feed = driver_stop_feed; + * ret = dvb_dmx_init(&dvb->demux); + * if (ret < 0) + * return ret; + */ +int dvb_dmx_init(struct dvb_demux *demux); + +/** + * dvb_dmx_release - releases a digital TV demux internal buffers. + * + * @demux: &struct dvb_demux to be released. + * + * The DVB core internally allocates data at @demux. 
This routine + * releases those data. Please notice that the struct itelf is not + * released, as it can be embedded on other structs. + */ +void dvb_dmx_release(struct dvb_demux *demux); + +/** + * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 188 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 188. + * + * The routine will discard a DVB packet that don't start with 0x47. + * + * Use this routine if the DVB demux fills MPEG-TS buffers that are + * already aligned. + * + * NOTE: The @buf size should have size equal to ``count * 188``. + */ +void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, + size_t count); + +/** + * dvb_dmx_swfilter - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 188 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 188. + * + * If a DVB packet doesn't start with 0x47, it will seek for the first + * byte that starts with 0x47. + * + * Use this routine if the DVB demux fill buffers that may not start with + * a packet start mark (0x47). + * + * NOTE: The @buf size should have size equal to ``count * 188``. + */ +void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count); + +/** + * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 204 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 204. + * + * If a DVB packet doesn't start with 0x47, it will seek for the first + * byte that starts with 0x47. + * + * Use this routine if the DVB demux fill buffers that may not start with + * a packet start mark (0x47). + * + * NOTE: The @buf size should have size equal to ``count * 204``. 
+ */ +void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, + size_t count); + +/** + * dvb_dmx_swfilter_raw - make the raw data available to userspace without + * filtering + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data + * @count: number of packets to be passed. The actual size of each packet + * depends on the &dvb_demux->feed->cb.ts logic. + * + * Use it if the driver needs to deliver the raw payload to userspace without + * passing through the kernel demux. That is meant to support some + * delivery systems that aren't based on MPEG-TS. + * + * This function relies on &dvb_demux->feed->cb.ts to actually handle the + * buffer. + */ +void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, + size_t count); + +#endif /* _DVB_DEMUX_H_ */ diff --git a/6.17.0/include-overrides/media/dvb_frontend.h b/6.17.0/include-overrides/media/dvb_frontend.h new file mode 100644 index 0000000..e7c4487 --- /dev/null +++ b/6.17.0/include-overrides/media/dvb_frontend.h @@ -0,0 +1,834 @@ +/* + * dvb_frontend.h + * + * The Digital TV Frontend kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * frontend layer. + * + * Copyright (C) 2001 convergence integrated media GmbH + * Copyright (C) 2004 convergence GmbH + * + * Written by Ralph Metzler + * Overhauled by Holger Waechtler + * Kernel I2C stuff by Michael Hunold + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVB_FRONTEND_H_ +#define _DVB_FRONTEND_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +/* + * Maximum number of Delivery systems per frontend. It + * should be smaller or equal to 32 + */ +#define MAX_DELSYS 8 + +/* Helper definitions to be used at frontend drivers */ +#define kHz 1000UL +#define MHz 1000000UL + +/** + * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning + * + * @min_delay_ms: minimum delay for tuning, in ms + * @step_size: step size between two consecutive frequencies + * @max_drift: maximum drift + * + * NOTE: step_size is in Hz, for terrestrial/cable or kHz for satellite + */ +struct dvb_frontend_tune_settings { + int min_delay_ms; + int step_size; + int max_drift; +}; + +struct dvb_frontend; + +/** + * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths + * + * @name: name of the Frontend + * @frequency_min_hz: minimal frequency supported in Hz + * @frequency_max_hz: maximum frequency supported in Hz + * @frequency_step_hz: frequency step in Hz + * @bandwidth_min: minimal frontend bandwidth supported + * @bandwidth_max: maximum frontend bandwidth supported + * @bandwidth_step: frontend bandwidth step + */ +struct dvb_tuner_info { + char name[128]; + + u32 frequency_min_hz; + u32 frequency_max_hz; + u32 frequency_step_hz; + + u32 bandwidth_min; + u32 bandwidth_max; + u32 bandwidth_step; +}; + +/** + * struct analog_parameters - Parameters to tune into an analog/radio channel + * + * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz step, + * for TV, or 62.5 Hz for radio) + * @mode: Tuner mode, as defined on enum v4l2_tuner_type + * @audmode: Audio mode as defined for 
the rxsubchans field at videodev2.h, + * e. g. V4L2_TUNER_MODE_* + * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_* + * + * Hybrid tuners should be supported by both V4L2 and DVB APIs. This + * struct contains the data that are used by the V4L2 side. To avoid + * dependencies from V4L2 headers, all enums here are declared as integers. + */ +struct analog_parameters { + unsigned int frequency; + unsigned int mode; + unsigned int audmode; + u64 std; +}; + +/** + * enum dvbfe_algo - defines the algorithm used to tune into a channel + * + * @DVBFE_ALGO_HW: Hardware Algorithm - + * Devices that support this algorithm do everything in hardware + * and no software support is needed to handle them. + * Requesting these devices to LOCK is the only thing required, + * device is supposed to do everything in the hardware. + * + * @DVBFE_ALGO_SW: Software Algorithm - + * These are dumb devices, that require software to do everything + * + * @DVBFE_ALGO_CUSTOM: Customizable Agorithm - + * Devices having this algorithm can be customized to have specific + * algorithms in the frontend driver, rather than simply doing a + * software zig-zag. In this case the zigzag maybe hardware assisted + * or it maybe completely done in hardware. In all cases, usage of + * this algorithm, in conjunction with the search and track + * callbacks, utilizes the driver specific algorithm. 
+ * + * @DVBFE_ALGO_RECOVERY: Recovery Algorithm - + * These devices have AUTO recovery capabilities from LOCK failure + */ +enum dvbfe_algo { + DVBFE_ALGO_HW = BIT(0), + DVBFE_ALGO_SW = BIT(1), + DVBFE_ALGO_CUSTOM = BIT(2), + DVBFE_ALGO_RECOVERY = BIT(31), +}; + +/** + * enum dvbfe_search - search callback possible return status + * + * @DVBFE_ALGO_SEARCH_SUCCESS: + * The frontend search algorithm completed and returned successfully + * + * @DVBFE_ALGO_SEARCH_ASLEEP: + * The frontend search algorithm is sleeping + * + * @DVBFE_ALGO_SEARCH_FAILED: + * The frontend search for a signal failed + * + * @DVBFE_ALGO_SEARCH_INVALID: + * The frontend search algorithm was probably supplied with invalid + * parameters and the search is an invalid one + * + * @DVBFE_ALGO_SEARCH_ERROR: + * The frontend search algorithm failed due to some error + * + * @DVBFE_ALGO_SEARCH_AGAIN: + * The frontend search algorithm was requested to search again + */ +enum dvbfe_search { + DVBFE_ALGO_SEARCH_SUCCESS = BIT(0), + DVBFE_ALGO_SEARCH_ASLEEP = BIT(1), + DVBFE_ALGO_SEARCH_FAILED = BIT(2), + DVBFE_ALGO_SEARCH_INVALID = BIT(3), + DVBFE_ALGO_SEARCH_AGAIN = BIT(4), + DVBFE_ALGO_SEARCH_ERROR = BIT(31), +}; + +/** + * struct dvb_tuner_ops - Tuner information and callbacks + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @set_params: callback function used to inform the tuner to tune + * into a digital TV channel. The properties to be used + * are stored at &struct dvb_frontend.dtv_property_cache. 
+ * The tuner demod can change the parameters to reflect + * the changes needed for the channel to be tuned, and + * update statistics. This is the recommended way to set + * the tuner parameters and should be used on newer + * drivers. + * @set_analog_params: callback function used to tune into an analog TV + * channel on hybrid tuners. It passes @analog_parameters + * to the driver. + * @set_config: callback function used to send some tuner-specific + * parameters. + * @get_frequency: get the actual tuned frequency + * @get_bandwidth: get the bandwidth used by the low pass filters + * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband, + * should return 0. + * @get_status: returns the frontend lock status + * @get_rf_strength: returns the RF signal strength. Used mostly to support + * analog TV and radio. Digital TV should report, instead, + * via DVBv5 API (&struct dvb_frontend.dtv_property_cache). + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @calc_regs: callback function used to pass register data settings + * for simple tuners. Shouldn't be used on newer drivers. + * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers. + * @set_bandwidth: Set a new frequency. Shouldn't be used on newer drivers. + * + * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for + * terrestrial/cable or kHz for satellite. 
+ * + */ +struct dvb_tuner_ops { + + struct dvb_tuner_info info; + + void (*release)(struct dvb_frontend *fe); + int (*init)(struct dvb_frontend *fe); + int (*sleep)(struct dvb_frontend *fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + /* This is the recommended way to set the tuner */ + int (*set_params)(struct dvb_frontend *fe); + int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p); + + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); + + int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency); + int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth); + int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency); + +#define TUNER_STATUS_LOCKED 1 +#define TUNER_STATUS_STEREO 2 + int (*get_status)(struct dvb_frontend *fe, u32 *status); + int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + + /* + * This is support for demods like the mt352 - fills out the supplied + * buffer with what to write. + * + * Don't use on newer drivers. + */ + int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len); + + /* + * These are provided separately from set_params in order to + * facilitate silicon tuners which require sophisticated tuning loops, + * controlling each parameter separately. + * + * Don't use on newer drivers. 
+ */ + int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); + int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); +}; + +/** + * struct analog_demod_info - Information struct for analog TV part of the demod + * + * @name: Name of the analog TV demodulator + */ +struct analog_demod_info { + char *name; +}; + +/** + * struct analog_demod_ops - Demodulation information and callbacks for + * analog TV and radio + * + * @info: pointer to struct analog_demod_info + * @set_params: callback function used to inform the demod to set the + * demodulator parameters needed to decode an analog or + * radio channel. The properties are passed via + * &struct analog_params. + * @has_signal: returns 0xffff if has signal, or 0 if it doesn't. + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @tuner_status: callback function that returns tuner status bits, e. g. + * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO. + * @standby: set the tuner to standby mode. + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @set_config: callback function used to send some tuner-specific + * parameters. 
+ */ +struct analog_demod_ops { + + struct analog_demod_info info; + + void (*set_params)(struct dvb_frontend *fe, + struct analog_parameters *params); + int (*has_signal)(struct dvb_frontend *fe, u16 *signal); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + void (*tuner_status)(struct dvb_frontend *fe); + void (*standby)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend *fe); + int (*i2c_gate_ctrl)(struct dvb_frontend *fe, int enable); + + /** This is to allow setting tuner-specific configuration */ + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); +}; + +struct dtv_frontend_properties; + +/** + * struct dvb_frontend_internal_info - Frontend properties and capabilities + * + * @name: Name of the frontend + * @frequency_min_hz: Minimal frequency supported by the frontend. + * @frequency_max_hz: Maximal frequency supported by the frontend. + * @frequency_stepsize_hz: All frequencies are multiple of this value. + * @frequency_tolerance_hz: Frequency tolerance. + * @symbol_rate_min: Minimal symbol rate, in bauds + * (for Cable/Satellite systems). + * @symbol_rate_max: Maximal symbol rate, in bauds + * (for Cable/Satellite systems). + * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm + * (for Cable/Satellite systems). + * @caps: Capabilities supported by the frontend, + * as specified in &enum fe_caps. + */ +struct dvb_frontend_internal_info { + char name[128]; + u32 frequency_min_hz; + u32 frequency_max_hz; + u32 frequency_stepsize_hz; + u32 frequency_tolerance_hz; + u32 symbol_rate_min; + u32 symbol_rate_max; + u32 symbol_rate_tolerance; + enum fe_caps caps; +}; + +/** + * struct dvb_frontend_ops - Demodulation information and callbacks for + * digital TV + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @delsys: Delivery systems supported by the frontend + * @detach: callback function called when frontend is detached. 
+ * drivers should clean up, but not yet free the &struct + * dvb_frontend allocation. + * @release: callback function called when frontend is ready to be + * freed. + * drivers should free any allocated memory. + * @release_sec: callback function requesting that the Satellite Equipment + * Control (SEC) driver to release and free any memory + * allocated by the driver. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @write: callback function used by some demod legacy drivers to + * allow other drivers to write data into their registers. + * Should not be used on new drivers. + * @tune: callback function used by demod drivers that use + * @DVBFE_ALGO_HW to tune into a frequency. + * @get_frontend_algo: returns the desired hardware algorithm. + * @set_frontend: callback function used to inform the demod to set the + * parameters for demodulating a digital TV channel. + * The properties to be used are stored at &struct + * dvb_frontend.dtv_property_cache. The demod can change + * the parameters to reflect the changes needed for the + * channel to be decoded, and update statistics. + * @get_tune_settings: callback function + * @get_frontend: callback function used to inform the parameters + * actuall in use. The properties to be used are stored at + * &struct dvb_frontend.dtv_property_cache and update + * statistics. Please notice that it should not return + * an error code if the statistics are not available + * because the demog is not locked. + * @read_status: returns the locking status of the frontend. + * @read_ber: legacy callback function to return the bit error rate. + * Newer drivers should provide such info via DVBv5 API, + * e. g. 
@set_frontend;/@get_frontend, implementing this + * callback only if DVBv3 API compatibility is wanted. + * @read_signal_strength: legacy callback function to return the signal + * strength. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @read_snr: legacy callback function to return the Signal/Noise + * rate. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @read_ucblocks: legacy callback function to return the Uncorrected Error + * Blocks. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @diseqc_reset_overload: callback function to implement the + * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite) + * @diseqc_send_master_cmd: callback function to implement the + * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite). + * @diseqc_recv_slave_reply: callback function to implement the + * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite) + * @diseqc_send_burst: callback function to implement the + * FE_DISEQC_SEND_BURST() ioctl (only Satellite). + * @set_tone: callback function to implement the + * FE_SET_TONE() ioctl (only Satellite). + * @set_voltage: callback function to implement the + * FE_SET_VOLTAGE() ioctl (only Satellite). + * @enable_high_lnb_voltage: callback function to implement the + * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite). + * @dishnetwork_send_legacy_command: callback function to implement the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite). + * Drivers should not use this, except when the DVB + * core emulation fails to provide proper support (e.g. 
+ * if @set_voltage takes more than 8ms to work), and + * when backward compatibility with this legacy API is + * required. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @ts_bus_ctrl: callback function used to take control of the TS bus. + * @set_lna: callback function to power on/off/auto the LNA. + * @search: callback function used on some custom algo search algos. + * @tuner_ops: pointer to &struct dvb_tuner_ops + * @analog_ops: pointer to &struct analog_demod_ops + */ +struct dvb_frontend_ops { + struct dvb_frontend_internal_info info; + + u8 delsys[MAX_DELSYS]; + + void (*detach)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend* fe); + void (*release_sec)(struct dvb_frontend* fe); + + int (*init)(struct dvb_frontend* fe); + int (*sleep)(struct dvb_frontend* fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + int (*write)(struct dvb_frontend* fe, const u8 buf[], int len); + + /* if this is set, it overrides the default swzigzag */ + int (*tune)(struct dvb_frontend* fe, + bool re_tune, + unsigned int mode_flags, + unsigned int *delay, + enum fe_status *status); + + /* get frontend tuning algorithm from the module */ + enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe); + + /* these two are only used for the swzigzag code */ + int (*set_frontend)(struct dvb_frontend *fe); + int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings); + + int (*get_frontend)(struct dvb_frontend *fe, + struct dtv_frontend_properties *props); + + int (*read_status)(struct dvb_frontend *fe, enum fe_status *status); + int (*read_ber)(struct dvb_frontend* fe, u32* ber); + int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength); + int (*read_snr)(struct dvb_frontend* fe, u16* snr); + int (*read_ucblocks)(struct dvb_frontend* fe, u32* ucblocks); + + int (*diseqc_reset_overload)(struct dvb_frontend* fe); + int 
(*diseqc_send_master_cmd)(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd); + int (*diseqc_recv_slave_reply)(struct dvb_frontend* fe, struct dvb_diseqc_slave_reply* reply); + int (*diseqc_send_burst)(struct dvb_frontend *fe, + enum fe_sec_mini_cmd minicmd); + int (*set_tone)(struct dvb_frontend *fe, enum fe_sec_tone_mode tone); + int (*set_voltage)(struct dvb_frontend *fe, + enum fe_sec_voltage voltage); + int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg); + int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd); + int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable); + int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); + int (*set_lna)(struct dvb_frontend *); + + /* + * These callbacks are for devices that implement their own + * tuning algorithms, rather than a simple swzigzag + */ + enum dvbfe_search (*search)(struct dvb_frontend *fe); + + struct dvb_tuner_ops tuner_ops; + struct analog_demod_ops analog_ops; +}; + +#ifdef __DVB_CORE__ +#define MAX_EVENT 8 + +/* Used only internally at dvb_frontend.c */ +struct dvb_fe_events { + struct dvb_frontend_event events[MAX_EVENT]; + int eventw; + int eventr; + int overflow; + wait_queue_head_t wait_queue; + struct mutex mtx; +}; +#endif + +/** + * struct dtv_frontend_properties - contains a list of properties that are + * specific to a digital TV standard. + * + * @frequency: frequency in Hz for terrestrial/cable or in kHz for + * Satellite + * @modulation: Frontend modulation type + * @voltage: SEC voltage (only Satellite) + * @sectone: SEC tone mode (only Satellite) + * @inversion: Spectral inversion + * @fec_inner: Forward error correction inner Code Rate + * @transmission_mode: Transmission Mode + * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace + * wants to autodetect. 
+ * @guard_interval: Guard Interval + * @hierarchy: Hierarchy + * @symbol_rate: Symbol Rate + * @code_rate_HP: high priority stream code rate + * @code_rate_LP: low priority stream code rate + * @pilot: Enable/disable/autodetect pilot tones + * @rolloff: Rolloff factor (alpha) + * @delivery_system: FE delivery system (e. g. digital TV standard) + * @interleaving: interleaving + * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard) + * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard) + * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard) + * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard) + * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard) + * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard) + * @layer: ISDB per-layer data (only ISDB standard) + * @layer.segment_count: Segment Count; + * @layer.fec: per layer code rate; + * @layer.modulation: per layer modulation; + * @layer.interleaving: per layer interleaving. + * @stream_id: If different than zero, enable substream filtering, if + * hardware supports (DVB-S2 and DVB-T2). + * @scrambling_sequence_index: Carries the index of the DVB-S2 physical layer + * scrambling sequence. 
+ * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel) + * signaling data (only ATSC-M/H) + * @atscmh_parade_id: Parade identification number (only ATSC-M/H) + * @atscmh_nog: Number of MH groups per MH subframe for a designated + * parade (only ATSC-M/H) + * @atscmh_tnog: Total number of MH groups including all MH groups + * belonging to all MH parades in one MH subframe + * (only ATSC-M/H) + * @atscmh_sgn: Start group number (only ATSC-M/H) + * @atscmh_prc: Parade repetition cycle (only ATSC-M/H) + * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H) + * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H) + * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H) + * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H) + * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC) + * Block Mode (only ATSC-M/H) + * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H) + * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H) + * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H) + * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H) + * @lna: Power ON/OFF/AUTO the Low-Noise Amplifier (LNA) + * @strength: DVBv5 API statistics: Signal Strength + * @cnr: DVBv5 API statistics: Signal to Noise ratio of the + * (main) carrier + * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count + * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count + * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count + * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count + * @block_error: DVBv5 API statistics: block error count + * @block_count: DVBv5 API statistics: block count + * + * NOTE: derived statistics like Uncorrected Error blocks (UCE) are + * calculated on userspace. + * + * Only a subset of the properties are needed for a given delivery system. 
+ * For more info, consult the media_api.html with the documentation of the + * Userspace API. + */ +struct dtv_frontend_properties { + u32 frequency; + enum fe_modulation modulation; + + enum fe_sec_voltage voltage; + enum fe_sec_tone_mode sectone; + enum fe_spectral_inversion inversion; + enum fe_code_rate fec_inner; + enum fe_transmit_mode transmission_mode; + u32 bandwidth_hz; /* 0 = AUTO */ + enum fe_guard_interval guard_interval; + enum fe_hierarchy hierarchy; + u32 symbol_rate; + enum fe_code_rate code_rate_HP; + enum fe_code_rate code_rate_LP; + + enum fe_pilot pilot; + enum fe_rolloff rolloff; + + enum fe_delivery_system delivery_system; + + enum fe_interleaving interleaving; + + /* ISDB-T specifics */ + u8 isdbt_partial_reception; + u8 isdbt_sb_mode; + u8 isdbt_sb_subchannel; + u32 isdbt_sb_segment_idx; + u32 isdbt_sb_segment_count; + u8 isdbt_layer_enabled; + struct { + u8 segment_count; + enum fe_code_rate fec; + enum fe_modulation modulation; + u8 interleaving; + } layer[3]; + + /* Multistream specifics */ + u32 stream_id; + + /* Physical Layer Scrambling specifics */ + u32 scrambling_sequence_index; + + /* ATSC-MH specifics */ + u8 atscmh_fic_ver; + u8 atscmh_parade_id; + u8 atscmh_nog; + u8 atscmh_tnog; + u8 atscmh_sgn; + u8 atscmh_prc; + + u8 atscmh_rs_frame_mode; + u8 atscmh_rs_frame_ensemble; + u8 atscmh_rs_code_mode_pri; + u8 atscmh_rs_code_mode_sec; + u8 atscmh_sccc_block_mode; + u8 atscmh_sccc_code_mode_a; + u8 atscmh_sccc_code_mode_b; + u8 atscmh_sccc_code_mode_c; + u8 atscmh_sccc_code_mode_d; + + u32 lna; + + /* statistics data */ + struct dtv_fe_stats strength; + struct dtv_fe_stats cnr; + struct dtv_fe_stats pre_bit_error; + struct dtv_fe_stats pre_bit_count; + struct dtv_fe_stats post_bit_error; + struct dtv_fe_stats post_bit_count; + struct dtv_fe_stats block_error; + struct dtv_fe_stats block_count; +}; + +#define DVB_FE_NO_EXIT 0 +#define DVB_FE_NORMAL_EXIT 1 +#define DVB_FE_DEVICE_REMOVED 2 +#define DVB_FE_DEVICE_RESUME 3 + +/** + * 
struct dvb_frontend - Frontend structure to be used on drivers. + * + * @refcount: refcount to keep track of &struct dvb_frontend + * references + * @ops: embedded &struct dvb_frontend_ops + * @dvb: pointer to &struct dvb_adapter + * @demodulator_priv: demod private data + * @tuner_priv: tuner private data + * @frontend_priv: frontend private data + * @sec_priv: SEC private data + * @analog_demod_priv: Analog demod private data + * @dtv_property_cache: embedded &struct dtv_frontend_properties + * @callback: callback function used on some drivers to call + * either the tuner or the demodulator. + * @id: Frontend ID + * @exit: Used to inform the DVB core that the frontend + * thread should exit (usually, means that the hardware + * got disconnected). + */ + +struct dvb_frontend { + struct kref refcount; + struct dvb_frontend_ops ops; + struct dvb_adapter *dvb; + void *demodulator_priv; + void *tuner_priv; + void *frontend_priv; + void *sec_priv; + void *analog_demod_priv; + struct dtv_frontend_properties dtv_property_cache; +#define DVB_FRONTEND_COMPONENT_TUNER 0 +#define DVB_FRONTEND_COMPONENT_DEMOD 1 + int (*callback)(void *adapter_priv, int component, int cmd, int arg); + int id; + unsigned int exit; +}; + +/** + * dvb_register_frontend() - Registers a DVB frontend at the adapter + * + * @dvb: pointer to &struct dvb_adapter + * @fe: pointer to &struct dvb_frontend + * + * Allocate and initialize the private data needed by the frontend core to + * manage the frontend and calls dvb_register_device() to register a new + * frontend. It also cleans the property cache that stores the frontend + * parameters and selects the first available delivery system. 
+ */ +int dvb_register_frontend(struct dvb_adapter *dvb, + struct dvb_frontend *fe); + +/** + * dvb_unregister_frontend() - Unregisters a DVB frontend + * + * @fe: pointer to &struct dvb_frontend + * + * Stops the frontend kthread, calls dvb_unregister_device() and frees the + * private frontend data allocated by dvb_register_frontend(). + * + * NOTE: This function doesn't free the memory allocated by the demod, + * by the SEC driver and by the tuner. In order to free it, an explicit call to + * dvb_frontend_detach() is needed, after calling this function. + */ +int dvb_unregister_frontend(struct dvb_frontend *fe); + +/** + * dvb_frontend_detach() - Detaches and frees frontend specific data + * + * @fe: pointer to &struct dvb_frontend + * + * This function should be called after dvb_unregister_frontend(). It + * calls the SEC, tuner and demod release functions: + * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release, + * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release. + * + * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases + * the module reference count, needed to allow userspace to remove the + * previously used DVB frontend modules. + */ +void dvb_frontend_detach(struct dvb_frontend *fe); + +/** + * dvb_frontend_suspend() - Suspends a Digital TV frontend + * + * @fe: pointer to &struct dvb_frontend + * + * This function prepares a Digital TV frontend to suspend. + * + * In order to prepare the tuner to suspend, if + * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise, + * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available. + * + * It will also call &dvb_frontend_ops.suspend\(\) to put the demod to suspend, + * if available. Otherwise it will call &dvb_frontend_ops.sleep\(\). + * + * The drivers should also call dvb_frontend_suspend\(\) as part of their + * handler for the &device_driver.suspend\(\). 
+ */ +int dvb_frontend_suspend(struct dvb_frontend *fe); + +/** + * dvb_frontend_resume() - Resumes a Digital TV frontend + * + * @fe: pointer to &struct dvb_frontend + * + * This function resumes the usual operation of the tuner after resume. + * + * In order to resume the frontend, it calls the demod + * &dvb_frontend_ops.resume\(\) if available. Otherwise it calls demod + * &dvb_frontend_ops.init\(\). + * + * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it. + * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available. + * + * Once tuner and demods are resumed, it will enforce that the SEC voltage and + * tone are restored to their previous values and wake up the frontend's + * kthread in order to retune the frontend. + * + * The drivers should also call dvb_frontend_resume() as part of their + * handler for the &device_driver.resume\(\). + */ +int dvb_frontend_resume(struct dvb_frontend *fe); + +/** + * dvb_frontend_reinitialise() - forces a reinitialisation at the frontend + * + * @fe: pointer to &struct dvb_frontend + * + * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\), + * and resets SEC tone and voltage (for Satellite systems). + * + * NOTE: Currently, this function is used only by one driver (budget-av). + * It seems to be needed to address some special issue with that specific + * frontend. + */ +void dvb_frontend_reinitialise(struct dvb_frontend *fe); + +/** + * dvb_frontend_sleep_until() - Sleep for the amount of time given by + * add_usec parameter + * + * @waketime: pointer to &struct ktime_t + * @add_usec: time to sleep, in microseconds + * + * This function is used to measure the time required for the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise + * as possible, as it affects the detection of the dish tone command at the + * satellite subsystem. 
+ * + * Its used internally by the DVB frontend core, in order to emulate + * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\) + * callback. + * + * NOTE: it should not be used at the drivers, as the emulation for the + * legacy callback is provided by the Kernel. The only situation where this + * should be at the drivers is when there are some bugs at the hardware that + * would prevent the core emulation to work. On such cases, the driver would + * be writing a &dvb_frontend_ops.dishnetwork_send_legacy_command\(\) and + * calling this function directly. + */ +void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec); + +#endif diff --git a/6.17.0/include-overrides/media/dvb_net.h b/6.17.0/include-overrides/media/dvb_net.h new file mode 100644 index 0000000..4a921ea --- /dev/null +++ b/6.17.0/include-overrides/media/dvb_net.h @@ -0,0 +1,95 @@ +/* + * dvb_net.h + * + * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_NET_H_ +#define _DVB_NET_H_ + +#include + +#include + +struct net_device; + +#define DVB_NET_DEVICES_MAX 10 + +#ifdef CONFIG_DVB_NET + +/** + * struct dvb_net - describes a DVB network interface + * + * @dvbdev: pointer to &struct dvb_device. + * @device: array of pointers to &struct net_device. + * @state: array of integers to each net device. A value + * different than zero means that the interface is + * in usage. 
+ * @exit: flag to indicate when the device is being removed. + * @demux: pointer to &struct dmx_demux. + * @ioctl_mutex: protect access to this struct. + * @remove_mutex: mutex that avoids a race condition between a callback + * called when the hardware is disconnected and the + * file_operations of dvb_net. + * + * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network + * devices. + */ + +struct dvb_net { + struct dvb_device *dvbdev; + struct net_device *device[DVB_NET_DEVICES_MAX]; + int state[DVB_NET_DEVICES_MAX]; + unsigned int exit:1; + struct dmx_demux *demux; + struct mutex ioctl_mutex; + struct mutex remove_mutex; +}; + +/** + * dvb_net_init - initializes a digital TV network device and registers it. + * + * @adap: pointer to &struct dvb_adapter. + * @dvbnet: pointer to &struct dvb_net. + * @dmxdemux: pointer to &struct dmx_demux. + */ +int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet, + struct dmx_demux *dmxdemux); + +/** + * dvb_net_release - releases a digital TV network device and unregisters it. + * + * @dvbnet: pointer to &struct dvb_net. 
+ */ +void dvb_net_release(struct dvb_net *dvbnet); + +#else + +struct dvb_net { + struct dvb_device *dvbdev; +}; + +static inline void dvb_net_release(struct dvb_net *dvbnet) +{ +} + +static inline int dvb_net_init(struct dvb_adapter *adap, + struct dvb_net *dvbnet, struct dmx_demux *dmx) +{ + return 0; +} + +#endif /* ifdef CONFIG_DVB_NET */ + +#endif diff --git a/6.17.0/include-overrides/media/dvb_ringbuffer.h b/6.17.0/include-overrides/media/dvb_ringbuffer.h new file mode 100644 index 0000000..029c8b6 --- /dev/null +++ b/6.17.0/include-overrides/media/dvb_ringbuffer.h @@ -0,0 +1,280 @@ +/* + * + * dvb_ringbuffer.h: ring buffer implementation for the dvb driver + * + * Copyright (C) 2003 Oliver Endriss + * Copyright (C) 2004 Andrew de Quincey + * + * based on code originally found in av7110.c & dvb_ci.c: + * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + */ + +#ifndef _DVB_RINGBUFFER_H_ +#define _DVB_RINGBUFFER_H_ + +#include +#include + +/** + * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework + * + * @data: Area were the ringbuffer data is written + * @size: size of the ringbuffer + * @pread: next position to read + * @pwrite: next position to write + * @error: used by ringbuffer clients to indicate that an error happened. 
+ * @queue: Wait queue used by ringbuffer clients to indicate when buffer + * was filled + * @lock: Spinlock used to protect the ringbuffer + */ +struct dvb_ringbuffer { + u8 *data; + ssize_t size; + ssize_t pread; + ssize_t pwrite; + int error; + + wait_queue_head_t queue; + spinlock_t lock; +}; + +#define DVB_RINGBUFFER_PKTHDRSIZE 3 + +/** + * dvb_ringbuffer_init - initialize ring buffer, lock and queue + * + * @rbuf: pointer to struct dvb_ringbuffer + * @data: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + */ +extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, + size_t len); + +/** + * dvb_ringbuffer_empty - test whether buffer is empty + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_free - returns the number of free bytes in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of free bytes in the buffer + */ +extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of bytes waiting in the buffer + */ +extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_reset - resets the ringbuffer to initial state + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Resets the read and write pointers to zero and flush the buffer. 
+ * + * This counts as a read and write operation + */ +extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf); + +/* + * read routines & macros + */ + +/** + * dvb_ringbuffer_flush - flush buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock + * and wake-up waiting task(s) + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf); + +/** + * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @offs: offset inside the ringbuffer + */ +#define DVB_RINGBUFFER_PEEK(rbuf, offs) \ + ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size]) + +/** + * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes + * + * @rbuf: pointer to struct dvb_ringbuffer + * @num: number of bytes to advance + */ +#define DVB_RINGBUFFER_SKIP(rbuf, num) {\ + (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\ +} + +/** + * dvb_ringbuffer_read_user - Reads a buffer into a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_to_user(). 
+ * + * Return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, + u8 __user *buf, size_t len); + +/** + * dvb_ringbuffer_read - Reads a buffer into a pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the Kernel space + * + * Return: number of bytes transferred or -EFAULT + */ +extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, + u8 *buf, size_t len); + +/* + * write routines & macros + */ + +/** + * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @byte: byte to write + */ +#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \ + { (rbuf)->data[(rbuf)->pwrite] = (byte); \ + (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; } + +/** + * dvb_ringbuffer_write - Writes a buffer into the ringbuffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be read + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the Kernel space + * + * return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be read + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_from_user(). + * + * Return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, + const u8 __user *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer. 
+ * + * @rbuf: Ringbuffer to write to. + * @buf: Buffer to write. + * @len: Length of buffer (currently limited to 65535 bytes max). + * + * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL. + */ +extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + * + * .. note:: + * + * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer + * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a + * packet as no longer required. + */ +extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, + size_t idx, + int offset, u8 __user *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer. + * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer + * in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + */ +extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, + int offset, u8 *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer. + * + * @rbuf: Ring buffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + */ +extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx); + +/** + * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer. + * + * @rbuf: Ringbuffer concerned. 
+ * @idx: Previous packet index, or -1 to return the first packet index. + * @pktlen: On success, will be updated to contain the length of the packet + * in bytes. + * returns Packet index (if >=0), or -1 if no packets available. + */ +extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, + size_t idx, size_t *pktlen); + +#endif /* _DVB_RINGBUFFER_H_ */ diff --git a/6.17.0/include-overrides/media/dvb_vb2.h b/6.17.0/include-overrides/media/dvb_vb2.h new file mode 100644 index 0000000..8cb8845 --- /dev/null +++ b/6.17.0/include-overrides/media/dvb_vb2.h @@ -0,0 +1,280 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * dvb-vb2.h - DVB driver helper framework for streaming I/O + * + * Copyright (C) 2015 Samsung Electronics + * + * Author: jh1009.sung@samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _DVB_VB2_H +#define _DVB_VB2_H + +#include +#include +#include +#include +#include +#include + +/** + * enum dvb_buf_type - types of Digital TV memory-mapped buffers + * + * @DVB_BUF_TYPE_CAPTURE: buffer is filled by the Kernel, + * with a received Digital TV stream + */ +enum dvb_buf_type { + DVB_BUF_TYPE_CAPTURE = 1, +}; + +/** + * enum dvb_vb2_states - states to control VB2 state machine + * @DVB_VB2_STATE_NONE: + * VB2 engine not initialized yet, init failed or VB2 was released. + * @DVB_VB2_STATE_INIT: + * VB2 engine initialized. + * @DVB_VB2_STATE_REQBUFS: + * Buffers were requested + * @DVB_VB2_STATE_STREAMON: + * VB2 is streaming. Callers should not check it directly. Instead, + * they should use dvb_vb2_is_streaming(). + * + * Note: + * + * Callers should not touch at the state machine directly. This + * is handled inside dvb_vb2.c. 
+ */ +enum dvb_vb2_states { + DVB_VB2_STATE_NONE = 0x0, + DVB_VB2_STATE_INIT = 0x1, + DVB_VB2_STATE_REQBUFS = 0x2, + DVB_VB2_STATE_STREAMON = 0x4, +}; + +#define DVB_VB2_NAME_MAX (20) + +/** + * struct dvb_buffer - video buffer information for v4l2. + * + * @vb: embedded struct &vb2_buffer. + * @list: list of &struct dvb_buffer. + */ +struct dvb_buffer { + struct vb2_buffer vb; + struct list_head list; +}; + +/** + * struct dvb_vb2_ctx - control struct for VB2 handler + * @vb_q: pointer to &struct vb2_queue with videobuf2 queue. + * @mutex: mutex to serialize vb2 operations. Used by + * vb2 core %wait_prepare and %wait_finish operations. + * @slock: spin lock used to protect buffer filling at dvb_vb2.c. + * @dvb_q: List of buffers that are not filled yet. + * @buf: Pointer to the buffer that are currently being filled. + * @offset: index to the next position at the @buf to be filled. + * @remain: How many bytes are left to be filled at @buf. + * @state: bitmask of buffer states as defined by &enum dvb_vb2_states. + * @buf_siz: size of each VB2 buffer. + * @buf_cnt: number of VB2 buffers. + * @nonblocking: + * If different than zero, device is operating on non-blocking + * mode. + * @flags: buffer flags as defined by &enum dmx_buffer_flags. + * Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field. + * @count: monotonic counter for filled buffers. Helps to identify + * data stream loses. Filled only at &DMX_DQBUF. &DMX_QBUF should + * zero this field. + * + * @name: name of the device type. Currently, it can either be + * "dvr" or "demux_filter". 
+ */ +struct dvb_vb2_ctx { + struct vb2_queue vb_q; + struct mutex mutex; + spinlock_t slock; + struct list_head dvb_q; + struct dvb_buffer *buf; + int offset; + int remain; + int state; + int buf_siz; + int buf_cnt; + int nonblocking; + + enum dmx_buffer_flags flags; + u32 count; + + char name[DVB_VB2_NAME_MAX + 1]; +}; + +#ifndef CONFIG_DVB_MMAP +static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, + const char *name, int non_blocking) +{ + return 0; +}; +static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx) +{ + return 0; +}; +#define dvb_vb2_is_streaming(ctx) (0) +#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0) + +static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, + struct file *file, + poll_table *wait) +{ + return 0; +} +#else +/** + * dvb_vb2_init - initializes VB2 handler + * + * @ctx: control struct for VB2 handler + * @name: name for the VB2 handler + * @non_blocking: + * if not zero, it means that the device is at non-blocking mode + */ +int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking); + +/** + * dvb_vb2_release - Releases the VB2 handler allocated resources and + * put @ctx at DVB_VB2_STATE_NONE state. + * @ctx: control struct for VB2 handler + */ +int dvb_vb2_release(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_is_streaming - checks if the VB2 handler is streaming + * @ctx: control struct for VB2 handler + * + * Return: 0 if not streaming, 1 otherwise. + */ +int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_fill_buffer - fills a VB2 buffer + * @ctx: control struct for VB2 handler + * @src: place where the data is stored + * @len: number of bytes to be copied from @src + * @buffer_flags: + * pointer to buffer flags as defined by &enum dmx_buffer_flags. + * can be NULL. 
+ */ +int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, + const unsigned char *src, int len, + enum dmx_buffer_flags *buffer_flags); + +/** + * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @file: &struct file argument passed to the poll + * file operation handler. + * @wait: &poll_table wait argument passed to the poll + * file operation handler. + * + * Implements poll syscall() logic. + */ +__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file, + poll_table *wait); +#endif + +/** + * dvb_vb2_stream_on() - Wrapper to vb2_core_streamon() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * + * Starts dvb streaming + */ +int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx); +/** + * dvb_vb2_stream_off() - Wrapper to vb2_core_streamoff() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * + * Stops dvb streaming + */ +int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_reqbufs() - Wrapper to vb2_core_reqbufs() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @req: &struct dmx_requestbuffers passed from userspace in + * order to handle &DMX_REQBUFS. + * + * Initiate streaming by requesting a number of buffers. Also used to + * free previously requested buffers, is ``req->count`` is zero. + */ +int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req); + +/** + * dvb_vb2_querybuf() - Wrapper to vb2_core_querybuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QUERYBUF. + * + * + */ +int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_expbuf() - Wrapper to vb2_core_expbuf() for Digital TV + * buffer handling. 
+ * + * @ctx: control struct for VB2 handler + * @exp: &struct dmx_exportbuffer passed from userspace in + * order to handle &DMX_EXPBUF. + * + * Export a buffer as a file descriptor. + */ +int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp); + +/** + * dvb_vb2_qbuf() - Wrapper to vb2_core_qbuf() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QBUF. + * + * Queue a Digital TV buffer as requested by userspace + */ +int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_dqbuf() - Wrapper to vb2_core_dqbuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_DQBUF. + * + * Dequeue a Digital TV buffer to the userspace + */ +int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_mmap() - Wrapper to vb2_mmap() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * map Digital TV video buffers into application address space. + */ +int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma); + +#endif /* _DVB_VB2_H */ diff --git a/6.17.0/include-overrides/media/dvbdev.h b/6.17.0/include-overrides/media/dvbdev.h new file mode 100644 index 0000000..e5a00d1 --- /dev/null +++ b/6.17.0/include-overrides/media/dvbdev.h @@ -0,0 +1,493 @@ +/* + * dvbdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVBDEV_H_ +#define _DVBDEV_H_ + +#include +#include +#include +#include +#include + +#define DVB_MAJOR 212 + +#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0 + #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS +#else + #define DVB_MAX_ADAPTERS 16 +#endif + +#define DVB_UNSET (-1) + +/* List of DVB device types */ + +/** + * enum dvb_device_type - type of the Digital TV device + * + * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI) + * @DVB_DEVICE_FRONTEND: Digital TV frontend. + * @DVB_DEVICE_DEMUX: Digital TV demux. + * @DVB_DEVICE_DVR: Digital TV digital video record (DVR). + * @DVB_DEVICE_CA: Digital TV Conditional Access (CA). + * @DVB_DEVICE_NET: Digital TV network. + * + * @DVB_DEVICE_VIDEO: Digital TV video decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_AUDIO: Digital TV audio decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD). + * Deprecated. Used only on av7110. + */ +enum dvb_device_type { + DVB_DEVICE_SEC, + DVB_DEVICE_FRONTEND, + DVB_DEVICE_DEMUX, + DVB_DEVICE_DVR, + DVB_DEVICE_CA, + DVB_DEVICE_NET, + + DVB_DEVICE_VIDEO, + DVB_DEVICE_AUDIO, + DVB_DEVICE_OSD, +}; + +#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \ + static short adapter_nr[] = \ + {[0 ... 
(DVB_MAX_ADAPTERS - 1)] = DVB_UNSET }; \ + module_param_array(adapter_nr, short, NULL, 0444); \ + MODULE_PARM_DESC(adapter_nr, "DVB adapter numbers") + +struct dvb_frontend; + +/** + * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API + * + * @num: Number of the adapter + * @list_head: List with the DVB adapters + * @device_list: List with the DVB devices + * @name: Name of the adapter + * @proposed_mac: proposed MAC address for the adapter + * @priv: private data + * @device: pointer to struct device + * @module: pointer to struct module + * @mfe_shared: indicates mutually exclusive frontends. + * 1 = legacy exclusion behavior: blocking any open() call + * 2 = enhanced exclusion behavior, emulating the standard + * behavior of busy frontends: allowing read-only sharing + * and otherwise returning immediately with -EBUSY when any + * of the frontends is already opened with write access. + * @mfe_dvbdev: Frontend device in use, in the case of MFE + * @mfe_lock: Lock to prevent using the other frontends when MFE is + * used. + * @mdev_lock: Protect access to the mdev pointer. + * @mdev: pointer to struct media_device, used when the media + * controller is used. + * @conn: RF connector. Used only if the device has no separate + * tuner. 
+ * @conn_pads: pointer to struct media_pad associated with @conn; + */ +struct dvb_adapter { + int num; + struct list_head list_head; + struct list_head device_list; + const char *name; + u8 proposed_mac [6]; + void* priv; + + struct device *device; + + struct module *module; + + int mfe_shared; /* indicates mutually exclusive frontends */ + struct dvb_device *mfe_dvbdev; /* frontend device in use */ + struct mutex mfe_lock; /* access lock for thread creation */ + +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + struct mutex mdev_lock; + struct media_device *mdev; + struct media_entity *conn; + struct media_pad *conn_pads; +#endif +}; + +/** + * struct dvb_device - represents a DVB device node + * + * @list_head: List head with all DVB devices + * @ref: reference count for this device + * @fops: pointer to struct file_operations + * @adapter: pointer to the adapter that holds this device node + * @type: type of the device, as defined by &enum dvb_device_type. + * @minor: devnode minor number. Major number is always DVB_MAJOR. + * @id: device ID number, inside the adapter + * @readers: Initialized by the caller. Each call to open() in Read Only mode + * decreases this counter by one. + * @writers: Initialized by the caller. Each call to open() in Read/Write + * mode decreases this counter by one. + * @users: Initialized by the caller. Each call to open() in any mode + * decreases this counter by one. + * @wait_queue: wait queue, used to wait for certain events inside one of + * the DVB API callers + * @kernel_ioctl: callback function used to handle ioctl calls from userspace. + * @name: Name to be used for the device at the Media Controller + * @entity: pointer to struct media_entity associated with the device node + * @pads: pointer to struct media_pad associated with @entity; + * @priv: private data + * @intf_devnode: Pointer to media_intf_devnode. 
Used by the dvbdev core to + * store the MC device node interface + * @tsout_num_entities: Number of Transport Stream output entities + * @tsout_entity: array with MC entities associated to each TS output node + * @tsout_pads: array with the source pads for each @tsout_entity + * + * This structure is used by the DVB core (frontend, CA, net, demux) in + * order to create the device nodes. Usually, driver should not initialize + * this struct diretly. + */ +struct dvb_device { + struct list_head list_head; + struct kref ref; + const struct file_operations *fops; + struct dvb_adapter *adapter; + enum dvb_device_type type; + int minor; + u32 id; + + /* in theory, 'users' can vanish now, + but I don't want to change too much now... */ + int readers; + int writers; + int users; + + wait_queue_head_t wait_queue; + /* don't really need those !? -- FIXME: use video_usercopy */ + int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg); + + /* Needed for media controller register/unregister */ +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + const char *name; + + /* Allocated and filled inside dvbdev.c */ + struct media_intf_devnode *intf_devnode; + + unsigned tsout_num_entities; + struct media_entity *entity, *tsout_entity; + struct media_pad *pads, *tsout_pads; +#endif + + void *priv; +}; + +/** + * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list + * + * @fops: Dynamically allocated fops for ->owner registration + * @type: type of dvb_device + * @template: dvb_device used for registration + * @list_head: list_head for dvbdevfops_list + */ +struct dvbdevfops_node { + struct file_operations *fops; + enum dvb_device_type type; + const struct dvb_device *template; + struct list_head list_head; +}; + +/** + * dvb_device_get - Increase dvb_device reference + * + * @dvbdev: pointer to struct dvb_device + */ +struct dvb_device *dvb_device_get(struct dvb_device *dvbdev); + +/** + * dvb_device_put - Decrease dvb_device reference + * + * @dvbdev: pointer to 
struct dvb_device + */ +void dvb_device_put(struct dvb_device *dvbdev); + +/** + * dvb_register_adapter - Registers a new DVB adapter + * + * @adap: pointer to struct dvb_adapter + * @name: Adapter's name + * @module: initialized with THIS_MODULE at the caller + * @device: pointer to struct device that corresponds to the device driver + * @adapter_nums: Array with a list of the numbers for @dvb_register_adapter; + * to select among them. Typically, initialized with: + * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums) + */ +int dvb_register_adapter(struct dvb_adapter *adap, const char *name, + struct module *module, struct device *device, + short *adapter_nums); + +/** + * dvb_unregister_adapter - Unregisters a DVB adapter + * + * @adap: pointer to struct dvb_adapter + */ +int dvb_unregister_adapter(struct dvb_adapter *adap); + +/** + * dvb_register_device - Registers a new DVB device + * + * @adap: pointer to struct dvb_adapter + * @pdvbdev: pointer to the place where the new struct dvb_device will be + * stored + * @template: Template used to create &pdvbdev; + * @priv: private data + * @type: type of the device, as defined by &enum dvb_device_type. + * @demux_sink_pads: Number of demux outputs, to be used to create the TS + * outputs via the Media Controller. + */ +int dvb_register_device(struct dvb_adapter *adap, + struct dvb_device **pdvbdev, + const struct dvb_device *template, + void *priv, + enum dvb_device_type type, + int demux_sink_pads); + +/** + * dvb_remove_device - Remove a registered DVB device + * + * @dvbdev: pointer to struct dvb_device + * + * This does not free memory. 
dvb_free_device() will do that when + * reference counter is empty + */ +void dvb_remove_device(struct dvb_device *dvbdev); + + +/** + * dvb_unregister_device - Unregisters a DVB device + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_unregister_device(struct dvb_device *dvbdev); + +#ifdef CONFIG_MEDIA_CONTROLLER_DVB +/** + * dvb_create_media_graph - Creates media graph for the Digital TV part of the + * device. + * + * @adap: pointer to &struct dvb_adapter + * @create_rf_connector: if true, it creates the RF connector too + * + * This function checks all DVB-related functions at the media controller + * entities and creates the needed links for the media graph. It is + * capable of working with multiple tuners or multiple frontends, but it + * won't create links if the device has multiple tuners and multiple frontends + * or if the device has multiple muxes. In such case, the caller driver should + * manually create the remaining links. + */ +__must_check int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector); + +/** + * dvb_register_media_controller - registers a media controller at DVB adapter + * + * @adap: pointer to &struct dvb_adapter + * @mdev: pointer to &struct media_device + */ +static inline void dvb_register_media_controller(struct dvb_adapter *adap, + struct media_device *mdev) +{ + adap->mdev = mdev; +} + +/** + * dvb_get_media_controller - gets the associated media controller + * + * @adap: pointer to &struct dvb_adapter + */ +static inline struct media_device * +dvb_get_media_controller(struct dvb_adapter *adap) +{ + return adap->mdev; +} +#else +static inline +int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector) +{ + return 0; +}; +#define dvb_register_media_controller(a, b) {} +#define dvb_get_media_controller(a) NULL +#endif + +/** + * dvb_generic_open - Digital TV open function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. 
+ * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and increment negative use count. + */ +int dvb_generic_open(struct inode *inode, struct file *file); + +/** + * dvb_generic_release - Digital TV close function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. + * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and decrement negative use count. + */ +int dvb_generic_release(struct inode *inode, struct file *file); + +/** + * dvb_generic_ioctl - Digital TV close function, used by DVB devices + * + * @file: pointer to &struct file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Checks if a DVB devnode and struct dvbdev.kernel_ioctl is still valid. + * If so, calls dvb_usercopy(). + */ +long dvb_generic_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +/** + * dvb_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will actually handle the ioctl + * + * Ancillary function that uses ioctl direction and size to copy from + * userspace. Then, it calls @func, and, if needed, data is copied back + * to userspace. + */ +int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, unsigned int cmd, void *arg)); + +#if IS_ENABLED(CONFIG_I2C) + +struct i2c_adapter; +struct i2c_client; +/** + * dvb_module_probe - helper routine to probe an I2C module + * + * @module_name: + * Name of the I2C module to be probed + * @name: + * Optional name for the I2C module. Used for debug purposes. + * If %NULL, defaults to @module_name. + * @adap: + * pointer to &struct i2c_adapter that describes the I2C adapter where + * the module will be bound. + * @addr: + * I2C address of the adapter, in 7-bit notation. 
+ * @platform_data: + * Platform data to be passed to the I2C module probed. + * + * This function binds an I2C device into the DVB core. Should be used by + * all drivers that use I2C bus to control the hardware. A module bound + * with dvb_module_probe() should use dvb_module_release() to unbind. + * + * Return: + * On success, return an &struct i2c_client, pointing to the bound + * I2C device. %NULL otherwise. + * + * .. note:: + * + * In the past, DVB modules (mainly, frontends) were bound via dvb_attach() + * macro, with does an ugly hack, using I2C low level functions. Such + * usage is deprecated and will be removed soon. Instead, use this routine. + */ +struct i2c_client *dvb_module_probe(const char *module_name, + const char *name, + struct i2c_adapter *adap, + unsigned char addr, + void *platform_data); + +/** + * dvb_module_release - releases an I2C device allocated with + * dvb_module_probe(). + * + * @client: pointer to &struct i2c_client with the I2C client to be released. + * can be %NULL. + * + * This function should be used to free all resources reserved by + * dvb_module_probe() and unbinding the I2C hardware. + */ +void dvb_module_release(struct i2c_client *client); + +#endif /* CONFIG_I2C */ + +/* Legacy generic DVB attach function. */ +#ifdef CONFIG_MEDIA_ATTACH + +/** + * dvb_attach - attaches a DVB frontend into the DVB core. + * + * @FUNCTION: function on a frontend module to be called. + * @ARGS: @FUNCTION arguments. + * + * This ancillary function loads a frontend module in runtime and runs + * the @FUNCTION function there, with @ARGS. + * As it increments symbol usage cont, at unregister, dvb_detach() + * should be called. + * + * .. note:: + * + * In the past, DVB modules (mainly, frontends) were bound via dvb_attach() + * macro, with does an ugly hack, using I2C low level functions. Such + * usage is deprecated and will be removed soon. Instead, you should use + * dvb_module_probe(). + */ +#define dvb_attach(FUNCTION, ARGS...) 
({ \ + void *__r = NULL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (void *) __a(ARGS); \ + if (__r == NULL) \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "DVB: Unable to find symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +/** + * dvb_detach - detaches a DVB frontend loaded via dvb_attach() + * + * @FUNC: attach function + * + * Decrements usage count for a function previously called via dvb_attach(). + */ + +#define dvb_detach(FUNC) symbol_put_addr(FUNC) + +#else +#define dvb_attach(FUNCTION, ARGS...) ({ \ + FUNCTION(ARGS); \ +}) + +#define dvb_detach(FUNC) {} + +#endif /* CONFIG_MEDIA_ATTACH */ + +#endif /* #ifndef _DVBDEV_H_ */ diff --git a/6.17.0/include-overrides/media/frame_vector.h b/6.17.0/include-overrides/media/frame_vector.h new file mode 100644 index 0000000..541c71a --- /dev/null +++ b/6.17.0/include-overrides/media/frame_vector.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _MEDIA_FRAME_VECTOR_H +#define _MEDIA_FRAME_VECTOR_H + +/* Container for pinned pfns / pages in frame_vector.c */ +struct frame_vector { + unsigned int nr_allocated; /* Number of frames we have space for */ + unsigned int nr_frames; /* Number of frames stored in ptrs array */ + bool got_ref; /* Did we pin pages by getting page ref? */ + bool is_pfns; /* Does array contain pages or pfns? */ + void *ptrs[]; /* Array of pinned pfns / pages. 
Use + * pfns_vector_pages() or pfns_vector_pfns() + * for access */ +}; + +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + bool write, struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} + +#endif /* _MEDIA_FRAME_VECTOR_H */ diff --git a/6.17.0/include-overrides/media/imx.h b/6.17.0/include-overrides/media/imx.h new file mode 100644 index 0000000..5f14acd --- /dev/null +++ b/6.17.0/include-overrides/media/imx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014-2017 Mentor Graphics Inc. 
+ */ + +#ifndef __MEDIA_IMX_H__ +#define __MEDIA_IMX_H__ + +#include + +#endif diff --git a/6.17.0/include-overrides/media/ipu-bridge.h b/6.17.0/include-overrides/media/ipu-bridge.h new file mode 100644 index 0000000..16fac76 --- /dev/null +++ b/6.17.0/include-overrides/media/ipu-bridge.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Author: Dan Scally */ +#ifndef __IPU_BRIDGE_H +#define __IPU_BRIDGE_H + +#include +#include +#include +#include + +#define IPU_HID "INT343E" +#define IPU_MAX_LANES 4 +#define IPU_MAX_PORTS 4 +#define MAX_NUM_LINK_FREQS 3 + +/* Values are educated guesses as we don't have a spec */ +#define IPU_SENSOR_ROTATION_NORMAL 0 +#define IPU_SENSOR_ROTATION_INVERTED 1 + +#define IPU_SENSOR_CONFIG(_HID, _NR, ...) \ + (const struct ipu_sensor_config) { \ + .hid = _HID, \ + .nr_link_freqs = _NR, \ + .link_freqs = { __VA_ARGS__ } \ + } + +#define NODE_SENSOR(_HID, _PROPS) \ + (const struct software_node) { \ + .name = _HID, \ + .properties = _PROPS, \ + } + +#define NODE_PORT(_PORT, _SENSOR_NODE) \ + (const struct software_node) { \ + .name = _PORT, \ + .parent = _SENSOR_NODE, \ + } + +#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \ + (const struct software_node) { \ + .name = _EP, \ + .parent = _PORT, \ + .properties = _PROPS, \ + } + +#define NODE_VCM(_TYPE) \ + (const struct software_node) { \ + .name = _TYPE, \ + } + +enum ipu_sensor_swnodes { + SWNODE_SENSOR_HID, + SWNODE_SENSOR_PORT, + SWNODE_SENSOR_ENDPOINT, + SWNODE_IPU_PORT, + SWNODE_IPU_ENDPOINT, + /* below are optional / maybe empty */ + SWNODE_IVSC_HID, + SWNODE_IVSC_SENSOR_PORT, + SWNODE_IVSC_SENSOR_ENDPOINT, + SWNODE_IVSC_IPU_PORT, + SWNODE_IVSC_IPU_ENDPOINT, + SWNODE_VCM, + SWNODE_COUNT +}; + +/* Data representation as it is in ACPI SSDB buffer */ +struct ipu_sensor_ssdb { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 
sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 reserved1[3]; + u8 mclkport; + u8 reserved2[13]; +} __packed; + +struct ipu_property_names { + char clock_frequency[16]; + char rotation[9]; + char orientation[12]; + char bus_type[9]; + char data_lanes[11]; + char remote_endpoint[16]; + char link_frequencies[17]; +}; + +struct ipu_node_names { + char port[7]; + char ivsc_sensor_port[7]; + char ivsc_ipu_port[7]; + char endpoint[11]; + char remote_port[9]; + char vcm[16]; +}; + +struct ipu_sensor_config { + const char *hid; + const u8 nr_link_freqs; + const u64 link_freqs[MAX_NUM_LINK_FREQS]; +}; + +struct ipu_sensor { + /* append ssdb.link(u8) in "-%u" format as suffix of HID */ + char name[ACPI_ID_LEN + 4]; + struct acpi_device *adev; + + struct device *csi_dev; + struct acpi_device *ivsc_adev; + char ivsc_name[ACPI_ID_LEN + 4]; + + /* SWNODE_COUNT + 1 for terminating NULL */ + const struct software_node *group[SWNODE_COUNT + 1]; + struct software_node swnodes[SWNODE_COUNT]; + struct ipu_node_names node_names; + + u8 link; + u8 lanes; + u32 mclkspeed; + u32 rotation; + enum v4l2_fwnode_orientation orientation; + const char *vcm_type; + + struct ipu_property_names prop_names; + struct property_entry ep_properties[5]; + struct property_entry dev_properties[5]; + struct property_entry ipu_properties[3]; + struct property_entry ivsc_properties[1]; + struct property_entry ivsc_sensor_ep_properties[4]; + struct property_entry ivsc_ipu_ep_properties[4]; + + struct software_node_ref_args local_ref[1]; + struct software_node_ref_args remote_ref[1]; + struct software_node_ref_args vcm_ref[1]; + struct software_node_ref_args ivsc_sensor_ref[1]; + struct software_node_ref_args ivsc_ipu_ref[1]; +}; + +typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev, + struct ipu_sensor *sensor); + +struct ipu_bridge { + 
struct device *dev; + ipu_parse_sensor_fwnode_t parse_sensor_fwnode; + char ipu_node_name[ACPI_ID_LEN]; + struct software_node ipu_hid_node; + u32 data_lanes[4]; + unsigned int n_sensors; + struct ipu_sensor sensors[IPU_MAX_PORTS]; +}; + +#if IS_ENABLED(CONFIG_IPU_BRIDGE) +int ipu_bridge_init(struct device *dev, + ipu_parse_sensor_fwnode_t parse_sensor_fwnode); +int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor); +int ipu_bridge_instantiate_vcm(struct device *sensor); +#else +/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */ +#define ipu_bridge_init(dev, parse_sensor_fwnode) (0) +static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; } +#endif + +#endif diff --git a/6.17.0/include-overrides/media/ipu6-pci-table.h b/6.17.0/include-overrides/media/ipu6-pci-table.h new file mode 100644 index 0000000..0899d9d --- /dev/null +++ b/6.17.0/include-overrides/media/ipu6-pci-table.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Intel Corporation + */ + +#ifndef __IPU6_PCI_TBL_H__ +#define __IPU6_PCI_TBL_H__ + +#include + +#define PCI_DEVICE_ID_INTEL_IPU6 0x9a19 +#define PCI_DEVICE_ID_INTEL_IPU6SE 0x4e19 +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLP 0x465d +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLN 0x462e +#define PCI_DEVICE_ID_INTEL_IPU6EP_RPLP 0xa75d +#define PCI_DEVICE_ID_INTEL_IPU6EP_MTL 0x7d19 + +static const struct pci_device_id ipu6_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6SE) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLN) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_RPLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_MTL) }, + { } +}; + +#endif /* __IPU6_PCI_TBL_H__ */ diff --git a/6.17.0/include-overrides/media/jpeg.h b/6.17.0/include-overrides/media/jpeg.h new file mode 100644 index 0000000..a01e142 
--- /dev/null +++ b/6.17.0/include-overrides/media/jpeg.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _MEDIA_JPEG_H_ +#define _MEDIA_JPEG_H_ + +/* JPEG markers */ +#define JPEG_MARKER_TEM 0x01 +#define JPEG_MARKER_SOF0 0xc0 +#define JPEG_MARKER_DHT 0xc4 +#define JPEG_MARKER_RST 0xd0 +#define JPEG_MARKER_SOI 0xd8 +#define JPEG_MARKER_EOI 0xd9 +#define JPEG_MARKER_SOS 0xda +#define JPEG_MARKER_DQT 0xdb +#define JPEG_MARKER_DRI 0xdd +#define JPEG_MARKER_DHP 0xde +#define JPEG_MARKER_APP0 0xe0 +#define JPEG_MARKER_COM 0xfe + +#endif /* _MEDIA_JPEG_H_ */ diff --git a/6.17.0/include-overrides/media/media-dev-allocator.h b/6.17.0/include-overrides/media/media-dev-allocator.h new file mode 100644 index 0000000..2ab54d4 --- /dev/null +++ b/6.17.0/include-overrides/media/media-dev-allocator.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * media-dev-allocator.h - Media Controller Device Allocator API + * + * Copyright (c) 2019 Shuah Khan + * + * Credits: Suggested by Laurent Pinchart + */ + +/* + * This file adds a global ref-counted Media Controller Device Instance API. + * A system wide global media device list is managed and each media device + * includes a kref count. The last put on the media device releases the media + * device instance. + */ + +#ifndef _MEDIA_DEV_ALLOCATOR_H +#define _MEDIA_DEV_ALLOCATOR_H + +struct usb_device; + +#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB) +/** + * media_device_usb_allocate() - Allocate and return struct &media device + * + * @udev: struct &usb_device pointer + * @module_name: should be filled with %KBUILD_MODNAME + * @owner: struct module pointer %THIS_MODULE for the driver. + * %THIS_MODULE is null for a built-in driver. + * It is safe even when %THIS_MODULE is null. + * + * This interface should be called to allocate a Media Device when multiple + * drivers share usb_device and the media device. 
This interface allocates + * &media_device structure and calls media_device_usb_init() to initialize + * it. + * + */ +struct media_device *media_device_usb_allocate(struct usb_device *udev, + const char *module_name, + struct module *owner); +/** + * media_device_delete() - Release media device. Calls kref_put(). + * + * @mdev: struct &media_device pointer + * @module_name: should be filled with %KBUILD_MODNAME + * @owner: struct module pointer %THIS_MODULE for the driver. + * %THIS_MODULE is null for a built-in driver. + * It is safe even when %THIS_MODULE is null. + * + * This interface should be called to put Media Device Instance kref. + */ +void media_device_delete(struct media_device *mdev, const char *module_name, + struct module *owner); +#else +static inline struct media_device *media_device_usb_allocate( + struct usb_device *udev, const char *module_name, + struct module *owner) + { return NULL; } +static inline void media_device_delete( + struct media_device *mdev, const char *module_name, + struct module *owner) { } +#endif /* CONFIG_MEDIA_CONTROLLER && CONFIG_USB */ +#endif /* _MEDIA_DEV_ALLOCATOR_H */ diff --git a/6.17.0/include-overrides/media/media-device.h b/6.17.0/include-overrides/media/media-device.h new file mode 100644 index 0000000..53d2a16 --- /dev/null +++ b/6.17.0/include-overrides/media/media-device.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media device + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + */ + +#ifndef _MEDIA_DEVICE_H +#define _MEDIA_DEVICE_H + +#include +#include +#include +#include + +#include +#include + +struct ida; +struct media_device; + +/** + * struct media_entity_notify - Media Entity Notify + * + * @list: List head + * @notify_data: Input data to invoke the callback + * @notify: Callback function pointer + * + * Drivers may register a callback to take action when new entities get + * registered with the media device. 
This handler is intended for creating + * links between existing entities and should not create entities and register + * them. + */ +struct media_entity_notify { + struct list_head list; + void *notify_data; + void (*notify)(struct media_entity *entity, void *notify_data); +}; + +/** + * struct media_device_ops - Media device operations + * @link_notify: Link state change notification callback. This callback is + * called with the graph_mutex held. + * @req_alloc: Allocate a request. Set this if you need to allocate a struct + * larger then struct media_request. @req_alloc and @req_free must + * either both be set or both be NULL. + * @req_free: Free a request. Set this if @req_alloc was set as well, leave + * to NULL otherwise. + * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex + * lock is held when this op is called. + * @req_queue: Queue a validated request, cannot fail. If something goes + * wrong when queueing this request then it should be marked + * as such internally in the driver and any related buffers + * must eventually return to vb2 with state VB2_BUF_STATE_ERROR. + * The req_queue_mutex lock is held when this op is called. + * It is important that vb2 buffer objects are queued last after + * all other object types are queued: queueing a buffer kickstarts + * the request processing, so all other objects related to the + * request (and thus the buffer) must be available to the driver. + * And once a buffer is queued, then the driver can complete + * or delete objects from the request before req_queue exits. 
+ */ +struct media_device_ops { + int (*link_notify)(struct media_link *link, u32 flags, + unsigned int notification); + struct media_request *(*req_alloc)(struct media_device *mdev); + void (*req_free)(struct media_request *req); + int (*req_validate)(struct media_request *req); + void (*req_queue)(struct media_request *req); +}; + +/** + * struct media_device - Media device + * @dev: Parent device + * @devnode: Media device node + * @driver_name: Optional device driver name. If not set, calls to + * %MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``. + * This is needed for USB drivers for example, as otherwise + * they'll all appear as if the driver name was "usb". + * @model: Device model name + * @serial: Device serial number (optional) + * @bus_info: Unique and stable device location identifier + * @hw_revision: Hardware device revision + * @topology_version: Monotonic counter for storing the version of the graph + * topology. Should be incremented each time the topology changes. + * @id: Unique ID used on the last registered graph object + * @entity_internal_idx: Unique internal entity ID used by the graph traversal + * algorithms + * @entity_internal_idx_max: Allocated internal entity indices + * @entities: List of registered entities + * @interfaces: List of registered interfaces + * @pads: List of registered pads + * @links: List of registered links + * @entity_notify: List of registered entity_notify callbacks + * @graph_mutex: Protects access to struct media_device data + * @pm_count_walk: Graph walk for power state walk. Access serialised using + * graph_mutex. + * + * @source_priv: Driver Private data for enable/disable source handlers + * @enable_source: Enable Source Handler function pointer + * @disable_source: Disable Source Handler function pointer + * + * @ops: Operation handler callbacks + * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t. + * other operations that stop or start streaming. 
+ * @request_id: Used to generate unique request IDs + * + * This structure represents an abstract high-level media device. It allows easy + * access to entities and provides basic media device-level support. The + * structure can be allocated directly or embedded in a larger structure. + * + * The parent @dev is a physical device. It must be set before registering the + * media device. + * + * @model is a descriptive model name exported through sysfs. It doesn't have to + * be unique. + * + * @enable_source is a handler to find source entity for the + * sink entity and activate the link between them if source + * entity is free. Drivers should call this handler before + * accessing the source. + * + * @disable_source is a handler to find source entity for the + * sink entity and deactivate the link between them. Drivers + * should call this handler to release the source. + * + * Use-case: find tuner entity connected to the decoder + * entity and check if it is available, and activate the + * link between them from @enable_source and deactivate + * from @disable_source. + * + * .. note:: + * + * Bridge driver is expected to implement and set the + * handler when &media_device is registered or when + * bridge driver finds the media_device during probe. + * Bridge driver sets source_priv with information + * necessary to run @enable_source and @disable_source handlers. + * Callers should hold graph_mutex to access and call @enable_source + * and @disable_source handlers. + */ +struct media_device { + /* dev->driver_data points to this struct. 
*/ + struct device *dev; + struct media_devnode *devnode; + + char model[32]; + char driver_name[32]; + char serial[40]; + char bus_info[32]; + u32 hw_revision; + + u64 topology_version; + + u32 id; + struct ida entity_internal_idx; + int entity_internal_idx_max; + + struct list_head entities; + struct list_head interfaces; + struct list_head pads; + struct list_head links; + + /* notify callback list invoked when a new entity is registered */ + struct list_head entity_notify; + + /* Serializes graph operations. */ + struct mutex graph_mutex; + struct media_graph pm_count_walk; + + void *source_priv; + int (*enable_source)(struct media_entity *entity, + struct media_pipeline *pipe); + void (*disable_source)(struct media_entity *entity); + + const struct media_device_ops *ops; + + struct mutex req_queue_mutex; + atomic_t request_id; +}; + +/* We don't need to include usb.h here */ +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/* Supported link_notify @notification values. */ +#define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 +#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 + +/** + * media_device_init() - Initializes a media device element + * + * @mdev: pointer to struct &media_device + * + * This function initializes the media device prior to its registration. + * The media device initialization and registration is split in two functions + * to avoid race conditions and make the media device available to user-space + * before the media graph has been completed. + * + * So drivers need to first initialize the media device, register any entity + * within the media device, create pad to pad links and then finally register + * the media device by calling media_device_register() as a final step. + * + * The caller is responsible for initializing the media device before + * registration. 
The following fields must be set: + * + * - dev must point to the parent device + * - model must be filled with the device model name + * + * The bus_info field is set by media_device_init() for PCI and platform devices + * if the field begins with '\0'. + */ +void media_device_init(struct media_device *mdev); + +/** + * media_device_cleanup() - Cleanups a media device element + * + * @mdev: pointer to struct &media_device + * + * This function that will destroy the graph_mutex that is + * initialized in media_device_init(). + */ +void media_device_cleanup(struct media_device *mdev); + +/** + * __media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * @owner: should be filled with %THIS_MODULE + * + * Users, should, instead, call the media_device_register() macro. + * + * The caller is responsible for initializing the &media_device structure + * before registration. The following fields of &media_device must be set: + * + * - &media_device.model must be filled with the device model name as a + * NUL-terminated UTF-8 string. The device/model revision must not be + * stored in this field. + * + * The following fields are optional: + * + * - &media_device.serial is a unique serial number stored as a + * NUL-terminated ASCII string. The field is big enough to store a GUID + * in text form. If the hardware doesn't provide a unique serial number + * this field must be left empty. + * + * - &media_device.bus_info represents the location of the device in the + * system as a NUL-terminated ASCII string. For PCI/PCIe devices + * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by + * the value of pci_name(). For USB devices,the usb_make_path() function + * must be used. This field is used by applications to distinguish between + * otherwise identical devices that don't provide a serial number. + * + * - &media_device.hw_revision is the hardware device revision in a + * driver-specific format. 
When possible the revision should be formatted + * with the KERNEL_VERSION() macro. + * + * .. note:: + * + * #) Upon successful registration a character device named media[0-9]+ is created. The device major and minor numbers are dynamic. The model name is exported as a sysfs attribute. + * + * #) Unregistering a media device that hasn't been registered is **NOT** safe. + * + * Return: returns zero on success or a negative error code. + */ +int __must_check __media_device_register(struct media_device *mdev, + struct module *owner); + + +/** + * media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * + * This macro calls __media_device_register() passing %THIS_MODULE as + * the __media_device_register() second argument (**owner**). + */ +#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) + +/** + * media_device_unregister() - Unregisters a media device element + * + * @mdev: pointer to struct &media_device + * + * It is safe to call this function on an unregistered (but initialised) + * media device. + */ +void media_device_unregister(struct media_device *mdev); + +/** + * media_device_register_entity() - registers a media entity inside a + * previously registered media device. + * + * @mdev: pointer to struct &media_device + * @entity: pointer to struct &media_entity to be registered + * + * Entities are identified by a unique positive integer ID. The media + * controller framework will such ID automatically. IDs are not guaranteed + * to be contiguous, and the ID number can change on newer Kernel versions. + * So, neither the driver nor userspace should hardcode ID numbers to refer + * to the entities, but, instead, use the framework to find the ID, when + * needed. + * + * The media_entity name, type and flags fields should be initialized before + * calling media_device_register_entity(). 
Entities embedded in higher-level + * standard structures can have some of those fields set by the higher-level + * framework. + * + * If the device has pads, media_entity_pads_init() should be called before + * this function. Otherwise, the &media_entity.pad and &media_entity.num_pads + * should be zeroed before calling this function. + * + * Entities have flags that describe the entity capabilities and state: + * + * %MEDIA_ENT_FL_DEFAULT + * indicates the default entity for a given type. + * This can be used to report the default audio and video devices or the + * default camera sensor. + * + * .. note:: + * + * Drivers should set the entity function before calling this function. + * Please notice that the values %MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN and + * %MEDIA_ENT_F_UNKNOWN should not be used by the drivers. + */ +int __must_check media_device_register_entity(struct media_device *mdev, + struct media_entity *entity); + +/** + * media_device_unregister_entity() - unregisters a media entity. + * + * @entity: pointer to struct &media_entity to be unregistered + * + * All links associated with the entity and all PADs are automatically + * unregistered from the media_device when this function is called. + * + * Unregistering an entity will not change the IDs of the other entities and + * the previoully used ID will never be reused for a newly registered entities. + * + * When a media device is unregistered, all its entities are unregistered + * automatically. No manual entities unregistration is then required. + * + * .. note:: + * + * The media_entity instance itself must be freed explicitly by + * the driver if required. + */ +void media_device_unregister_entity(struct media_entity *entity); + +/** + * media_device_register_entity_notify() - Registers a media entity_notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + * .. 
note:: + * + * When a new entity is registered, all the registered + * media_entity_notify callbacks are invoked. + */ + +void media_device_register_entity_notify(struct media_device *mdev, + struct media_entity_notify *nptr); + +/** + * media_device_unregister_entity_notify() - Unregister a media entity notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + */ +void media_device_unregister_entity_notify(struct media_device *mdev, + struct media_entity_notify *nptr); + +/* Iterate over all entities. */ +#define media_device_for_each_entity(entity, mdev) \ + list_for_each_entry(entity, &(mdev)->entities, graph_obj.list) + +/* Iterate over all interfaces. */ +#define media_device_for_each_intf(intf, mdev) \ + list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list) + +/* Iterate over all pads. */ +#define media_device_for_each_pad(pad, mdev) \ + list_for_each_entry(pad, &(mdev)->pads, graph_obj.list) + +/* Iterate over all links. */ +#define media_device_for_each_link(link, mdev) \ + list_for_each_entry(link, &(mdev)->links, graph_obj.list) + +/** + * media_device_pci_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @pci_dev: pointer to struct pci_dev + * @name: media device name. If %NULL, the routine will use the default + * name for the pci device, given by pci_name() macro. + */ +void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + const char *name); +/** + * __media_device_usb_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @udev: pointer to struct usb_device + * @board_name: media device name. If %NULL, the routine will use the usb + * product name, if available. + * @driver_name: name of the driver. if %NULL, the routine will use the name + * given by ``udev->dev->driver->name``, with is usually the wrong + * thing to do. 
+ * + * .. note:: + * + * It is better to call media_device_usb_init() instead, as + * such macro fills driver_name with %KBUILD_MODNAME. + */ +void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + const char *board_name, + const char *driver_name); + +#else +static inline void media_device_init(struct media_device *mdev) +{ +} +static inline int media_device_register(struct media_device *mdev) +{ + return 0; +} +static inline void media_device_unregister(struct media_device *mdev) +{ +} +static inline void media_device_cleanup(struct media_device *mdev) +{ +} +static inline int media_device_register_entity(struct media_device *mdev, + struct media_entity *entity) +{ + return 0; +} +static inline void media_device_unregister_entity(struct media_entity *entity) +{ +} +static inline void media_device_register_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} +static inline void media_device_unregister_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} + +static inline void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + char *name) +{ +} + +static inline void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + char *board_name, + char *driver_name) +{ +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/** + * media_device_usb_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @udev: pointer to struct usb_device + * @name: media device name. If %NULL, the routine will use the usb + * product name, if available. + * + * This macro calls media_device_usb_init() passing the + * media_device_usb_init() **driver_name** parameter filled with + * %KBUILD_MODNAME. 
+ */ +#define media_device_usb_init(mdev, udev, name) \ + __media_device_usb_init(mdev, udev, name, KBUILD_MODNAME) + +/** + * media_set_bus_info() - Set bus_info field + * + * @bus_info: Variable where to write the bus info (char array) + * @bus_info_size: Length of the bus_info + * @dev: Related struct device + * + * Sets bus information based on &dev. This is currently done for PCI and + * platform devices. dev is required to be non-NULL for this to happen. + * + * This function is not meant to be called from drivers. + */ +static inline void +media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev) +{ + if (!dev) + strscpy(bus_info, "no bus info", bus_info_size); + else if (dev_is_platform(dev)) + snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev)); + else if (dev_is_pci(dev)) + snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev)); +} + +#endif diff --git a/6.17.0/include-overrides/media/media-devnode.h b/6.17.0/include-overrides/media/media-devnode.h new file mode 100644 index 0000000..d27c1c6 --- /dev/null +++ b/6.17.0/include-overrides/media/media-devnode.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media device node + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + * + * -- + * + * Common functions for media-related drivers to register and unregister media + * device nodes. + */ + +#ifndef _MEDIA_DEVNODE_H +#define _MEDIA_DEVNODE_H + +#include +#include +#include +#include + +struct media_device; + +/* + * Flag to mark the media_devnode struct as registered. Drivers must not touch + * this flag directly, it will be set and cleared by media_devnode_register and + * media_devnode_unregister. 
+ */ +#define MEDIA_FLAG_REGISTERED 0 + +/** + * struct media_file_operations - Media device file operations + * + * @owner: should be filled with %THIS_MODULE + * @read: pointer to the function that implements read() syscall + * @write: pointer to the function that implements write() syscall + * @poll: pointer to the function that implements poll() syscall + * @ioctl: pointer to the function that implements ioctl() syscall + * @compat_ioctl: pointer to the function that will handle 32 bits userspace + * calls to the ioctl() syscall on a Kernel compiled with 64 bits. + * @open: pointer to the function that implements open() syscall + * @release: pointer to the function that will release the resources allocated + * by the @open function. + */ +struct media_file_operations { + struct module *owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*open) (struct file *); + int (*release) (struct file *); +}; + +/** + * struct media_devnode - Media device node + * @media_dev: pointer to struct &media_device + * @fops: pointer to struct &media_file_operations with media device ops + * @dev: pointer to struct &device containing the media controller device + * @cdev: struct cdev pointer character device + * @parent: parent device + * @minor: device node minor number + * @flags: flags, combination of the ``MEDIA_FLAG_*`` constants + * @release: release callback called at the end of ``media_devnode_release()`` + * routine at media-device.c. + * + * This structure represents a media-related device node. + * + * The @parent is a physical device. It must be set by core or device drivers + * before registering the node. 
+ */ +struct media_devnode { + struct media_device *media_dev; + + /* device ops */ + const struct media_file_operations *fops; + + /* sysfs */ + struct device dev; /* media device */ + struct cdev cdev; /* character device */ + struct device *parent; /* device parent */ + + /* device info */ + int minor; + unsigned long flags; /* Use bitops to access flags */ + + /* callbacks */ + void (*release)(struct media_devnode *devnode); +}; + +/* dev to media_devnode */ +#define to_media_devnode(cd) container_of(cd, struct media_devnode, dev) + +/** + * media_devnode_register - register a media device node + * + * @mdev: struct media_device we want to register a device node + * @devnode: media device node structure we want to register + * @owner: should be filled with %THIS_MODULE + * + * The registration code assigns minor numbers and registers the new device node + * with the kernel. An error is returned if no free minor number can be found, + * or if the registration of the device node fails. + * + * Zero is returned on success. + * + * Note that if the media_devnode_register call fails, the release() callback of + * the media_devnode structure is *not* called, so the caller is responsible for + * freeing any data. + */ +int __must_check media_devnode_register(struct media_device *mdev, + struct media_devnode *devnode, + struct module *owner); + +/** + * media_devnode_unregister_prepare - clear the media device node register bit + * @devnode: the device node to prepare for unregister + * + * This clears the passed device register bit. Future open calls will be met + * with errors. Should be called before media_devnode_unregister() to avoid + * races with unregister and device file open calls. + * + * This function can safely be called if the device node has never been + * registered or has already been unregistered. 
+ */ +void media_devnode_unregister_prepare(struct media_devnode *devnode); + +/** + * media_devnode_unregister - unregister a media device node + * @devnode: the device node to unregister + * + * This unregisters the passed device. Future open calls will be met with + * errors. + * + * Should be called after media_devnode_unregister_prepare() + */ +void media_devnode_unregister(struct media_devnode *devnode); + +/** + * media_devnode_data - returns a pointer to the &media_devnode + * + * @filp: pointer to struct &file + */ +static inline struct media_devnode *media_devnode_data(struct file *filp) +{ + return filp->private_data; +} + +/** + * media_devnode_is_registered - returns true if &media_devnode is registered; + * false otherwise. + * + * @devnode: pointer to struct &media_devnode. + * + * Note: If mdev is NULL, it also returns false. + */ +static inline int media_devnode_is_registered(struct media_devnode *devnode) +{ + if (!devnode) + return false; + + return test_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); +} + +#endif /* _MEDIA_DEVNODE_H */ diff --git a/6.17.0/include-overrides/media/media-entity.h b/6.17.0/include-overrides/media/media-entity.h new file mode 100644 index 0000000..64cf590 --- /dev/null +++ b/6.17.0/include-overrides/media/media-entity.h @@ -0,0 +1,1450 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media entity + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + */ + +#ifndef _MEDIA_ENTITY_H +#define _MEDIA_ENTITY_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Enums used internally at the media controller to represent graphs */ + +/** + * enum media_gobj_type - type of a graph object + * + * @MEDIA_GRAPH_ENTITY: Identify a media entity + * @MEDIA_GRAPH_PAD: Identify a media pad + * @MEDIA_GRAPH_LINK: Identify a media link + * @MEDIA_GRAPH_INTF_DEVNODE: Identify a media Kernel API interface via + * a device node + */ +enum media_gobj_type { 
+ MEDIA_GRAPH_ENTITY, + MEDIA_GRAPH_PAD, + MEDIA_GRAPH_LINK, + MEDIA_GRAPH_INTF_DEVNODE, +}; + +#define MEDIA_BITS_PER_TYPE 8 +#define MEDIA_BITS_PER_ID (32 - MEDIA_BITS_PER_TYPE) +#define MEDIA_ID_MASK GENMASK_ULL(MEDIA_BITS_PER_ID - 1, 0) + +/* Structs to represent the objects that belong to a media graph */ + +/** + * struct media_gobj - Define a graph object. + * + * @mdev: Pointer to the struct &media_device that owns the object + * @id: Non-zero object ID identifier. The ID should be unique + * inside a media_device, as it is composed by + * %MEDIA_BITS_PER_TYPE to store the type plus + * %MEDIA_BITS_PER_ID to store the ID + * @list: List entry stored in one of the per-type mdev object lists + * + * All objects on the media graph should have this struct embedded + */ +struct media_gobj { + struct media_device *mdev; + u32 id; + struct list_head list; +}; + +#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 + +/** + * struct media_entity_enum - An enumeration of media entities. + * + * @bmap: Bit map in which each bit represents one entity at struct + * media_entity->internal_idx. + * @idx_max: Number of bits in bmap + */ +struct media_entity_enum { + unsigned long *bmap; + int idx_max; +}; + +/** + * struct media_graph - Media graph traversal state + * + * @stack: Graph traversal stack; the stack contains information + * on the path the media entities to be walked and the + * links through which they were reached. + * @stack.entity: pointer to &struct media_entity at the graph. + * @stack.link: pointer to &struct list_head. 
+ * @ent_enum: Visited entities + * @top: The top of the stack + */ +struct media_graph { + struct { + struct media_entity *entity; + struct list_head *link; + } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; + + struct media_entity_enum ent_enum; + int top; +}; + +/** + * struct media_pipeline - Media pipeline related information + * + * @allocated: Media pipeline allocated and freed by the framework + * @mdev: The media device the pipeline is part of + * @pads: List of media_pipeline_pad + * @start_count: Media pipeline start - stop count + */ +struct media_pipeline { + bool allocated; + struct media_device *mdev; + struct list_head pads; + int start_count; +}; + +/** + * struct media_pipeline_pad - A pad part of a media pipeline + * + * @list: Entry in the media_pad pads list + * @pipe: The media_pipeline that the pad is part of + * @pad: The media pad + * + * This structure associate a pad with a media pipeline. Instances of + * media_pipeline_pad are created by media_pipeline_start() when it builds the + * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop() + * removes the entries from the list and deletes them. + */ +struct media_pipeline_pad { + struct list_head list; + struct media_pipeline *pipe; + struct media_pad *pad; +}; + +/** + * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad + * + * @cursor: The current element + */ +struct media_pipeline_pad_iter { + struct list_head *cursor; +}; + +/** + * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity + * + * @ent_enum: The entity enumeration tracker + * @cursor: The current element + */ +struct media_pipeline_entity_iter { + struct media_entity_enum ent_enum; + struct list_head *cursor; +}; + +/** + * struct media_link - A link object part of a media graph. + * + * @graph_obj: Embedded structure containing the media object common data + * @list: Linked list associated with an entity or an interface that + * owns the link. 
+ * @gobj0: Part of a union. Used to get the pointer for the first + * graph_object of the link. + * @source: Part of a union. Used only if the first object (gobj0) is + * a pad. In that case, it represents the source pad. + * @intf: Part of a union. Used only if the first object (gobj0) is + * an interface. + * @gobj1: Part of a union. Used to get the pointer for the second + * graph_object of the link. + * @sink: Part of a union. Used only if the second object (gobj1) is + * a pad. In that case, it represents the sink pad. + * @entity: Part of a union. Used only if the second object (gobj1) is + * an entity. + * @reverse: Pointer to the link for the reverse direction of a pad to pad + * link. + * @flags: Link flags, as defined in uapi/media.h (MEDIA_LNK_FL_*) + * @is_backlink: Indicate if the link is a backlink. + */ +struct media_link { + struct media_gobj graph_obj; + struct list_head list; + union { + struct media_gobj *gobj0; + struct media_pad *source; + struct media_interface *intf; + }; + union { + struct media_gobj *gobj1; + struct media_pad *sink; + struct media_entity *entity; + }; + struct media_link *reverse; + unsigned long flags; + bool is_backlink; +}; + +/** + * enum media_pad_signal_type - type of the signal inside a media pad + * + * @PAD_SIGNAL_DEFAULT: + * Default signal. Use this when all inputs or all outputs are + * uniquely identified by the pad number. + * @PAD_SIGNAL_ANALOG: + * The pad contains an analog signal. It can be Radio Frequency, + * Intermediate Frequency, a baseband signal or sub-carriers. + * Tuner inputs, IF-PLL demodulators, composite and s-video signals + * should use it. + * @PAD_SIGNAL_DV: + * Contains a digital video signal, with can be a bitstream of samples + * taken from an analog TV video source. On such case, it usually + * contains the VBI data on it. + * @PAD_SIGNAL_AUDIO: + * Contains an Intermediate Frequency analog signal from an audio + * sub-carrier or an audio bitstream. 
IF signals are provided by tuners + * and consumed by audio AM/FM decoders. Bitstream audio is provided by + * an audio decoder. + */ +enum media_pad_signal_type { + PAD_SIGNAL_DEFAULT = 0, + PAD_SIGNAL_ANALOG, + PAD_SIGNAL_DV, + PAD_SIGNAL_AUDIO, +}; + +/** + * struct media_pad - A media pad graph object. + * + * @graph_obj: Embedded structure containing the media object common data + * @entity: Entity this pad belongs to + * @index: Pad index in the entity pads array, numbered from 0 to n + * @num_links: Number of links connected to this pad + * @sig_type: Type of the signal inside a media pad + * @flags: Pad flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_PAD_FL_*``) + * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to + * access this field. + */ +struct media_pad { + struct media_gobj graph_obj; /* must be first field in struct */ + struct media_entity *entity; + u16 index; + u16 num_links; + enum media_pad_signal_type sig_type; + unsigned long flags; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + struct media_pipeline *pipe; +}; + +/** + * struct media_entity_operations - Media entity operations + * @get_fwnode_pad: Return the pad number based on a fwnode endpoint or + * a negative value on error. This operation can be used + * to map a fwnode to a media pad number. Optional. + * @link_setup: Notify the entity of link changes. The operation can + * return an error, in which case link setup will be + * cancelled. Optional. + * @link_validate: Return whether a link is valid from the entity point of + * view. The media_pipeline_start() function + * validates all links by calling this operation. Optional. + * @has_pad_interdep: Return whether two pads of the entity are + * interdependent. 
If two pads are interdependent they are + * part of the same pipeline and enabling one of the pads + * means that the other pad will become "locked" and + * doesn't allow configuration changes. pad0 and pad1 are + * guaranteed to not both be sinks or sources. Never call + * the .has_pad_interdep() operation directly, always use + * media_entity_has_pad_interdep(). + * Optional: If the operation isn't implemented all pads + * will be considered as interdependent. + * + * .. note:: + * + * Those these callbacks are called with struct &media_device.graph_mutex + * mutex held. + */ +struct media_entity_operations { + int (*get_fwnode_pad)(struct media_entity *entity, + struct fwnode_endpoint *endpoint); + int (*link_setup)(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags); + int (*link_validate)(struct media_link *link); + bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0, + unsigned int pad1); +}; + +/** + * enum media_entity_type - Media entity type + * + * @MEDIA_ENTITY_TYPE_BASE: + * The entity isn't embedded in another subsystem structure. + * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE: + * The entity is embedded in a struct video_device instance. + * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV: + * The entity is embedded in a struct v4l2_subdev instance. + * + * Media entity objects are often not instantiated directly, but the media + * entity structure is inherited by (through embedding) other subsystem-specific + * structures. The media entity type identifies the type of the subclass + * structure that implements a media entity instance. + * + * This allows runtime type identification of media entities and safe casting to + * the correct object type. For instance, a media entity structure instance + * embedded in a v4l2_subdev structure instance will have the type + * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev + * structure using the container_of() macro. 
+ */ +enum media_entity_type { + MEDIA_ENTITY_TYPE_BASE, + MEDIA_ENTITY_TYPE_VIDEO_DEVICE, + MEDIA_ENTITY_TYPE_V4L2_SUBDEV, +}; + +/** + * struct media_entity - A media entity graph object. + * + * @graph_obj: Embedded structure containing the media object common data. + * @name: Entity name. + * @obj_type: Type of the object that implements the media_entity. + * @function: Entity main function, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_ENT_F_*``) + * @flags: Entity flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_ENT_FL_*``) + * @num_pads: Number of sink and source pads. + * @num_links: Total number of links, forward and back, enabled and disabled. + * @num_backlinks: Number of backlinks + * @internal_idx: An unique internal entity specific number. The numbers are + * re-used if entities are unregistered or registered again. + * @pads: Pads array with the size defined by @num_pads. + * @links: List of data links. + * @ops: Entity operations. + * @use_count: Use count for the entity. + * @info: Union with devnode information. Kept just for backward + * compatibility. + * @info.dev: Contains device major and minor info. + * @info.dev.major: device node major, if the device is a devnode. + * @info.dev.minor: device node minor, if the device is a devnode. + * + * .. note:: + * + * The @use_count reference count must never be negative, but is a signed + * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect + * reference count bugs that would make it negative. 
+ */ +struct media_entity { + struct media_gobj graph_obj; /* must be first field in struct */ + const char *name; + enum media_entity_type obj_type; + u32 function; + unsigned long flags; + + u16 num_pads; + u16 num_links; + u16 num_backlinks; + int internal_idx; + + struct media_pad *pads; + struct list_head links; + + const struct media_entity_operations *ops; + + int use_count; + + union { + struct { + u32 major; + u32 minor; + } dev; + } info; +}; + +/** + * media_entity_for_each_pad - Iterate on all pads in an entity + * @entity: The entity the pads belong to + * @iter: The iterator pad + * + * Iterate on all pads in a media entity. + */ +#define media_entity_for_each_pad(entity, iter) \ + for (iter = (entity)->pads; \ + iter < &(entity)->pads[(entity)->num_pads]; \ + ++iter) + +/** + * struct media_interface - A media interface graph object. + * + * @graph_obj: embedded graph object + * @links: List of links pointing to graph entities + * @type: Type of the interface as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_INTF_T_*``) + * @flags: Interface flags as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_INTF_FL_*``) + * + * .. note:: + * + * Currently, no flags for &media_interface is defined. + */ +struct media_interface { + struct media_gobj graph_obj; + struct list_head links; + u32 type; + u32 flags; +}; + +/** + * struct media_intf_devnode - A media interface via a device node. 
+ * + * @intf: embedded interface object + * @major: Major number of a device node + * @minor: Minor number of a device node + */ +struct media_intf_devnode { + struct media_interface intf; + + /* Should match the fields at media_v2_intf_devnode */ + u32 major; + u32 minor; +}; + +/** + * media_entity_id() - return the media entity graph object id + * + * @entity: pointer to &media_entity + */ +static inline u32 media_entity_id(struct media_entity *entity) +{ + return entity->graph_obj.id; +} + +/** + * media_type() - return the media object type + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +static inline enum media_gobj_type media_type(struct media_gobj *gobj) +{ + return gobj->id >> MEDIA_BITS_PER_ID; +} + +/** + * media_id() - return the media object ID + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +static inline u32 media_id(struct media_gobj *gobj) +{ + return gobj->id & MEDIA_ID_MASK; +} + +/** + * media_gobj_gen_id() - encapsulates type and ID on at the object ID + * + * @type: object type as define at enum &media_gobj_type. + * @local_id: next ID, from struct &media_device.id. + */ +static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id) +{ + u32 id; + + id = type << MEDIA_BITS_PER_ID; + id |= local_id & MEDIA_ID_MASK; + + return id; +} + +/** + * is_media_entity_v4l2_video_device() - Check if the entity is a video_device + * @entity: pointer to entity + * + * Return: %true if the entity is an instance of a video_device object and can + * safely be cast to a struct video_device using the container_of() macro, or + * %false otherwise. 
+ */ +static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity) +{ + return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE; +} + +/** + * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev + * @entity: pointer to entity + * + * Return: %true if the entity is an instance of a &v4l2_subdev object and can + * safely be cast to a struct &v4l2_subdev using the container_of() macro, or + * %false otherwise. + */ +static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) +{ + return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV; +} + +/** + * media_entity_enum_init - Initialise an entity enumeration + * + * @ent_enum: Entity enumeration to be initialised + * @mdev: The related media device + * + * Return: zero on success or a negative error code. + */ +__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev); + +/** + * media_entity_enum_cleanup - Release resources of an entity enumeration + * + * @ent_enum: Entity enumeration to be released + */ +void media_entity_enum_cleanup(struct media_entity_enum *ent_enum); + +/** + * media_entity_enum_zero - Clear the entire enum + * + * @ent_enum: Entity enumeration to be cleared + */ +static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) +{ + bitmap_zero(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_set - Mark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be marked + */ +static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_clear - Unmark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be unmarked + */ +static inline void media_entity_enum_clear(struct 
media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __clear_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test - Test whether the entity is marked + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns %true if the entity was marked. + */ +static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return test_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test_and_set - Test whether the entity is marked, + * and mark it + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns %true if the entity was marked, and mark it before doing so. + */ +static inline bool +media_entity_enum_test_and_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return __test_and_set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_empty - Test whether the entire enum is empty + * + * @ent_enum: Entity enumeration + * + * Return: %true if the entity was empty. + */ +static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum) +{ + return bitmap_empty(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_intersects - Test whether two enums intersect + * + * @ent_enum1: First entity enumeration + * @ent_enum2: Second entity enumeration + * + * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect, + * otherwise %false. 
+ */ +static inline bool media_entity_enum_intersects( + struct media_entity_enum *ent_enum1, + struct media_entity_enum *ent_enum2) +{ + WARN_ON(ent_enum1->idx_max != ent_enum2->idx_max); + + return bitmap_intersects(ent_enum1->bmap, ent_enum2->bmap, + min(ent_enum1->idx_max, ent_enum2->idx_max)); +} + +/** + * gobj_to_entity - returns the struct &media_entity pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_entity(gobj) \ + container_of(gobj, struct media_entity, graph_obj) + +/** + * gobj_to_pad - returns the struct &media_pad pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_pad(gobj) \ + container_of(gobj, struct media_pad, graph_obj) + +/** + * gobj_to_link - returns the struct &media_link pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_link(gobj) \ + container_of(gobj, struct media_link, graph_obj) + +/** + * gobj_to_intf - returns the struct &media_interface pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_intf(gobj) \ + container_of(gobj, struct media_interface, graph_obj) + +/** + * intf_to_devnode - returns the struct media_intf_devnode pointer from the + * @intf contained on it. + * + * @intf: Pointer to struct &media_intf_devnode + */ +#define intf_to_devnode(intf) \ + container_of(intf, struct media_intf_devnode, intf) + +/** + * media_gobj_create - Initialize a graph object + * + * @mdev: Pointer to the &media_device that contains the object + * @type: Type of the object + * @gobj: Pointer to the struct &media_gobj graph object + * + * This routine initializes the embedded struct &media_gobj inside a + * media graph object. It is called automatically if ``media_*_create`` + * function calls are used. 
However, if the object (entity, link, pad, + * interface) is embedded on some other object, this function should be + * called before registering the object at the media controller. + */ +void media_gobj_create(struct media_device *mdev, + enum media_gobj_type type, + struct media_gobj *gobj); + +/** + * media_gobj_destroy - Stop using a graph object on a media device + * + * @gobj: Pointer to the struct &media_gobj graph object + * + * This should be called by all routines like media_device_unregister() + * that remove/destroy media graph objects. + */ +void media_gobj_destroy(struct media_gobj *gobj); + +/** + * media_entity_pads_init() - Initialize the entity pads + * + * @entity: entity where the pads belong + * @num_pads: total number of sink and source pads + * @pads: Array of @num_pads pads. + * + * The pads array is managed by the entity driver and passed to + * media_entity_pads_init() where its pointer will be stored in the + * &media_entity structure. + * + * If no pads are needed, drivers could either directly fill + * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call + * this function that will do the same. + * + * As the number of pads is known in advance, the pads array is not allocated + * dynamically but is managed by the entity driver. Most drivers will embed the + * pads array in a driver-specific structure, avoiding dynamic allocation. + * + * Drivers must set the direction of every pad in the pads array before calling + * media_entity_pads_init(). The function will initialize the other pads fields. + */ +int media_entity_pads_init(struct media_entity *entity, u16 num_pads, + struct media_pad *pads); + +/** + * media_entity_cleanup() - free resources associated with an entity + * + * @entity: entity where the pads belong + * + * This function must be called during the cleanup phase after unregistering + * the entity (currently, it does nothing). 
+ * + * Calling media_entity_cleanup() on a media_entity whose memory has been + * zeroed but that has not been initialized with media_entity_pad_init() is + * valid and is a no-op. + */ +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) +static inline void media_entity_cleanup(struct media_entity *entity) {} +#else +#define media_entity_cleanup(entity) do { } while (false) +#endif + +/** + * media_get_pad_index() - retrieves a pad index from an entity + * + * @entity: entity where the pads belong + * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types + * @sig_type: type of signal of the pad to be search + * + * This helper function finds the first pad index inside an entity that + * satisfies both @is_sink and @sig_type conditions. + * + * Return: + * + * On success, return the pad number. If the pad was not found or the media + * entity is a NULL pointer, return -EINVAL. + */ +int media_get_pad_index(struct media_entity *entity, u32 pad_type, + enum media_pad_signal_type sig_type); + +/** + * media_create_pad_link() - creates a link between two entities. + * + * @source: pointer to &media_entity of the source pad. + * @source_pad: number of the source pad in the pads array + * @sink: pointer to &media_entity of the sink pad. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_LNK_FL_*``) + * + * Valid values for flags: + * + * %MEDIA_LNK_FL_ENABLED + * Indicates that the link is enabled and can be used to transfer media data. + * When two or more links target a sink pad, only one of them can be + * enabled at a time. + * + * %MEDIA_LNK_FL_IMMUTABLE + * Indicates that the link enabled state can't be modified at runtime. If + * %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be + * set, since an immutable link is always enabled. + * + * .. 
note:: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for both ends. + */ +__must_check int media_create_pad_link(struct media_entity *source, + u16 source_pad, struct media_entity *sink, + u16 sink_pad, u32 flags); + +/** + * media_create_pad_links() - creates a link between two entities. + * + * @mdev: Pointer to the media_device that contains the object + * @source_function: Function of the source entities. Used only if @source is + * NULL. + * @source: pointer to &media_entity of the source pad. If NULL, it will use + * all entities that matches the @sink_function. + * @source_pad: number of the source pad in the pads array + * @sink_function: Function of the sink entities. Used only if @sink is NULL. + * @sink: pointer to &media_entity of the sink pad. If NULL, it will use + * all entities that matches the @sink_function. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in include/uapi/linux/media.h. + * @allow_both_undefined: if %true, then both @source and @sink can be NULL. + * In such case, it will create a crossbar between all entities that + * matches @source_function to all entities that matches @sink_function. + * If %false, it will return 0 and won't create any link if both @source + * and @sink are NULL. + * + * Valid values for flags: + * + * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be + * used to transfer media data. If multiple links are created and this + * flag is passed as an argument, only the first created link will have + * this flag. + * + * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't + * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then + * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is + * always enabled. 
+ * + * It is common for some devices to have multiple source and/or sink entities + * of the same type that should be linked. While media_create_pad_link() + * creates link by link, this function is meant to allow 1:n, n:1 and even + * cross-bar (n:n) links. + * + * .. note:: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for the + * entities to be linked. + */ +int media_create_pad_links(const struct media_device *mdev, + const u32 source_function, + struct media_entity *source, + const u16 source_pad, + const u32 sink_function, + struct media_entity *sink, + const u16 sink_pad, + u32 flags, + const bool allow_both_undefined); + +void __media_entity_remove_links(struct media_entity *entity); + +/** + * media_entity_remove_links() - remove all links associated with an entity + * + * @entity: pointer to &media_entity + * + * .. note:: + * + * This is called automatically when an entity is unregistered via + * media_device_register_entity(). + */ +void media_entity_remove_links(struct media_entity *entity); + +/** + * __media_entity_setup_link - Configure a media link without locking + * @link: The link being configured + * @flags: Link configuration flags + * + * The bulk of link setup is handled by the two entities connected through the + * link. This function notifies both entities of the link configuration change. + * + * If the link is immutable or if the current and new configuration are + * identical, return immediately. + * + * The user is expected to hold link->source->parent->mutex. If not, + * media_entity_setup_link() should be used instead. 
+ */ +int __media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_setup_link() - changes the link flags properties in runtime + * + * @link: pointer to &media_link + * @flags: the requested new link flags + * + * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag + * to enable/disable a link. Links marked with the + * %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled. + * + * When a link is enabled or disabled, the media framework calls the + * link_setup operation for the two entities at the source and sink of the + * link, in that order. If the second link_setup call fails, another + * link_setup call is made on the first entity to restore the original link + * flags. + * + * Media device drivers can be notified of link setup operations by setting the + * &media_device.link_notify pointer to a callback function. If provided, the + * notification callback will be called before enabling and after disabling + * links. + * + * Entity drivers must implement the link_setup operation if any of their links + * is non-immutable. The operation must either configure the hardware or store + * the configuration information to be applied later. + * + * Link configuration must not have any side effect on other links. If an + * enabled link at a sink pad prevents another link at the same pad from + * being enabled, the link_setup operation must return %-EBUSY and can't + * implicitly disable the first enabled link. + * + * .. note:: + * + * The valid values of the flags for the link is the same as described + * on media_create_pad_link(), for pad to pad links or the same as described + * on media_create_intf_link(), for interface to entity links. + */ +int media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_find_link - Find a link between two pads + * @source: Source pad + * @sink: Sink pad + * + * Return: returns a pointer to the link between the two entities. 
If no + * such link exists, return %NULL. + */ +struct media_link *media_entity_find_link(struct media_pad *source, + struct media_pad *sink); + +/** + * media_pad_remote_pad_first - Find the first pad at the remote end of a link + * @pad: Pad at the local end of the link + * + * Search for a remote pad connected to the given pad by iterating over all + * links originating or terminating at that pad until an enabled link is found. + * + * Return: returns a pointer to the pad at the remote end of the first found + * enabled link, or %NULL if no enabled link has been found. + */ +struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad); + +/** + * media_pad_remote_pad_unique - Find a remote pad connected to a pad + * @pad: The pad + * + * Search for and return a remote pad connected to @pad through an enabled + * link. If multiple (or no) remote pads are found, an error is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time on a given pad. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad); + +/** + * media_entity_remote_pad_unique - Find a remote pad connected to an entity + * @entity: The entity + * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE) + * + * Search for and return a remote pad of @type connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source or sink at a time. 
+ * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad * +media_entity_remote_pad_unique(const struct media_entity *entity, + unsigned int type); + +/** + * media_entity_remote_source_pad_unique - Find a remote source pad connected to + * an entity + * @entity: The entity + * + * Search for and return a remote source pad connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +static inline struct media_pad * +media_entity_remote_source_pad_unique(const struct media_entity *entity) +{ + return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE); +} + +/** + * media_pad_is_streaming - Test if a pad is part of a streaming pipeline + * @pad: The pad + * + * Return: True if the pad is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. + */ +static inline bool media_pad_is_streaming(const struct media_pad *pad) +{ + return pad->pipe; +} + +/** + * media_entity_is_streaming - Test if an entity is part of a streaming pipeline + * @entity: The entity + * + * Return: True if the entity is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. 
+ */ +static inline bool media_entity_is_streaming(const struct media_entity *entity) +{ + struct media_pad *pad; + + media_entity_for_each_pad(entity, pad) { + if (media_pad_is_streaming(pad)) + return true; + } + + return false; +} + +/** + * media_entity_pipeline - Get the media pipeline an entity is part of + * @entity: The entity + * + * DEPRECATED: use media_pad_pipeline() instead. + * + * This function returns the media pipeline that an entity has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * In general, entities can be part of multiple pipelines, when carrying + * multiple streams (either on different pads, or on the same pad using + * multiplexed streams). This function is to be used only for entities that + * do not support multiple pipelines. + * + * Return: The media_pipeline the entity is part of, or NULL if the entity is + * not part of any pipeline. + */ +struct media_pipeline *media_entity_pipeline(struct media_entity *entity); + +/** + * media_pad_pipeline - Get the media pipeline a pad is part of + * @pad: The pad + * + * This function returns the media pipeline that a pad has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * Return: The media_pipeline the pad is part of, or NULL if the pad is + * not part of any pipeline. + */ +struct media_pipeline *media_pad_pipeline(struct media_pad *pad); + +/** + * media_entity_get_fwnode_pad - Get pad number from fwnode + * + * @entity: The entity + * @fwnode: Pointer to the fwnode_handle which should be used to find the pad + * @direction_flags: Expected direction of the pad, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_PAD_FL_*``) + * + * This function can be used to resolve the media pad number from + * a fwnode. 
This is useful for devices which use more complex + * mappings of media pads. + * + * If the entity does not implement the get_fwnode_pad() operation + * then this function searches the entity for the first pad that + * matches the @direction_flags. + * + * Return: returns the pad number on success or a negative error code. + */ +int media_entity_get_fwnode_pad(struct media_entity *entity, + const struct fwnode_handle *fwnode, + unsigned long direction_flags); + +/** + * media_graph_walk_init - Allocate resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + * @mdev: Pointer to the &media_device that contains the object + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * The caller is required to hold the media_device graph_mutex during the graph + * walk until the graph state is released. + * + * Returns zero on success or a negative error code otherwise. + */ +__must_check int media_graph_walk_init( + struct media_graph *graph, struct media_device *mdev); + +/** + * media_graph_walk_cleanup - Release resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + */ +void media_graph_walk_cleanup(struct media_graph *graph); + +/** + * media_graph_walk_start - Start walking the media graph at a + * given entity + * + * @graph: Media graph structure that will be used to walk the graph + * @entity: Starting entity + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * Before using this function, media_graph_walk_init() must be + * used to allocate resources used for walking the graph. This + * function initializes the graph traversal structure to walk the + * entities graph starting at the given entity. The traversal + * structure must not be modified by the caller during graph + * traversal. 
After the graph walk, the resources must be released + * using media_graph_walk_cleanup(). + */ +void media_graph_walk_start(struct media_graph *graph, + struct media_entity *entity); + +/** + * media_graph_walk_next - Get the next entity in the graph + * @graph: Media graph structure + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * Perform a depth-first traversal of the given media entities graph. + * + * The graph structure must have been previously initialized with a call to + * media_graph_walk_start(). + * + * Return: returns the next entity in the graph or %NULL if the whole graph + * have been traversed. + */ +struct media_entity *media_graph_walk_next(struct media_graph *graph); + +/** + * media_pipeline_start - Mark a pipeline as streaming + * @origin: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. + * + * Mark all pads connected to pad @origin through enabled links, either + * directly or indirectly, as streaming. The given pipeline object is assigned + * to every pad in the pipeline and stored in the media_pad pipe field. + * + * Calls to this function can be nested, in which case the same number of + * media_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * media_pipeline_start(). + */ +__must_check int media_pipeline_start(struct media_pad *origin, + struct media_pipeline *pipe); +/** + * __media_pipeline_start - Mark a pipeline as streaming + * + * @origin: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. 
+ * + * ..note:: This is the non-locking version of media_pipeline_start() + */ +__must_check int __media_pipeline_start(struct media_pad *origin, + struct media_pipeline *pipe); + +/** + * media_pipeline_stop - Mark a pipeline as not streaming + * @pad: Starting pad + * + * Mark all pads connected to a given pad through enabled links, either + * directly or indirectly, as not streaming. The media_pad pipe field is + * reset to %NULL. + * + * If multiple calls to media_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + */ +void media_pipeline_stop(struct media_pad *pad); + +/** + * __media_pipeline_stop - Mark a pipeline as not streaming + * + * @pad: Starting pad + * + * .. note:: This is the non-locking version of media_pipeline_stop() + */ +void __media_pipeline_stop(struct media_pad *pad); + +struct media_pad * +__media_pipeline_pad_iter_next(struct media_pipeline *pipe, + struct media_pipeline_pad_iter *iter, + struct media_pad *pad); + +/** + * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline + * @pipe: The pipeline + * @iter: The iterator (struct media_pipeline_pad_iter) + * @pad: The iterator pad + * + * Iterate on all pads in a media pipeline. This is only valid after the + * pipeline has been built with media_pipeline_start() and before it gets + * destroyed with media_pipeline_stop(). + */ +#define media_pipeline_for_each_pad(pipe, iter, pad) \ + for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \ + pad != NULL; \ + pad = __media_pipeline_pad_iter_next((pipe), iter, pad)) + +/** + * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator + * @pipe: The pipeline + * @iter: The iterator + * + * This function must be called to initialize the iterator before using it in a + * media_pipeline_for_each_entity() loop. 
The iterator must be destroyed by a + * call to media_pipeline_entity_iter_cleanup after the loop (including in code + * paths that break from the loop). + * + * The same iterator can be used in multiple consecutive loops without being + * destroyed and reinitialized. + * + * Return: 0 on success or a negative error code otherwise. + */ +int media_pipeline_entity_iter_init(struct media_pipeline *pipe, + struct media_pipeline_entity_iter *iter); + +/** + * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator + * @iter: The iterator + * + * This function must be called to destroy iterators initialized with + * media_pipeline_entity_iter_init(). + */ +void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter); + +struct media_entity * +__media_pipeline_entity_iter_next(struct media_pipeline *pipe, + struct media_pipeline_entity_iter *iter, + struct media_entity *entity); + +/** + * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline + * @pipe: The pipeline + * @iter: The iterator (struct media_pipeline_entity_iter) + * @entity: The iterator entity + * + * Iterate on all entities in a media pipeline. This is only valid after the + * pipeline has been built with media_pipeline_start() and before it gets + * destroyed with media_pipeline_stop(). The iterator must be initialized with + * media_pipeline_entity_iter_init() before iteration, and destroyed with + * media_pipeline_entity_iter_cleanup() after (including in code paths that + * break from the loop). 
+ */ +#define media_pipeline_for_each_entity(pipe, iter, entity) \ + for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \ + entity != NULL; \ + entity = __media_pipeline_entity_iter_next((pipe), iter, entity)) + +/** + * media_pipeline_alloc_start - Mark a pipeline as streaming + * @pad: Starting pad + * + * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead + * of working on a given pipeline the function will use an existing pipeline if + * the pad is already part of a pipeline, or allocate a new pipeline. + * + * Calls to media_pipeline_alloc_start() must be matched with + * media_pipeline_stop(). + */ +__must_check int media_pipeline_alloc_start(struct media_pad *pad); + +/** + * media_devnode_create() - creates and initializes a device node interface + * + * @mdev: pointer to struct &media_device + * @type: type of the interface, as given by + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_INTF_T_*``) macros. + * @flags: Interface flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_INTF_FL_*``) + * @major: Device node major number. + * @minor: Device node minor number. + * + * Return: if succeeded, returns a pointer to the newly allocated + * &media_intf_devnode pointer. + * + * .. note:: + * + * Currently, no flags for &media_interface is defined. + */ +struct media_intf_devnode * +__must_check media_devnode_create(struct media_device *mdev, + u32 type, u32 flags, + u32 major, u32 minor); +/** + * media_devnode_remove() - removes a device node interface + * + * @devnode: pointer to &media_intf_devnode to be freed. + * + * When a device node interface is removed, all links to it are automatically + * removed. 
+ */ +void media_devnode_remove(struct media_intf_devnode *devnode); + +/** + * media_create_intf_link() - creates a link between an entity and an interface + * + * @entity: pointer to %media_entity + * @intf: pointer to %media_interface + * @flags: Link flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_LNK_FL_*``) + * + * + * Valid values for flags: + * + * %MEDIA_LNK_FL_ENABLED + * Indicates that the interface is connected to the entity hardware. + * That's the default value for interfaces. An interface may be disabled if + * the hardware is busy due to the usage of some other interface that it is + * currently controlling the hardware. + * + * A typical example is an hybrid TV device that handle only one type of + * stream on a given time. So, when the digital TV is streaming, + * the V4L2 interfaces won't be enabled, as such device is not able to + * also stream analog TV or radio. + * + * .. note:: + * + * Before calling this function, media_devnode_create() should be called for + * the interface and media_device_register_entity() should be called for the + * interface that will be part of the link. + */ +struct media_link * +__must_check media_create_intf_link(struct media_entity *entity, + struct media_interface *intf, + u32 flags); +/** + * __media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * .. note:: This is an unlocked version of media_remove_intf_link() + */ +void __media_remove_intf_link(struct media_link *link); + +/** + * media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * .. note:: Prefer to use this one, instead of __media_remove_intf_link() + */ +void media_remove_intf_link(struct media_link *link); + +/** + * __media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * .. note:: This is an unlocked version of media_remove_intf_links(). 
+ */ +void __media_remove_intf_links(struct media_interface *intf); + +/** + * media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * .. note:: + * + * #) This is called automatically when an entity is unregistered via + * media_device_register_entity() and by media_devnode_remove(). + * + * #) Prefer to use this one, instead of __media_remove_intf_links(). + */ +void media_remove_intf_links(struct media_interface *intf); + +/** + * media_entity_call - Calls a struct media_entity_operations operation on + * an entity + * + * @entity: entity where the @operation will be called + * @operation: type of the operation. Should be the name of a member of + * struct &media_entity_operations. + * + * This helper function will check if @operation is not %NULL. On such case, + * it will issue a call to @operation\(@entity, @args\). + */ + +#define media_entity_call(entity, operation, args...) \ + (((entity)->ops && (entity)->ops->operation) ? \ + (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD) + +/** + * media_create_ancillary_link() - create an ancillary link between two + * instances of &media_entity + * + * @primary: pointer to the primary &media_entity + * @ancillary: pointer to the ancillary &media_entity + * + * Create an ancillary link between two entities, indicating that they + * represent two connected pieces of hardware that form a single logical unit. + * A typical example is a camera lens controller being linked to the sensor that + * it is supporting. + * + * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for + * the new link. 
+ */ +struct media_link * +media_create_ancillary_link(struct media_entity *primary, + struct media_entity *ancillary); + +/** + * __media_entity_next_link() - Iterate through a &media_entity's links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags + * + * Return the next link against an entity matching a specific link type. This + * allows iteration through an entity's links whilst guaranteeing all of the + * returned links are of the given type. + */ +struct media_link *__media_entity_next_link(struct media_entity *entity, + struct media_link *link, + unsigned long link_type); + +/** + * for_each_media_entity_data_link() - Iterate through an entity's data links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * + * Iterate over a &media_entity's data links + */ +#define for_each_media_entity_data_link(entity, link) \ + for (link = __media_entity_next_link(entity, NULL, \ + MEDIA_LNK_FL_DATA_LINK); \ + link; \ + link = __media_entity_next_link(entity, link, \ + MEDIA_LNK_FL_DATA_LINK)) + +#endif diff --git a/6.17.0/include-overrides/media/media-request.h b/6.17.0/include-overrides/media/media-request.h new file mode 100644 index 0000000..d4ac557 --- /dev/null +++ b/6.17.0/include-overrides/media/media-request.h @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Media device request objects + * + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ * Copyright (C) 2018 Intel Corporation + * + * Author: Hans Verkuil + * Author: Sakari Ailus + */ + +#ifndef MEDIA_REQUEST_H +#define MEDIA_REQUEST_H + +#include +#include +#include +#include + +#include + +/** + * enum media_request_state - media request state + * + * @MEDIA_REQUEST_STATE_IDLE: Idle + * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes + * allowed + * @MEDIA_REQUEST_STATE_QUEUED: Queued + * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done + * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited + * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e. + * request objects are being added, + * modified or removed + * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used + * internally for sanity check purposes + */ +enum media_request_state { + MEDIA_REQUEST_STATE_IDLE, + MEDIA_REQUEST_STATE_VALIDATING, + MEDIA_REQUEST_STATE_QUEUED, + MEDIA_REQUEST_STATE_COMPLETE, + MEDIA_REQUEST_STATE_CLEANING, + MEDIA_REQUEST_STATE_UPDATING, + NR_OF_MEDIA_REQUEST_STATE, +}; + +struct media_request_object; + +/** + * struct media_request - Media device request + * @mdev: Media device this request belongs to + * @kref: Reference count + * @debug_str: Prefix for debug messages (process name:fd) + * @state: The state of the request + * @updating_count: count the number of request updates that are in progress + * @access_count: count the number of request accesses that are in progress + * @objects: List of @struct media_request_object request objects + * @num_incomplete_objects: The number of incomplete objects in the request + * @poll_wait: Wait queue for poll + * @lock: Serializes access to this struct + */ +struct media_request { + struct media_device *mdev; + struct kref kref; + char debug_str[TASK_COMM_LEN + 11]; + enum media_request_state state; + unsigned int updating_count; + unsigned int access_count; + struct list_head objects; + unsigned int num_incomplete_objects; + 
wait_queue_head_t poll_wait; + spinlock_t lock; +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * media_request_lock_for_access - Lock the request to access its objects + * + * @req: The media request + * + * Use before accessing a completed request. A reference to the request must + * be held during the access. This usually takes place automatically through + * a file handle. Use @media_request_unlock_for_access when done. + */ +static inline int __must_check +media_request_lock_for_access(struct media_request *req) +{ + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { + req->access_count++; + ret = 0; + } + spin_unlock_irqrestore(&req->lock, flags); + + return ret; +} + +/** + * media_request_unlock_for_access - Unlock a request previously locked for + * access + * + * @req: The media request + * + * Unlock a request that has previously been locked using + * @media_request_lock_for_access. + */ +static inline void media_request_unlock_for_access(struct media_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + if (!WARN_ON(!req->access_count)) + req->access_count--; + spin_unlock_irqrestore(&req->lock, flags); +} + +/** + * media_request_lock_for_update - Lock the request for updating its objects + * + * @req: The media request + * + * Use before updating a request, i.e. adding, modifying or removing a request + * object in it. A reference to the request must be held during the update. This + * usually takes place automatically through a file handle. Use + * @media_request_unlock_for_update when done. 
+ */ +static inline int __must_check +media_request_lock_for_update(struct media_request *req) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_IDLE || + req->state == MEDIA_REQUEST_STATE_UPDATING) { + req->state = MEDIA_REQUEST_STATE_UPDATING; + req->updating_count++; + } else { + ret = -EBUSY; + } + spin_unlock_irqrestore(&req->lock, flags); + + return ret; +} + +/** + * media_request_unlock_for_update - Unlock a request previously locked for + * update + * + * @req: The media request + * + * Unlock a request that has previously been locked using + * @media_request_lock_for_update. + */ +static inline void media_request_unlock_for_update(struct media_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + WARN_ON(req->updating_count <= 0); + if (!--req->updating_count) + req->state = MEDIA_REQUEST_STATE_IDLE; + spin_unlock_irqrestore(&req->lock, flags); +} + +/** + * media_request_get - Get the media request + * + * @req: The media request + * + * Get the media request. + */ +static inline void media_request_get(struct media_request *req) +{ + kref_get(&req->kref); +} + +/** + * media_request_put - Put the media request + * + * @req: The media request + * + * Put the media request. The media request will be released + * when the refcount reaches 0. + */ +void media_request_put(struct media_request *req); + +/** + * media_request_get_by_fd - Get a media request by fd + * + * @mdev: Media device this request belongs to + * @request_fd: The file descriptor of the request + * + * Get the request represented by @request_fd that is owned + * by the media device. + * + * Return a -EBADR error pointer if requests are not supported + * by this driver. Return -EINVAL if the request was not found. + * Return the pointer to the request if found: the caller will + * have to call @media_request_put when it finished using the + * request. 
+ */ +struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd); + +/** + * media_request_alloc - Allocate the media request + * + * @mdev: Media device this request belongs to + * @alloc_fd: Store the request's file descriptor in this int + * + * Allocated the media request and put the fd in @alloc_fd. + */ +int media_request_alloc(struct media_device *mdev, + int *alloc_fd); + +#else + +static inline void media_request_get(struct media_request *req) +{ +} + +static inline void media_request_put(struct media_request *req) +{ +} + +static inline struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd) +{ + return ERR_PTR(-EBADR); +} + +#endif + +/** + * struct media_request_object_ops - Media request object operations + * @prepare: Validate and prepare the request object, optional. + * @unprepare: Unprepare the request object, optional. + * @queue: Queue the request object, optional. + * @unbind: Unbind the request object, optional. + * @release: Release the request object, required. + */ +struct media_request_object_ops { + int (*prepare)(struct media_request_object *object); + void (*unprepare)(struct media_request_object *object); + void (*queue)(struct media_request_object *object); + void (*unbind)(struct media_request_object *object); + void (*release)(struct media_request_object *object); +}; + +/** + * struct media_request_object - An opaque object that belongs to a media + * request + * + * @ops: object's operations + * @priv: object's priv pointer + * @req: the request this object belongs to (can be NULL) + * @list: List entry of the object for @struct media_request + * @kref: Reference count of the object, acquire before releasing req->lock + * @completed: If true, then this object was completed. + * + * An object related to the request. This struct is always embedded in + * another struct that contains the actual data for this request object. 
+ */ +struct media_request_object { + const struct media_request_object_ops *ops; + void *priv; + struct media_request *req; + struct list_head list; + struct kref kref; + bool completed; +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * media_request_object_get - Get a media request object + * + * @obj: The object + * + * Get a media request object. + */ +static inline void media_request_object_get(struct media_request_object *obj) +{ + kref_get(&obj->kref); +} + +/** + * media_request_object_put - Put a media request object + * + * @obj: The object + * + * Put a media request object. Once all references are gone, the + * object's memory is released. + */ +void media_request_object_put(struct media_request_object *obj); + +/** + * media_request_object_find - Find an object in a request + * + * @req: The media request + * @ops: Find an object with this ops value + * @priv: Find an object with this priv value + * + * Both @ops and @priv must be non-NULL. + * + * Returns the object pointer or NULL if not found. The caller must + * call media_request_object_put() once it finished using the object. + * + * Since this function needs to walk the list of objects it takes + * the @req->lock spin lock to make this safe. + */ +struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv); + +/** + * media_request_object_init - Initialise a media request object + * + * @obj: The object + * + * Initialise a media request object. The object will be released using the + * release callback of the ops once it has no references (this function + * initialises references to one). 
+ */ +void media_request_object_init(struct media_request_object *obj); + +/** + * media_request_object_bind - Bind a media request object to a request + * + * @req: The media request + * @ops: The object ops for this object + * @priv: A driver-specific priv pointer associated with this object + * @is_buffer: Set to true if the object a buffer object. + * @obj: The object + * + * Bind this object to the request and set the ops and priv values of + * the object so it can be found later with media_request_object_find(). + * + * Every bound object must be unbound or completed by the kernel at some + * point in time, otherwise the request will never complete. When the + * request is released all completed objects will be unbound by the + * request core code. + * + * Buffer objects will be added to the end of the request's object + * list, non-buffer objects will be added to the front of the list. + * This ensures that all buffer objects are at the end of the list + * and that all non-buffer objects that they depend on are processed + * first. + */ +int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj); + +/** + * media_request_object_unbind - Unbind a media request object + * + * @obj: The object + * + * Unbind the media request object from the request. + */ +void media_request_object_unbind(struct media_request_object *obj); + +/** + * media_request_object_complete - Mark the media request object as complete + * + * @obj: The object + * + * Mark the media request object as complete. Only bound objects can + * be completed. 
+ */ +void media_request_object_complete(struct media_request_object *obj); + +#else + +static inline int __must_check +media_request_lock_for_access(struct media_request *req) +{ + return -EINVAL; +} + +static inline void media_request_unlock_for_access(struct media_request *req) +{ +} + +static inline int __must_check +media_request_lock_for_update(struct media_request *req) +{ + return -EINVAL; +} + +static inline void media_request_unlock_for_update(struct media_request *req) +{ +} + +static inline void media_request_object_get(struct media_request_object *obj) +{ +} + +static inline void media_request_object_put(struct media_request_object *obj) +{ +} + +static inline struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv) +{ + return NULL; +} + +static inline void media_request_object_init(struct media_request_object *obj) +{ + obj->ops = NULL; + obj->req = NULL; +} + +static inline int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj) +{ + return 0; +} + +static inline void media_request_object_unbind(struct media_request_object *obj) +{ +} + +static inline void media_request_object_complete(struct media_request_object *obj) +{ +} + +#endif + +#endif diff --git a/6.17.0/include-overrides/media/mipi-csi2.h b/6.17.0/include-overrides/media/mipi-csi2.h new file mode 100644 index 0000000..40fc026 --- /dev/null +++ b/6.17.0/include-overrides/media/mipi-csi2.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MIPI CSI-2 Data Types + * + * Copyright (C) 2022 Laurent Pinchart + */ + +#ifndef _MEDIA_MIPI_CSI2_H +#define _MEDIA_MIPI_CSI2_H + +/* Short packet data types */ +#define MIPI_CSI2_DT_FS 0x00 +#define MIPI_CSI2_DT_FE 0x01 +#define MIPI_CSI2_DT_LS 0x02 +#define MIPI_CSI2_DT_LE 0x03 +#define MIPI_CSI2_DT_GENERIC_SHORT(n) (0x08 + (n)) /* 0..7 
*/ + +/* Long packet data types */ +#define MIPI_CSI2_DT_NULL 0x10 +#define MIPI_CSI2_DT_BLANKING 0x11 +#define MIPI_CSI2_DT_EMBEDDED_8B 0x12 +#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */ +#define MIPI_CSI2_DT_YUV420_8B 0x18 +#define MIPI_CSI2_DT_YUV420_10B 0x19 +#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a +#define MIPI_CSI2_DT_YUV420_8B_CS 0x1c +#define MIPI_CSI2_DT_YUV420_10B_CS 0x1d +#define MIPI_CSI2_DT_YUV422_8B 0x1e +#define MIPI_CSI2_DT_YUV422_10B 0x1f +#define MIPI_CSI2_DT_RGB444 0x20 +#define MIPI_CSI2_DT_RGB555 0x21 +#define MIPI_CSI2_DT_RGB565 0x22 +#define MIPI_CSI2_DT_RGB666 0x23 +#define MIPI_CSI2_DT_RGB888 0x24 +#define MIPI_CSI2_DT_RAW28 0x26 +#define MIPI_CSI2_DT_RAW24 0x27 +#define MIPI_CSI2_DT_RAW6 0x28 +#define MIPI_CSI2_DT_RAW7 0x29 +#define MIPI_CSI2_DT_RAW8 0x2a +#define MIPI_CSI2_DT_RAW10 0x2b +#define MIPI_CSI2_DT_RAW12 0x2c +#define MIPI_CSI2_DT_RAW14 0x2d +#define MIPI_CSI2_DT_RAW16 0x2e +#define MIPI_CSI2_DT_RAW20 0x2f +#define MIPI_CSI2_DT_USER_DEFINED(n) (0x30 + (n)) /* 0..7 */ + +#endif /* _MEDIA_MIPI_CSI2_H */ diff --git a/6.17.0/include-overrides/media/rc-core.h b/6.17.0/include-overrides/media/rc-core.h new file mode 100644 index 0000000..35c7a05 --- /dev/null +++ b/6.17.0/include-overrides/media/rc-core.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Remote Controller core header + * + * Copyright (C) 2009-2010 by Mauro Carvalho Chehab + */ + +#ifndef _RC_CORE +#define _RC_CORE + +#include +#include +#include +#include +#include +#include + +/** + * enum rc_driver_type - type of the RC driver. + * + * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode. + * @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences. + * It needs a Infra-Red pulse/space decoder + * @RC_DRIVER_IR_RAW_TX: Device transmitter only, + * driver requires pulse/space data sequence. 
+ */ +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW, + RC_DRIVER_IR_RAW_TX, +}; + +/** + * struct rc_scancode_filter - Filter scan codes. + * @data: Scancode data to match. + * @mask: Mask of bits of scancode to compare. + */ +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +/** + * enum rc_filter_type - Filter type constants. + * @RC_FILTER_NORMAL: Filter for normal operation. + * @RC_FILTER_WAKEUP: Filter for waking from suspend. + * @RC_FILTER_MAX: Number of filter types. + */ +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP, + + RC_FILTER_MAX +}; + +/** + * struct lirc_fh - represents an open lirc file + * @list: list of open file handles + * @rc: rcdev for this lirc chardev + * @rawir: queue for incoming raw IR + * @scancodes: queue for incoming decoded scancodes + * @wait_poll: poll struct for lirc device + * @carrier_low: when setting the carrier range, first the low end must be + * set with an ioctl and then the high end with another ioctl + * @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or + * LIRC_MODE_PULSE + * @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or + * LIRC_MODE_MODE2 + */ +struct lirc_fh { + struct list_head list; + struct rc_dev *rc; + DECLARE_KFIFO_PTR(rawir, unsigned int); + DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode); + wait_queue_head_t wait_poll; + u32 carrier_low; + u8 send_mode; + u8 rec_mode; +}; + +/** + * struct rc_dev - represents a remote control device + * @dev: driver model's view of this device + * @managed_alloc: devm_rc_allocate_device was used to create rc_dev + * @registered: set to true by rc_register_device(), false by + * rc_unregister_device + * @idle: used to keep track of RX state + * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed + * wakeup protocols is the set of all raw encoders + * @minor: unique minor remote control device number + * @sysfs_groups: sysfs attribute groups + * @device_name: name of 
the rc child device + * @input_phys: physical path to the input child device + * @input_id: id of the input child device (struct input_id) + * @driver_name: name of the hardware driver which registered this device + * @map_name: name of the default keymap + * @rc_map: current scan/key table + * @lock: used to ensure we've filled in all protocol details before + * anyone can call show_protocols or store_protocols + * @raw: additional data for raw pulse/space devices + * @input_dev: the input child device used to communicate events to userspace + * @driver_type: specifies if protocol decoding is done in hardware or software + * @users: number of current users of the device + * @allowed_protocols: bitmask with the supported RC_PROTO_BIT_* protocols + * @enabled_protocols: bitmask with the enabled RC_PROTO_BIT_* protocols + * @allowed_wakeup_protocols: bitmask with the supported RC_PROTO_BIT_* wakeup + * protocols + * @wakeup_protocol: the enabled RC_PROTO_* wakeup protocol or + * RC_PROTO_UNKNOWN if disabled. + * @scancode_filter: scancode filter + * @scancode_wakeup_filter: scancode wakeup filters + * @scancode_mask: some hardware decoders are not capable of providing the full + * scancode to the application. As this is a hardware limit, we can't do + * anything with it. Yet, as the same keycode table can be used with other + * devices, a mask is provided to allow its usage. Drivers should generally + * leave this field in blank + * @priv: driver-specific data + * @keylock: protects the remaining members of the struct + * @keypressed: whether a key is currently pressed + * @last_toggle: toggle value of last command + * @last_keycode: keycode of last keypress + * @last_protocol: protocol of last keypress + * @last_scancode: scancode of last keypress + * @keyup_jiffies: time (in jiffies) when the current keypress should be released + * @timer_keyup: timer for releasing a keypress + * @timer_repeat: timer for autorepeat events. 
This is needed for CEC, which + * has non-standard repeats. + * @timeout: optional time after which device stops sending data + * @min_timeout: minimum timeout supported by device + * @max_timeout: maximum timeout supported by device + * @rx_resolution : resolution (in us) of input sampler + * @lirc_dev: lirc device + * @lirc_cdev: lirc char cdev + * @gap_start: start time for gap after timeout if non-zero + * @lirc_fh_lock: protects lirc_fh list + * @lirc_fh: list of open files + * @change_protocol: allow changing the protocol used on hardware decoders + * @open: callback to allow drivers to enable polling/irq when IR input device + * is opened. + * @close: callback to allow drivers to disable polling/irq when IR input device + * is opened. + * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs) + * @s_tx_carrier: set transmit carrier frequency + * @s_tx_duty_cycle: set transmit duty cycle (0% - 100%) + * @s_rx_carrier_range: inform driver about carrier it is expected to handle + * @tx_ir: transmit IR + * @s_idle: enable/disable hardware idle mode, upon which, + * device doesn't interrupt host until it sees IR pulses + * @s_wideband_receiver: enable wide band receiver used for learning + * @s_carrier_report: enable carrier reports + * @s_filter: set the scancode filter + * @s_wakeup_filter: set the wakeup scancode filter. If the mask is zero + * then wakeup should be disabled. wakeup_protocol will be set to + * a valid protocol if mask is nonzero. 
+ * @s_timeout: set hardware timeout in us + */ +struct rc_dev { + struct device dev; + bool managed_alloc; + bool registered; + bool idle; + bool encode_wakeup; + unsigned int minor; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + u32 users; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + void *priv; + spinlock_t keylock; + bool keypressed; + u8 last_toggle; + u32 last_keycode; + enum rc_proto last_protocol; + u64 last_scancode; + unsigned long keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; +#ifdef CONFIG_LIRC + struct device lirc_dev; + struct cdev lirc_cdev; + ktime_t gap_start; + spinlock_t lirc_fh_lock; + struct list_head lirc_fh; +#endif + int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); + int (*open)(struct rc_dev *dev); + void (*close)(struct rc_dev *dev); + int (*s_tx_mask)(struct rc_dev *dev, u32 mask); + int (*s_tx_carrier)(struct rc_dev *dev, u32 carrier); + int (*s_tx_duty_cycle)(struct rc_dev *dev, u32 duty_cycle); + int (*s_rx_carrier_range)(struct rc_dev *dev, u32 min, u32 max); + int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n); + void (*s_idle)(struct rc_dev *dev, bool enable); + int (*s_wideband_receiver)(struct rc_dev *dev, int enable); + int (*s_carrier_report) (struct rc_dev *dev, int enable); + int (*s_filter)(struct rc_dev *dev, + struct rc_scancode_filter *filter); + int (*s_wakeup_filter)(struct rc_dev *dev, + struct rc_scancode_filter *filter); + int 
(*s_timeout)(struct rc_dev *dev, + unsigned int timeout); +}; + +#define to_rc_dev(d) container_of(d, struct rc_dev, dev) + +/* + * From rc-main.c + * Those functions can be used on any type of Remote Controller. They + * basically creates an input_dev and properly reports the device as a + * Remote Controller, at sys/class/rc. + */ + +/** + * rc_allocate_device - Allocates a RC device + * + * @rc_driver_type: specifies the type of the RC output to be allocated + * returns a pointer to struct rc_dev. + */ +struct rc_dev *rc_allocate_device(enum rc_driver_type); + +/** + * devm_rc_allocate_device - Managed RC device allocation + * + * @dev: pointer to struct device + * @rc_driver_type: specifies the type of the RC output to be allocated + * returns a pointer to struct rc_dev. + */ +struct rc_dev *devm_rc_allocate_device(struct device *dev, enum rc_driver_type); + +/** + * rc_free_device - Frees a RC device + * + * @dev: pointer to struct rc_dev. + */ +void rc_free_device(struct rc_dev *dev); + +/** + * rc_register_device - Registers a RC device + * + * @dev: pointer to struct rc_dev. + */ +int rc_register_device(struct rc_dev *dev); + +/** + * devm_rc_register_device - Manageded registering of a RC device + * + * @parent: pointer to struct device. + * @dev: pointer to struct rc_dev. + */ +int devm_rc_register_device(struct device *parent, struct rc_dev *dev); + +/** + * rc_unregister_device - Unregisters a RC device + * + * @dev: pointer to struct rc_dev. + */ +void rc_unregister_device(struct rc_dev *dev); + +void rc_repeat(struct rc_dev *dev); +void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, + u8 toggle); +void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, + u64 scancode, u8 toggle); +void rc_keyup(struct rc_dev *dev); +u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode); + +/* + * From rc-raw.c + * The Raw interface is specific to InfraRed. It may be a good idea to + * split it later into a separate header. 
+ */ +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + + unsigned pulse:1; + unsigned overflow:1; + unsigned timeout:1; + unsigned carrier_report:1; +}; + +#define US_TO_NS(usec) ((usec) * 1000) +#define MS_TO_US(msec) ((msec) * 1000) +#define IR_MAX_DURATION MS_TO_US(500) +#define IR_DEFAULT_TIMEOUT MS_TO_US(125) +#define IR_MAX_TIMEOUT LIRC_VALUE_MASK + +void ir_raw_event_handle(struct rc_dev *dev); +int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); +int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse); +int ir_raw_event_store_with_filter(struct rc_dev *dev, + struct ir_raw_event *ev); +int ir_raw_event_store_with_timeout(struct rc_dev *dev, + struct ir_raw_event *ev); +void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); +int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, + struct ir_raw_event *events, unsigned int max); +int ir_raw_encode_carrier(enum rc_proto protocol); + +static inline void ir_raw_event_overflow(struct rc_dev *dev) +{ + ir_raw_event_store(dev, &((struct ir_raw_event) { .overflow = true })); + dev->idle = true; + ir_raw_event_handle(dev); +} + +/* extract mask bits out of data and pack them into the result */ +static inline u32 ir_extract_bits(u32 data, u32 mask) +{ + u32 vbit = 1, value = 0; + + do { + if (mask & 1) { + if (data & 1) + value |= vbit; + vbit <<= 1; + } + data >>= 1; + } while (mask >>= 1); + + return value; +} + +/* Get NEC scancode and protocol type from address and command bytes */ +static inline u32 ir_nec_bytes_to_scancode(u8 address, u8 not_address, + u8 command, u8 not_command, + enum rc_proto *protocol) +{ + u32 scancode; + + if ((command ^ not_command) != 0xff) { + /* NEC transport, but modified protocol, used by at + * least Apple and TiVo remotes + */ + scancode = not_address << 24 | + address << 16 | + not_command << 8 | + command; + *protocol = RC_PROTO_NEC32; + } else if ((address ^ not_address) != 0xff) { + /* Extended NEC */ + 
scancode = address << 16 | + not_address << 8 | + command; + *protocol = RC_PROTO_NECX; + } else { + /* Normal NEC */ + scancode = address << 8 | command; + *protocol = RC_PROTO_NEC; + } + + return scancode; +} + +#endif /* _RC_CORE */ diff --git a/6.17.0/include-overrides/media/rc-map.h b/6.17.0/include-overrides/media/rc-map.h new file mode 100644 index 0000000..d90e461 --- /dev/null +++ b/6.17.0/include-overrides/media/rc-map.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * rc-map.h - define RC map names used by RC drivers + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + */ + +#ifndef _MEDIA_RC_MAP_H +#define _MEDIA_RC_MAP_H + +#include +#include + +#define RC_PROTO_BIT_NONE 0ULL +#define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN) +#define RC_PROTO_BIT_OTHER BIT_ULL(RC_PROTO_OTHER) +#define RC_PROTO_BIT_RC5 BIT_ULL(RC_PROTO_RC5) +#define RC_PROTO_BIT_RC5X_20 BIT_ULL(RC_PROTO_RC5X_20) +#define RC_PROTO_BIT_RC5_SZ BIT_ULL(RC_PROTO_RC5_SZ) +#define RC_PROTO_BIT_JVC BIT_ULL(RC_PROTO_JVC) +#define RC_PROTO_BIT_SONY12 BIT_ULL(RC_PROTO_SONY12) +#define RC_PROTO_BIT_SONY15 BIT_ULL(RC_PROTO_SONY15) +#define RC_PROTO_BIT_SONY20 BIT_ULL(RC_PROTO_SONY20) +#define RC_PROTO_BIT_NEC BIT_ULL(RC_PROTO_NEC) +#define RC_PROTO_BIT_NECX BIT_ULL(RC_PROTO_NECX) +#define RC_PROTO_BIT_NEC32 BIT_ULL(RC_PROTO_NEC32) +#define RC_PROTO_BIT_SANYO BIT_ULL(RC_PROTO_SANYO) +#define RC_PROTO_BIT_MCIR2_KBD BIT_ULL(RC_PROTO_MCIR2_KBD) +#define RC_PROTO_BIT_MCIR2_MSE BIT_ULL(RC_PROTO_MCIR2_MSE) +#define RC_PROTO_BIT_RC6_0 BIT_ULL(RC_PROTO_RC6_0) +#define RC_PROTO_BIT_RC6_6A_20 BIT_ULL(RC_PROTO_RC6_6A_20) +#define RC_PROTO_BIT_RC6_6A_24 BIT_ULL(RC_PROTO_RC6_6A_24) +#define RC_PROTO_BIT_RC6_6A_32 BIT_ULL(RC_PROTO_RC6_6A_32) +#define RC_PROTO_BIT_RC6_MCE BIT_ULL(RC_PROTO_RC6_MCE) +#define RC_PROTO_BIT_SHARP BIT_ULL(RC_PROTO_SHARP) +#define RC_PROTO_BIT_XMP BIT_ULL(RC_PROTO_XMP) +#define RC_PROTO_BIT_CEC BIT_ULL(RC_PROTO_CEC) +#define RC_PROTO_BIT_IMON 
BIT_ULL(RC_PROTO_IMON) +#define RC_PROTO_BIT_RCMM12 BIT_ULL(RC_PROTO_RCMM12) +#define RC_PROTO_BIT_RCMM24 BIT_ULL(RC_PROTO_RCMM24) +#define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32) +#define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD) + +#if IS_ENABLED(CONFIG_IR_RC5_DECODER) +#define __RC_PROTO_RC5_CODEC \ + (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | RC_PROTO_BIT_RC5_SZ) +#else +#define __RC_PROTO_RC5_CODEC 0 +#endif + +#if IS_ENABLED(CONFIG_IR_JVC_DECODER) +#define __RC_PROTO_JVC_CODEC RC_PROTO_BIT_JVC +#else +#define __RC_PROTO_JVC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SONY_DECODER) +#define __RC_PROTO_SONY_CODEC \ + (RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20) +#else +#define __RC_PROTO_SONY_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_NEC_DECODER) +#define __RC_PROTO_NEC_CODEC \ + (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32) +#else +#define __RC_PROTO_NEC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SANYO_DECODER) +#define __RC_PROTO_SANYO_CODEC RC_PROTO_BIT_SANYO +#else +#define __RC_PROTO_SANYO_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_MCE_KBD_DECODER) +#define __RC_PROTO_MCE_KBD_CODEC \ + (RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE) +#else +#define __RC_PROTO_MCE_KBD_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RC6_DECODER) +#define __RC_PROTO_RC6_CODEC \ + (RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ + RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \ + RC_PROTO_BIT_RC6_MCE) +#else +#define __RC_PROTO_RC6_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SHARP_DECODER) +#define __RC_PROTO_SHARP_CODEC RC_PROTO_BIT_SHARP +#else +#define __RC_PROTO_SHARP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_XMP_DECODER) +#define __RC_PROTO_XMP_CODEC RC_PROTO_BIT_XMP +#else +#define __RC_PROTO_XMP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_IMON_DECODER) +#define __RC_PROTO_IMON_CODEC RC_PROTO_BIT_IMON +#else +#define __RC_PROTO_IMON_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RCMM_DECODER) +#define __RC_PROTO_RCMM_CODEC \ + 
(RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32) +#else +#define __RC_PROTO_RCMM_CODEC 0 +#endif + +/* All kernel-based codecs have encoders and decoders */ +#define RC_PROTO_BIT_ALL_IR_DECODER \ + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) + +#define RC_PROTO_BIT_ALL_IR_ENCODER \ + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) + +#define RC_SCANCODE_UNKNOWN(x) (x) +#define RC_SCANCODE_OTHER(x) (x) +#define RC_SCANCODE_NEC(addr, cmd) (((addr) << 8) | (cmd)) +#define RC_SCANCODE_NECX(addr, cmd) (((addr) << 8) | (cmd)) +#define RC_SCANCODE_NEC32(data) ((data) & 0xffffffff) +#define RC_SCANCODE_RC5(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC5_SZ(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC6_0(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC6_6A(vendor, sys, cmd) (((vendor) << 16) | ((sys) << 8) | (cmd)) + +/** + * struct rc_map_table - represents a scancode/keycode pair + * + * @scancode: scan code (u64) + * @keycode: Linux input keycode + */ +struct rc_map_table { + u64 scancode; + u32 keycode; +}; + +/** + * struct rc_map - represents a keycode map table + * + * @scan: pointer to struct &rc_map_table + * @size: Max number of entries + * @len: Number of entries that are in use + * @alloc: size of \*scan, in bytes + * @rc_proto: type of the remote controller protocol, as defined at + * enum &rc_proto + * @name: name of the key map table + * @lock: lock to protect access to this structure + */ +struct rc_map { + struct rc_map_table *scan; + unsigned int 
size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +/** + * struct rc_map_list - list of the registered &rc_map maps + * + * @list: pointer to struct &list_head + * @map: pointer to struct &rc_map + */ +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +#ifdef CONFIG_MEDIA_CEC_RC +/* + * rc_map_list from rc-cec.c + */ +extern struct rc_map_list cec_map; +#endif + +/* Routines from rc-map.c */ + +/** + * rc_map_register() - Registers a Remote Controller scancode map + * + * @map: pointer to struct rc_map_list + */ +int rc_map_register(struct rc_map_list *map); + +/** + * rc_map_unregister() - Unregisters a Remote Controller scancode map + * + * @map: pointer to struct rc_map_list + */ +void rc_map_unregister(struct rc_map_list *map); + +/** + * rc_map_get - gets an RC map from its name + * @name: name of the RC scancode map + */ +struct rc_map *rc_map_get(const char *name); + +/* Names of the several keytables defined in-kernel */ + +#define RC_MAP_ADSTECH_DVB_T_PCI "rc-adstech-dvb-t-pci" +#define RC_MAP_ALINK_DTU_M "rc-alink-dtu-m" +#define RC_MAP_ANYSEE "rc-anysee" +#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp" +#define RC_MAP_ASTROMETA_T2HYBRID "rc-astrometa-t2hybrid" +#define RC_MAP_ASUS_PC39 "rc-asus-pc39" +#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100" +#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600" +#define RC_MAP_ATI_X10 "rc-ati-x10" +#define RC_MAP_AVERMEDIA "rc-avermedia" +#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d" +#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus" +#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt" +#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a" +#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6" +#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks" +#define RC_MAP_AVERTV_303 "rc-avertv-303" +#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700" +#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1" +#define 
RC_MAP_BEELINK_MXIII "rc-beelink-mxiii" +#define RC_MAP_BEHOLD "rc-behold" +#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus" +#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old" +#define RC_MAP_CEC "rc-cec" +#define RC_MAP_CINERGY "rc-cinergy" +#define RC_MAP_CINERGY_1400 "rc-cinergy-1400" +#define RC_MAP_CT_90405 "rc-ct-90405" +#define RC_MAP_D680_DMB "rc-d680-dmb" +#define RC_MAP_DELOCK_61959 "rc-delock-61959" +#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec" +#define RC_MAP_DIB0700_RC5_TABLE "rc-dib0700-rc5" +#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin" +#define RC_MAP_DIGITTRADE "rc-digittrade" +#define RC_MAP_DM1105_NEC "rc-dm1105-nec" +#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" +#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" +#define RC_MAP_DREAMBOX "rc-dreambox" +#define RC_MAP_DTT200U "rc-dtt200u" +#define RC_MAP_DVBSKY "rc-dvbsky" +#define RC_MAP_DVICO_MCE "rc-dvico-mce" +#define RC_MAP_DVICO_PORTABLE "rc-dvico-portable" +#define RC_MAP_EMPTY "rc-empty" +#define RC_MAP_EM_TERRATEC "rc-em-terratec" +#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv" +#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2" +#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53" +#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube" +#define RC_MAP_EZTV "rc-eztv" +#define RC_MAP_FLYDVB "rc-flydvb" +#define RC_MAP_FLYVIDEO "rc-flyvideo" +#define RC_MAP_FUSIONHDTV_MCE "rc-fusionhdtv-mce" +#define RC_MAP_GADMEI_RM008Z "rc-gadmei-rm008z" +#define RC_MAP_GEEKBOX "rc-geekbox" +#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce" +#define RC_MAP_GOTVIEW7135 "rc-gotview7135" +#define RC_MAP_HAUPPAUGE "rc-hauppauge" +#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge" +#define RC_MAP_HISI_POPLAR "rc-hisi-poplar" +#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo" +#define RC_MAP_IMON_MCE "rc-imon-mce" +#define RC_MAP_IMON_PAD "rc-imon-pad" +#define RC_MAP_IMON_RSC "rc-imon-rsc" +#define RC_MAP_IODATA_BCTV7E "rc-iodata-bctv7e" +#define RC_MAP_IT913X_V1 "rc-it913x-v1" +#define 
RC_MAP_IT913X_V2 "rc-it913x-v2" +#define RC_MAP_KAIOMY "rc-kaiomy" +#define RC_MAP_KHADAS "rc-khadas" +#define RC_MAP_KHAMSIN "rc-khamsin" +#define RC_MAP_KWORLD_315U "rc-kworld-315u" +#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u" +#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog" +#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051" +#define RC_MAP_LME2510 "rc-lme2510" +#define RC_MAP_MANLI "rc-manli" +#define RC_MAP_MECOOL_KII_PRO "rc-mecool-kii-pro" +#define RC_MAP_MECOOL_KIII_PRO "rc-mecool-kiii-pro" +#define RC_MAP_MEDION_X10 "rc-medion-x10" +#define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer" +#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x" +#define RC_MAP_MINIX_NEO "rc-minix-neo" +#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii" +#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii" +#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere" +#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus" +#define RC_MAP_MYGICA_UTV3 "rc-mygica-utv3" +#define RC_MAP_NEBULA "rc-nebula" +#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs" +#define RC_MAP_NORWOOD "rc-norwood" +#define RC_MAP_NPGTECH "rc-npgtech" +#define RC_MAP_ODROID "rc-odroid" +#define RC_MAP_PCTV_SEDNA "rc-pctv-sedna" +#define RC_MAP_PINE64 "rc-pine64" +#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color" +#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey" +#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd" +#define RC_MAP_PIXELVIEW "rc-pixelview" +#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t" +#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12" +#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new" +#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel" +#define RC_MAP_PROTEUS_2309 "rc-proteus-2309" +#define RC_MAP_PURPLETV "rc-purpletv" +#define RC_MAP_PV951 "rc-pv951" +#define RC_MAP_RC5_TV "rc-rc5-tv" +#define RC_MAP_RC6_MCE "rc-rc6-mce" +#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys" +#define RC_MAP_REDDO "rc-reddo" +#define 
RC_MAP_SIEMENS_GIGASET_RC20 "rc-siemens-gigaset-rc20" +#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" +#define RC_MAP_STREAMZAP "rc-streamzap" +#define RC_MAP_SU3000 "rc-su3000" +#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini" +#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max" +#define RC_MAP_TBS_NEC "rc-tbs-nec" +#define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35" +#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2" +#define RC_MAP_TERRATEC_CINERGY_C_PCI "rc-terratec-cinergy-c-pci" +#define RC_MAP_TERRATEC_CINERGY_S2_HD "rc-terratec-cinergy-s2-hd" +#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs" +#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim" +#define RC_MAP_TERRATEC_SLIM_2 "rc-terratec-slim-2" +#define RC_MAP_TEVII_NEC "rc-tevii-nec" +#define RC_MAP_TIVO "rc-tivo" +#define RC_MAP_TOTAL_MEDIA_IN_HAND "rc-total-media-in-hand" +#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02" +#define RC_MAP_TREKSTOR "rc-trekstor" +#define RC_MAP_TT_1500 "rc-tt-1500" +#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci" +#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027" +#define RC_MAP_VEGA_S9X "rc-vega-s9x" +#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" +#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" +#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" +#define RC_MAP_KII_PRO "rc-videostrong-kii-pro" +#define RC_MAP_WETEK_HUB "rc-wetek-hub" +#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2" +#define RC_MAP_WINFAST "rc-winfast" +#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" +#define RC_MAP_X96MAX "rc-x96max" +#define RC_MAP_XBOX_360 "rc-xbox-360" +#define RC_MAP_XBOX_DVD "rc-xbox-dvd" +#define RC_MAP_ZX_IRDEC "rc-zx-irdec" + +/* + * Please, do not just append newer Remote Controller names at the end. 
+ * The names should be ordered in alphabetical order + */ + +#endif /* _MEDIA_RC_MAP_H */ diff --git a/6.17.0/include-overrides/media/rcar-fcp.h b/6.17.0/include-overrides/media/rcar-fcp.h new file mode 100644 index 0000000..6ac9be9 --- /dev/null +++ b/6.17.0/include-overrides/media/rcar-fcp.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rcar-fcp.h -- R-Car Frame Compression Processor Driver + * + * Copyright (C) 2016 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ +#ifndef __MEDIA_RCAR_FCP_H__ +#define __MEDIA_RCAR_FCP_H__ + +struct device_node; +struct rcar_fcp_device; + +#if IS_ENABLED(CONFIG_VIDEO_RENESAS_FCP) +struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np); +void rcar_fcp_put(struct rcar_fcp_device *fcp); +struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp); +int rcar_fcp_enable(struct rcar_fcp_device *fcp); +void rcar_fcp_disable(struct rcar_fcp_device *fcp); +int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp); +#else +static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np) +{ + return ERR_PTR(-ENOENT); +} +static inline void rcar_fcp_put(struct rcar_fcp_device *fcp) { } +static inline struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp) +{ + return NULL; +} +static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp) +{ + return 0; +} +static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { } +static inline int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp) +{ + return 0; +} +#endif + +#endif /* __MEDIA_RCAR_FCP_H__ */ diff --git a/6.17.0/include-overrides/media/tuner-types.h b/6.17.0/include-overrides/media/tuner-types.h new file mode 100644 index 0000000..c79b773 --- /dev/null +++ b/6.17.0/include-overrides/media/tuner-types.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * descriptions for simple tuners. 
+ */ + +#ifndef __TUNER_TYPES_H__ +#define __TUNER_TYPES_H__ + +/** + * enum param_type - type of the tuner pameters + * + * @TUNER_PARAM_TYPE_RADIO: Tuner params are for FM and/or AM radio + * @TUNER_PARAM_TYPE_PAL: Tuner params are for PAL color TV standard + * @TUNER_PARAM_TYPE_SECAM: Tuner params are for SECAM color TV standard + * @TUNER_PARAM_TYPE_NTSC: Tuner params are for NTSC color TV standard + * @TUNER_PARAM_TYPE_DIGITAL: Tuner params are for digital TV + */ +enum param_type { + TUNER_PARAM_TYPE_RADIO, + TUNER_PARAM_TYPE_PAL, + TUNER_PARAM_TYPE_SECAM, + TUNER_PARAM_TYPE_NTSC, + TUNER_PARAM_TYPE_DIGITAL, +}; + +/** + * struct tuner_range - define the frequencies supported by the tuner + * + * @limit: Max frequency supported by that range, in 62.5 kHz + * (TV) or 62.5 Hz (Radio), as defined by + * V4L2_TUNER_CAP_LOW. + * @config: Value of the band switch byte (BB) to setup this mode. + * @cb: Value of the CB byte to setup this mode. + * + * Please notice that digital tuners like xc3028/xc4000/xc5000 don't use + * those ranges, as they're defined inside the driver. This is used by + * analog tuners that are compatible with the "Philips way" to setup the + * tuners. On those devices, the tuner set is done via 4 bytes: + * + * #) divider byte1 (DB1) + * #) divider byte 2 (DB2) + * #) Control byte (CB) + * #) band switch byte (BB) + * + * Some tuners also have an additional optional Auxiliary byte (AB). + */ +struct tuner_range { + unsigned short limit; + unsigned char config; + unsigned char cb; +}; + +/** + * struct tuner_params - Parameters to be used to setup the tuner. Those + * are used by drivers/media/tuners/tuner-types.c in + * order to specify the tuner properties. Most of + * the parameters are for tuners based on tda9887 IF-PLL + * multi-standard analog TV/Radio demodulator, with is + * very common on legacy analog tuners. + * + * @type: Type of the tuner parameters, as defined at + * enum param_type. 
If the tuner supports multiple + * standards, an array should be used, with one + * row per different standard. + * @cb_first_if_lower_freq: Many Philips-based tuners have a comment in + * their datasheet like + * "For channel selection involving band + * switching, and to ensure smooth tuning to the + * desired channel without causing unnecessary + * charge pump action, it is recommended to + * consider the difference between wanted channel + * frequency and the current channel frequency. + * Unnecessary charge pump action will result + * in very low tuning voltage which may drive the + * oscillator to extreme conditions". + * Set cb_first_if_lower_freq to 1, if this check + * is required for this tuner. I tested this for + * PAL by first setting the TV frequency to + * 203 MHz and then switching to 96.6 MHz FM + * radio. The result was static unless the + * control byte was sent first. + * @has_tda9887: Set to 1 if this tuner uses a tda9887 + * @port1_fm_high_sensitivity: Many Philips tuners use tda9887 PORT1 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT1 to 1 to get proper FM reception. + * @port2_fm_high_sensitivity: Some Philips tuners use tda9887 PORT2 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT2 to 1 to get proper FM reception. + * @fm_gain_normal: Some Philips tuners use tda9887 cGainNormal to + * select the FM radio sensitivity. If this + * setting is 1, e register will use cGainNormal + * instead of cGainLow. + * @intercarrier_mode: Most tuners with a tda9887 use QSS mode. + * Some (cheaper) tuners use Intercarrier mode. + * If this setting is 1, then the tuner needs to + * be set to intercarrier mode. + * @port1_active: This setting sets the default value for PORT1. + * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B6 bit. + * @port2_active: This setting sets the default value for PORT2. 
+ * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B7 bit. + * @port1_invert_for_secam_lc: Sometimes PORT1 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port2_invert_for_secam_lc: Sometimes PORT2 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port1_set_for_fm_mono: Some cards require PORT1 to be 1 for mono Radio + * FM and 0 for stereo. + * @default_pll_gating_18: Select 18% (or according to datasheet 0%) + * L standard PLL gating, vs the driver default + * of 36%. + * @radio_if: IF to use in radio mode. Tuners with a + * separate radio IF filter seem to use 10.7, + * while those without use 33.3 for PAL/SECAM + * tuners and 41.3 for NTSC tuners. + * 0 = 10.7, 1 = 33.3, 2 = 41.3 + * @default_top_low: Default tda9887 TOP value in dB for the low + * band. Default is 0. Range: -16:+15 + * @default_top_mid: Default tda9887 TOP value in dB for the mid + * band. Default is 0. Range: -16:+15 + * @default_top_high: Default tda9887 TOP value in dB for the high + * band. Default is 0. Range: -16:+15 + * @default_top_secam_low: Default tda9887 TOP value in dB for SECAM-L/L' + * for the low band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_mid: Default tda9887 TOP value in dB for SECAM-L/L' + * for the mid band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_high: Default tda9887 TOP value in dB for SECAM-L/L' + * for the high band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @iffreq: Intermediate frequency (IF) used by the tuner + * on digital mode. + * @count: Size of the ranges array. 
+ * @ranges: Array with the frequency ranges supported by + * the tuner. + */ +struct tuner_params { + enum param_type type; + + unsigned int cb_first_if_lower_freq:1; + unsigned int has_tda9887:1; + unsigned int port1_fm_high_sensitivity:1; + unsigned int port2_fm_high_sensitivity:1; + unsigned int fm_gain_normal:1; + unsigned int intercarrier_mode:1; + unsigned int port1_active:1; + unsigned int port2_active:1; + unsigned int port1_invert_for_secam_lc:1; + unsigned int port2_invert_for_secam_lc:1; + unsigned int port1_set_for_fm_mono:1; + unsigned int default_pll_gating_18:1; + unsigned int radio_if:2; + signed int default_top_low:5; + signed int default_top_mid:5; + signed int default_top_high:5; + signed int default_top_secam_low:5; + signed int default_top_secam_mid:5; + signed int default_top_secam_high:5; + + u16 iffreq; + + unsigned int count; + const struct tuner_range *ranges; +}; + +/** + * struct tunertype - describes the known tuners. + * + * @name: string with the tuner's name. + * @count: size of &struct tuner_params array. + * @params: pointer to &struct tuner_params array. + * + * @min: minimal tuner frequency, in 62.5 kHz step. + * should be multiplied to 16 to convert to MHz. + * @max: minimal tuner frequency, in 62.5 kHz step. + * Should be multiplied to 16 to convert to MHz. + * @stepsize: frequency step, in Hz. + * @initdata: optional byte sequence to initialize the tuner. + * @sleepdata: optional byte sequence to power down the tuner. 
+ */ +struct tunertype { + char *name; + unsigned int count; + const struct tuner_params *params; + + u16 min; + u16 max; + u32 stepsize; + + u8 *initdata; + u8 *sleepdata; +}; + +extern const struct tunertype tuners[]; +extern unsigned const int tuner_count; + +#endif diff --git a/6.17.0/include-overrides/media/tuner.h b/6.17.0/include-overrides/media/tuner.h new file mode 100644 index 0000000..c5fd6fa --- /dev/null +++ b/6.17.0/include-overrides/media/tuner.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * tuner.h - definition for different tuners + * + * Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) + * minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de) + */ + +#ifndef _TUNER_H +#define _TUNER_H +#ifdef __KERNEL__ + +#include +#include + +#define ADDR_UNSET (255) + +#define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ +#define TUNER_PHILIPS_PAL_I 1 +#define TUNER_PHILIPS_NTSC 2 +#define TUNER_PHILIPS_SECAM 3 /* you must actively select B/G, L, L` */ + +#define TUNER_ABSENT 4 +#define TUNER_PHILIPS_PAL 5 +#define TUNER_TEMIC_NTSC 6 /* 4032 FY5 (3X 7004, 9498, 9789) */ +#define TUNER_TEMIC_PAL_I 7 /* 4062 FY5 (3X 8501, 9957) */ + +#define TUNER_TEMIC_4036FY5_NTSC 8 /* 4036 FY5 (3X 1223, 1981, 7686) */ +#define TUNER_ALPS_TSBH1_NTSC 9 +#define TUNER_ALPS_TSBE1_PAL 10 +#define TUNER_ALPS_TSBB5_PAL_I 11 + +#define TUNER_ALPS_TSBE5_PAL 12 +#define TUNER_ALPS_TSBC5_PAL 13 +#define TUNER_TEMIC_4006FH5_PAL 14 /* 4006 FH5 (3X 9500, 9501, 7291) */ +#define TUNER_ALPS_TSHC6_NTSC 15 + +#define TUNER_TEMIC_PAL_DK 16 /* 4016 FY5 (3X 1392, 1393) */ +#define TUNER_PHILIPS_NTSC_M 17 +#define TUNER_TEMIC_4066FY5_PAL_I 18 /* 4066 FY5 (3X 7032, 7035) */ +#define TUNER_TEMIC_4006FN5_MULTI_PAL 19 /* B/G, I and D/K autodetected (3X 7595, 7606, 7657) */ + +#define TUNER_TEMIC_4009FR5_PAL 20 /* incl. FM radio (3X 7607, 7488, 7711) */ +#define TUNER_TEMIC_4039FR5_NTSC 21 /* incl. 
FM radio (3X 7246, 7578, 7732) */ +#define TUNER_TEMIC_4046FM5 22 /* you must actively select B/G, D/K, I, L, L` ! (3X 7804, 7806, 8103, 8104) */ +#define TUNER_PHILIPS_PAL_DK 23 + +#define TUNER_PHILIPS_FQ1216ME 24 /* you must actively select B/G/D/K, I, L, L` */ +#define TUNER_LG_PAL_I_FM 25 +#define TUNER_LG_PAL_I 26 +#define TUNER_LG_NTSC_FM 27 + +#define TUNER_LG_PAL_FM 28 +#define TUNER_LG_PAL 29 +#define TUNER_TEMIC_4009FN5_MULTI_PAL_FM 30 /* B/G, I and D/K autodetected (3X 8155, 8160, 8163) */ +#define TUNER_SHARP_2U5JF5540_NTSC 31 + +#define TUNER_Samsung_PAL_TCPM9091PD27 32 +#define TUNER_MT2032 33 +#define TUNER_TEMIC_4106FH5 34 /* 4106 FH5 (3X 7808, 7865) */ +#define TUNER_TEMIC_4012FY5 35 /* 4012 FY5 (3X 0971, 1099) */ + +#define TUNER_TEMIC_4136FY5 36 /* 4136 FY5 (3X 7708, 7746) */ +#define TUNER_LG_PAL_NEW_TAPC 37 +#define TUNER_PHILIPS_FM1216ME_MK3 38 +#define TUNER_LG_NTSC_NEW_TAPC 39 + +#define TUNER_HITACHI_NTSC 40 +#define TUNER_PHILIPS_PAL_MK 41 +#define TUNER_PHILIPS_FCV1236D 42 +#define TUNER_PHILIPS_FM1236_MK3 43 + +#define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */ + /* + * Microtune merged with Temic 12/31/1999 partially financed by Alps. 
+ * these may be similar to Temic + */ +#define TUNER_MICROTUNE_4049FM5 45 +#define TUNER_PANASONIC_VP27 46 +#define TUNER_LG_NTSC_TAPE 47 + +#define TUNER_TNF_8831BGFF 48 +#define TUNER_MICROTUNE_4042FI5 49 /* DViCO FusionHDTV 3 Gold-Q - 4042 FI5 (3X 8147) */ +#define TUNER_TCL_2002N 50 +#define TUNER_PHILIPS_FM1256_IH3 51 + +#define TUNER_THOMSON_DTT7610 52 +#define TUNER_PHILIPS_FQ1286 53 +#define TUNER_PHILIPS_TDA8290 54 +#define TUNER_TCL_2002MB 55 /* Hauppauge PVR-150 PAL */ + +#define TUNER_PHILIPS_FQ1216AME_MK4 56 /* Hauppauge PVR-150 PAL */ +#define TUNER_PHILIPS_FQ1236A_MK4 57 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_YMEC_TVF_8531MF 58 +#define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */ + +#define TUNER_THOMSON_DTT761X 60 /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */ +#define TUNER_TENA_9533_DI 61 +#define TUNER_TEA5767 62 /* Only FM Radio Tuner */ +#define TUNER_PHILIPS_FMD1216ME_MK3 63 + +#define TUNER_LG_TDVS_H06XF 64 /* TDVS H061F, H062F, H064F */ +#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ +#define TUNER_LG_TALN 66 +#define TUNER_PHILIPS_TD1316 67 + +#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ +#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ +#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_XC2028 71 + +#define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ +#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ +#define TUNER_TDA9887 74 /* This tuner should be used only internally */ +#define TUNER_TEA5761 75 /* Only FM Radio Tuner */ +#define TUNER_XC5000 76 /* Xceive Silicon Tuner */ +#define TUNER_TCL_MF02GIP_5N 77 /* TCL MF02GIP_5N */ +#define TUNER_PHILIPS_FMD1216MEX_MK3 78 +#define TUNER_PHILIPS_FM1216MK5 79 +#define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */ + +#define TUNER_PARTSNIC_PTI_5NF05 81 +#define TUNER_PHILIPS_CU1216L 82 +#define TUNER_NXP_TDA18271 83 +#define TUNER_SONY_BTF_PXN01Z 84 +#define 
TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */ +#define TUNER_TENA_TNF_5337 86 + +#define TUNER_XC4000 87 /* Xceive Silicon Tuner */ +#define TUNER_XC5000C 88 /* Xceive Silicon Tuner */ + +#define TUNER_SONY_BTF_PG472Z 89 /* PAL+SECAM */ +#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */ +#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */ +#define TUNER_SI2157 92 +#define TUNER_TENA_TNF_931D_DFDR1 93 + +/* tv card specific */ +#define TDA9887_PRESENT (1<<0) +#define TDA9887_PORT1_INACTIVE (1<<1) +#define TDA9887_PORT2_INACTIVE (1<<2) +#define TDA9887_QSS (1<<3) +#define TDA9887_INTERCARRIER (1<<4) +#define TDA9887_PORT1_ACTIVE (1<<5) +#define TDA9887_PORT2_ACTIVE (1<<6) +#define TDA9887_INTERCARRIER_NTSC (1<<7) +/* Tuner takeover point adjustment, in dB, -16 <= top <= 15 */ +#define TDA9887_TOP_MASK (0x3f << 8) +#define TDA9887_TOP_SET (1 << 13) +#define TDA9887_TOP(top) (TDA9887_TOP_SET | \ + (((16 + (top)) & 0x1f) << 8)) + +/* config options */ +#define TDA9887_DEEMPHASIS_MASK (3<<16) +#define TDA9887_DEEMPHASIS_NONE (1<<16) +#define TDA9887_DEEMPHASIS_50 (2<<16) +#define TDA9887_DEEMPHASIS_75 (3<<16) +#define TDA9887_AUTOMUTE (1<<18) +#define TDA9887_GATING_18 (1<<19) +#define TDA9887_GAIN_NORMAL (1<<20) +#define TDA9887_RIF_41_3 (1<<21) /* radio IF1 41.3 vs 33.3 */ + +/** + * enum tuner_mode - Mode of the tuner + * + * @T_RADIO: Tuner core will work in radio mode + * @T_ANALOG_TV: Tuner core will work in analog TV mode + * + * Older boards only had a single tuner device, but some devices have a + * separate tuner for radio. In any case, the tuner-core needs to know if + * the tuner chip(s) will be used in radio mode or analog TV mode, as, on + * radio mode, frequencies are specified on a different range than on TV + * mode. This enum is used by the tuner core in order to work with the + * proper tuner range and eventually use a different tuner chip while in + * radio mode. 
+ */ +enum tuner_mode { + T_RADIO = 1 << V4L2_TUNER_RADIO, + T_ANALOG_TV = 1 << V4L2_TUNER_ANALOG_TV, + /* Don't map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */ +}; + +/** + * struct tuner_setup - setup the tuner chipsets + * + * @addr: I2C address used to control the tuner device/chipset + * @type: Type of the tuner, as defined at the TUNER_* macros. + * Each different tuner model should have an unique + * identifier. + * @mode_mask: Mask with the allowed tuner modes: V4L2_TUNER_RADIO, + * V4L2_TUNER_ANALOG_TV and/or V4L2_TUNER_DIGITAL_TV, + * describing if the tuner should be used to support + * Radio, analog TV and/or digital TV. + * @config: Used to send tuner-specific configuration for complex + * tuners that require extra parameters to be set. + * Only a very few tuners require it and its usage on + * newer tuners should be avoided. + * @tuner_callback: Some tuners require to call back the bridge driver, + * in order to do some tasks like rising a GPIO at the + * bridge chipset, in order to do things like resetting + * the device. + * + * Older boards only had a single tuner device. Nowadays multiple tuner + * devices may be present on a single board. Using TUNER_SET_TYPE_ADDR + * to pass the tuner_setup structure it is possible to setup each tuner + * device in turn. + * + * Since multiple devices may be present it is no longer sufficient to + * send a command to a single i2c device. Instead you should broadcast + * the command to all i2c devices. + * + * By setting the mode_mask correctly you can select which commands are + * accepted by a specific tuner device. For example, set mode_mask to + * T_RADIO if the device is a radio-only tuner. That specific tuner will + * only accept commands when the tuner is in radio mode and ignore them + * when the tuner is set to TV mode. 
+ */ + +struct tuner_setup { + unsigned short addr; + unsigned int type; + unsigned int mode_mask; + void *config; + int (*tuner_callback)(void *dev, int component, int cmd, int arg); +}; + +#endif /* __KERNEL__ */ + +#endif /* _TUNER_H */ diff --git a/6.17.0/include-overrides/media/tveeprom.h b/6.17.0/include-overrides/media/tveeprom.h new file mode 100644 index 0000000..f37c9b1 --- /dev/null +++ b/6.17.0/include-overrides/media/tveeprom.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * tveeprom - Contains structures and functions to work with Hauppauge + * eeproms. + */ + +#include + +/** + * enum tveeprom_audio_processor - Specifies the type of audio processor + * used on a Hauppauge device. + * + * @TVEEPROM_AUDPROC_NONE: No audio processor present + * @TVEEPROM_AUDPROC_INTERNAL: The audio processor is internal to the + * video processor + * @TVEEPROM_AUDPROC_MSP: The audio processor is a MSPXXXX device + * @TVEEPROM_AUDPROC_OTHER: The audio processor is another device + */ +enum tveeprom_audio_processor { + TVEEPROM_AUDPROC_NONE, + TVEEPROM_AUDPROC_INTERNAL, + TVEEPROM_AUDPROC_MSP, + TVEEPROM_AUDPROC_OTHER, +}; + +/** + * struct tveeprom - Contains the fields parsed from Hauppauge eeproms + * + * @has_radio: 1 if the device has radio; 0 otherwise. + * + * @has_ir: If has_ir == 0, then it is unknown what the IR + * capabilities are. Otherwise: + * bit 0) 1 (= IR capabilities are known); + * bit 1) IR receiver present; + * bit 2) IR transmitter (blaster) present. + * + * @has_MAC_address: 0: no MAC, 1: MAC present, 2: unknown. + * @tuner_type: type of the tuner (TUNER_*, as defined at + * include/media/tuner.h). + * + * @tuner_formats: Supported analog TV standards (V4L2_STD_*). + * @tuner_hauppauge_model: Hauppauge's code for the device model number. + * @tuner2_type: type of the second tuner (TUNER_*, as defined + * at include/media/tuner.h). + * + * @tuner2_formats: Tuner 2 supported analog TV standards + * (V4L2_STD_*). 
+ * + * @tuner2_hauppauge_model: tuner 2 Hauppauge's code for the device model + * number. + * + * @audio_processor: analog audio decoder, as defined by enum + * tveeprom_audio_processor. + * + * @decoder_processor: Hauppauge's code for the decoder chipset. + * Unused by the drivers, as they probe the + * decoder based on the PCI or USB ID. + * + * @model: Hauppauge's model number + * + * @revision: Card revision number + * + * @serial_number: Card's serial number + * + * @rev_str: Card revision converted to number + * + * @MAC_address: MAC address for the network interface + */ +struct tveeprom { + u32 has_radio; + u32 has_ir; + u32 has_MAC_address; + + u32 tuner_type; + u32 tuner_formats; + u32 tuner_hauppauge_model; + + u32 tuner2_type; + u32 tuner2_formats; + u32 tuner2_hauppauge_model; + + u32 audio_processor; + u32 decoder_processor; + + u32 model; + u32 revision; + u32 serial_number; + char rev_str[5]; + u8 MAC_address[ETH_ALEN]; +}; + +/** + * tveeprom_hauppauge_analog - Fill struct tveeprom using the contents + * of the eeprom previously filled at + * @eeprom_data field. + * + * @tvee: Struct to where the eeprom parsed data will be filled; + * @eeprom_data: Array with the contents of the eeprom_data. It should + * contain 256 bytes filled with the contents of the + * eeprom read from the Hauppauge device. + */ +void tveeprom_hauppauge_analog(struct tveeprom *tvee, + unsigned char *eeprom_data); + +/** + * tveeprom_read - Reads the contents of the eeprom found at the Hauppauge + * devices. + * + * @c: I2C client struct + * @eedata: Array where the eeprom content will be stored. + * @len: Size of @eedata array. If the eeprom content will be latter + * be parsed by tveeprom_hauppauge_analog(), len should be, at + * least, 256. 
+ */ +int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); diff --git a/6.17.0/include-overrides/media/v4l2-async.h b/6.17.0/include-overrides/media/v4l2-async.h new file mode 100644 index 0000000..f26c323 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-async.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 asynchronous subdevice registration API + * + * Copyright (C) 2012-2013, Guennadi Liakhovetski + */ + +#ifndef V4L2_ASYNC_H +#define V4L2_ASYNC_H + +#include +#include + +struct dentry; +struct device; +struct device_node; +struct v4l2_device; +struct v4l2_subdev; +struct v4l2_async_notifier; + +/** + * enum v4l2_async_match_type - type of asynchronous subdevice logic to be used + * in order to identify a match + * + * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address + * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node + * + * This enum is used by the asynchronous connection logic to define the + * algorithm that will be used to match an asynchronous device. + */ +enum v4l2_async_match_type { + V4L2_ASYNC_MATCH_TYPE_I2C, + V4L2_ASYNC_MATCH_TYPE_FWNODE, +}; + +/** + * struct v4l2_async_match_desc - async connection match information + * + * @type: type of match that will be used + * @fwnode: pointer to &struct fwnode_handle to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE. + * @i2c: embedded struct with I2C parameters to be matched. + * Both @match.i2c.adapter_id and @match.i2c.address + * should be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. + * @i2c.adapter_id: + * I2C adapter ID to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. + * @i2c.address: + * I2C address to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. 
+ */ +struct v4l2_async_match_desc { + enum v4l2_async_match_type type; + union { + struct fwnode_handle *fwnode; + struct { + int adapter_id; + unsigned short address; + } i2c; + }; +}; + +/** + * struct v4l2_async_connection - sub-device connection descriptor, as known to + * a bridge + * + * @match: struct of match type and per-bus type matching data sets + * @notifier: the async notifier the connection is related to + * @asc_entry: used to add struct v4l2_async_connection objects to the + * notifier @waiting_list or @done_list + * @asc_subdev_entry: entry in struct v4l2_async_subdev.asc_list list + * @sd: the related sub-device + * + * When this struct is used as a member in a driver specific struct, the driver + * specific struct shall contain the &struct v4l2_async_connection as its first + * member. + */ +struct v4l2_async_connection { + struct v4l2_async_match_desc match; + struct v4l2_async_notifier *notifier; + struct list_head asc_entry; + struct list_head asc_subdev_entry; + struct v4l2_subdev *sd; +}; + +/** + * struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations + * @bound: a sub-device has been bound by the given connection + * @complete: All connections have been bound successfully. The complete + * callback is only executed for the root notifier. 
+ * @unbind: a subdevice is leaving + * @destroy: the asc is about to be freed + */ +struct v4l2_async_notifier_operations { + int (*bound)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc); + int (*complete)(struct v4l2_async_notifier *notifier); + void (*unbind)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc); + void (*destroy)(struct v4l2_async_connection *asc); +}; + +/** + * struct v4l2_async_notifier - v4l2_device notifier data + * + * @ops: notifier operations + * @v4l2_dev: v4l2_device of the root notifier, NULL otherwise + * @sd: sub-device that registered the notifier, NULL otherwise + * @parent: parent notifier + * @waiting_list: list of struct v4l2_async_connection, waiting for their + * drivers + * @done_list: list of struct v4l2_subdev, already probed + * @notifier_entry: member in a global list of notifiers + */ +struct v4l2_async_notifier { + const struct v4l2_async_notifier_operations *ops; + struct v4l2_device *v4l2_dev; + struct v4l2_subdev *sd; + struct v4l2_async_notifier *parent; + struct list_head waiting_list; + struct list_head done_list; + struct list_head notifier_entry; +}; + +/** + * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list + * + * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of + * &struct v4l2_subdev + * @endpoint: Endpoint fwnode agains which to match the sub-device + */ +struct v4l2_async_subdev_endpoint { + struct list_head async_subdev_endpoint_entry; + struct fwnode_handle *endpoint; +}; + +/** + * v4l2_async_debug_init - Initialize debugging tools. + * + * @debugfs_dir: pointer to the parent debugfs &struct dentry + */ +void v4l2_async_debug_init(struct dentry *debugfs_dir); + +/** + * v4l2_async_nf_init - Initialize a notifier. 
+ * + * @notifier: pointer to &struct v4l2_async_notifier + * @v4l2_dev: pointer to &struct v4l2_device + * + * This function initializes the notifier @asc_entry. It must be called + * before adding a subdevice to a notifier, using one of: + * v4l2_async_nf_add_fwnode_remote(), + * v4l2_async_nf_add_fwnode() or + * v4l2_async_nf_add_i2c(). + */ +void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev); + +/** + * v4l2_async_subdev_nf_init - Initialize a sub-device notifier. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @sd: pointer to &struct v4l2_subdev + * + * This function initializes the notifier @asc_list. It must be called + * before adding a subdevice to a notifier, using one of: + * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or + * v4l2_async_nf_add_i2c(). + */ +void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd); + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier, + struct fwnode_handle *fwnode, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_fwnode - Allocate and add a fwnode async + * subdev to the notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @fwnode: fwnode handle of the sub-device to be matched, pointer to + * &struct fwnode_handle + * @type: Type of the driver's async sub-device or connection struct. The + * &struct v4l2_async_connection shall be the first member of the + * driver's async struct, i.e. both begin at the same memory address. + * + * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the + * notifiers @asc_list. The function also gets a reference of the fwnode which + * is released later at notifier cleanup time. 
+ */ +#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \ + ((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type))) + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode + * remote async subdev to the + * notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @ep: local endpoint pointing to the remote connection to be matched, + * pointer to &struct fwnode_handle + * @type: Type of the driver's async connection struct. The &struct + * v4l2_async_connection shall be the first member of the driver's async + * connection struct, i.e. both begin at the same memory address. + * + * Gets the remote endpoint of a given local endpoint, set it up for fwnode + * matching and adds the async connection to the notifier's @asc_list. The + * function also gets a reference of the fwnode which is released later at + * notifier cleanup time. + * + * This is just like v4l2_async_nf_add_fwnode(), but with the + * exception that the fwnode refers to a local endpoint, not the remote one. + */ +#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \ + ((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type))) + +struct v4l2_async_connection * +__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, + int adapter_id, unsigned short address, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_i2c - Allocate and add an i2c async + * subdev to the notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @adapter: I2C adapter ID to be matched + * @address: I2C address of connection to be matched + * @type: Type of the driver's async connection struct. The &struct + * v4l2_async_connection shall be the first member of the driver's async + * connection struct, i.e. 
both begin at the same memory address. + * + * Same as v4l2_async_nf_add_fwnode() but for I2C matched + * connections. + */ +#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \ + ((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \ + sizeof(type))) + +/** + * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device + * matching list + * + * @sd: the sub-device + * @fwnode: the endpoint fwnode to match + * + * Add a fwnode to the async sub-device's matching list. This allows registering + * multiple async sub-devices from a single device. + * + * Note that calling v4l2_subdev_cleanup() as part of the sub-device's cleanup + * if endpoints have been added to the sub-device's fwnode matching list. + * + * Returns an error on failure, 0 on success. + */ +int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd, + struct fwnode_handle *fwnode); + +/** + * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection + * for a sub-device + * @sd: the sub-device + * + * Return an async connection for a sub-device, when there is a single + * one only. + */ +struct v4l2_async_connection * +v4l2_async_connection_unique(struct v4l2_subdev *sd); + +/** + * v4l2_async_nf_register - registers a subdevice asynchronous notifier + * + * @notifier: pointer to &struct v4l2_async_notifier + */ +int v4l2_async_nf_register(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_nf_unregister - unregisters a subdevice + * asynchronous notifier + * + * @notifier: pointer to &struct v4l2_async_notifier + */ +void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_nf_cleanup - clean up notifier resources + * @notifier: the notifier the resources of which are to be cleaned up + * + * Release memory resources related to a notifier, including the async + * connections allocated for the purposes of the notifier but not the notifier + * itself. 
The user is responsible for calling this function to clean up the + * notifier after calling v4l2_async_nf_add_fwnode_remote(), + * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c(). + * + * There is no harm from calling v4l2_async_nf_cleanup() in other + * cases as long as its memory has been zeroed after it has been + * allocated. + */ +void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_register_subdev - registers a sub-device to the asynchronous + * subdevice framework + * + * @sd: pointer to &struct v4l2_subdev + */ +#define v4l2_async_register_subdev(sd) \ + __v4l2_async_register_subdev(sd, THIS_MODULE) +int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module); + +/** + * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the + * asynchronous sub-device framework and + * parse set up common sensor related + * devices + * + * @sd: pointer to struct &v4l2_subdev + * + * This function is just like v4l2_async_register_subdev() with the exception + * that calling it will also parse firmware interfaces for remote references + * using v4l2_async_nf_parse_fwnode_sensor() and registers the + * async sub-devices. The sub-device is similarly unregistered by calling + * v4l2_async_unregister_subdev(). + * + * While registered, the subdev module is marked as in-use. + * + * An error is returned if the module is no longer loaded on any attempts + * to register it. 
+ */ +int __must_check +v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd); + +/** + * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous + * subdevice framework + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_async_unregister_subdev(struct v4l2_subdev *sd); +#endif diff --git a/6.17.0/include-overrides/media/v4l2-cci.h b/6.17.0/include-overrides/media/v4l2-cci.h new file mode 100644 index 0000000..4e96e90 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-cci.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MIPI Camera Control Interface (CCI) register access helpers. + * + * Copyright (C) 2023 Hans de Goede + */ +#ifndef _V4L2_CCI_H +#define _V4L2_CCI_H + +#include +#include +#include + +struct i2c_client; +struct regmap; + +/** + * struct cci_reg_sequence - An individual write from a sequence of CCI writes + * + * @reg: Register address, use CCI_REG#() macros to encode reg width + * @val: Register value + * + * Register/value pairs for sequences of writes. + */ +struct cci_reg_sequence { + u32 reg; + u64 val; +}; + +/* + * Macros to define register address with the register width encoded + * into the higher bits. + */ +#define CCI_REG_ADDR_MASK GENMASK(15, 0) +#define CCI_REG_WIDTH_SHIFT 16 +#define CCI_REG_WIDTH_MASK GENMASK(19, 16) +/* + * Private CCI register flags, for the use of drivers. 
+ */ +#define CCI_REG_PRIVATE_SHIFT 28U +#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT) + +#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x) +#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3) +#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x) +#define CCI_REG_LE BIT(20) + +#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x)) + +/** + * cci_read() - Read a value from a single CCI register + * + * @map: Register map to read from + * @reg: Register address to read, use CCI_REG#() macros to encode reg width + * @val: Pointer to store read value + * @err: Optional pointer to store errors, if a previous error is set + * then the read will be skipped + * + * Return: %0 on success or a negative error code on failure. + */ +int cci_read(struct regmap *map, u32 reg, u64 *val, int *err); + +/** + * cci_write() - Write a value to a single CCI register + * + * @map: Register map to write to + * @reg: Register address to write, use CCI_REG#() macros to encode reg width + * @val: Value to be written + * @err: Optional pointer to store errors, if a previous error is set + * then the write will be skipped + * + * Return: %0 on success or a negative error code on failure. 
+ */ +int cci_write(struct regmap *map, u32 reg, u64 val, int *err); + +/** + * cci_update_bits() - Perform a read/modify/write cycle on + * a single CCI register + * + * @map: Register map to update + * @reg: Register address to update, use CCI_REG#() macros to encode reg width + * @mask: Bitmask to change + * @val: New value for bitmask + * @err: Optional pointer to store errors, if a previous error is set + * then the update will be skipped + * + * Note this uses read-modify-write to update the bits, atomicity with regards + * to other cci_*() register access functions is NOT guaranteed. + * + * Return: %0 on success or a negative error code on failure. + */ +int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err); + +/** + * cci_multi_reg_write() - Write multiple registers to the device + * + * @map: Register map to write to + * @regs: Array of structures containing register-address, -value pairs to be + * written, register-addresses use CCI_REG#() macros to encode reg width + * @num_regs: Number of registers to write + * @err: Optional pointer to store errors, if a previous error is set + * then the write will be skipped + * + * Write multiple registers to the device where the set of register, value + * pairs are supplied in any order, possibly not all in a single range. + * + * Use of the CCI_REG#() macros to encode reg width is mandatory. + * + * For raw lists of register-address, -value pairs with only 8 bit + * wide writes regmap_multi_reg_write() can be used instead. + * + * Return: %0 on success or a negative error code on failure. 
+ */ +int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs, + unsigned int num_regs, int *err); + +#if IS_ENABLED(CONFIG_V4L2_CCI_I2C) +/** + * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register + * access functions + * + * @client: i2c_client to create the regmap for + * @reg_addr_bits: register address width to use (8 or 16) + * + * Note the memory for the created regmap is devm() managed, tied to the client. + * + * Return: %0 on success or a negative error code on failure. + */ +struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client, + int reg_addr_bits); +#endif + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-common.h b/6.17.0/include-overrides/media/v4l2-common.h new file mode 100644 index 0000000..0a43f56 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-common.h @@ -0,0 +1,672 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + v4l2 common internal API header + + This header contains internal shared ioctl definitions for use by the + internal low-level v4l2 drivers. + Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal + define, + + Copyright (C) 2005 Hans Verkuil + + */ + +#ifndef V4L2_COMMON_H_ +#define V4L2_COMMON_H_ + +#include +#include + +/* Common printk constructs for v4l-i2c drivers. These macros create a unique + prefix consisting of the driver name, the adapter number and the i2c + address. */ +#define v4l_printk(level, name, adapter, addr, fmt, arg...) \ + printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg) + +#define v4l_client_printk(level, client, fmt, arg...) \ + v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \ + (client)->addr, fmt , ## arg) + +#define v4l_err(client, fmt, arg...) \ + v4l_client_printk(KERN_ERR, client, fmt , ## arg) + +#define v4l_warn(client, fmt, arg...) \ + v4l_client_printk(KERN_WARNING, client, fmt , ## arg) + +#define v4l_info(client, fmt, arg...) 
\ + v4l_client_printk(KERN_INFO, client, fmt , ## arg) + +/* These three macros assume that the debug level is set with a module + parameter called 'debug'. */ +#define v4l_dbg(level, debug, client, fmt, arg...) \ + do { \ + if (debug >= (level)) \ + v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \ + } while (0) + +/* Add a version of v4l_dbg to be used on drivers using dev_foo() macros */ +#define dev_dbg_lvl(__dev, __level, __debug, __fmt, __arg...) \ + do { \ + if (__debug >= (__level)) \ + dev_printk(KERN_DEBUG, __dev, __fmt, ##__arg); \ + } while (0) + +/* ------------------------------------------------------------------------- */ + +/* These printk constructs can be used with v4l2_device and v4l2_subdev */ +#define v4l2_printk(level, dev, fmt, arg...) \ + printk(level "%s: " fmt, (dev)->name , ## arg) + +#define v4l2_err(dev, fmt, arg...) \ + v4l2_printk(KERN_ERR, dev, fmt , ## arg) + +#define v4l2_warn(dev, fmt, arg...) \ + v4l2_printk(KERN_WARNING, dev, fmt , ## arg) + +#define v4l2_info(dev, fmt, arg...) \ + v4l2_printk(KERN_INFO, dev, fmt , ## arg) + +/* These three macros assume that the debug level is set with a module + parameter called 'debug'. */ +#define v4l2_dbg(level, debug, dev, fmt, arg...) \ + do { \ + if (debug >= (level)) \ + v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \ + } while (0) + +/** + * v4l2_ctrl_query_fill- Fill in a struct v4l2_queryctrl + * + * @qctrl: pointer to the &struct v4l2_queryctrl to be filled + * @min: minimum value for the control + * @max: maximum value for the control + * @step: control step + * @def: default value for the control + * + * Fills the &struct v4l2_queryctrl fields for the query control. + * + * .. note:: + * + * This function assumes that the @qctrl->id field is filled. + * + * Returns -EINVAL if the control is not known by the V4L2 core, 0 on success. 
+ */ + +int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, + s32 min, s32 max, s32 step, s32 def); + +/* ------------------------------------------------------------------------- */ + +struct v4l2_device; +struct v4l2_subdev; +struct v4l2_subdev_ops; + +/* I2C Helper functions */ +#include + +/** + * enum v4l2_i2c_tuner_type - specifies the range of tuner address that + * should be used when seeking for I2C devices. + * + * @ADDRS_RADIO: Radio tuner addresses. + * Represent the following I2C addresses: + * 0x10 (if compiled with tea5761 support) + * and 0x60. + * @ADDRS_DEMOD: Demod tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a and 0x4b. + * @ADDRS_TV: TV tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62, + * 0x63 and 0x64. + * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this + * excludes addresses used by the demodulator + * from the list of candidates. + * Represent the following I2C addresses: + * 0x60, 0x61, 0x62, 0x63 and 0x64. + * + * NOTE: All I2C addresses above use the 7-bit notation. + */ +enum v4l2_i2c_tuner_type { + ADDRS_RADIO, + ADDRS_DEMOD, + ADDRS_TV, + ADDRS_TV_WITH_DEMOD, +}; + +#if defined(CONFIG_VIDEO_V4L2_I2C) + +/** + * v4l2_i2c_new_subdev - Load an i2c module and return an initialized + * &struct v4l2_subdev. + * + * @v4l2_dev: pointer to &struct v4l2_device + * @adapter: pointer to struct i2c_adapter + * @client_type: name of the chip that's on the adapter. + * @addr: I2C address. If zero, it will use @probe_addrs + * @probe_addrs: array with a list of address. The last entry at such + * array should be %I2C_CLIENT_END. + * + * returns a &struct v4l2_subdev pointer. 
+ */ +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs); + +/** + * v4l2_i2c_new_subdev_board - Load an i2c module and return an initialized + * &struct v4l2_subdev. + * + * @v4l2_dev: pointer to &struct v4l2_device + * @adapter: pointer to struct i2c_adapter + * @info: pointer to struct i2c_board_info used to replace the irq, + * platform_data and addr arguments. + * @probe_addrs: array with a list of address. The last entry at such + * array should be %I2C_CLIENT_END. + * + * returns a &struct v4l2_subdev pointer. + */ +struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs); + +/** + * v4l2_i2c_subdev_set_name - Set name for an I²C sub-device + * + * @sd: pointer to &struct v4l2_subdev + * @client: pointer to struct i2c_client + * @devname: the name of the device; if NULL, the I²C device drivers's name + * will be used + * @postfix: sub-device specific string to put right after the I²C device name; + * may be NULL + */ +void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix); + +/** + * v4l2_i2c_subdev_init - Initializes a &struct v4l2_subdev with data from + * an i2c_client struct. + * + * @sd: pointer to &struct v4l2_subdev + * @client: pointer to struct i2c_client + * @ops: pointer to &struct v4l2_subdev_ops + */ +void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops); + +/** + * v4l2_i2c_subdev_addr - returns i2c client address of &struct v4l2_subdev. + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the address of an I2C sub-device + */ +unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd); + +/** + * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe. 
+ * + * @type: type of the tuner to seek, as defined by + * &enum v4l2_i2c_tuner_type. + * + * NOTE: Use only if the tuner addresses are unknown. + */ +const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); + +/** + * v4l2_i2c_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline void +v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix) +{} + +static inline void +v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{} + +static inline unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + return I2C_CLIENT_END; +} + +static inline const unsigned short * +v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + return NULL; +} + +static inline void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{} + +#endif + +/* ------------------------------------------------------------------------- */ + +/* SPI Helper functions */ + +#include + +#if defined(CONFIG_SPI) + +/** + * v4l2_spi_new_subdev - Load an spi module and return an initialized + * &struct v4l2_subdev. + * + * + * @v4l2_dev: pointer to &struct v4l2_device. + * @ctlr: pointer to struct spi_controller. + * @info: pointer to struct spi_board_info. + * + * returns a &struct v4l2_subdev pointer. 
+ */ +struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, struct spi_board_info *info); + +/** + * v4l2_spi_subdev_init - Initialize a v4l2_subdev with data from an + * spi_device struct. + * + * @sd: pointer to &struct v4l2_subdev + * @spi: pointer to struct spi_device. + * @ops: pointer to &struct v4l2_subdev_ops + */ +void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops); + +/** + * v4l2_spi_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, struct spi_board_info *info) +{ + return NULL; +} + +static inline void +v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{} + +static inline void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{} +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * FIXME: these remaining ioctls/structs should be removed as well, but they + * are still used in tuner-simple.c (TUNER_SET_CONFIG) and cx18/ivtv (RESET). + * To remove these ioctls some more cleanup is needed in those modules. + * + * It doesn't make much sense on documenting them, as what we really want is + * to get rid of them. + */ + +/* s_config */ +struct v4l2_priv_tun_config { + int tuner; + void *priv; +}; +#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) + +#define VIDIOC_INT_RESET _IOW ('d', 102, u32) + +/* ------------------------------------------------------------------------- */ + +/* Miscellaneous helper functions */ + +/** + * v4l_bound_align_image - adjust video dimensions according to + * a given constraints. + * + * @width: pointer to width that will be adjusted if needed. + * @wmin: minimum width. 
+ * @wmax: maximum width. + * @walign: least significant bit on width. + * @height: pointer to height that will be adjusted if needed. + * @hmin: minimum height. + * @hmax: maximum height. + * @halign: least significant bit on height. + * @salign: least significant bit for the image size (e. g. + * :math:`width * height`). + * + * Clip an image to have @width between @wmin and @wmax, and @height between + * @hmin and @hmax, inclusive. + * + * Additionally, the @width will be a multiple of :math:`2^{walign}`, + * the @height will be a multiple of :math:`2^{halign}`, and the overall + * size :math:`width * height` will be a multiple of :math:`2^{salign}`. + * + * .. note:: + * + * #. The clipping rectangle may be shrunk or enlarged to fit the alignment + * constraints. + * #. @wmax must not be smaller than @wmin. + * #. @hmax must not be smaller than @hmin. + * #. The alignments must not be so high there are no possible image + * sizes within the allowed bounds. + * #. @wmin and @hmin must be at least 1 (don't use 0). + * #. For @walign, @halign and @salign, if you don't care about a certain + * alignment, specify ``0``, as :math:`2^0 = 1` and one byte alignment + * is equivalent to no alignment. + * #. If you only want to adjust downward, specify a maximum that's the + * same as the initial value. 
+ */
+void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
+ unsigned int wmax, unsigned int walign,
+ unsigned int *height, unsigned int hmin,
+ unsigned int hmax, unsigned int halign,
+ unsigned int salign);
+
+/**
+ * v4l2_find_nearest_size_conditional - Find the nearest size among a discrete
+ * set of resolutions contained in an array of a driver specific struct,
+ * with conditional exclusion of certain modes
+ *
+ * @array: a driver specific array of image sizes
+ * @array_size: the length of the driver specific array of image sizes
+ * @width_field: the name of the width field in the driver specific struct
+ * @height_field: the name of the height field in the driver specific struct
+ * @width: desired width
+ * @height: desired height
+ * @func: ignores mode if returns false
+ * @context: context for the function
+ *
+ * Finds the closest resolution to minimize the width and height differences
+ * between what is requested and the supported resolutions. The size of the width
+ * and height fields in the driver specific struct must equal to that of u32, i.e. four
+ * bytes. @func is called for each mode considered, a mode is ignored if @func
+ * returns false for it.
+ *
+ * Returns the best match or NULL if the length of the array is zero.
+ */ +#define v4l2_find_nearest_size_conditional(array, array_size, width_field, \ + height_field, width, height, \ + func, context) \ + ({ \ + BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \ + sizeof((array)->height_field) != sizeof(u32)); \ + (typeof(&(array)[0]))__v4l2_find_nearest_size_conditional( \ + (array), array_size, sizeof(*(array)), \ + offsetof(typeof(*(array)), width_field), \ + offsetof(typeof(*(array)), height_field), \ + width, height, func, context); \ + }) +const void * +__v4l2_find_nearest_size_conditional(const void *array, size_t array_size, + size_t entry_size, size_t width_offset, + size_t height_offset, s32 width, + s32 height, + bool (*func)(const void *array, + size_t index, + const void *context), + const void *context); + +/** + * v4l2_find_nearest_size - Find the nearest size among a discrete set of + * resolutions contained in an array of a driver specific struct + * + * @array: a driver specific array of image sizes + * @array_size: the length of the driver specific array of image sizes + * @width_field: the name of the width field in the driver specific struct + * @height_field: the name of the height field in the driver specific struct + * @width: desired width + * @height: desired height + * + * Finds the closest resolution to minimize the width and height differences + * between what requested and the supported resolutions. The size of the width + * and height fields in the driver specific must equal to that of u32, i.e. four + * bytes. + * + * Returns the best match or NULL if the length of the array is zero. + */ +#define v4l2_find_nearest_size(array, array_size, width_field, \ + height_field, width, height) \ + v4l2_find_nearest_size_conditional(array, array_size, width_field, \ + height_field, width, height, NULL, \ + NULL) + +/** + * v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by + * calling the get_frame_interval op of the given subdev. 
It only works + * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the + * function name. + * + * @vdev: the struct video_device pointer. Used to determine the device caps. + * @sd: the sub-device pointer. + * @a: the VIDIOC_G_PARM argument. + */ +int v4l2_g_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a); + +/** + * v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by + * calling the set_frame_interval op of the given subdev. It only works + * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the + * function name. + * + * @vdev: the struct video_device pointer. Used to determine the device caps. + * @sd: the sub-device pointer. + * @a: the VIDIOC_S_PARM argument. + */ +int v4l2_s_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a); + +/* Compare two v4l2_fract structs */ +#define V4L2_FRACT_COMPARE(a, OP, b) \ + ((u64)(a).numerator * (b).denominator OP \ + (u64)(b).numerator * (a).denominator) + +/* ------------------------------------------------------------------------- */ + +/* Pixel format and FourCC helpers */ + +/** + * enum v4l2_pixel_encoding - specifies the pixel encoding value + * + * @V4L2_PIXEL_ENC_UNKNOWN: Pixel encoding is unknown/un-initialized + * @V4L2_PIXEL_ENC_YUV: Pixel encoding is YUV + * @V4L2_PIXEL_ENC_RGB: Pixel encoding is RGB + * @V4L2_PIXEL_ENC_BAYER: Pixel encoding is Bayer + */ +enum v4l2_pixel_encoding { + V4L2_PIXEL_ENC_UNKNOWN = 0, + V4L2_PIXEL_ENC_YUV = 1, + V4L2_PIXEL_ENC_RGB = 2, + V4L2_PIXEL_ENC_BAYER = 3, +}; + +/** + * struct v4l2_format_info - information about a V4L2 format + * @format: 4CC format identifier (V4L2_PIX_FMT_*) + * @pixel_enc: Pixel encoding (see enum v4l2_pixel_encoding above) + * @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4). + * @comp_planes: Number of component planes, which includes the alpha plane (1 to 4). 
+ * @bpp: Array of per-plane bytes per pixel + * @bpp_div: Array of per-plane bytes per pixel divisors to support fractional pixel sizes. + * @hdiv: Horizontal chroma subsampling factor + * @vdiv: Vertical chroma subsampling factor + * @block_w: Per-plane macroblock pixel width (optional) + * @block_h: Per-plane macroblock pixel height (optional) + */ +struct v4l2_format_info { + u32 format; + u8 pixel_enc; + u8 mem_planes; + u8 comp_planes; + u8 bpp[4]; + u8 bpp_div[4]; + u8 hdiv; + u8 vdiv; + u8 block_w[4]; + u8 block_h[4]; +}; + +static inline bool v4l2_is_format_rgb(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_RGB; +} + +static inline bool v4l2_is_format_yuv(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_YUV; +} + +static inline bool v4l2_is_format_bayer(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_BAYER; +} + +const struct v4l2_format_info *v4l2_format_info(u32 format); +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize); +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height); +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, + u32 width, u32 height); + +/** + * v4l2_get_link_freq - Get link rate from transmitter + * + * @pad: The transmitter's media pad (or control handler for non-MC users or + * compatibility reasons, don't use in new code) + * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on + * D-PHY, samples per clock on parallel. 0 otherwise. + * @div: The divisor between pixel rate and link frequency. Number of data lanes + * times two on D-PHY, 1 on parallel. 0 otherwise. + * + * This function is intended for obtaining the link frequency from the + * transmitter sub-devices. 
It returns the link rate, either from the + * V4L2_CID_LINK_FREQ control implemented by the transmitter, or value + * calculated based on the V4L2_CID_PIXEL_RATE implemented by the transmitter. + * + * Return: + * * >0: Link frequency + * * %-ENOENT: Link frequency or pixel rate control not found + * * %-EINVAL: Invalid link frequency value + */ +#ifdef CONFIG_MEDIA_CONTROLLER +#define v4l2_get_link_freq(pad, mul, div) \ + _Generic(pad, \ + struct media_pad *: __v4l2_get_link_freq_pad, \ + struct v4l2_ctrl_handler *: __v4l2_get_link_freq_ctrl) \ + (pad, mul, div) +s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul, + unsigned int div); +#else +#define v4l2_get_link_freq(handler, mul, div) \ + __v4l2_get_link_freq_ctrl(handler, mul, div) +#endif +s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler, + unsigned int mul, unsigned int div); + +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold); +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator); + +/** + * v4l2_link_freq_to_bitmap - Figure out platform-supported link frequencies + * @dev: The struct device + * @fw_link_freqs: Array of link frequencies from firmware + * @num_of_fw_link_freqs: Number of entries in @fw_link_freqs + * @driver_link_freqs: Array of link frequencies supported by the driver + * @num_of_driver_link_freqs: Number of entries in @driver_link_freqs + * @bitmap: Bitmap of driver-supported link frequencies found in @fw_link_freqs + * + * This function checks which driver-supported link frequencies are enabled in + * system firmware and sets the corresponding bits in @bitmap (after first + * zeroing it). + * + * Return: + * * %0: Success + * * %-ENOENT: No match found between driver-supported link frequencies and + * those available in firmware. + * * %-ENODATA: No link frequencies were specified in firmware. 
+ */ +int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, + unsigned int num_of_fw_link_freqs, + const s64 *driver_link_freqs, + unsigned int num_of_driver_link_freqs, + unsigned long *bitmap); + +static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf) +{ + /* + * When the timestamp comes from 32-bit user space, there may be + * uninitialized data in tv_usec, so cast it to u32. + * Otherwise allow invalid input for backwards compatibility. + */ + return buf->timestamp.tv_sec * NSEC_PER_SEC + + (u32)buf->timestamp.tv_usec * NSEC_PER_USEC; +} + +static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf, + u64 timestamp) +{ + struct timespec64 ts = ns_to_timespec64(timestamp); + + buf->timestamp.tv_sec = ts.tv_sec; + buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} + +static inline bool v4l2_is_colorspace_valid(__u32 colorspace) +{ + return colorspace > V4L2_COLORSPACE_DEFAULT && + colorspace < V4L2_COLORSPACE_LAST; +} + +static inline bool v4l2_is_xfer_func_valid(__u32 xfer_func) +{ + return xfer_func > V4L2_XFER_FUNC_DEFAULT && + xfer_func < V4L2_XFER_FUNC_LAST; +} + +static inline bool v4l2_is_ycbcr_enc_valid(__u8 ycbcr_enc) +{ + return ycbcr_enc > V4L2_YCBCR_ENC_DEFAULT && + ycbcr_enc < V4L2_YCBCR_ENC_LAST; +} + +static inline bool v4l2_is_hsv_enc_valid(__u8 hsv_enc) +{ + return hsv_enc == V4L2_HSV_ENC_180 || hsv_enc == V4L2_HSV_ENC_256; +} + +static inline bool v4l2_is_quant_valid(__u8 quantization) +{ + return quantization == V4L2_QUANTIZATION_FULL_RANGE || + quantization == V4L2_QUANTIZATION_LIM_RANGE; +} + +#endif /* V4L2_COMMON_H_ */ diff --git a/6.17.0/include-overrides/media/v4l2-ctrls.h b/6.17.0/include-overrides/media/v4l2-ctrls.h new file mode 100644 index 0000000..c32c462 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-ctrls.h @@ -0,0 +1,1633 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 controls support header. 
+ * + * Copyright (C) 2010 Hans Verkuil + */ + +#ifndef _V4L2_CTRLS_H +#define _V4L2_CTRLS_H + +#include +#include +#include +#include + +/* forward references */ +struct file; +struct poll_table_struct; +struct v4l2_ctrl; +struct v4l2_ctrl_handler; +struct v4l2_ctrl_helper; +struct v4l2_fh; +struct v4l2_fwnode_device_properties; +struct v4l2_subdev; +struct v4l2_subscribed_event; +struct video_device; + +/** + * union v4l2_ctrl_ptr - A pointer to a control value. + * @p_s32: Pointer to a 32-bit signed value. + * @p_s64: Pointer to a 64-bit signed value. + * @p_u8: Pointer to a 8-bit unsigned value. + * @p_u16: Pointer to a 16-bit unsigned value. + * @p_u32: Pointer to a 32-bit unsigned value. + * @p_char: Pointer to a string. + * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure. + * @p_mpeg2_picture: Pointer to a MPEG2 picture structure. + * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure. + * @p_fwht_params: Pointer to a FWHT stateless parameters structure. + * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. + * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. + * @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix. + * @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params. + * @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params. + * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights. + * @p_vp8_frame: Pointer to a VP8 frame params structure. + * @p_vp9_compressed_hdr_probs: Pointer to a VP9 frame compressed header probs structure. + * @p_vp9_frame: Pointer to a VP9 frame params structure. + * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure. + * @p_hevc_pps: Pointer to an HEVC picture parameter set structure. + * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure. + * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure. + * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure. 
+ * @p_area: Pointer to an area. + * @p_av1_sequence: Pointer to an AV1 sequence structure. + * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure. + * @p_av1_frame: Pointer to an AV1 frame structure. + * @p_av1_film_grain: Pointer to an AV1 film grain structure. + * @p_rect: Pointer to a rectangle. + * @p: Pointer to a compound value. + * @p_const: Pointer to a constant compound value. + */ +union v4l2_ctrl_ptr { + s32 *p_s32; + s64 *p_s64; + u8 *p_u8; + u16 *p_u16; + u32 *p_u32; + char *p_char; + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; + struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; + struct v4l2_area *p_area; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry; + struct v4l2_ctrl_av1_frame *p_av1_frame; + struct v4l2_ctrl_av1_film_grain *p_av1_film_grain; + struct v4l2_rect *p_rect; + void *p; + const void *p_const; +}; + +/** + * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a + * void pointer + * @ptr: The void pointer + */ +static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr) +{ + union v4l2_ctrl_ptr p = { 
.p = ptr }; + + return p; +} + +/** + * struct v4l2_ctrl_ops - The control operations that the driver has to provide. + * + * @g_volatile_ctrl: Get a new value for this control. Generally only relevant + * for volatile (and usually read-only) controls such as a control + * that returns the current signal strength which changes + * continuously. + * If not set, then the currently cached value will be returned. + * @try_ctrl: Test whether the control's value is valid. Only relevant when + * the usual min/max/step checks are not sufficient. + * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The + * ctrl->handler->lock is held when these ops are called, so no + * one else can access controls owned by that handler. + */ +struct v4l2_ctrl_ops { + int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl); + int (*try_ctrl)(struct v4l2_ctrl *ctrl); + int (*s_ctrl)(struct v4l2_ctrl *ctrl); +}; + +/** + * struct v4l2_ctrl_type_ops - The control type operations that the driver + * has to provide. + * + * @equal: return true if all ctrl->elems array elements are equal. + * @init: initialize the value for array elements from from_idx to ctrl->elems. + * @minimum: set the value to the minimum value of the control. + * @maximum: set the value to the maximum value of the control. + * @log: log the value. + * @validate: validate the value for ctrl->new_elems array elements. + * Return 0 on success and a negative value otherwise. 
+ */ +struct v4l2_ctrl_type_ops { + bool (*equal)(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr); + void (*minimum)(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr); + void (*maximum)(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr); + void (*log)(const struct v4l2_ctrl *ctrl); + int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); +}; + +/** + * typedef v4l2_ctrl_notify_fnc - typedef for a notify argument with a function + * that should be called when a control value has changed. + * + * @ctrl: pointer to struct &v4l2_ctrl + * @priv: control private data + * + * This typedef definition is used as an argument to v4l2_ctrl_notify() + * and as an argument at struct &v4l2_ctrl_handler. + */ +typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); + +/** + * struct v4l2_ctrl - The control structure. + * + * @node: The list node. + * @ev_subs: The list of control event subscriptions. + * @handler: The handler that owns the control. + * @cluster: Point to start of cluster array. + * @ncontrols: Number of controls in cluster array. + * @done: Internal flag: set for each processed control. + * @is_new: Set when the user specified a new value for this control. It + * is also set when called from v4l2_ctrl_handler_setup(). Drivers + * should never set this flag. + * @has_changed: Set when the current value differs from the new value. Drivers + * should never use this flag. + * @is_private: If set, then this control is private to its handler and it + * will not be added to any other handlers. Drivers can set + * this flag. + * @is_auto: If set, then this control selects whether the other cluster + * members are in 'automatic' mode or 'manual' mode. This is + * used for autogain/gain type clusters. Drivers should never + * set this flag directly. 
+ * @is_int: If set, then this control has a simple integer value (i.e. it + * uses ctrl->val). + * @is_string: If set, then this control has type %V4L2_CTRL_TYPE_STRING. + * @is_ptr: If set, then this control is an array and/or has type >= + * %V4L2_CTRL_COMPOUND_TYPES + * and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct + * v4l2_ext_control uses field p to point to the data. + * @is_array: If set, then this control contains an N-dimensional array. + * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array. + * If this is set, then @is_array is also set. + * @has_volatiles: If set, then one or more members of the cluster are volatile. + * Drivers should never touch this flag. + * @call_notify: If set, then call the handler's notify function whenever the + * control's value changes. + * @manual_mode_value: If the is_auto flag is set, then this is the value + * of the auto control that determines if that control is in + * manual mode. So if the value of the auto control equals this + * value, then the whole cluster is in manual mode. Drivers should + * never set this flag directly. + * @ops: The control ops. + * @type_ops: The control type ops. + * @id: The control ID. + * @name: The control name. + * @type: The control type. + * @minimum: The control's minimum value. + * @maximum: The control's maximum value. + * @default_value: The control's default value. + * @step: The control's step value for non-menu controls. + * @elems: The number of elements in the N-dimensional array. + * @elem_size: The size in bytes of the control. + * @new_elems: The number of elements in p_new. This is the same as @elems, + * except for dynamic arrays. In that case it is in the range of + * 1 to @p_array_alloc_elems. + * @dims: The size of each dimension. + * @nr_of_dims:The number of dimensions in @dims. + * @menu_skip_mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. 
If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 32 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a u64 or a bit array. + * @qmenu: A const char * array for all menu items. Array entries that are + * empty strings ("") correspond to non-existing menu items (this + * is in addition to the menu_skip_mask above). The last entry + * must be NULL. + * Used only if the @type is %V4L2_CTRL_TYPE_MENU. + * @qmenu_int: A 64-bit integer array for with integer menu items. + * The size of array must be equal to the menu size, e. g.: + * :math:`ceil(\frac{maximum - minimum}{step}) + 1`. + * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU. + * @flags: The control's flags. + * @priv: The control's private pointer. For use by the driver. It is + * untouched by the control framework. Note that this pointer is + * not freed when the control is deleted. Should this be needed + * then a new internal bitfield can be added to tell the framework + * to free this pointer. + * @p_array: Pointer to the allocated array. Only valid if @is_array is true. + * @p_array_alloc_elems: The number of elements in the allocated + * array for both the cur and new values. So @p_array is actually + * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if + * @is_array is true. + * @cur: Structure to store the current value. + * @cur.val: The control's current value, if the @type is represented via + * a u32 integer (see &enum v4l2_ctrl_type). + * @val: The control's new s32 value. + * @p_def: The control's default value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). + * @p_min: The control's minimum value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). 
+ * @p_max: The control's maximum value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). + * @p_cur: The control's current value represented via a union which + * provides a standard way of accessing control types + * through a pointer. + * @p_new: The control's new value represented via a union which provides + * a standard way of accessing control types + * through a pointer. + */ +struct v4l2_ctrl { + /* Administrative fields */ + struct list_head node; + struct list_head ev_subs; + struct v4l2_ctrl_handler *handler; + struct v4l2_ctrl **cluster; + unsigned int ncontrols; + + unsigned int done:1; + + unsigned int is_new:1; + unsigned int has_changed:1; + unsigned int is_private:1; + unsigned int is_auto:1; + unsigned int is_int:1; + unsigned int is_string:1; + unsigned int is_ptr:1; + unsigned int is_array:1; + unsigned int is_dyn_array:1; + unsigned int has_volatiles:1; + unsigned int call_notify:1; + unsigned int manual_mode_value:8; + + const struct v4l2_ctrl_ops *ops; + const struct v4l2_ctrl_type_ops *type_ops; + u32 id; + const char *name; + enum v4l2_ctrl_type type; + s64 minimum, maximum, default_value; + u32 elems; + u32 elem_size; + u32 new_elems; + u32 dims[V4L2_CTRL_MAX_DIMS]; + u32 nr_of_dims; + union { + u64 step; + u64 menu_skip_mask; + }; + union { + const char * const *qmenu; + const s64 *qmenu_int; + }; + unsigned long flags; + void *priv; + void *p_array; + u32 p_array_alloc_elems; + s32 val; + struct { + s32 val; + } cur; + + union v4l2_ctrl_ptr p_def; + union v4l2_ctrl_ptr p_min; + union v4l2_ctrl_ptr p_max; + union v4l2_ctrl_ptr p_new; + union v4l2_ctrl_ptr p_cur; +}; + +/** + * struct v4l2_ctrl_ref - The control reference. + * + * @node: List node for the sorted list. + * @next: Single-link list node for the hash. + * @ctrl: The actual control information. + * @helper: Pointer to helper struct. 
Used internally in + * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``. + * @from_other_dev: If true, then @ctrl was defined in another + * device than the &struct v4l2_ctrl_handler. + * @req_done: Internal flag: if the control handler containing this control + * reference is bound to a media request, then this is set when + * the control has been applied. This prevents applying controls + * from a cluster with multiple controls twice (when the first + * control of a cluster is applied, they all are). + * @p_req_valid: If set, then p_req contains the control value for the request. + * @p_req_array_enomem: If set, then p_req is invalid since allocating space for + * an array failed. Attempting to read this value shall + * result in ENOMEM. Only valid if ctrl->is_array is true. + * @p_req_array_alloc_elems: The number of elements allocated for the + * array. Only valid if @p_req_valid and ctrl->is_array are + * true. + * @p_req_elems: The number of elements in @p_req. This is the same as + * ctrl->elems, except for dynamic arrays. In that case it is in + * the range of 1 to @p_req_array_alloc_elems. Only valid if + * @p_req_valid is true. + * @p_req: If the control handler containing this control reference + * is bound to a media request, then this points to the + * value of the control that must be applied when the request + * is executed, or to the value of the control at the time + * that the request was completed. If @p_req_valid is false, + * then this control was never set for this request and the + * control will not be updated when this request is applied. + * + * Each control handler has a list of these refs. The list_head is used to + * keep a sorted-by-control-ID list of all controls, while the next pointer + * is used to link the control in the hash's bucket. 
+ */ +struct v4l2_ctrl_ref { + struct list_head node; + struct v4l2_ctrl_ref *next; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_helper *helper; + bool from_other_dev; + bool req_done; + bool p_req_valid; + bool p_req_array_enomem; + u32 p_req_array_alloc_elems; + u32 p_req_elems; + union v4l2_ctrl_ptr p_req; +}; + +/** + * struct v4l2_ctrl_handler - The control handler keeps track of all the + * controls: both the controls owned by the handler and those inherited + * from other handlers. + * + * @_lock: Default for "lock". + * @lock: Lock to control access to this handler and its controls. + * May be replaced by the user right after init. + * @ctrls: The list of controls owned by this handler. + * @ctrl_refs: The list of control references. + * @cached: The last found control reference. It is common that the same + * control is needed multiple times, so this is a simple + * optimization. + * @buckets: Buckets for the hashing. Allows for quick control lookup. + * @notify: A notify callback that is called whenever the control changes + * value. + * Note that the handler's lock is held when the notify function + * is called! + * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. + * @nr_of_buckets: Total number of buckets in the array. + * @error: The error code of the first failed control addition. + * @request_is_queued: True if the request was queued. + * @requests: List to keep track of open control handler request objects. + * For the parent control handler (@req_obj.ops == NULL) this + * is the list header. When the parent control handler is + * removed, it has to unbind and put all these requests since + * they refer to the parent. + * @requests_queued: List of the queued requests. This determines the order + * in which these controls are applied. Once the request is + * completed it is removed from this list. + * @req_obj: The &struct media_request_object, used to link into a + * &struct media_request. This request object has a refcount. 
+ */ +struct v4l2_ctrl_handler { + struct mutex _lock; + struct mutex *lock; + struct list_head ctrls; + struct list_head ctrl_refs; + struct v4l2_ctrl_ref *cached; + struct v4l2_ctrl_ref **buckets; + v4l2_ctrl_notify_fnc notify; + void *notify_priv; + u16 nr_of_buckets; + int error; + bool request_is_queued; + struct list_head requests; + struct list_head requests_queued; + struct media_request_object req_obj; +}; + +/** + * struct v4l2_ctrl_config - Control configuration structure. + * + * @ops: The control ops. + * @type_ops: The control type ops. Only needed for compound controls. + * @id: The control ID. + * @name: The control name. + * @type: The control type. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value for non-menu controls. + * @def: The control's default value. + * @p_def: The control's default value for compound controls. + * @p_min: The control's minimum value for compound controls. + * @p_max: The control's maximum value for compound controls. + * @dims: The size of each dimension. + * @elem_size: The size in bytes of the control. + * @flags: The control's flags. + * @menu_skip_mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @qmenu: A const char * array for all menu items. Array entries that are + * empty strings ("") correspond to non-existing menu items (this + * is in addition to the menu_skip_mask above). The last entry + * must be NULL. + * @qmenu_int: A const s64 integer array for all menu items of the type + * V4L2_CTRL_TYPE_INTEGER_MENU. 
+ * @is_private: If set, then this control is private to its handler and it + * will not be added to any other handlers. + */ +struct v4l2_ctrl_config { + const struct v4l2_ctrl_ops *ops; + const struct v4l2_ctrl_type_ops *type_ops; + u32 id; + const char *name; + enum v4l2_ctrl_type type; + s64 min; + s64 max; + u64 step; + s64 def; + union v4l2_ctrl_ptr p_def; + union v4l2_ctrl_ptr p_min; + union v4l2_ctrl_ptr p_max; + u32 dims[V4L2_CTRL_MAX_DIMS]; + u32 elem_size; + u32 flags; + u64 menu_skip_mask; + const char * const *qmenu; + const s64 *qmenu_int; + unsigned int is_private:1; +}; + +/** + * v4l2_ctrl_fill - Fill in the control fields based on the control ID. + * + * @id: ID of the control + * @name: pointer to be filled with a string with the name of the control + * @type: pointer for storing the type of the control + * @min: pointer for storing the minimum value for the control + * @max: pointer for storing the maximum value for the control + * @step: pointer for storing the control step + * @def: pointer for storing the default value for the control + * @flags: pointer for storing the flags to be used on the control + * + * This works for all standard V4L2 controls. + * For non-standard controls it will only fill in the given arguments + * and @name content will be set to %NULL. + * + * This function will overwrite the contents of @name, @type and @flags. + * The contents of @min, @max, @step and @def may be modified depending on + * the type. + * + * .. note:: + * + * Do not use in drivers! It is used internally for backwards compatibility + * control handling only. Once all drivers are converted to use the new + * control framework this function will no longer be exported. + */ +void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags); + + +/** + * v4l2_ctrl_handler_init_class() - Initialize the control handler. + * @hdl: The control handler. 
+ * @nr_of_controls_hint: A hint of how many controls this handler is + * expected to refer to. This is the total number, so including + * any inherited controls. It doesn't have to be precise, but if + * it is way off, then you either waste memory (too many buckets + * are allocated) or the control lookup becomes slower (not enough + * buckets are allocated, so there are more slow list lookups). + * It will always work, though. + * @key: Used by the lock validator if CONFIG_LOCKDEP is set. + * @name: Used by the lock validator if CONFIG_LOCKDEP is set. + * + * .. attention:: + * + * Never use this call directly, always use the v4l2_ctrl_handler_init() + * macro that hides the @key and @name arguments. + * + * Return: returns an error if the buckets could not be allocated. This + * error will also be stored in @hdl->error. + */ +int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, + unsigned int nr_of_controls_hint, + struct lock_class_key *key, const char *name); + +#ifdef CONFIG_LOCKDEP + +/** + * v4l2_ctrl_handler_init - helper function to create a static struct + * &lock_class_key and calls v4l2_ctrl_handler_init_class() + * + * @hdl: The control handler. + * @nr_of_controls_hint: A hint of how many controls this handler is + * expected to refer to. This is the total number, so including + * any inherited controls. It doesn't have to be precise, but if + * it is way off, then you either waste memory (too many buckets + * are allocated) or the control lookup becomes slower (not enough + * buckets are allocated, so there are more slow list lookups). + * It will always work, though. + * + * This helper function creates a static struct &lock_class_key and + * calls v4l2_ctrl_handler_init_class(), providing a proper name for the lock + * validator. + * + * Use this helper function to initialize a control handler. 
+ */ +#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \ +( \ + ({ \ + static struct lock_class_key _key; \ + v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, \ + &_key, \ + KBUILD_BASENAME ":" \ + __stringify(__LINE__) ":" \ + "(" #hdl ")->_lock"); \ + }) \ +) +#else +#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \ + v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL) +#endif + +/** + * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free + * the control list. + * @hdl: The control handler. + * + * Does nothing if @hdl == NULL. + * + * Return: @hdl's error field or 0 if @hdl is NULL. + */ +int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_lock() - Helper function to lock the handler + * associated with the control. + * @ctrl: The control to lock. + */ +static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl) +{ + mutex_lock(ctrl->handler->lock); +} + +/** + * v4l2_ctrl_unlock() - Helper function to unlock the handler + * associated with the control. + * @ctrl: The control to unlock. + */ +static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl) +{ + mutex_unlock(ctrl->handler->lock); +} + +/** + * __v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging + * to the handler to initialize the hardware to the current control values. The + * caller is responsible for acquiring the control handler mutex on behalf of + * __v4l2_ctrl_handler_setup(). + * @hdl: The control handler. + * + * Button controls will be skipped, as are read-only controls. + * + * If @hdl == NULL, then this just returns 0. + */ +int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging + * to the handler to initialize the hardware to the current control values. + * @hdl: The control handler. + * + * Button controls will be skipped, as are read-only controls. 
+ * + * If @hdl == NULL, then this just returns 0. + */ +int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler. + * @hdl: The control handler. + * @prefix: The prefix to use when logging the control values. If the + * prefix does not end with a space, then ": " will be added + * after the prefix. If @prefix == NULL, then no prefix will be + * used. + * + * For use with VIDIOC_LOG_STATUS. + * + * Does nothing if @hdl == NULL. + */ +void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, + const char *prefix); + +/** + * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2 + * control. + * + * @hdl: The control handler. + * @cfg: The control's configuration data. + * @priv: The control's driver-specific private data. + * + * If the &v4l2_ctrl struct could not be allocated then NULL is returned + * and @hdl->error is set to the error code (if it wasn't set already). + */ +struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_config *cfg, + void *priv); + +/** + * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu + * control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * If the &v4l2_ctrl struct could not be allocated, or the control + * ID is not known, then NULL is returned and @hdl->error is set to the + * appropriate error code (if it wasn't set already). + * + * If @id refers to a menu control, then this function will return NULL. + * + * Use v4l2_ctrl_new_std_menu() when adding menu controls. 
+ */ +struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, s64 min, s64 max, u64 step, + s64 def); + +/** + * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 + * menu control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @def: The control's default value. + * + * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value + * determines which menu items are to be skipped. + * + * If @id refers to a non-menu control, then this function will return NULL. + */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 max, u64 mask, u8 def); + +/** + * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control + * with driver specific menu. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @def: The control's default value. + * @qmenu: The new menu. + * + * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific + * menu of this control. 
+ * + */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 max, + u64 mask, u8 def, + const char * const *qmenu); + +/** + * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2 + * compound control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @p_def: The control's default value. + * @p_min: The control's minimum value. + * @p_max: The control's maximum value. + * + * Same as v4l2_ctrl_new_std(), but with support for compound controls. + * To fill in the @p_def, @p_min and @p_max fields, use v4l2_ctrl_ptr_create() + * to convert a pointer to a const union v4l2_ctrl_ptr. + * Use v4l2_ctrl_ptr_create(NULL) if you want the default, minimum or maximum + * value of the compound control to be all zeroes. + * If the compound control does not set the ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX`` + * flag, then it does not have minimum and maximum values. In that case just use + * v4l2_ctrl_ptr_create(NULL) for the @p_min and @p_max arguments. + * + */ +struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, + const union v4l2_ctrl_ptr p_def, + const union v4l2_ctrl_ptr p_min, + const union v4l2_ctrl_ptr p_max); + +/** + * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @def: The control's default value. + * @qmenu_int: The control's menu entries. + * + * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally + * takes as an argument an array of integers determining the menu items. + * + * If @id refers to a non-integer-menu control, then this function will + * return %NULL. 
+ */ +struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 max, u8 def, + const s64 *qmenu_int); + +/** + * typedef v4l2_ctrl_filter - Typedef to define the filter function to be + * used when adding a control handler. + * + * @ctrl: pointer to struct &v4l2_ctrl. + */ + +typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_add_handler() - Add all controls from handler @add to + * handler @hdl. + * + * @hdl: The control handler. + * @add: The control handler whose controls you want to add to + * the @hdl control handler. + * @filter: This function will filter which controls should be added. + * @from_other_dev: If true, then the controls in @add were defined in another + * device than @hdl. + * + * Does nothing if either of the two handlers is a NULL pointer. + * If @filter is NULL, then all controls are added. Otherwise only those + * controls for which @filter returns true will be added. + * In case of an error @hdl->error will be set to the error code (if it + * wasn't set already). + */ +int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *add, + v4l2_ctrl_filter filter, + bool from_other_dev); + +/** + * v4l2_ctrl_radio_filter() - Standard filter for radio controls. + * + * @ctrl: The control that is filtered. + * + * This will return true for any controls that are valid for radio device + * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM + * transmitter class controls. + * + * This function is to be used with v4l2_ctrl_add_handler(). + */ +bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging + * to that cluster. + * + * @ncontrols: The number of controls in this cluster. + * @controls: The cluster control array of size @ncontrols. 
+ */ +void v4l2_ctrl_cluster(unsigned int ncontrols, struct v4l2_ctrl **controls); + + +/** + * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging + * to that cluster and set it up for autofoo/foo-type handling. + * + * @ncontrols: The number of controls in this cluster. + * @controls: The cluster control array of size @ncontrols. The first control + * must be the 'auto' control (e.g. autogain, autoexposure, etc.) + * @manual_val: The value for the first control in the cluster that equals the + * manual setting. + * @set_volatile: If true, then all controls except the first auto control will + * be volatile. + * + * Use for control groups where one control selects some automatic feature and + * the other controls are only active whenever the automatic feature is turned + * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs + * red and blue balance, etc. + * + * The behavior of such controls is as follows: + * + * When the autofoo control is set to automatic, then any manual controls + * are set to inactive and any reads will call g_volatile_ctrl (if the control + * was marked volatile). + * + * When the autofoo control is set to manual, then any manual controls will + * be marked active, and any reads will just return the current value without + * going through g_volatile_ctrl. + * + * In addition, this function will set the %V4L2_CTRL_FLAG_UPDATE flag + * on the autofoo control and %V4L2_CTRL_FLAG_INACTIVE on the foo control(s) + * if autofoo is in auto mode. + */ +void v4l2_ctrl_auto_cluster(unsigned int ncontrols, + struct v4l2_ctrl **controls, + u8 manual_val, bool set_volatile); + + +/** + * v4l2_ctrl_find() - Find a control with the given ID. + * + * @hdl: The control handler. + * @id: The control ID to find. + * + * If @hdl == NULL this will return NULL as well. Will lock the handler so + * do not use from inside &v4l2_ctrl_ops. 
+ */ +struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); + +/** + * v4l2_ctrl_activate() - Make the control active or inactive. + * @ctrl: The control to (de)activate. + * @active: True if the control should become active. + * + * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically. + * Does nothing if @ctrl == NULL. + * This will usually be called from within the s_ctrl op. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * + * This function assumes that the control handler is locked. + */ +void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active); + +/** + * __v4l2_ctrl_grab() - Unlocked variant of v4l2_ctrl_grab. + * + * @ctrl: The control to (de)activate. + * @grabbed: True if the control should become grabbed. + * + * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. + * Does nothing if @ctrl == NULL. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * This will usually be called when starting or stopping streaming in the + * driver. + * + * This function assumes that the control handler is locked by the caller. + */ +void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed); + +/** + * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed. + * + * @ctrl: The control to (de)activate. + * @grabbed: True if the control should become grabbed. + * + * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. + * Does nothing if @ctrl == NULL. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * This will usually be called when starting or stopping streaming in the + * driver. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. 
+ */ +static inline void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) +{ + if (!ctrl) + return; + + v4l2_ctrl_lock(ctrl); + __v4l2_ctrl_grab(ctrl, grabbed); + v4l2_ctrl_unlock(ctrl); +} + +/** + *__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() + * + * @ctrl: The control to update. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * Update the range of a control on the fly. This works for control types + * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the + * @step value is interpreted as a menu_skip_mask. + * + * An error is returned if one of the range arguments is invalid for this + * control type. + * + * The caller is responsible for acquiring the control handler mutex on behalf + * of __v4l2_ctrl_modify_range(). + */ +int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def); + +/** + * v4l2_ctrl_modify_range() - Update the range of a control. + * + * @ctrl: The control to update. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * Update the range of a control on the fly. This works for control types + * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the + * @step value is interpreted as a menu_skip_mask. + * + * An error is returned if one of the range arguments is invalid for this + * control type. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. 
+ */ +static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_modify_range(ctrl, min, max, step, def); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + *__v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions() + * + * @ctrl: The control to update. + * @dims: The control's new dimensions. + * + * Update the dimensions of an array control on the fly. The elements of the + * array are reset to their default value, even if the dimensions are + * unchanged. + * + * An error is returned if @dims is invalid for this control. + * + * The caller is responsible for acquiring the control handler mutex on behalf + * of __v4l2_ctrl_modify_dimensions(). + * + * Note: calling this function when the same control is used in pending requests + * is untested. It should work (a request with the wrong size of the control + * will drop that control silently), but it will be very confusing. + */ +int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]); + +/** + * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control. + * + * @ctrl: The control to update. + * @dims: The control's new dimensions. + * + * Update the dimensions of an array control on the fly. The elements of the + * array are reset to their default value, even if the dimensions are + * unchanged. + * + * An error is returned if @dims is invalid for this control type. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. + * + * Note: calling this function when the same control is used in pending requests + * is untested. It should work (a request with the wrong size of the control + * will drop that control silently), but it will be very confusing. 
+ */ +static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_modify_dimensions(ctrl, dims); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * v4l2_ctrl_notify() - Function to set a notify callback for a control. + * + * @ctrl: The control. + * @notify: The callback function. + * @priv: The callback private handle, passed as argument to the callback. + * + * This function sets a callback function for the control. If @ctrl is NULL, + * then it will do nothing. If @notify is NULL, then the notify callback will + * be removed. + * + * There can be only one notify. If another already exists, then a WARN_ON + * will be issued and the function will do nothing. + */ +void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, + void *priv); + +/** + * v4l2_ctrl_get_name() - Get the name of the control + * + * @id: The control ID. + * + * This function returns the name of the given control ID or NULL if it isn't + * a known control. + */ +const char *v4l2_ctrl_get_name(u32 id); + +/** + * v4l2_ctrl_get_menu() - Get the menu string array of the control + * + * @id: The control ID. + * + * This function returns the NULL-terminated menu string array name of the + * given control ID or NULL if it isn't a known menu control. + */ +const char * const *v4l2_ctrl_get_menu(u32 id); + +/** + * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control + * + * @id: The control ID. + * @len: The size of the integer array. + * + * This function returns the integer array of the given control ID or NULL + * if it isn't a known integer menu control. + */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); + +/** + * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from + * within a driver. + * + * @ctrl: The control. + * + * This returns the control's value safely by going through the control + * framework. 
This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for integer type controls only. + */ +s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl); + +/** + * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for integer type controls only. + */ +int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val); + +/** + * v4l2_ctrl_s_ctrl() - Helper function to set the control's value from + * within a driver. + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for integer type controls only. + */ +static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl(ctrl, val); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value + * from within a driver. + * + * @ctrl: The control. + * + * This returns the control's value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl); + +/** + * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). + * + * @ctrl: The control. + * @val: The new value. 
+ * + * This sets the control's new value safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val); + +/** + * v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value + * from within a driver. + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_int64(ctrl, val); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). + * + * @ctrl: The control. + * @s: The new string. + * + * This sets the control's new string safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for string type controls only. + */ +int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s); + +/** + * v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value + * from within a driver. + * + * @ctrl: The control. + * @s: The new string. + * + * This sets the control's new string safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for string type controls only. 
+ */ +static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_string(ctrl, s); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * __v4l2_ctrl_s_ctrl_compound() - Unlocked variant to set a compound control + * + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. + * + * This sets the control's new compound payload safely by going through the + * control framework. This function assumes the control's handler is already + * locked, allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for compound type controls only. + */ +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p); + +/** + * v4l2_ctrl_s_ctrl_compound() - Helper function to set a compound control + * from within a driver. + * + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. + * + * This sets the control's new compound payload safely by going through the + * control framework. This function will lock the control's handler, so it + * cannot be used from within the &v4l2_ctrl_ops functions. + * + * This function is for compound type controls only. + */ +static inline int v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, + const void *p) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_compound(ctrl, type, p); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/* Helper defines for area type controls */ +#define __v4l2_ctrl_s_ctrl_area(ctrl, area) \ + __v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) +#define v4l2_ctrl_s_ctrl_area(ctrl, area) \ + v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) + +/* Internal helper functions that deal with control events. 
*/ +extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops; + +/** + * v4l2_ctrl_replace - Function to be used as a callback to + * &struct v4l2_subscribed_event_ops replace\(\) + * + * @old: pointer to struct &v4l2_event with the reported + * event; + * @new: pointer to struct &v4l2_event with the modified + * event; + */ +void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new); + +/** + * v4l2_ctrl_merge - Function to be used as a callback to + * &struct v4l2_subscribed_event_ops merge\(\) + * + * @old: pointer to struct &v4l2_event with the reported + * event; + * @new: pointer to struct &v4l2_event with the merged + * event; + */ +void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new); + +/** + * v4l2_ctrl_log_status - helper function to implement %VIDIOC_LOG_STATUS ioctl + * + * @file: pointer to struct file + * @fh: unused. Kept just to be compatible to the arguments expected by + * &struct v4l2_ioctl_ops.vidioc_log_status. + * + * Can be used as a vidioc_log_status function that just dumps all controls + * associated with the filehandle. + */ +int v4l2_ctrl_log_status(struct file *file, void *fh); + +/** + * v4l2_ctrl_subscribe_event - Subscribes to an event + * + * + * @fh: pointer to struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * + * Can be used as a vidioc_subscribe_event function that just subscribes + * control events. + */ +int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + +/** + * v4l2_ctrl_poll - function to be used as a callback to the poll() + * That just polls for control events. 
+ * + * @file: pointer to struct file + * @wait: pointer to struct poll_table_struct + */ +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); + +/** + * v4l2_ctrl_request_setup - helper function to apply control values in a request + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This is a helper function to call the control handler's s_ctrl callback with + * the control values contained in the request. Do note that this approach of + * applying control values in a request is only applicable to memory-to-memory + * devices. + */ +int v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *parent); + +/** + * v4l2_ctrl_request_complete - Complete a control handler request object + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This function is to be called on each control handler that may have had a + * request object associated with it, i.e. control handlers of a driver that + * supports requests. + * + * The function first obtains the values of any volatile controls in the control + * handler and attach them to the request. Then, the function completes the + * request object. + */ +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *parent); + +/** + * v4l2_ctrl_request_hdl_find - Find the control handler in the request + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This function finds the control handler in the request. It may return + * NULL if not found. When done, you must call v4l2_ctrl_request_hdl_put() + * with the returned handler pointer. + * + * If the request is not in state VALIDATING or QUEUED, then this function + * will always return NULL. + * + * Note that in state VALIDATING the req_queue_mutex is held, so + * no objects can be added or deleted from the request. 
+ *
+ * In state QUEUED it is the driver that will have to ensure this.
+ */
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_hdl_put - Put the control handler
+ *
+ * @hdl: Put this control handler
+ *
+ * This function releases the control handler previously obtained from
+ * v4l2_ctrl_request_hdl_find().
+ */
+static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl)
+{
+ if (hdl)
+ media_request_object_put(&hdl->req_obj);
+}
+
+/**
+ * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID.
+ *
+ * @hdl: The control handler from the request.
+ * @id: The ID of the control to find.
+ *
+ * This function returns a pointer to the control if this control is
+ * part of the request or NULL otherwise.
+ */
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
+
+/* Helpers for ioctl_ops */
+
+/**
+ * v4l2_queryctrl - Helper function to implement
+ * :ref:`VIDIOC_QUERYCTRL ` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @qc: pointer to &struct v4l2_queryctrl
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc);
+
+/**
+ * v4l2_query_ext_ctrl_to_v4l2_queryctrl - Convert a qec to qe.
+ *
+ * @to: The v4l2_queryctrl to write to.
+ * @from: The v4l2_query_ext_ctrl to read from.
+ *
+ * This function is a helper to convert a v4l2_query_ext_ctrl into a
+ * v4l2_queryctrl.
+ */
+void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to,
+ const struct v4l2_query_ext_ctrl *from);
+
+/**
+ * v4l2_query_ext_ctrl - Helper function to implement
+ * :ref:`VIDIOC_QUERY_EXT_CTRL ` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @qc: pointer to &struct v4l2_query_ext_ctrl
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */ +int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, + struct v4l2_query_ext_ctrl *qc); + +/** + * v4l2_querymenu - Helper function to implement + * :ref:`VIDIOC_QUERYMENU ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @qm: pointer to &struct v4l2_querymenu + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm); + +/** + * v4l2_g_ctrl - Helper function to implement + * :ref:`VIDIOC_G_CTRL ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @ctrl: pointer to &struct v4l2_control + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *ctrl); + +/** + * v4l2_s_ctrl - Helper function to implement + * :ref:`VIDIOC_S_CTRL ` ioctl + * + * @fh: pointer to &struct v4l2_fh + * @hdl: pointer to &struct v4l2_ctrl_handler + * + * @ctrl: pointer to &struct v4l2_control + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct v4l2_control *ctrl); + +/** + * v4l2_g_ext_ctrls - Helper function to implement + * :ref:`VIDIOC_G_EXT_CTRLS ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device + * @mdev: pointer to &struct media_device + * @c: pointer to &struct v4l2_ext_controls + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *c); + +/** + * v4l2_try_ext_ctrls - Helper function to implement + * :ref:`VIDIOC_TRY_EXT_CTRLS ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device + * @mdev: pointer to &struct media_device + * @c: pointer to &struct v4l2_ext_controls + * + * If hdl == NULL then they will all return -EINVAL. 
+ */ +int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *c); + +/** + * v4l2_s_ext_ctrls - Helper function to implement + * :ref:`VIDIOC_S_EXT_CTRLS ` ioctl + * + * @fh: pointer to &struct v4l2_fh + * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device + * @mdev: pointer to &struct media_device + * @c: pointer to &struct v4l2_ext_controls + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *c); + +/** + * v4l2_ctrl_subdev_subscribe_event - Helper function to implement + * as a &struct v4l2_subdev_core_ops subscribe_event function + * that just subscribes control events. + * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); + +/** + * v4l2_ctrl_subdev_log_status - Log all controls owned by subdev's control + * handler. + * + * @sd: pointer to &struct v4l2_subdev + */ +int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd); + +/** + * v4l2_ctrl_new_fwnode_properties() - Register controls for the device + * properties + * + * @hdl: pointer to &struct v4l2_ctrl_handler to register controls on + * @ctrl_ops: pointer to &struct v4l2_ctrl_ops to register controls with + * @p: pointer to &struct v4l2_fwnode_device_properties + * + * This function registers controls associated to device properties, using the + * property values contained in @p parameter, if the property has been set to + * a value. 
+ * + * Currently the following v4l2 controls are parsed and registered: + * - V4L2_CID_CAMERA_ORIENTATION + * - V4L2_CID_CAMERA_SENSOR_ROTATION; + * + * Controls already registered by the caller with the @hdl control handler are + * not overwritten. Callers should register the controls they want to handle + * themselves before calling this function. + * + * Return: 0 on success, a negative error code on failure. + */ +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p); + +/** + * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @ptr1: A v4l2 control value. + * @ptr2: A v4l2 control value. + * + * Return: true if values are equal, otherwise false. + */ +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + +/** + * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @from_idx: Starting element index. + * @ptr: The v4l2 control value. + * + * Return: void + */ +void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr); + +/** + * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback. + * + * @ctrl: The v4l2_ctrl pointer. + * + * Return: void + */ +void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @ptr: The v4l2 control value. + * + * Return: 0 on success, a negative error code on failure. 
+ */ +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-dev.h b/6.17.0/include-overrides/media/v4l2-dev.h new file mode 100644 index 0000000..a698012 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-dev.h @@ -0,0 +1,665 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + * Moved from videodev2.h + * + * Some commonly needed functions for drivers (v4l2-common.o module) + */ +#ifndef _V4L2_DEV_H +#define _V4L2_DEV_H + +#include +#include +#include +#include +#include +#include + +#include + +#define VIDEO_MAJOR 81 + +/** + * enum vfl_devnode_type - type of V4L2 device node + * + * @VFL_TYPE_VIDEO: for video input/output devices + * @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext) + * @VFL_TYPE_RADIO: for radio tuners + * @VFL_TYPE_SUBDEV: for V4L2 subdevices + * @VFL_TYPE_SDR: for Software Defined Radio tuners + * @VFL_TYPE_TOUCH: for touch sensors + * @VFL_TYPE_MAX: number of VFL types, must always be last in the enum + */ +enum vfl_devnode_type { + VFL_TYPE_VIDEO, + VFL_TYPE_VBI, + VFL_TYPE_RADIO, + VFL_TYPE_SUBDEV, + VFL_TYPE_SDR, + VFL_TYPE_TOUCH, + VFL_TYPE_MAX /* Shall be the last one */ +}; + +/** + * enum vfl_devnode_direction - Identifies if a &struct video_device + * corresponds to a receiver, a transmitter or a mem-to-mem device. + * + * @VFL_DIR_RX: device is a receiver. + * @VFL_DIR_TX: device is a transmitter. + * @VFL_DIR_M2M: device is a memory to memory device. + * + * Note: Ignored if &enum vfl_devnode_type is %VFL_TYPE_SUBDEV. 
+ */ +enum vfl_devnode_direction { + VFL_DIR_RX, + VFL_DIR_TX, + VFL_DIR_M2M, +}; + +struct v4l2_ioctl_callbacks; +struct video_device; +struct v4l2_device; +struct v4l2_ctrl_handler; +struct dentry; + +/** + * enum v4l2_video_device_flags - Flags used by &struct video_device + * + * @V4L2_FL_REGISTERED: + * indicates that a &struct video_device is registered. + * Drivers can clear this flag if they want to block all future + * device access. It is cleared by video_unregister_device. + * @V4L2_FL_USES_V4L2_FH: + * indicates that file->private_data points to &struct v4l2_fh. + * This flag is set by the core when v4l2_fh_init() is called. + * All new drivers should use it. + * @V4L2_FL_QUIRK_INVERTED_CROP: + * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and + * compose are swapped. If this flag is set, then the selection + * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c. + * This allows those drivers to correctly implement the selection API, + * but the old crop API will still work as expected in order to preserve + * backwards compatibility. + * Never set this flag for new drivers. + * @V4L2_FL_SUBDEV_RO_DEVNODE: + * indicates that the video device node is registered in read-only mode. + * The flag only applies to device nodes registered for sub-devices, it is + * set by the core when the sub-devices device nodes are registered with + * v4l2_device_register_ro_subdev_nodes() and used by the sub-device ioctl + * handler to restrict access to some ioctl calls. + */ +enum v4l2_video_device_flags { + V4L2_FL_REGISTERED = 0, + V4L2_FL_USES_V4L2_FH = 1, + V4L2_FL_QUIRK_INVERTED_CROP = 2, + V4L2_FL_SUBDEV_RO_DEVNODE = 3, +}; + +/* Priority helper functions */ + +/** + * struct v4l2_prio_state - stores the priority states + * + * @prios: array with elements to store the array priorities + * + * + * .. note:: + * The size of @prios array matches the number of priority types defined + * by enum &v4l2_priority. 
+ */ +struct v4l2_prio_state { + atomic_t prios[4]; +}; + +/** + * v4l2_prio_init - initializes a struct v4l2_prio_state + * + * @global: pointer to &struct v4l2_prio_state + */ +void v4l2_prio_init(struct v4l2_prio_state *global); + +/** + * v4l2_prio_change - changes the v4l2 file handler priority + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: pointer to the desired priority, as defined by enum &v4l2_priority + * @new: Priority type requested, as defined by enum &v4l2_priority. + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, + enum v4l2_priority new); + +/** + * v4l2_prio_open - Implements the priority logic for a file handler open + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: pointer to the desired priority, as defined by enum &v4l2_priority + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local); + +/** + * v4l2_prio_close - Implements the priority logic for a file handler close + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: priority to be released, as defined by enum &v4l2_priority + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local); + +/** + * v4l2_prio_max - Return the maximum priority, as stored at the @global array. + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global); + +/** + * v4l2_prio_check - Implements the priority logic for a file handler close + * + * @global: pointer to the &struct v4l2_prio_state of the device node. 
+ * @local: desired priority, as defined by enum &v4l2_priority local
+ *
+ * .. note::
+ * This function should be used only by the V4L2 core.
+ */
+int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
+
+/**
+ * struct v4l2_file_operations - fs operations used by a V4L2 device
+ *
+ * @owner: pointer to struct module
+ * @read: operations needed to implement the read() syscall
+ * @write: operations needed to implement the write() syscall
+ * @poll: operations needed to implement the poll() syscall
+ * @unlocked_ioctl: operations needed to implement the ioctl() syscall
+ * @compat_ioctl32: operations needed to implement the ioctl() syscall for
+ * the special case where the Kernel uses 64 bits instructions, but
+ * the userspace uses 32 bits.
+ * @get_unmapped_area: called by the mmap() syscall, used when %!CONFIG_MMU
+ * @mmap: operations needed to implement the mmap() syscall
+ * @open: operations needed to implement the open() syscall
+ * @release: operations needed to implement the release() syscall
+ *
+ * .. note::
+ *
+ * Those operations are used to implement the fs struct file_operations
+ * at the V4L2 drivers. The V4L2 core overrides the fs ops with some
+ * extra logic needed by the subsystem.
+ */ +struct v4l2_file_operations { + struct module *owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); +#ifdef CONFIG_COMPAT + long (*compat_ioctl32) (struct file *, unsigned int, unsigned long); +#endif + unsigned long (*get_unmapped_area) (struct file *, unsigned long, + unsigned long, unsigned long, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + int (*open) (struct file *); + int (*release) (struct file *); +}; + +/* + * Newer version of video_device, handled by videodev2.c + * This version moves redundant code from video device code to + * the common handler + */ + +/** + * struct video_device - Structure used to create and manage the V4L2 device + * nodes. + * + * @entity: &struct media_entity + * @intf_devnode: pointer to &struct media_intf_devnode + * @pipe: &struct media_pipeline + * @fops: pointer to &struct v4l2_file_operations for the video device + * @device_caps: device capabilities as used in v4l2_capabilities + * @dev: &struct device for the video device + * @cdev: character device + * @v4l2_dev: pointer to &struct v4l2_device parent + * @dev_parent: pointer to &struct device parent + * @ctrl_handler: Control handler associated with this device node. + * May be NULL. + * @queue: &struct vb2_queue associated with this device node. May be NULL. + * @prio: pointer to &struct v4l2_prio_state with device's Priority state. + * If NULL, then v4l2_dev->prio will be used. + * @name: video device name + * @vfl_type: V4L device type, as defined by &enum vfl_devnode_type + * @vfl_dir: V4L receiver, transmitter or m2m + * @minor: device node 'minor'. It is set to -1 if the registration failed + * @num: number of the video device node + * @flags: video device flags. Use bitops to set/clear/test flags. 
+ * Contains a set of &enum v4l2_video_device_flags. + * @index: attribute to differentiate multiple indices on one physical device + * @fh_lock: Lock for all v4l2_fhs + * @fh_list: List of &struct v4l2_fh + * @dev_debug: Internal device debug flags, not for use by drivers + * @tvnorms: Supported tv norms + * + * @release: video device release() callback + * @ioctl_ops: pointer to &struct v4l2_ioctl_ops with ioctl callbacks + * + * @valid_ioctls: bitmap with the valid ioctls for this device + * @lock: pointer to &struct mutex serialization lock + * + * .. note:: + * Only set @dev_parent if that can't be deduced from @v4l2_dev. + */ + +struct video_device { +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_entity entity; + struct media_intf_devnode *intf_devnode; + struct media_pipeline pipe; +#endif + const struct v4l2_file_operations *fops; + + u32 device_caps; + + /* sysfs */ + struct device dev; + struct cdev *cdev; + + struct v4l2_device *v4l2_dev; + struct device *dev_parent; + + struct v4l2_ctrl_handler *ctrl_handler; + + struct vb2_queue *queue; + + struct v4l2_prio_state *prio; + + /* device info */ + char name[64]; + enum vfl_devnode_type vfl_type; + enum vfl_devnode_direction vfl_dir; + int minor; + u16 num; + unsigned long flags; + int index; + + /* V4L2 file handles */ + spinlock_t fh_lock; + struct list_head fh_list; + + int dev_debug; + + v4l2_std_id tvnorms; + + /* callbacks */ + void (*release)(struct video_device *vdev); + const struct v4l2_ioctl_ops *ioctl_ops; + DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); + + struct mutex *lock; +}; + +/** + * media_entity_to_video_device - Returns a &struct video_device from + * the &struct media_entity embedded on it. + * + * @__entity: pointer to &struct media_entity, may be NULL + */ +#define media_entity_to_video_device(__entity) \ +({ \ + typeof(__entity) __me_vdev_ent = __entity; \ + \ + __me_vdev_ent ? 
\ + container_of(__me_vdev_ent, struct video_device, entity) : \ + NULL; \ +}) + +/** + * to_video_device - Returns a &struct video_device from the + * &struct device embedded on it. + * + * @cd: pointer to &struct device + */ +#define to_video_device(cd) container_of(cd, struct video_device, dev) + +/** + * __video_register_device - register video4linux devices + * + * @vdev: struct video_device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type + * @nr: which device node number is desired: + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * @warn_if_nr_in_use: warn if the desired device node number + * was already in use and another number was chosen instead. + * @owner: module that owns the video device node + * + * The registration code assigns minor numbers and device node numbers + * based on the requested type and registers the new device node with + * the kernel. + * + * This function assumes that struct video_device was zeroed when it + * was allocated and does not contain any stale date. + * + * An error is returned if no free minor or device node number could be + * found, or if the registration of the device node failed. + * + * Returns 0 on success. + * + * .. note:: + * + * This function is meant to be used only inside the V4L2 core. + * Drivers should use video_register_device() or + * video_register_device_no_warn(). + */ +int __must_check __video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr, int warn_if_nr_in_use, + struct module *owner); + +/** + * video_register_device - register video4linux devices + * + * @vdev: struct video_device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type + * @nr: which device node number is desired: + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * + * Internally, it calls __video_register_device(). Please see its + * documentation for more details. + * + * .. 
note:: + * if video_register_device fails, the release() callback of + * &struct video_device structure is *not* called, so the caller + * is responsible for freeing any data. Usually that means that + * you video_device_release() should be called on failure. + */ +static inline int __must_check video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr) +{ + return __video_register_device(vdev, type, nr, 1, vdev->fops->owner); +} + +/** + * video_register_device_no_warn - register video4linux devices + * + * @vdev: struct video_device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type + * @nr: which device node number is desired: + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * + * This function is identical to video_register_device() except that no + * warning is issued if the desired device node number was already in use. + * + * Internally, it calls __video_register_device(). Please see its + * documentation for more details. + * + * .. note:: + * if video_register_device fails, the release() callback of + * &struct video_device structure is *not* called, so the caller + * is responsible for freeing any data. Usually that means that + * you video_device_release() should be called on failure. + */ +static inline int __must_check +video_register_device_no_warn(struct video_device *vdev, + enum vfl_devnode_type type, int nr) +{ + return __video_register_device(vdev, type, nr, 0, vdev->fops->owner); +} + +/** + * video_unregister_device - Unregister video devices. + * + * @vdev: &struct video_device to register + * + * Does nothing if vdev == NULL or if video_is_registered() returns false. + */ +void video_unregister_device(struct video_device *vdev); + +/** + * video_device_alloc - helper function to alloc &struct video_device + * + * Returns NULL if %-ENOMEM or a &struct video_device on success. 
+ */ +struct video_device * __must_check video_device_alloc(void); + +/** + * video_device_release - helper function to release &struct video_device + * + * @vdev: pointer to &struct video_device + * + * Can also be used for video_device->release\(\). + */ +void video_device_release(struct video_device *vdev); + +/** + * video_device_release_empty - helper function to implement the + * video_device->release\(\) callback. + * + * @vdev: pointer to &struct video_device + * + * This release function does nothing. + * + * It should be used when the video_device is a static global struct. + * + * .. note:: + * Having a static video_device is a dubious construction at best. + */ +void video_device_release_empty(struct video_device *vdev); + +/** + * v4l2_disable_ioctl- mark that a given command isn't implemented. + * shouldn't use core locking + * + * @vdev: pointer to &struct video_device + * @cmd: ioctl command + * + * This function allows drivers to provide just one v4l2_ioctl_ops struct, but + * disable ioctls based on the specific card that is actually found. + * + * .. note:: + * + * This must be called before video_register_device. + * See also the comments for determine_valid_ioctls(). + */ +static inline void v4l2_disable_ioctl(struct video_device *vdev, + unsigned int cmd) +{ + if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + set_bit(_IOC_NR(cmd), vdev->valid_ioctls); +} + +/** + * video_get_drvdata - gets private data from &struct video_device. + * + * @vdev: pointer to &struct video_device + * + * returns a pointer to the private data + */ +static inline void *video_get_drvdata(struct video_device *vdev) +{ + return dev_get_drvdata(&vdev->dev); +} + +/** + * video_set_drvdata - sets private data from &struct video_device. 
+ * + * @vdev: pointer to &struct video_device + * @data: private data pointer + */ +static inline void video_set_drvdata(struct video_device *vdev, void *data) +{ + dev_set_drvdata(&vdev->dev, data); +} + +/** + * video_devdata - gets &struct video_device from struct file. + * + * @file: pointer to struct file + */ +struct video_device *video_devdata(struct file *file); + +/** + * video_drvdata - gets private data from &struct video_device using the + * struct file. + * + * @file: pointer to struct file + * + * This is function combines both video_get_drvdata() and video_devdata() + * as this is used very often. + */ +static inline void *video_drvdata(struct file *file) +{ + return video_get_drvdata(video_devdata(file)); +} + +/** + * video_device_node_name - returns the video device name + * + * @vdev: pointer to &struct video_device + * + * Returns the device name string + */ +static inline const char *video_device_node_name(struct video_device *vdev) +{ + return dev_name(&vdev->dev); +} + +/** + * video_is_registered - returns true if the &struct video_device is registered. + * + * + * @vdev: pointer to &struct video_device + */ +static inline int video_is_registered(struct video_device *vdev) +{ + return test_bit(V4L2_FL_REGISTERED, &vdev->flags); +} + +/** + * v4l2_debugfs_root - returns the dentry of the top-level "v4l2" debugfs dir + * + * If this directory does not yet exist, then it will be created. + */ +#ifdef CONFIG_DEBUG_FS +struct dentry *v4l2_debugfs_root(void); +#else +static inline struct dentry *v4l2_debugfs_root(void) +{ + return NULL; +} +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) + +/** + * video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as streaming. 
The given pipeline object is + * assigned to every pad in the pipeline and stored in the media_pad pipe + * field. + * + * Calls to this function can be nested, in which case the same number of + * video_device_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * video_device_pipeline_start(). + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_start(). + */ +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * __video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * ..note:: This is the non-locking version of video_device_pipeline_start() + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around __media_pipeline_start(). + */ +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * video_device_pipeline_stop - Mark a pipeline as not streaming + * @vdev: Starting video device + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as not streaming. The media_pad pipe field + * is reset to %NULL. + * + * If multiple calls to media_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_stop(). + */ +void video_device_pipeline_stop(struct video_device *vdev); + +/** + * __video_device_pipeline_stop - Mark a pipeline as not streaming + * @vdev: Starting video device + * + * .. note:: This is the non-locking version of media_pipeline_stop() + * + * The video device must contain a single pad. 
+ * + * This is a convenience wrapper around __media_pipeline_stop(). + */ +void __video_device_pipeline_stop(struct video_device *vdev); + +/** + * video_device_pipeline_alloc_start - Mark a pipeline as streaming + * @vdev: Starting video device + * + * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start() + * but instead of working on a given pipeline the function will use an + * existing pipeline if the video device is already part of a pipeline, or + * allocate a new pipeline. + * + * Calls to video_device_pipeline_alloc_start() must be matched with + * video_device_pipeline_stop(). + */ +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev); + +/** + * video_device_pipeline - Get the media pipeline a video device is part of + * @vdev: The video device + * + * This function returns the media pipeline that a video device has been + * associated with when constructing the pipeline with + * video_device_pipeline_start(). The pointer remains valid until + * video_device_pipeline_stop() is called. + * + * Return: The media_pipeline the video device is part of, or NULL if the video + * device is not part of any pipeline. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_entity_pipeline(). + */ +struct media_pipeline *video_device_pipeline(struct video_device *vdev); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +#endif /* _V4L2_DEV_H */ diff --git a/6.17.0/include-overrides/media/v4l2-device.h b/6.17.0/include-overrides/media/v4l2-device.h new file mode 100644 index 0000000..dd897a3 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-device.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + V4L2 device support header. 
+
+ Copyright (C) 2008 Hans Verkuil
+
+ */
+
+#ifndef _V4L2_DEVICE_H
+#define _V4L2_DEVICE_H
+
+#include <media/media-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-dev.h>
+
+struct v4l2_ctrl_handler;
+
+/**
+ * struct v4l2_device - main struct for V4L2 device drivers
+ *
+ * @dev: pointer to struct device.
+ * @mdev: pointer to struct media_device, may be NULL.
+ * @subdevs: used to keep track of the registered subdevs
+ * @lock: lock this struct; can be used by the driver as well
+ * if this struct is embedded into a larger struct.
+ * @name: unique device name, by default the driver name + bus ID
+ * @notify: notify operation called by some sub-devices.
+ * @ctrl_handler: The control handler. May be %NULL.
+ * @prio: Device's priority state
+ * @ref: Keep track of the references to this struct.
+ * @release: Release function that is called when the ref count
+ * goes to 0.
+ *
+ * Each instance of a V4L2 device should create the v4l2_device struct,
+ * either stand-alone or embedded in a larger struct.
+ *
+ * It allows easy access to sub-devices (see v4l2-subdev.h) and provides
+ * basic V4L2 device-level support.
+ *
+ * .. note::
+ *
+ * #) @dev->driver_data points to this struct.
+ * #) @dev might be %NULL if there is no parent device
+ */
+struct v4l2_device {
+ struct device *dev;
+ struct media_device *mdev;
+ struct list_head subdevs;
+ spinlock_t lock;
+ char name[36];
+ void (*notify)(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg);
+ struct v4l2_ctrl_handler *ctrl_handler;
+ struct v4l2_prio_state prio;
+ struct kref ref;
+ void (*release)(struct v4l2_device *v4l2_dev);
+};
+
+/**
+ * v4l2_device_get - gets a V4L2 device reference
+ *
+ * @v4l2_dev: pointer to struct &v4l2_device
+ *
+ * This is an ancillary routine meant to increment the usage for the
+ * struct &v4l2_device pointed by @v4l2_dev.
+ */ +static inline void v4l2_device_get(struct v4l2_device *v4l2_dev) +{ + kref_get(&v4l2_dev->ref); +} + +/** + * v4l2_device_put - puts a V4L2 device reference + * + * @v4l2_dev: pointer to struct &v4l2_device + * + * This is an ancillary routine meant to decrement the usage for the + * struct &v4l2_device pointed by @v4l2_dev. + */ +int v4l2_device_put(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_register - Initialize v4l2_dev and make @dev->driver_data + * point to @v4l2_dev. + * + * @dev: pointer to struct &device + * @v4l2_dev: pointer to struct &v4l2_device + * + * .. note:: + * @dev may be %NULL in rare cases (ISA devices). + * In such case the caller must fill in the @v4l2_dev->name field + * before calling this function. + */ +int __must_check v4l2_device_register(struct device *dev, + struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_set_name - Optional function to initialize the + * name field of struct &v4l2_device + * + * @v4l2_dev: pointer to struct &v4l2_device + * @basename: base name for the device name + * @instance: pointer to a static atomic_t var with the instance usage for + * the device driver. + * + * v4l2_device_set_name() initializes the name field of struct &v4l2_device + * using the driver name and a driver-global atomic_t instance. + * + * This function will increment the instance counter and returns the + * instance value used in the name. + * + * Example: + * + * static atomic_t drv_instance = ATOMIC_INIT(0); + * + * ... + * + * instance = v4l2_device_set_name(&\ v4l2_dev, "foo", &\ drv_instance); + * + * The first time this is called the name field will be set to foo0 and + * this function returns 0. If the name ends with a digit (e.g. cx18), + * then the name will be set to cx18-0 since cx180 would look really odd. + */ +int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, + atomic_t *instance); + +/** + * v4l2_device_disconnect - Change V4L2 device state to disconnected. 
+ * + * @v4l2_dev: pointer to struct v4l2_device + * + * Should be called when the USB parent disconnects. + * Since the parent disappears, this ensures that @v4l2_dev doesn't have + * an invalid parent pointer. + * + * .. note:: This function sets @v4l2_dev->dev to NULL. + */ +void v4l2_device_disconnect(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_unregister - Unregister all sub-devices and any other + * resources related to @v4l2_dev. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +void v4l2_device_unregister(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_register_subdev - Registers a subdev with a v4l2 device. + * + * @v4l2_dev: pointer to struct &v4l2_device + * @sd: pointer to &struct v4l2_subdev + * + * While registered, the subdev module is marked as in-use. + * + * An error is returned if the module is no longer loaded on any attempts + * to register it. + */ +#define v4l2_device_register_subdev(v4l2_dev, sd) \ + __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE) +int __must_check __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct module *module); + +/** + * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. + * + * @sd: pointer to &struct v4l2_subdev + * + * .. note :: + * + * Can also be called if the subdev wasn't registered. In such + * case, it will do nothing. + */ +void v4l2_device_unregister_subdev(struct v4l2_subdev *sd); + +/** + * __v4l2_device_register_subdev_nodes - Registers device nodes for + * all subdevs of the v4l2 device that are marked with the + * %V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * + * @v4l2_dev: pointer to struct v4l2_device + * @read_only: subdevices read-only flag. True to register the subdevices + * device nodes in read-only mode, false to allow full access to the + * subdevice userspace API. 
+ */ +int __must_check +__v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only); + +/** + * v4l2_device_register_subdev_nodes - Registers subdevices device nodes with + * unrestricted access to the subdevice userspace operations + * + * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation + * for more details. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +static inline int __must_check +v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev) +{ +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + return __v4l2_device_register_subdev_nodes(v4l2_dev, false); +#else + return 0; +#endif +} + +/** + * v4l2_device_register_ro_subdev_nodes - Registers subdevices device nodes + * in read-only mode + * + * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation + * for more details. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +static inline int __must_check +v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev) +{ +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + return __v4l2_device_register_subdev_nodes(v4l2_dev, true); +#else + return 0; +#endif +} + +/** + * v4l2_subdev_notify - Sends a notification to v4l2_device. + * + * @sd: pointer to &struct v4l2_subdev + * @notification: type of notification. Please notice that the notification + * type is driver-specific. + * @arg: arguments for the notification. Those are specific to each + * notification type. + */ +static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, + unsigned int notification, void *arg) +{ + if (sd && sd->v4l2_dev && sd->v4l2_dev->notify) + sd->v4l2_dev->notify(sd, notification, arg); +} + +/** + * v4l2_device_supports_requests - Test if requests are supported. 
+ * + * @v4l2_dev: pointer to struct v4l2_device + */ +static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) +{ + return v4l2_dev->mdev && v4l2_dev->mdev->ops && + v4l2_dev->mdev->ops->req_queue; +} + +/* Helper macros to iterate over all subdevs. */ + +/** + * v4l2_device_for_each_subdev - Helper macro that interates over all + * sub-devices of a given &v4l2_device. + * + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev pointer used as an iterator by the loop. + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * + * This macro iterates over all sub-devices owned by the @v4l2_dev device. + * It acts as a for loop iterator and executes the next statement with + * the @sd variable pointing to each sub-device in turn. + */ +#define v4l2_device_for_each_subdev(sd, v4l2_dev) \ + list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) + +/** + * __v4l2_device_call_subdevs_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev pointer used as an iterator by the loop. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) 
\ + do { \ + list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \ + if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ + (sd)->ops->o->f((sd) , ##args); \ + } while (0) + +/** + * __v4l2_device_call_subdevs - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, cond, o, \ + f , ##args); \ + } while (0) + +/** + * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev sub-devices associated with @v4l2_dev. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, zero + * otherwise. 
+ * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \ +({ \ + long __err = 0; \ + \ + list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) { \ + if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ + __err = (sd)->ops->o->f((sd) , ##args); \ + if (__err && __err != -ENOIOCTLCMD) \ + break; \ + } \ + (__err == -ENOIOCTLCMD) ? 0 : __err; \ +}) + +/** + * __v4l2_device_call_subdevs_until_err - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ + f , ##args); \ +}) + +/** + * v4l2_device_call_all - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. 
+ * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ + ##args); \ + } while (0) + +/** + * v4l2_device_call_until_err - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver, until an error occurs. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ + ##args); \ +}) + +/** + * v4l2_device_mask_call_all - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. 
+ * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ + } while (0) + +/** + * v4l2_device_mask_call_until_err - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) 
\ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ +}) + + +/** + * v4l2_device_has_op - checks if any subdev with matching grpid has a + * given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + */ +#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \ +({ \ + struct v4l2_subdev *__sd; \ + bool __result = false; \ + list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ + if ((grpid) && __sd->grp_id != (grpid)) \ + continue; \ + if (v4l2_subdev_has_op(__sd, o, f)) { \ + __result = true; \ + break; \ + } \ + } \ + __result; \ +}) + +/** + * v4l2_device_mask_has_op - checks if any subdev with matching group + * mask has a given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. 
+ */ +#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \ +({ \ + struct v4l2_subdev *__sd; \ + bool __result = false; \ + list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ + if ((grpmsk) && !(__sd->grp_id & (grpmsk))) \ + continue; \ + if (v4l2_subdev_has_op(__sd, o, f)) { \ + __result = true; \ + break; \ + } \ + } \ + __result; \ +}) + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-dv-timings.h b/6.17.0/include-overrides/media/v4l2-dv-timings.h new file mode 100644 index 0000000..714075c --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-dv-timings.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-dv-timings - Internal header with dv-timings helper functions + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef __V4L2_DV_TIMINGS_H +#define __V4L2_DV_TIMINGS_H + +#include <linux/videodev2.h> +#include <linux/debugfs.h> + +/** + * v4l2_calc_timeperframe - helper function to calculate timeperframe based + * v4l2_dv_timings fields. + * @t: Timings for the video mode. + * + * Calculates the expected timeperframe using the pixel clock value and + * horizontal/vertical measures. This means that v4l2_dv_timings structure + * must be correctly and fully filled. + */ +struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t); + +/* + * v4l2_dv_timings_presets: list of all dv_timings presets. + */ +extern const struct v4l2_dv_timings v4l2_dv_timings_presets[]; + +/** + * typedef v4l2_check_dv_timings_fnc - timings check callback + * + * @t: the v4l2_dv_timings struct. + * @handle: a handle from the driver. + * + * Returns true if the given timings are valid. + */ +typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle); + +/** + * v4l2_valid_dv_timings() - are these timings valid? + * + * @t: the v4l2_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc. + * + * Returns true if the given dv_timings struct is supported by the + * hardware capabilities and the callback function (if non-NULL), returns + * false otherwise. + */ +bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV + * timings based on capabilities + * + * @t: the v4l2_enum_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fnc: callback to check if this timing is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * This enumerates dv_timings using the full list of possible CEA-861 and DMT + * timings, filtering out any timings that are not supported based on the + * hardware capabilities and the callback function (if non-NULL). + * + * If a valid timing for the given index is found, it will fill in @t and + * return 0, otherwise it returns -EINVAL. + */ +int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_find_dv_timings_cap() - Find the closest timings struct + * + * @t: the v4l2_enum_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @pclock_delta: maximum delta between t->pixelclock and the timing struct + * under consideration. + * @fnc: callback to check if a given timings struct is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * This function tries to map the given timings to an entry in the + * full list of possible CEA-861 and DMT timings, filtering out any timings + * that are not supported based on the hardware capabilities and the callback + * function (if non-NULL). + * + * On success it will fill in @t with the found timings and it returns true. + * On failure it will return false. 
+ */ +bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + unsigned pclock_delta, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_find_dv_timings_cea861_vic() - find timings based on CEA-861 VIC + * @t: the timings data. + * @vic: CEA-861 VIC code + * + * On success it will fill in @t with the found timings and it returns true. + * On failure it will return false. + */ +bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic); + +/** + * v4l2_match_dv_timings() - do two timings match? + * + * @measured: the measured timings data. + * @standard: the timings according to the standard. + * @pclock_delta: maximum delta in Hz between standard->pixelclock and + * the measured timings. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. + * + * Returns true if the two timings match, returns false otherwise. + */ +bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured, + const struct v4l2_dv_timings *standard, + unsigned pclock_delta, bool match_reduced_fps); + +/** + * v4l2_print_dv_timings() - log the contents of a dv_timings struct + * @dev_prefix:device prefix for each log line. + * @prefix: additional prefix for each log line, may be NULL. + * @t: the timings data. + * @detailed: if true, give a detailed log. + */ +void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, + const struct v4l2_dv_timings *t, bool detailed); + +/** + * v4l2_detect_cvt - detect if the given timings follow the CVT standard + * + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. + * @active_width: active width of image (does not include blanking). This + * information is needed only in case of version 2 of reduced blanking. + * In other cases, this parameter does not have any effect on timings. 
+ * @polarities: the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced: if this flag is true, it indicates interlaced format + * @cap: the v4l2_dv_timings_cap capabilities. + * @fmt: the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid CVT format. If so, then it will return true, and fmt will be filled + * in with the found CVT timings. + */ +bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, unsigned int active_width, + u32 polarities, bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); + +/** + * v4l2_detect_gtf - detect if the given timings follow the GTF standard + * + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. + * @polarities: the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced: if this flag is true, it indicates interlaced format + * @aspect: preferred aspect ratio. GTF has no method of determining the + * aspect ratio in order to derive the image width from the + * image height, so it has to be passed explicitly. Usually + * the native screen aspect ratio is used for this. If it + * is not filled in correctly, then 16:9 will be assumed. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fmt: the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid GTF format. If so, then it will return true, and fmt will be filled + * in with the found GTF timings. 
+ */ +bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, u32 polarities, bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); + +/** + * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes + * 0x15 and 0x16 from the EDID. + * + * @hor_landscape: byte 0x15 from the EDID. + * @vert_portrait: byte 0x16 from the EDID. + * + * Determines the aspect ratio from the EDID. + * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: + * "Horizontal and Vertical Screen Size or Aspect Ratio" + */ +struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait); + +/** + * v4l2_dv_timings_aspect_ratio - calculate the aspect ratio based on the + * v4l2_dv_timings information. + * + * @t: the timings data. + */ +struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t); + +/** + * can_reduce_fps - check if conditions for reduced fps are true. + * @bt: v4l2 timing structure + * + * For different timings reduced fps is allowed if the following conditions + * are met: + * + * - For CVT timings: if reduced blanking v2 (vsync == 8) is true. + * - For CEA861 timings: if %V4L2_DV_FL_CAN_REDUCE_FPS flag is true. 
+ */ +static inline bool can_reduce_fps(struct v4l2_bt_timings *bt) +{ + if ((bt->standards & V4L2_DV_BT_STD_CVT) && (bt->vsync == 8)) + return true; + + if ((bt->standards & V4L2_DV_BT_STD_CEA861) && + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS)) + return true; + + return false; +} + +/** + * struct v4l2_hdmi_colorimetry - describes the HDMI colorimetry information + * @colorspace: enum v4l2_colorspace, the colorspace + * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @quantization: enum v4l2_quantization, colorspace quantization + * @xfer_func: enum v4l2_xfer_func, colorspace transfer function + */ +struct v4l2_hdmi_colorimetry { + enum v4l2_colorspace colorspace; + enum v4l2_ycbcr_encoding ycbcr_enc; + enum v4l2_quantization quantization; + enum v4l2_xfer_func xfer_func; +}; + +struct hdmi_avi_infoframe; +struct hdmi_vendor_infoframe; + +struct v4l2_hdmi_colorimetry +v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, + const struct hdmi_vendor_infoframe *hdmi, + unsigned int height); + +unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks); +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset); +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr); +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input); +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); + +/* Add support for exporting InfoFrames to debugfs */ + +/* + * HDMI InfoFrames start with a 3 byte header, then a checksum, + * followed by the actual IF payload. + * + * The payload length is limited to 30 bytes according to the HDMI spec, + * but since the length is encoded in 5 bits, it can be 31 bytes theoretically. + * So set the max length as 31 + 3 (header) + 1 (checksum) = 35. 
+ */ +#define V4L2_DEBUGFS_IF_MAX_LEN (35) + +#define V4L2_DEBUGFS_IF_AVI BIT(0) +#define V4L2_DEBUGFS_IF_AUDIO BIT(1) +#define V4L2_DEBUGFS_IF_SPD BIT(2) +#define V4L2_DEBUGFS_IF_HDMI BIT(3) + +typedef ssize_t (*v4l2_debugfs_if_read_t)(u32 type, void *priv, + struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos); + +struct v4l2_debugfs_if { + struct dentry *if_dir; + void *priv; + + v4l2_debugfs_if_read_t if_read; +}; + +#ifdef CONFIG_DEBUG_FS +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read); +void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes); +#else +static inline +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read) +{ + return NULL; +} + +static inline void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes) +{ +} +#endif + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-event.h b/6.17.0/include-overrides/media/v4l2-event.h new file mode 100644 index 0000000..3a0e258 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-event.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-event.h + * + * V4L2 events. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus <sakari.ailus@iki.fi> + */ + +#ifndef V4L2_EVENT_H +#define V4L2_EVENT_H + +#include <linux/types.h> +#include <linux/videodev2.h> +#include <linux/wait.h> + +struct v4l2_fh; +struct v4l2_subdev; +struct v4l2_subscribed_event; +struct video_device; + +/** + * struct v4l2_kevent - Internal kernel event struct. + * @list: List node for the v4l2_fh->available list. + * @sev: Pointer to parent v4l2_subscribed_event. + * @event: The event itself. + * @ts: The timestamp of the event. + */ +struct v4l2_kevent { + struct list_head list; + struct v4l2_subscribed_event *sev; + struct v4l2_event event; + u64 ts; +}; + +/** + * struct v4l2_subscribed_event_ops - Subscribed event operations.
+ * + * @add: Optional callback, called when a new listener is added + * @del: Optional callback, called when a listener stops listening + * @replace: Optional callback that can replace event 'old' with event 'new'. + * @merge: Optional callback that can merge event 'old' into event 'new'. + */ +struct v4l2_subscribed_event_ops { + int (*add)(struct v4l2_subscribed_event *sev, unsigned int elems); + void (*del)(struct v4l2_subscribed_event *sev); + void (*replace)(struct v4l2_event *old, const struct v4l2_event *new); + void (*merge)(const struct v4l2_event *old, struct v4l2_event *new); +}; + +/** + * struct v4l2_subscribed_event - Internal struct representing a subscribed + * event. + * + * @list: List node for the v4l2_fh->subscribed list. + * @type: Event type. + * @id: Associated object ID (e.g. control ID). 0 if there isn't any. + * @flags: Copy of v4l2_event_subscription->flags. + * @fh: Filehandle that subscribed to this event. + * @node: List node that hooks into the object's event list + * (if there is one). + * @ops: v4l2_subscribed_event_ops + * @elems: The number of elements in the events array. + * @first: The index of the events containing the oldest available event. + * @in_use: The number of queued events. + * @events: An array of @elems events. + */ +struct v4l2_subscribed_event { + struct list_head list; + u32 type; + u32 id; + u32 flags; + struct v4l2_fh *fh; + struct list_head node; + const struct v4l2_subscribed_event_ops *ops; + unsigned int elems; + unsigned int first; + unsigned int in_use; + struct v4l2_kevent events[] __counted_by(elems); +}; + +/** + * v4l2_event_dequeue - Dequeue events from video device. + * + * @fh: pointer to struct v4l2_fh + * @event: pointer to struct v4l2_event + * @nonblocking: if not zero, waits for an event to arrive + */ +int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, + int nonblocking); + +/** + * v4l2_event_queue - Queue events to video device. 
+ * + * @vdev: pointer to &struct video_device + * @ev: pointer to &struct v4l2_event + * + * The event will be queued for all &struct v4l2_fh file handlers. + * + * .. note:: + * The driver's only responsibility is to fill in the type and the data + * fields. The other fields will be filled in by V4L2. + */ +void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev); + +/** + * v4l2_event_queue_fh - Queue events to video device. + * + * @fh: pointer to &struct v4l2_fh + * @ev: pointer to &struct v4l2_event + * + * + * The event will be queued only for the specified &struct v4l2_fh file handler. + * + * .. note:: + * The driver's only responsibility is to fill in the type and the data + * fields. The other fields will be filled in by V4L2. + */ +void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev); + +/** + * v4l2_event_wake_all - Wake all filehandles. + * + * Used when unregistering a video device. + * + * @vdev: pointer to &struct video_device + */ +void v4l2_event_wake_all(struct video_device *vdev); + +/** + * v4l2_event_pending - Check if an event is available + * + * @fh: pointer to &struct v4l2_fh + * + * Returns the number of pending events. + */ +int v4l2_event_pending(struct v4l2_fh *fh); + +/** + * v4l2_event_subscribe - Subscribes to an event + * + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * @elems: size of the events queue + * @ops: pointer to &v4l2_subscribed_event_ops + * + * .. note:: + * + * if @elems is zero, the framework will fill in a default value, + * with is currently 1 element. 
+ */ +int v4l2_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub, + unsigned int elems, + const struct v4l2_subscribed_event_ops *ops); +/** + * v4l2_event_unsubscribe - Unsubscribes to an event + * + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); +/** + * v4l2_event_unsubscribe_all - Unsubscribes to all events + * + * @fh: pointer to &struct v4l2_fh + */ +void v4l2_event_unsubscribe_all(struct v4l2_fh *fh); + +/** + * v4l2_event_subdev_unsubscribe - Subdev variant of v4l2_event_unsubscribe() + * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * + * .. note:: + * + * This function should be used for the &struct v4l2_subdev_core_ops + * %unsubscribe_event field. + */ +int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +/** + * v4l2_src_change_event_subscribe - helper function that calls + * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE. + * + * @fh: pointer to struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_src_change_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); +/** + * v4l2_src_change_event_subdev_subscribe - Variant of v4l2_event_subscribe(), + * meant to subscribe only events of the type %V4L2_EVENT_SOURCE_CHANGE. 
+ * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +#endif /* V4L2_EVENT_H */ diff --git a/6.17.0/include-overrides/media/v4l2-fh.h b/6.17.0/include-overrides/media/v4l2-fh.h new file mode 100644 index 0000000..b5b3e00 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-fh.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-fh.h + * + * V4L2 file handle. Store per file handle data for the V4L2 + * framework. Using file handles is optional for the drivers. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#ifndef V4L2_FH_H +#define V4L2_FH_H + +#include +#include +#include +#include + +struct video_device; +struct v4l2_ctrl_handler; + +/** + * struct v4l2_fh - Describes a V4L2 file handler + * + * @list: list of file handlers + * @vdev: pointer to &struct video_device + * @ctrl_handler: pointer to &struct v4l2_ctrl_handler + * @prio: priority of the file handler, as defined by &enum v4l2_priority + * + * @wait: event' s wait queue + * @subscribe_lock: serialise changes to the subscribed list; guarantee that + * the add and del event callbacks are orderly called + * @subscribed: list of subscribed events + * @available: list of events waiting to be dequeued + * @navailable: number of available events at @available list + * @sequence: event sequence number + * + * @m2m_ctx: pointer to &struct v4l2_m2m_ctx + */ +struct v4l2_fh { + struct list_head list; + struct video_device *vdev; + struct v4l2_ctrl_handler *ctrl_handler; + enum v4l2_priority prio; + + /* Events */ + wait_queue_head_t wait; + struct mutex subscribe_lock; + struct list_head subscribed; + struct list_head available; + unsigned int navailable; + u32 sequence; + + struct v4l2_m2m_ctx *m2m_ctx; +}; + +/** + * v4l2_fh_init - 
Initialise the file handle. + * + * @fh: pointer to &struct v4l2_fh + * @vdev: pointer to &struct video_device + * + * Parts of the V4L2 framework using the + * file handles should be initialised in this function. Must be called + * from driver's v4l2_file_operations->open\(\) handler if the driver + * uses &struct v4l2_fh. + */ +void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev); + +/** + * v4l2_fh_add - Add the fh to the list of file handles on a video_device. + * + * @fh: pointer to &struct v4l2_fh + * + * .. note:: + * The @fh file handle must be initialised first. + */ +void v4l2_fh_add(struct v4l2_fh *fh); + +/** + * v4l2_fh_open - Ancillary routine that can be used as the open\(\) op + * of v4l2_file_operations. + * + * @filp: pointer to struct file + * + * It allocates a v4l2_fh and inits and adds it to the &struct video_device + * associated with the file pointer. + */ +int v4l2_fh_open(struct file *filp); + +/** + * v4l2_fh_del - Remove file handle from the list of file handles. + * + * @fh: pointer to &struct v4l2_fh + * + * On error filp->private_data will be %NULL, otherwise it will point to + * the &struct v4l2_fh. + * + * .. note:: + * Must be called in v4l2_file_operations->release\(\) handler if the driver + * uses &struct v4l2_fh. + */ +void v4l2_fh_del(struct v4l2_fh *fh); + +/** + * v4l2_fh_exit - Release resources related to a file handle. + * + * @fh: pointer to &struct v4l2_fh + * + * Parts of the V4L2 framework using the v4l2_fh must release their + * resources here, too. + * + * .. note:: + * Must be called in v4l2_file_operations->release\(\) handler if the + * driver uses &struct v4l2_fh. + */ +void v4l2_fh_exit(struct v4l2_fh *fh); + +/** + * v4l2_fh_release - Ancillary routine that can be used as the release\(\) op + * of v4l2_file_operations. + * + * @filp: pointer to struct file + * + * It deletes and exits the v4l2_fh associated with the file pointer and + * frees it. 
It will do nothing if filp->private_data (the pointer to the + * v4l2_fh struct) is %NULL. + * + * This function always returns 0. + */ +int v4l2_fh_release(struct file *filp); + +/** + * v4l2_fh_is_singular - Returns 1 if this filehandle is the only filehandle + * opened for the associated video_device. + * + * @fh: pointer to &struct v4l2_fh + * + * If @fh is NULL, then it returns 0. + */ +int v4l2_fh_is_singular(struct v4l2_fh *fh); + +/** + * v4l2_fh_is_singular_file - Returns 1 if this filehandle is the only + * filehandle opened for the associated video_device. + * + * @filp: pointer to struct file + * + * This is a helper function variant of v4l2_fh_is_singular() with uses + * struct file as argument. + * + * If filp->private_data is %NULL, then it will return 0. + */ +static inline int v4l2_fh_is_singular_file(struct file *filp) +{ + return v4l2_fh_is_singular(filp->private_data); +} + +#endif /* V4L2_EVENT_H */ diff --git a/6.17.0/include-overrides/media/v4l2-flash-led-class.h b/6.17.0/include-overrides/media/v4l2-flash-led-class.h new file mode 100644 index 0000000..b106e7a --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-flash-led-class.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 flash LED sub-device registration helpers. 
+ * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski + */ + +#ifndef _V4L2_FLASH_H +#define _V4L2_FLASH_H + +#include +#include + +struct led_classdev_flash; +struct led_classdev; +struct v4l2_flash; +enum led_brightness; + +/** + * struct v4l2_flash_ctrl_data - flash control initialization data, filled + * basing on the features declared by the LED flash + * class driver in the v4l2_flash_config + * @config: initialization data for a control + * @cid: contains v4l2 flash control id if the config + * field was initialized, 0 otherwise + */ +struct v4l2_flash_ctrl_data { + struct v4l2_ctrl_config config; + u32 cid; +}; + +/** + * struct v4l2_flash_ops - V4L2 flash operations + * + * @external_strobe_set: Setup strobing the flash by hardware pin state + * assertion. + * @intensity_to_led_brightness: Convert intensity to brightness in a device + * specific manner + * @led_brightness_to_intensity: convert brightness to intensity in a device + * specific manner. 
+ */ +struct v4l2_flash_ops { + int (*external_strobe_set)(struct v4l2_flash *v4l2_flash, + bool enable); + enum led_brightness (*intensity_to_led_brightness) + (struct v4l2_flash *v4l2_flash, s32 intensity); + s32 (*led_brightness_to_intensity) + (struct v4l2_flash *v4l2_flash, enum led_brightness); +}; + +/** + * struct v4l2_flash_config - V4L2 Flash sub-device initialization data + * @dev_name: the name of the media entity, + * unique in the system + * @intensity: non-flash strobe constraints for the LED + * @flash_faults: bitmask of flash faults that the LED flash class + * device can report; corresponding LED_FAULT* bit + * definitions are available in the header file + * + * @has_external_strobe: external strobe capability + */ +struct v4l2_flash_config { + char dev_name[32]; + struct led_flash_setting intensity; + u32 flash_faults; + unsigned int has_external_strobe:1; +}; + +/** + * struct v4l2_flash - Flash sub-device context + * @fled_cdev: LED flash class device controlled by this sub-device + * @iled_cdev: LED class device representing indicator LED associated + * with the LED flash class device + * @ops: V4L2 specific flash ops + * @sd: V4L2 sub-device + * @hdl: flash controls handler + * @ctrls: array of pointers to controls, whose values define + * the sub-device state + */ +struct v4l2_flash { + struct led_classdev_flash *fled_cdev; + struct led_classdev *iled_cdev; + const struct v4l2_flash_ops *ops; + + struct v4l2_subdev sd; + struct v4l2_ctrl_handler hdl; + struct v4l2_ctrl **ctrls; +}; + +/** + * v4l2_subdev_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_subdev embedded on it. + * + * @sd: pointer to &struct v4l2_subdev + */ +static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash( + struct v4l2_subdev *sd) +{ + return container_of(sd, struct v4l2_flash, sd); +} + +/** + * v4l2_ctrl_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_ctrl embedded on it. 
+ * + * @c: pointer to &struct v4l2_ctrl + */ +static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) +{ + return container_of(c->handler, struct v4l2_flash, hdl); +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +/** + * v4l2_flash_init - initialize V4L2 flash led sub-device + * @dev: flash device, e.g. an I2C device + * @fwn: fwnode_handle of the LED, may be NULL if the same as device's + * @fled_cdev: LED flash class device to wrap + * @ops: V4L2 Flash device ops + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * The ops pointer is stored by the V4L2 flash framework. No + * references are held to config nor its contents once this function + * has returned. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. + */ +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config); + +/** + * v4l2_flash_indicator_init - initialize V4L2 indicator sub-device + * @dev: flash device, e.g. an I2C device + * @fwn: fwnode_handle of the LED, may be NULL if the same as device's + * @iled_cdev: LED flash class device representing the indicator LED + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * The ops pointer is stored by the V4L2 flash framework. No + * references are held to config nor its contents once this function + * has returned. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. 
+ */ +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, struct v4l2_flash_config *config); + +/** + * v4l2_flash_release - release V4L2 Flash sub-device + * @v4l2_flash: the V4L2 Flash sub-device to release + * + * Release V4L2 Flash sub-device. + */ +void v4l2_flash_release(struct v4l2_flash *v4l2_flash); + +#else +static inline struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ +} +#endif /* CONFIG_V4L2_FLASH_LED_CLASS */ + +#endif /* _V4L2_FLASH_H */ diff --git a/6.17.0/include-overrides/media/v4l2-fwnode.h b/6.17.0/include-overrides/media/v4l2-fwnode.h new file mode 100644 index 0000000..f7c57c7 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-fwnode.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 fwnode binding parsing library + * + * Copyright (c) 2016 Intel Corporation. + * Author: Sakari Ailus + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#ifndef _V4L2_FWNODE_H +#define _V4L2_FWNODE_H + +#include +#include +#include +#include + +#include + +/** + * struct v4l2_fwnode_endpoint - the endpoint data structure + * @base: fwnode endpoint of the v4l2_fwnode + * @bus_type: bus type + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel. 
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). + * @link_frequencies: array of supported link frequencies + * @nr_of_link_frequencies: number of elements in link_frequenccies array + */ +struct v4l2_fwnode_endpoint { + struct fwnode_endpoint base; + enum v4l2_mbus_type bus_type; + struct { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; + u64 *link_frequencies; + unsigned int nr_of_link_frequencies; +}; + +/** + * V4L2_FWNODE_PROPERTY_UNSET - identify a non initialized property + * + * All properties in &struct v4l2_fwnode_device_properties are initialized + * to this value. + */ +#define V4L2_FWNODE_PROPERTY_UNSET (-1U) + +/** + * enum v4l2_fwnode_orientation - possible device orientation + * @V4L2_FWNODE_ORIENTATION_FRONT: device installed on the front side + * @V4L2_FWNODE_ORIENTATION_BACK: device installed on the back side + * @V4L2_FWNODE_ORIENTATION_EXTERNAL: device externally located + */ +enum v4l2_fwnode_orientation { + V4L2_FWNODE_ORIENTATION_FRONT, + V4L2_FWNODE_ORIENTATION_BACK, + V4L2_FWNODE_ORIENTATION_EXTERNAL +}; + +/** + * struct v4l2_fwnode_device_properties - fwnode device properties + * @orientation: device orientation. 
See &enum v4l2_fwnode_orientation + * @rotation: device rotation + */ +struct v4l2_fwnode_device_properties { + enum v4l2_fwnode_orientation orientation; + unsigned int rotation; +}; + +/** + * struct v4l2_fwnode_link - a link between two endpoints + * @local_node: pointer to device_node of this endpoint + * @local_port: identifier of the port this endpoint belongs to + * @local_id: identifier of the id this endpoint belongs to + * @remote_node: pointer to device_node of the remote endpoint + * @remote_port: identifier of the port the remote endpoint belongs to + * @remote_id: identifier of the id the remote endpoint belongs to + */ +struct v4l2_fwnode_link { + struct fwnode_handle *local_node; + unsigned int local_port; + unsigned int local_id; + struct fwnode_handle *remote_node; + unsigned int remote_port; + unsigned int remote_id; +}; + +/** + * enum v4l2_connector_type - connector type + * @V4L2_CONN_UNKNOWN: unknown connector type, no V4L2 connector configuration + * @V4L2_CONN_COMPOSITE: analog composite connector + * @V4L2_CONN_SVIDEO: analog svideo connector + */ +enum v4l2_connector_type { + V4L2_CONN_UNKNOWN, + V4L2_CONN_COMPOSITE, + V4L2_CONN_SVIDEO, +}; + +/** + * struct v4l2_connector_link - connector link data structure + * @head: structure to be used to add the link to the + * &struct v4l2_fwnode_connector + * @fwnode_link: &struct v4l2_fwnode_link link between the connector and the + * device the connector belongs to. + */ +struct v4l2_connector_link { + struct list_head head; + struct v4l2_fwnode_link fwnode_link; +}; + +/** + * struct v4l2_fwnode_connector_analog - analog connector data structure + * @sdtv_stds: sdtv standards this connector supports, set to V4L2_STD_ALL + * if no restrictions are specified. 
+ */ +struct v4l2_fwnode_connector_analog { + v4l2_std_id sdtv_stds; +}; + +/** + * struct v4l2_fwnode_connector - the connector data structure + * @name: the connector device name + * @label: optional connector label + * @type: connector type + * @links: list of all connector &struct v4l2_connector_link links + * @nr_of_links: total number of links + * @connector: connector configuration + * @connector.analog: analog connector configuration + * &struct v4l2_fwnode_connector_analog + */ +struct v4l2_fwnode_connector { + const char *name; + const char *label; + enum v4l2_connector_type type; + struct list_head links; + unsigned int nr_of_links; + + union { + struct v4l2_fwnode_connector_analog analog; + /* future connectors */ + } connector; +}; + +/** + * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties + * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property + * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus + * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus + * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus + * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type + * @V4L2_FWNODE_BUS_TYPE_DPI: Video Parallel Interface bus + * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types + */ +enum v4l2_fwnode_bus_type { + V4L2_FWNODE_BUS_TYPE_GUESS = 0, + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_FWNODE_BUS_TYPE_BT656, + V4L2_FWNODE_BUS_TYPE_DPI, + NR_OF_V4L2_FWNODE_BUS_TYPE +}; + +/** + * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties + * @fwnode: pointer to the endpoint's fwnode handle + * @vep: pointer to the V4L2 fwnode data structure + * + * This function parses the V4L2 fwnode endpoint specific parameters from the 
+ * firmware. There are two ways to use this function, either by letting it + * obtain the type of the bus (by setting the @vep.bus_type field to + * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum + * v4l2_mbus_type types. + * + * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type" + * property to determine the type when it is available. The caller is + * responsible for validating the contents of @vep.bus_type field after the call + * returns. + * + * As a deprecated functionality to support older DT bindings without "bus-type" + * property for devices that support multiple types, if the "bus-type" property + * does not exist, the function will attempt to guess the type based on the + * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW + * DRIVERS OR BINDINGS. + * + * It is also possible to set @vep.bus_type corresponding to an actual bus. In + * this case the function will only attempt to parse properties related to this + * bus, and it will return an error if the value of the "bus-type" property + * corresponds to a different bus. + * + * The caller is required to initialise all fields of @vep, either with + * explicitly values, or by zeroing them. + * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * NOTE: This function does not parse "link-frequencies" property as its size is + * not known in advance. Please use v4l2_fwnode_endpoint_alloc_parse() if you + * need properties of variable size. 
+ * + * Return: %0 on success or a negative error code on failure: + * %-ENOMEM on memory allocation failure + * %-EINVAL on parsing failure + * %-ENXIO on mismatching bus types + */ +int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by + * v4l2_fwnode_endpoint_alloc_parse() + * @vep: the V4L2 fwnode the resources of which are to be released + * + * It is safe to call this function with NULL argument or on a V4L2 fwnode the + * parsing of which failed. + */ +void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties + * @fwnode: pointer to the endpoint's fwnode handle + * @vep: pointer to the V4L2 fwnode data structure + * + * This function parses the V4L2 fwnode endpoint specific parameters from the + * firmware. There are two ways to use this function, either by letting it + * obtain the type of the bus (by setting the @vep.bus_type field to + * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum + * v4l2_mbus_type types. + * + * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type" + * property to determine the type when it is available. The caller is + * responsible for validating the contents of @vep.bus_type field after the call + * returns. + * + * As a deprecated functionality to support older DT bindings without "bus-type" + * property for devices that support multiple types, if the "bus-type" property + * does not exist, the function will attempt to guess the type based on the + * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW + * DRIVERS OR BINDINGS. + * + * It is also possible to set @vep.bus_type corresponding to an actual bus. 
In + * this case the function will only attempt to parse properties related to this + * bus, and it will return an error if the value of the "bus-type" property + * corresponds to a different bus. + * + * The caller is required to initialise all fields of @vep, either with + * explicitly values, or by zeroing them. + * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * v4l2_fwnode_endpoint_alloc_parse() has two important differences to + * v4l2_fwnode_endpoint_parse(): + * + * 1. It also parses variable size data. + * + * 2. The memory it has allocated to store the variable size data must be freed + * using v4l2_fwnode_endpoint_free() when no longer needed. + * + * Return: %0 on success or a negative error code on failure: + * %-ENOMEM on memory allocation failure + * %-EINVAL on parsing failure + * %-ENXIO on mismatching bus types + */ +int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_parse_link() - parse a link between two endpoints + * @fwnode: pointer to the endpoint's fwnode at the local end of the link + * @link: pointer to the V4L2 fwnode link data structure + * + * Fill the link structure with the local and remote nodes and port numbers. + * The local_node and remote_node fields are set to point to the local and + * remote port's parent nodes respectively (the port parent node being the + * parent node of the port node if that node isn't a 'ports' node, or the + * grand-parent node of the port node otherwise). + * + * A reference is taken to both the local and remote nodes, the caller must use + * v4l2_fwnode_put_link() to drop the references when done with the + * link. + * + * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be + * found. 
+ */ +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_link *link); + +/** + * v4l2_fwnode_put_link() - drop references to nodes in a link + * @link: pointer to the V4L2 fwnode link data structure + * + * Drop references to the local and remote nodes in the link. This function + * must be called on every link parsed with v4l2_fwnode_parse_link(). + */ +void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link); + +/** + * v4l2_fwnode_connector_free() - free the V4L2 connector acquired memory + * @connector: the V4L2 connector resources of which are to be released + * + * Free all allocated memory and put all links acquired by + * v4l2_fwnode_connector_parse() and v4l2_fwnode_connector_add_link(). + * + * It is safe to call this function with NULL argument or on a V4L2 connector + * the parsing of which failed. + */ +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_parse() - initialize the 'struct v4l2_fwnode_connector' + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to or to the connector endpoint fwnode handle. + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Fill the &struct v4l2_fwnode_connector with the connector type, label and + * all &enum v4l2_connector_type specific connector data. The label is optional + * so it is set to %NULL if no one was found. The function initialize the links + * to zero. Adding links to the connector is done by calling + * v4l2_fwnode_connector_add_link(). + * + * The memory allocated for the label must be freed when no longer needed. + * Freeing the memory is done by v4l2_fwnode_connector_free(). 
+ * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode is invalid + * * %-ENOTCONN if connector type is unknown or connector device can't be found + */ +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_add_link - add a link between a connector node and + * a v4l2-subdev node. + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Add a new &struct v4l2_connector_link link to the + * &struct v4l2_fwnode_connector connector links list. The link local_node + * points to the connector node, the remote_node to the host v4l2 (sub)dev. + * + * The taken references to remote_node and local_node must be dropped and the + * allocated memory must be freed when no longer needed. Both is done by calling + * v4l2_fwnode_connector_free(). + * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode or @connector is invalid or @connector type is unknown + * * %-ENOMEM on link memory allocation failure + * * %-ENOTCONN if remote connector device can't be found + * * %-ENOLINK if link parsing between v4l2 (sub)dev and connector fails + */ +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_device_parse() - parse fwnode device properties + * @dev: pointer to &struct device + * @props: pointer to &struct v4l2_fwnode_device_properties where to store the + * parsed properties values + * + * This function parses and validates the V4L2 fwnode device properties from the + * firmware interface, and fills the @struct v4l2_fwnode_device_properties + * provided by the caller. 
+ * + * Return: + * % 0 on success + * %-EINVAL if a parsed property value is not valid + */ +int v4l2_fwnode_device_parse(struct device *dev, + struct v4l2_fwnode_device_properties *props); + +/* Helper macros to access the connector links. */ + +/** v4l2_connector_last_link - Helper macro to get the first + * &struct v4l2_fwnode_connector link + * @v4l2c: &struct v4l2_fwnode_connector owning the connector links + * + * This marco returns the first added &struct v4l2_connector_link connector + * link or @NULL if the connector has no links. + */ +#define v4l2_connector_first_link(v4l2c) \ + list_first_entry_or_null(&(v4l2c)->links, \ + struct v4l2_connector_link, head) + +/** v4l2_connector_last_link - Helper macro to get the last + * &struct v4l2_fwnode_connector link + * @v4l2c: &struct v4l2_fwnode_connector owning the connector links + * + * This marco returns the last &struct v4l2_connector_link added connector link. + */ +#define v4l2_connector_last_link(v4l2c) \ + list_last_entry(&(v4l2c)->links, struct v4l2_connector_link, head) + +#endif /* _V4L2_FWNODE_H */ diff --git a/6.17.0/include-overrides/media/v4l2-h264.h b/6.17.0/include-overrides/media/v4l2-h264.h new file mode 100644 index 0000000..0d9eaa9 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-h264.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Helper functions for H264 codecs. + * + * Copyright (c) 2019 Collabora, Ltd. 
+ * + * Author: Boris Brezillon + */ + +#ifndef _MEDIA_V4L2_H264_H +#define _MEDIA_V4L2_H264_H + +#include + +/** + * struct v4l2_h264_reflist_builder - Reference list builder object + * + * @refs.top_field_order_cnt: top field order count + * @refs.bottom_field_order_cnt: bottom field order count + * @refs.frame_num: reference frame number + * @refs.longterm: set to true for a long term reference + * @refs: array of references + * @cur_pic_order_count: picture order count of the frame being decoded + * @cur_pic_fields: fields present in the frame being decoded + * @unordered_reflist: unordered list of references. Will be used to generate + * ordered P/B0/B1 lists + * @num_valid: number of valid references in the refs array + * + * This object stores the context of the P/B0/B1 reference list builder. + * This procedure is described in section '8.2.4 Decoding process for reference + * picture lists construction' of the H264 spec. + */ +struct v4l2_h264_reflist_builder { + struct { + s32 top_field_order_cnt; + s32 bottom_field_order_cnt; + int frame_num; + u16 longterm : 1; + } refs[V4L2_H264_NUM_DPB_ENTRIES]; + + s32 cur_pic_order_count; + u8 cur_pic_fields; + + struct v4l2_h264_reference unordered_reflist[V4L2_H264_REF_LIST_LEN]; + u8 num_valid; +}; + +void +v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b, + const struct v4l2_ctrl_h264_decode_params *dec_params, + const struct v4l2_ctrl_h264_sps *sps, + const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]); + +/** + * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists + * + * @builder: reference list builder context + * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry + * is a v4l2_h264_reference structure + * @b1_reflist: 32 sized array used to store the B1 reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the B0/B1 reference lists. 
This procedure is described + * in section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass B0/B1 reference lists to the hardware. + */ +void +v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *b0_reflist, + struct v4l2_h264_reference *b1_reflist); + +/** + * v4l2_h264_build_p_ref_list() - Build the P reference list + * + * @builder: reference list builder context + * @reflist: 32 sized array used to store the P reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the P reference lists. This procedure is describe in + * section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass a P reference list to the hardware. + */ +void +v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist); + +#endif /* _MEDIA_V4L2_H264_H */ diff --git a/6.17.0/include-overrides/media/v4l2-image-sizes.h b/6.17.0/include-overrides/media/v4l2-image-sizes.h new file mode 100644 index 0000000..24a7a0b --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-image-sizes.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Standard image size definitions + * + * Copyright (C) 2013, Sylwester Nawrocki + */ +#ifndef _IMAGE_SIZES_H +#define _IMAGE_SIZES_H + +#define CIF_WIDTH 352 +#define CIF_HEIGHT 288 + +#define HD_720_WIDTH 1280 +#define HD_720_HEIGHT 720 + +#define HD_1080_WIDTH 1920 +#define HD_1080_HEIGHT 1080 + +#define QCIF_WIDTH 176 +#define QCIF_HEIGHT 144 + +#define QQCIF_WIDTH 88 +#define QQCIF_HEIGHT 72 + +#define QQVGA_WIDTH 160 +#define QQVGA_HEIGHT 120 + +#define QVGA_WIDTH 320 +#define QVGA_HEIGHT 240 + +#define SVGA_WIDTH 800 +#define SVGA_HEIGHT 600 + +#define SXGA_WIDTH 1280 +#define 
SXGA_HEIGHT 1024 + +#define VGA_WIDTH 640 +#define VGA_HEIGHT 480 + +#define UXGA_WIDTH 1600 +#define UXGA_HEIGHT 1200 + +#define XGA_WIDTH 1024 +#define XGA_HEIGHT 768 + +#endif /* _IMAGE_SIZES_H */ diff --git a/6.17.0/include-overrides/media/v4l2-ioctl.h b/6.17.0/include-overrides/media/v4l2-ioctl.h new file mode 100644 index 0000000..82695c3 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-ioctl.h @@ -0,0 +1,785 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + * Moved from videodev2.h + * + * Some commonly needed functions for drivers (v4l2-common.o module) + */ +#ifndef _V4L2_IOCTL_H +#define _V4L2_IOCTL_H + +#include +#include +#include +#include +#include /* need __user */ +#include + +struct v4l2_fh; + +/** + * struct v4l2_ioctl_ops - describe operations for each V4L2 ioctl + * + * @vidioc_querycap: pointer to the function that implements + * :ref:`VIDIOC_QUERYCAP ` ioctl + * @vidioc_enum_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for video capture in single and multi plane mode + * @vidioc_enum_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for video overlay + * @vidioc_enum_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for video output in single and multi plane mode + * @vidioc_enum_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for Software Defined Radio capture + * @vidioc_enum_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for Software Defined Radio output + * @vidioc_enum_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for metadata capture + * @vidioc_enum_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for metadata output + * 
@vidioc_g_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video capture + * in single plane mode + * @vidioc_g_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video overlay + * @vidioc_g_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_g_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video overlay output + * @vidioc_g_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for raw VBI capture + * @vidioc_g_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for raw VBI output + * @vidioc_g_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for sliced VBI capture + * @vidioc_g_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for sliced VBI output + * @vidioc_g_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_g_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_g_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_g_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_g_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for metadata capture + * @vidioc_g_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for metadata output + * @vidioc_s_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT 
` ioctl logic for video capture + * in single plane mode + * @vidioc_s_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video overlay + * @vidioc_s_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_s_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video overlay output + * @vidioc_s_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for raw VBI capture + * @vidioc_s_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for raw VBI output + * @vidioc_s_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for sliced VBI capture + * @vidioc_s_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for sliced VBI output + * @vidioc_s_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_s_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_s_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_s_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_s_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for metadata capture + * @vidioc_s_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for metadata output + * @vidioc_try_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video capture + * in single plane mode + * 
@vidioc_try_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video overlay + * @vidioc_try_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_try_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video overlay + * output + * @vidioc_try_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for raw VBI capture + * @vidioc_try_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for raw VBI output + * @vidioc_try_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for sliced VBI + * capture + * @vidioc_try_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for sliced VBI output + * @vidioc_try_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_try_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_try_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_try_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_try_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for metadata capture + * @vidioc_try_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for metadata output + * @vidioc_reqbufs: pointer to the function that implements + * :ref:`VIDIOC_REQBUFS ` ioctl + * @vidioc_querybuf: pointer to the function that implements + * 
:ref:`VIDIOC_QUERYBUF ` ioctl + * @vidioc_qbuf: pointer to the function that implements + * :ref:`VIDIOC_QBUF ` ioctl + * @vidioc_expbuf: pointer to the function that implements + * :ref:`VIDIOC_EXPBUF ` ioctl + * @vidioc_dqbuf: pointer to the function that implements + * :ref:`VIDIOC_DQBUF ` ioctl + * @vidioc_create_bufs: pointer to the function that implements + * :ref:`VIDIOC_CREATE_BUFS ` ioctl + * @vidioc_prepare_buf: pointer to the function that implements + * :ref:`VIDIOC_PREPARE_BUF ` ioctl + * @vidioc_remove_bufs: pointer to the function that implements + * :ref:`VIDIOC_REMOVE_BUFS ` ioctl + * @vidioc_overlay: pointer to the function that implements + * :ref:`VIDIOC_OVERLAY ` ioctl + * @vidioc_g_fbuf: pointer to the function that implements + * :ref:`VIDIOC_G_FBUF ` ioctl + * @vidioc_s_fbuf: pointer to the function that implements + * :ref:`VIDIOC_S_FBUF ` ioctl + * @vidioc_streamon: pointer to the function that implements + * :ref:`VIDIOC_STREAMON ` ioctl + * @vidioc_streamoff: pointer to the function that implements + * :ref:`VIDIOC_STREAMOFF ` ioctl + * @vidioc_g_std: pointer to the function that implements + * :ref:`VIDIOC_G_STD ` ioctl + * @vidioc_s_std: pointer to the function that implements + * :ref:`VIDIOC_S_STD ` ioctl + * @vidioc_querystd: pointer to the function that implements + * :ref:`VIDIOC_QUERYSTD ` ioctl + * @vidioc_enum_input: pointer to the function that implements + * :ref:`VIDIOC_ENUM_INPUT ` ioctl + * @vidioc_g_input: pointer to the function that implements + * :ref:`VIDIOC_G_INPUT ` ioctl + * @vidioc_s_input: pointer to the function that implements + * :ref:`VIDIOC_S_INPUT ` ioctl + * @vidioc_enum_output: pointer to the function that implements + * :ref:`VIDIOC_ENUM_OUTPUT ` ioctl + * @vidioc_g_output: pointer to the function that implements + * :ref:`VIDIOC_G_OUTPUT ` ioctl + * @vidioc_s_output: pointer to the function that implements + * :ref:`VIDIOC_S_OUTPUT ` ioctl + * @vidioc_query_ext_ctrl: pointer to the function that 
implements + * :ref:`VIDIOC_QUERY_EXT_CTRL ` ioctl + * @vidioc_g_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_G_EXT_CTRLS ` ioctl + * @vidioc_s_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_S_EXT_CTRLS ` ioctl + * @vidioc_try_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_TRY_EXT_CTRLS ` ioctl + * @vidioc_querymenu: pointer to the function that implements + * :ref:`VIDIOC_QUERYMENU ` ioctl + * @vidioc_enumaudio: pointer to the function that implements + * :ref:`VIDIOC_ENUMAUDIO ` ioctl + * @vidioc_g_audio: pointer to the function that implements + * :ref:`VIDIOC_G_AUDIO ` ioctl + * @vidioc_s_audio: pointer to the function that implements + * :ref:`VIDIOC_S_AUDIO ` ioctl + * @vidioc_enumaudout: pointer to the function that implements + * :ref:`VIDIOC_ENUMAUDOUT ` ioctl + * @vidioc_g_audout: pointer to the function that implements + * :ref:`VIDIOC_G_AUDOUT ` ioctl + * @vidioc_s_audout: pointer to the function that implements + * :ref:`VIDIOC_S_AUDOUT ` ioctl + * @vidioc_g_modulator: pointer to the function that implements + * :ref:`VIDIOC_G_MODULATOR ` ioctl + * @vidioc_s_modulator: pointer to the function that implements + * :ref:`VIDIOC_S_MODULATOR ` ioctl + * @vidioc_g_pixelaspect: pointer to the function that implements + * the pixelaspect part of the :ref:`VIDIOC_CROPCAP ` ioctl + * @vidioc_g_selection: pointer to the function that implements + * :ref:`VIDIOC_G_SELECTION ` ioctl + * @vidioc_s_selection: pointer to the function that implements + * :ref:`VIDIOC_S_SELECTION ` ioctl + * @vidioc_g_jpegcomp: pointer to the function that implements + * :ref:`VIDIOC_G_JPEGCOMP ` ioctl + * @vidioc_s_jpegcomp: pointer to the function that implements + * :ref:`VIDIOC_S_JPEGCOMP ` ioctl + * @vidioc_g_enc_index: pointer to the function that implements + * :ref:`VIDIOC_G_ENC_INDEX ` ioctl + * @vidioc_encoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_ENCODER_CMD ` ioctl + * 
@vidioc_try_encoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_TRY_ENCODER_CMD ` ioctl + * @vidioc_decoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_DECODER_CMD ` ioctl + * @vidioc_try_decoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_TRY_DECODER_CMD ` ioctl + * @vidioc_g_parm: pointer to the function that implements + * :ref:`VIDIOC_G_PARM ` ioctl + * @vidioc_s_parm: pointer to the function that implements + * :ref:`VIDIOC_S_PARM ` ioctl + * @vidioc_g_tuner: pointer to the function that implements + * :ref:`VIDIOC_G_TUNER ` ioctl + * @vidioc_s_tuner: pointer to the function that implements + * :ref:`VIDIOC_S_TUNER ` ioctl + * @vidioc_g_frequency: pointer to the function that implements + * :ref:`VIDIOC_G_FREQUENCY ` ioctl + * @vidioc_s_frequency: pointer to the function that implements + * :ref:`VIDIOC_S_FREQUENCY ` ioctl + * @vidioc_enum_freq_bands: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FREQ_BANDS ` ioctl + * @vidioc_g_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_SLICED_VBI_CAP ` ioctl + * @vidioc_log_status: pointer to the function that implements + * :ref:`VIDIOC_LOG_STATUS ` ioctl + * @vidioc_s_hw_freq_seek: pointer to the function that implements + * :ref:`VIDIOC_S_HW_FREQ_SEEK ` ioctl + * @vidioc_g_register: pointer to the function that implements + * :ref:`VIDIOC_DBG_G_REGISTER ` ioctl + * @vidioc_s_register: pointer to the function that implements + * :ref:`VIDIOC_DBG_S_REGISTER ` ioctl + * @vidioc_g_chip_info: pointer to the function that implements + * :ref:`VIDIOC_DBG_G_CHIP_INFO ` ioctl + * @vidioc_enum_framesizes: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FRAMESIZES ` ioctl + * @vidioc_enum_frameintervals: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FRAMEINTERVALS ` ioctl + * @vidioc_s_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_S_DV_TIMINGS ` ioctl + * @vidioc_g_dv_timings: 
pointer to the function that implements + * :ref:`VIDIOC_G_DV_TIMINGS ` ioctl + * @vidioc_query_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_QUERY_DV_TIMINGS ` ioctl + * @vidioc_enum_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_ENUM_DV_TIMINGS ` ioctl + * @vidioc_dv_timings_cap: pointer to the function that implements + * :ref:`VIDIOC_DV_TIMINGS_CAP ` ioctl + * @vidioc_g_edid: pointer to the function that implements + * :ref:`VIDIOC_G_EDID ` ioctl + * @vidioc_s_edid: pointer to the function that implements + * :ref:`VIDIOC_S_EDID ` ioctl + * @vidioc_subscribe_event: pointer to the function that implements + * :ref:`VIDIOC_SUBSCRIBE_EVENT ` ioctl + * @vidioc_unsubscribe_event: pointer to the function that implements + * :ref:`VIDIOC_UNSUBSCRIBE_EVENT ` ioctl + * @vidioc_default: pointed used to allow other ioctls + */ +struct v4l2_ioctl_ops { + /* ioctl callbacks */ + + /* VIDIOC_QUERYCAP handler */ + int (*vidioc_querycap)(struct file *file, void *fh, + struct v4l2_capability *cap); + + /* VIDIOC_ENUM_FMT handlers */ + int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + + /* VIDIOC_G_FMT handlers */ + int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_format 
*f); + int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); + + /* VIDIOC_S_FMT handlers */ + int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int 
(*vidioc_s_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); + + /* VIDIOC_TRY_FMT handlers */ + int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); + + /* Buffer handlers */ + int (*vidioc_reqbufs)(struct file *file, void *fh, + struct v4l2_requestbuffers *b); + int (*vidioc_querybuf)(struct file *file, void *fh, + struct v4l2_buffer *b); + int (*vidioc_qbuf)(struct file *file, void *fh, + struct v4l2_buffer *b); + int (*vidioc_expbuf)(struct file *file, void *fh, + struct v4l2_exportbuffer *e); + int (*vidioc_dqbuf)(struct file *file, void *fh, + struct v4l2_buffer *b); + + int 
(*vidioc_create_bufs)(struct file *file, void *fh, + struct v4l2_create_buffers *b); + int (*vidioc_prepare_buf)(struct file *file, void *fh, + struct v4l2_buffer *b); + int (*vidioc_remove_bufs)(struct file *file, void *fh, + struct v4l2_remove_buffers *d); + + int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i); + int (*vidioc_g_fbuf)(struct file *file, void *fh, + struct v4l2_framebuffer *a); + int (*vidioc_s_fbuf)(struct file *file, void *fh, + const struct v4l2_framebuffer *a); + + /* Stream on/off */ + int (*vidioc_streamon)(struct file *file, void *fh, + enum v4l2_buf_type i); + int (*vidioc_streamoff)(struct file *file, void *fh, + enum v4l2_buf_type i); + + /* + * Standard handling + * + * Note: ENUMSTD is handled by videodev.c + */ + int (*vidioc_g_std)(struct file *file, void *fh, v4l2_std_id *norm); + int (*vidioc_s_std)(struct file *file, void *fh, v4l2_std_id norm); + int (*vidioc_querystd)(struct file *file, void *fh, v4l2_std_id *a); + + /* Input handling */ + int (*vidioc_enum_input)(struct file *file, void *fh, + struct v4l2_input *inp); + int (*vidioc_g_input)(struct file *file, void *fh, unsigned int *i); + int (*vidioc_s_input)(struct file *file, void *fh, unsigned int i); + + /* Output handling */ + int (*vidioc_enum_output)(struct file *file, void *fh, + struct v4l2_output *a); + int (*vidioc_g_output)(struct file *file, void *fh, unsigned int *i); + int (*vidioc_s_output)(struct file *file, void *fh, unsigned int i); + + /* Control handling */ + int (*vidioc_query_ext_ctrl)(struct file *file, void *fh, + struct v4l2_query_ext_ctrl *a); + int (*vidioc_g_ext_ctrls)(struct file *file, void *fh, + struct v4l2_ext_controls *a); + int (*vidioc_s_ext_ctrls)(struct file *file, void *fh, + struct v4l2_ext_controls *a); + int (*vidioc_try_ext_ctrls)(struct file *file, void *fh, + struct v4l2_ext_controls *a); + int (*vidioc_querymenu)(struct file *file, void *fh, + struct v4l2_querymenu *a); + + /* Audio ioctls */ + int 
(*vidioc_enumaudio)(struct file *file, void *fh, + struct v4l2_audio *a); + int (*vidioc_g_audio)(struct file *file, void *fh, + struct v4l2_audio *a); + int (*vidioc_s_audio)(struct file *file, void *fh, + const struct v4l2_audio *a); + + /* Audio out ioctls */ + int (*vidioc_enumaudout)(struct file *file, void *fh, + struct v4l2_audioout *a); + int (*vidioc_g_audout)(struct file *file, void *fh, + struct v4l2_audioout *a); + int (*vidioc_s_audout)(struct file *file, void *fh, + const struct v4l2_audioout *a); + int (*vidioc_g_modulator)(struct file *file, void *fh, + struct v4l2_modulator *a); + int (*vidioc_s_modulator)(struct file *file, void *fh, + const struct v4l2_modulator *a); + /* Crop ioctls */ + int (*vidioc_g_pixelaspect)(struct file *file, void *fh, + int buf_type, struct v4l2_fract *aspect); + int (*vidioc_g_selection)(struct file *file, void *fh, + struct v4l2_selection *s); + int (*vidioc_s_selection)(struct file *file, void *fh, + struct v4l2_selection *s); + /* Compression ioctls */ + int (*vidioc_g_jpegcomp)(struct file *file, void *fh, + struct v4l2_jpegcompression *a); + int (*vidioc_s_jpegcomp)(struct file *file, void *fh, + const struct v4l2_jpegcompression *a); + int (*vidioc_g_enc_index)(struct file *file, void *fh, + struct v4l2_enc_idx *a); + int (*vidioc_encoder_cmd)(struct file *file, void *fh, + struct v4l2_encoder_cmd *a); + int (*vidioc_try_encoder_cmd)(struct file *file, void *fh, + struct v4l2_encoder_cmd *a); + int (*vidioc_decoder_cmd)(struct file *file, void *fh, + struct v4l2_decoder_cmd *a); + int (*vidioc_try_decoder_cmd)(struct file *file, void *fh, + struct v4l2_decoder_cmd *a); + + /* Stream type-dependent parameter ioctls */ + int (*vidioc_g_parm)(struct file *file, void *fh, + struct v4l2_streamparm *a); + int (*vidioc_s_parm)(struct file *file, void *fh, + struct v4l2_streamparm *a); + + /* Tuner ioctls */ + int (*vidioc_g_tuner)(struct file *file, void *fh, + struct v4l2_tuner *a); + int (*vidioc_s_tuner)(struct file 
*file, void *fh, + const struct v4l2_tuner *a); + int (*vidioc_g_frequency)(struct file *file, void *fh, + struct v4l2_frequency *a); + int (*vidioc_s_frequency)(struct file *file, void *fh, + const struct v4l2_frequency *a); + int (*vidioc_enum_freq_bands)(struct file *file, void *fh, + struct v4l2_frequency_band *band); + + /* Sliced VBI cap */ + int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_sliced_vbi_cap *a); + + /* Log status ioctl */ + int (*vidioc_log_status)(struct file *file, void *fh); + + int (*vidioc_s_hw_freq_seek)(struct file *file, void *fh, + const struct v4l2_hw_freq_seek *a); + + /* Debugging ioctls */ +#ifdef CONFIG_VIDEO_ADV_DEBUG + int (*vidioc_g_register)(struct file *file, void *fh, + struct v4l2_dbg_register *reg); + int (*vidioc_s_register)(struct file *file, void *fh, + const struct v4l2_dbg_register *reg); + + int (*vidioc_g_chip_info)(struct file *file, void *fh, + struct v4l2_dbg_chip_info *chip); +#endif + + int (*vidioc_enum_framesizes)(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize); + + int (*vidioc_enum_frameintervals)(struct file *file, void *fh, + struct v4l2_frmivalenum *fival); + + /* DV Timings IOCTLs */ + int (*vidioc_s_dv_timings)(struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int (*vidioc_g_dv_timings)(struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int (*vidioc_query_dv_timings)(struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int (*vidioc_enum_dv_timings)(struct file *file, void *fh, + struct v4l2_enum_dv_timings *timings); + int (*vidioc_dv_timings_cap)(struct file *file, void *fh, + struct v4l2_dv_timings_cap *cap); + int (*vidioc_g_edid)(struct file *file, void *fh, + struct v4l2_edid *edid); + int (*vidioc_s_edid)(struct file *file, void *fh, + struct v4l2_edid *edid); + + int (*vidioc_subscribe_event)(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh, 
+ const struct v4l2_event_subscription *sub); + + /* For other private ioctls */ + long (*vidioc_default)(struct file *file, void *fh, + bool valid_prio, unsigned int cmd, void *arg); +}; + + +/* v4l debugging and diagnostics */ + +/* Device debug flags to be used with the video device debug attribute */ + +/* Just log the ioctl name + error code */ +#define V4L2_DEV_DEBUG_IOCTL 0x01 +/* Log the ioctl name arguments + error code */ +#define V4L2_DEV_DEBUG_IOCTL_ARG 0x02 +/* Log the file operations open, release, mmap and get_unmapped_area */ +#define V4L2_DEV_DEBUG_FOP 0x04 +/* Log the read and write file operations and the VIDIOC_(D)QBUF ioctls */ +#define V4L2_DEV_DEBUG_STREAMING 0x08 +/* Log poll() */ +#define V4L2_DEV_DEBUG_POLL 0x10 +/* Log controls */ +#define V4L2_DEV_DEBUG_CTRL 0x20 + +/* Video standard functions */ + +/** + * v4l2_norm_to_name - Ancillary routine to analog TV standard name from its ID. + * + * @id: analog TV standard ID. + * + * Return: returns a string with the name of the analog TV standard. + * If the standard is not found or if @id points to multiple standard, + * it returns "Unknown". + */ +const char *v4l2_norm_to_name(v4l2_std_id id); + +/** + * v4l2_video_std_frame_period - Ancillary routine that fills a + * struct &v4l2_fract pointer with the default framerate fraction. + * + * @id: analog TV standard ID. + * @frameperiod: struct &v4l2_fract pointer to be filled + * + */ +void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod); + +/** + * v4l2_video_std_construct - Ancillary routine that fills in the fields of + * a &v4l2_standard structure according to the @id parameter. + * + * @vs: struct &v4l2_standard pointer to be filled + * @id: analog TV standard ID. + * @name: name of the standard to be used + * + * .. note:: + * + * This ancillary routine is obsolete. Shouldn't be used on newer drivers. 
+ */ +int v4l2_video_std_construct(struct v4l2_standard *vs, + int id, const char *name); + +/** + * v4l_video_std_enumstd - Ancillary routine that fills in the fields of + * a &v4l2_standard structure according to the @id and @vs->index + * parameters. + * + * @vs: struct &v4l2_standard pointer to be filled. + * @id: analog TV standard ID. + * + */ +int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id); + +/** + * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a + * human-readable format. + * + * @prefix: prefix to be added at the ioctl prints. + * @cmd: ioctl name + * + * .. note:: + * + * If prefix != %NULL, then it will issue a + * ``printk(KERN_DEBUG "%s: ", prefix)`` first. + */ +void v4l_printk_ioctl(const char *prefix, unsigned int cmd); + +struct video_device; + +/* names for fancy debug output */ +extern const char *v4l2_field_names[]; +extern const char *v4l2_type_names[]; + +#ifdef CONFIG_COMPAT +/** + * v4l2_compat_ioctl32 -32 Bits compatibility layer for 64 bits processors + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + */ +long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +unsigned int v4l2_compat_translate_cmd(unsigned int cmd); +unsigned int v4l2_translate_cmd(unsigned int cmd); +int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd); +int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd); +int v4l2_compat_get_array_args(struct file *file, void *mbuf, + void __user *user_ptr, size_t array_size, + unsigned int cmd, void *arg); +int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr, + void *mbuf, size_t array_size, + unsigned int cmd, void *arg); + +/** + * typedef v4l2_kioctl - Typedef used to pass an ioctl handler. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. 
+ */ +typedef long (*v4l2_kioctl)(struct file *file, unsigned int cmd, void *arg); + +/** + * video_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will handle the ioctl + * + * .. note:: + * + * This routine should be used only inside the V4L2 core. + */ +long int video_usercopy(struct file *file, unsigned int cmd, + unsigned long int arg, v4l2_kioctl func); + +/** + * video_ioctl2 - Handles a V4L2 ioctl. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Method used to hancle an ioctl. Should be used to fill the + * &v4l2_ioctl_ops.unlocked_ioctl on all V4L2 drivers. + */ +long int video_ioctl2(struct file *file, + unsigned int cmd, unsigned long int arg); + +/* + * The user space interpretation of the 'v4l2_event' differs + * based on the 'time_t' definition on 32-bit architectures, so + * the kernel has to handle both. + * This is the old version for 32-bit architectures. 
+ */ +struct v4l2_event_time32 { + __u32 type; + union { + struct v4l2_event_vsync vsync; + struct v4l2_event_ctrl ctrl; + struct v4l2_event_frame_sync frame_sync; + struct v4l2_event_src_change src_change; + struct v4l2_event_motion_det motion_det; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + +#define VIDIOC_DQEVENT_TIME32 _IOR('V', 89, struct v4l2_event_time32) + +struct v4l2_buffer_time32 { + __u32 index; + __u32 type; + __u32 bytesused; + __u32 flags; + __u32 field; + struct old_timeval32 timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; + union { + __u32 offset; + unsigned long userptr; + struct v4l2_plane *planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + union { + __s32 request_fd; + __u32 reserved; + }; +}; +#define VIDIOC_QUERYBUF_TIME32 _IOWR('V', 9, struct v4l2_buffer_time32) +#define VIDIOC_QBUF_TIME32 _IOWR('V', 15, struct v4l2_buffer_time32) +#define VIDIOC_DQBUF_TIME32 _IOWR('V', 17, struct v4l2_buffer_time32) +#define VIDIOC_PREPARE_BUF_TIME32 _IOWR('V', 93, struct v4l2_buffer_time32) + +#endif /* _V4L2_IOCTL_H */ diff --git a/6.17.0/include-overrides/media/v4l2-jpeg.h b/6.17.0/include-overrides/media/v4l2-jpeg.h new file mode 100644 index 0000000..62dda15 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-jpeg.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 JPEG helpers header + * + * Copyright (C) 2019 Pengutronix, Philipp Zabel + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) + */ + +#ifndef _V4L2_JPEG_H +#define _V4L2_JPEG_H + +#include + +#define V4L2_JPEG_MAX_COMPONENTS 4 +#define V4L2_JPEG_MAX_TABLES 4 +/* + * Prefixes used to generate huffman table class and destination identifiers as + * described below: + * + * V4L2_JPEG_LUM_HT | V4L2_JPEG_DC_HT : Prefix for Luma DC coefficients + * huffman table + * V4L2_JPEG_LUM_HT | V4L2_JPEG_AC_HT : 
Prefix for Luma AC coefficients + * huffman table + * V4L2_JPEG_CHR_HT | V4L2_JPEG_DC_HT : Prefix for Chroma DC coefficients + * huffman table + * V4L2_JPEG_CHR_HT | V4L2_JPEG_AC_HT : Prefix for Chroma AC coefficients + * huffman table + */ +#define V4L2_JPEG_LUM_HT 0x00 +#define V4L2_JPEG_CHR_HT 0x01 +#define V4L2_JPEG_DC_HT 0x00 +#define V4L2_JPEG_AC_HT 0x10 + +/* Length of reference huffman tables as provided in Table K.3 of ITU-T.81 */ +#define V4L2_JPEG_REF_HT_AC_LEN 178 +#define V4L2_JPEG_REF_HT_DC_LEN 28 + +/* Array size for 8x8 block of samples or DCT coefficient */ +#define V4L2_JPEG_PIXELS_IN_BLOCK 64 + +/** + * struct v4l2_jpeg_reference - reference into the JPEG buffer + * @start: pointer to the start of the referenced segment or table + * @length: size of the referenced segment or table + * + * Wnen referencing marker segments, start points right after the marker code, + * and length is the size of the segment parameters, excluding the marker code. + */ +struct v4l2_jpeg_reference { + u8 *start; + size_t length; +}; + +/* B.2.2 Frame header syntax */ + +/** + * struct v4l2_jpeg_frame_component_spec - frame component-specification + * @component_identifier: C[i] + * @horizontal_sampling_factor: H[i] + * @vertical_sampling_factor: V[i] + * @quantization_table_selector: quantization table destination selector Tq[i] + */ +struct v4l2_jpeg_frame_component_spec { + u8 component_identifier; + u8 horizontal_sampling_factor; + u8 vertical_sampling_factor; + u8 quantization_table_selector; +}; + +/** + * struct v4l2_jpeg_frame_header - JPEG frame header + * @height: Y + * @width: X + * @precision: P + * @num_components: Nf + * @component: component-specification, see v4l2_jpeg_frame_component_spec + * @subsampling: decoded subsampling from component-specification + */ +struct v4l2_jpeg_frame_header { + u16 height; + u16 width; + u8 precision; + u8 num_components; + struct v4l2_jpeg_frame_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + enum 
v4l2_jpeg_chroma_subsampling subsampling; +}; + +/* B.2.3 Scan header syntax */ + +/** + * struct v4l2_jpeg_scan_component_spec - scan component-specification + * @component_selector: Cs[j] + * @dc_entropy_coding_table_selector: Td[j] + * @ac_entropy_coding_table_selector: Ta[j] + */ +struct v4l2_jpeg_scan_component_spec { + u8 component_selector; + u8 dc_entropy_coding_table_selector; + u8 ac_entropy_coding_table_selector; +}; + +/** + * struct v4l2_jpeg_scan_header - JPEG scan header + * @num_components: Ns + * @component: component-specification, see v4l2_jpeg_scan_component_spec + */ +struct v4l2_jpeg_scan_header { + u8 num_components; /* Ns */ + struct v4l2_jpeg_scan_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + /* Ss, Se, Ah, and Al are not used by any driver */ +}; + +/** + * enum v4l2_jpeg_app14_tf - APP14 transform flag + * According to Rec. ITU-T T.872 (06/2012) 6.5.3 + * APP14 segment is for color encoding, it contains a transform flag, + * which may have values of 0, 1 and 2 and are interpreted as follows: + * @V4L2_JPEG_APP14_TF_CMYK_RGB: CMYK for images encoded with four components + * RGB for images encoded with three components + * @V4L2_JPEG_APP14_TF_YCBCR: an image encoded with three components using YCbCr + * @V4L2_JPEG_APP14_TF_YCCK: an image encoded with four components using YCCK + * @V4L2_JPEG_APP14_TF_UNKNOWN: indicate app14 is not present + */ +enum v4l2_jpeg_app14_tf { + V4L2_JPEG_APP14_TF_CMYK_RGB = 0, + V4L2_JPEG_APP14_TF_YCBCR = 1, + V4L2_JPEG_APP14_TF_YCCK = 2, + V4L2_JPEG_APP14_TF_UNKNOWN = -1, +}; + +/** + * struct v4l2_jpeg_header - parsed JPEG header + * @sof: pointer to frame header and size + * @sos: pointer to scan header and size + * @num_dht: number of entries in @dht + * @dht: pointers to huffman tables and sizes + * @num_dqt: number of entries in @dqt + * @dqt: pointers to quantization tables and sizes + * @frame: parsed frame header + * @scan: pointer to parsed scan header, optional + * @quantization_tables: 
references to four quantization tables, optional + * @huffman_tables: references to four Huffman tables in DC0, DC1, AC0, AC1 + * order, optional + * @restart_interval: number of MCU per restart interval, Ri + * @ecs_offset: buffer offset in bytes to the entropy coded segment + * @app14_tf: transform flag from app14 data + * + * When this structure is passed to v4l2_jpeg_parse_header, the optional scan, + * quantization_tables, and huffman_tables pointers must be initialized to NULL + * or point at valid memory. + */ +struct v4l2_jpeg_header { + struct v4l2_jpeg_reference sof; + struct v4l2_jpeg_reference sos; + unsigned int num_dht; + struct v4l2_jpeg_reference dht[V4L2_JPEG_MAX_TABLES]; + unsigned int num_dqt; + struct v4l2_jpeg_reference dqt[V4L2_JPEG_MAX_TABLES]; + + struct v4l2_jpeg_frame_header frame; + struct v4l2_jpeg_scan_header *scan; + struct v4l2_jpeg_reference *quantization_tables; + struct v4l2_jpeg_reference *huffman_tables; + u16 restart_interval; + size_t ecs_offset; + enum v4l2_jpeg_app14_tf app14_tf; +}; + +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out); + +extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN]; +extern const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN]; +extern const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN]; +extern const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN]; + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-mc.h b/6.17.0/include-overrides/media/v4l2-mc.h new file mode 100644 index 0000000..1837c9f --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-mc.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * v4l2-mc.h - Media Controller V4L2 types and prototypes + * + * 
Copyright (C) 2016 Mauro Carvalho Chehab + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + */ + +#ifndef _V4L2_MC_H +#define _V4L2_MC_H + +#include +#include +#include +#include + +/* We don't need to include pci.h or usb.h here */ +struct pci_dev; +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER +/** + * v4l2_mc_create_media_graph() - create Media Controller links at the graph. + * + * @mdev: pointer to the &media_device struct. + * + * Add links between the entities commonly found on PC customer's hardware at + * the V4L2 side: camera sensors, audio and video PLL-IF decoders, tuners, + * analog TV decoder and I/O entities (video, VBI and Software Defined Radio). + * + * .. note:: + * + * Webcams are modelled on a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW are hidden. While such mapping is enough for v4l2 + * interface centric PC-consumer's hardware, V4L2 subdev centric camera + * hardware should not use this routine, as it will not build the right graph. + */ +int v4l2_mc_create_media_graph(struct media_device *mdev); + +/** + * v4l_enable_media_source() - Hold media source for exclusive use + * if free + * + * @vdev: pointer to struct video_device + * + * This interface calls enable_source handler to determine if + * media source is free for use. The enable_source handler is + * responsible for checking is the media source is free and + * start a pipeline between the media source and the media + * entity associated with the video device. This interface + * should be called from v4l2-core and dvb-core interfaces + * that change the source configuration. + * + * Return: returns zero on success or a negative error code. 
+ */ +int v4l_enable_media_source(struct video_device *vdev); + +/** + * v4l_disable_media_source() - Release media source + * + * @vdev: pointer to struct video_device + * + * This interface calls disable_source handler to release + * the media source. The disable_source handler stops the + * active media pipeline between the media source and the + * media entity associated with the video device. + * + * Return: returns zero on success or a negative error code. + */ +void v4l_disable_media_source(struct video_device *vdev); + +/* + * v4l_vb2q_enable_media_tuner - Hold media source for exclusive use + * if free. + * @q - pointer to struct vb2_queue + * + * Wrapper for v4l_enable_media_source(). This function should + * be called from v4l2-core to enable the media source with + * pointer to struct vb2_queue as the input argument. Some + * v4l2-core interfaces don't have access to video device and + * this interface finds the struct video_device for the q and + * calls v4l_enable_media_source(). + */ +int v4l_vb2q_enable_media_source(struct vb2_queue *q); + +/** + * v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a + * source subdev to a sink pad. + * + * @src_sd: pointer to a source subdev + * @sink: pointer to a sink pad + * @flags: the link flags + * + * This function searches for fwnode endpoint connections from a source + * subdevice to a single sink pad, and if suitable connections are found, + * translates them into media links to that pad. The function can be + * called by the sink, in its v4l2-async notifier bound callback, to create + * links from a bound source subdevice. + * + * The @flags argument specifies the link flags. The caller shall ensure that + * the flags are valid regardless of the number of links that may be created. + * For instance, setting the MEDIA_LNK_FL_ENABLED flag will cause all created + * links to be enabled, which isn't valid if more than one link is created. + * + * .. 
note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink, u32 flags); + +/** + * v4l2_create_fwnode_links - Create fwnode-based links from a source + * subdev to a sink subdev. + * + * @src_sd: pointer to a source subdevice + * @sink_sd: pointer to a sink subdevice + * + * This function searches for any and all fwnode endpoint connections + * between source and sink subdevices, and translates them into media + * links. The function can be called by the sink subdevice, in its + * v4l2-async notifier subdev bound callback, to create all links from + * a bound source subdevice. + * + * .. note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd); + +/** + * v4l2_pipeline_pm_get - Increase the use count of a pipeline + * @entity: The root entity of a pipeline + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * Update the use count of all entities in the pipeline and power entities on. + * + * This function is intended to be called in video node open. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + * + * Return 0 on success or a negative error code on failure. 
+ */ +int v4l2_pipeline_pm_get(struct media_entity *entity); + +/** + * v4l2_pipeline_pm_put - Decrease the use count of a pipeline + * @entity: The root entity of a pipeline + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * Update the use count of all entities in the pipeline and power entities off. + * + * This function is intended to be called in video node release. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + */ +void v4l2_pipeline_pm_put(struct media_entity *entity); + + +/** + * v4l2_pipeline_link_notify - Link management notification callback + * @link: The link + * @flags: New link flags that will be applied + * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*) + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * React to link management on powered pipelines by updating the use count of + * all entities in the source and sink sides of the link. Entities are powered + * on or off accordingly. The use of this function should be paired + * with v4l2_pipeline_pm_{get,put}(). + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. This function will not fail for disconnection + * events. 
+ */ +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification); + +#else /* CONFIG_MEDIA_CONTROLLER */ + +static inline int v4l2_mc_create_media_graph(struct media_device *mdev) +{ + return 0; +} + +static inline int v4l_enable_media_source(struct video_device *vdev) +{ + return 0; +} + +static inline void v4l_disable_media_source(struct video_device *vdev) +{ +} + +static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + return 0; +} + +static inline int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return 0; +} + +static inline void v4l2_pipeline_pm_put(struct media_entity *entity) +{} + +static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + return 0; +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ +#endif /* _V4L2_MC_H */ diff --git a/6.17.0/include-overrides/media/v4l2-mediabus.h b/6.17.0/include-overrides/media/v4l2-mediabus.h new file mode 100644 index 0000000..24c738c --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-mediabus.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski + */ + +#ifndef V4L2_MEDIABUS_H +#define V4L2_MEDIABUS_H + +#include +#include + +/* + * How to use the V4L2_MBUS_* flags: + * Flags are defined for each of the possible states and values of a media + * bus configuration parameter. One and only one bit of each group of flags + * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config + * operation to ensure that no conflicting settings are specified when + * reporting the media bus configuration. For example, it is invalid to set or + * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the + * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag + * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set. 
+ * + * TODO: replace the existing V4L2_MBUS_* flags with structures of fields + * to avoid conflicting settings. + * + * In example: + * #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) + * #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) + * will be replaced by a field whose value reports the intended active state of + * the signal: + * unsigned int v4l2_mbus_hsync_active : 1; + */ + +/* Parallel flags */ +/* + * The client runs in master or in slave mode. By "Master mode" an operation + * mode is meant, when the client (e.g., a camera sensor) is producing + * horizontal and vertical synchronisation. In "Slave mode" the host is + * providing these signals to the slave. + */ +#define V4L2_MBUS_MASTER BIT(0) +#define V4L2_MBUS_SLAVE BIT(1) +/* + * Signal polarity flags + * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused + * V4L2_MBUS_[HV]SYNC* flags should be also used for specifying + * configuration of hardware that uses [HV]REF signals + */ +#define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) +#define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) +#define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4) +#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5) +#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6) +#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7) +#define V4L2_MBUS_PCLK_SAMPLE_DUALEDGE BIT(8) +#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(9) +#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(10) +/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */ +#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(11) +/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */ +#define V4L2_MBUS_FIELD_EVEN_LOW BIT(12) +/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */ +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(13) +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(14) +#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(15) +#define V4L2_MBUS_DATA_ENABLE_LOW BIT(16) + +/* Serial flags */ +/* Clock non-continuous mode support. 
*/ +#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0) + +#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8 + +/** + * enum v4l2_mbus_csi2_cphy_line_orders_type - CSI-2 C-PHY line order + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC: C-PHY line order ABC (default) + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB: C-PHY line order ACB + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC: C-PHY line order BAC + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA: C-PHY line order BCA + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB: C-PHY line order CAB + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA: C-PHY line order CBA + */ +enum v4l2_mbus_csi2_cphy_line_orders_type { + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA, +}; + +/** + * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @data_lanes: an array of physical data lane indexes + * @clock_lane: physical lane index of the clock lane + * @num_data_lanes: number of data lanes + * @lane_polarities: polarity of the lanes. The order is the same of + * the physical lanes. + * @line_orders: line order of the data lanes. The order is the same of the + * physical lanes. 
+ */ +struct v4l2_mbus_config_mipi_csi2 { + unsigned int flags; + unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES]; + unsigned char clock_lane; + unsigned char num_data_lanes; + bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; + enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[V4L2_MBUS_CSI2_MAX_DATA_LANES]; +}; + +/** + * struct v4l2_mbus_config_parallel - parallel data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @bus_width: bus width in bits + * @data_shift: data shift in bits + */ +struct v4l2_mbus_config_parallel { + unsigned int flags; + unsigned char bus_width; + unsigned char data_shift; +}; + +/** + * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration + * @clock_inv: polarity of clock/strobe signal + * false - not inverted, true - inverted + * @strobe: false - data/clock, true - data/strobe + * @lane_polarity: the polarities of the clock (index 0) and data lanes + * index (1) + * @data_lane: the number of the data lane + * @clock_lane: the number of the clock lane + */ +struct v4l2_mbus_config_mipi_csi1 { + unsigned char clock_inv:1; + unsigned char strobe:1; + bool lane_polarity[2]; + unsigned char data_lane; + unsigned char clock_lane; +}; + +/** + * enum v4l2_mbus_type - media bus type + * @V4L2_MBUS_UNKNOWN: unknown bus type, no V4L2 mediabus configuration + * @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync + * @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can + * also be used for BT.1120 + * @V4L2_MBUS_CSI1: MIPI CSI-1 serial interface + * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2) + * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY + * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY + * @V4L2_MBUS_DPI: MIPI VIDEO DPI interface + * @V4L2_MBUS_INVALID: invalid bus type (keep as last) + */ +enum v4l2_mbus_type { + V4L2_MBUS_UNKNOWN, + V4L2_MBUS_PARALLEL, + V4L2_MBUS_BT656, + V4L2_MBUS_CSI1, + V4L2_MBUS_CCP2, + V4L2_MBUS_CSI2_DPHY, 
+ V4L2_MBUS_CSI2_CPHY, + V4L2_MBUS_DPI, + V4L2_MBUS_INVALID, +}; + +/** + * struct v4l2_mbus_config - media bus configuration + * @type: interface type + * @link_freq: The link frequency. See also V4L2_CID_LINK_FREQ control. + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel or BT.656. + * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). + */ +struct v4l2_mbus_config { + enum v4l2_mbus_type type; + u64 link_freq; + union { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; +}; + +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_pix_format fields from a &struct v4l2_mbus_framefmt. + * + * @pix_fmt: pointer to &struct v4l2_pix_format to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ +static inline void +v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_fmt->width = mbus_fmt->width; + pix_fmt->height = mbus_fmt->height; + pix_fmt->field = mbus_fmt->field; + pix_fmt->colorspace = mbus_fmt->colorspace; + pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_fmt->quantization = mbus_fmt->quantization; + pix_fmt->xfer_func = mbus_fmt->xfer_func; +} + +/** + * v4l2_fill_mbus_format - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a + * data format code. 
+ * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model + * @code: data format code (from &enum v4l2_mbus_pixelcode) + */ +static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format *pix_fmt, + u32 code) +{ + mbus_fmt->width = pix_fmt->width; + mbus_fmt->height = pix_fmt->height; + mbus_fmt->field = pix_fmt->field; + mbus_fmt->colorspace = pix_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_fmt->quantization; + mbus_fmt->xfer_func = pix_fmt->xfer_func; + mbus_fmt->code = code; +} + +/** + * v4l2_fill_pix_format_mplane - Ancillary routine that fills a &struct + * v4l2_pix_format_mplane fields from a media bus structure. + * + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ +static inline void +v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_mp_fmt->width = mbus_fmt->width; + pix_mp_fmt->height = mbus_fmt->height; + pix_mp_fmt->field = mbus_fmt->field; + pix_mp_fmt->colorspace = mbus_fmt->colorspace; + pix_mp_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_mp_fmt->quantization = mbus_fmt->quantization; + pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; +} + +/** + * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane. 
+ * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model + */ +static inline void +v4l2_fill_mbus_format_mplane(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format_mplane *pix_mp_fmt) +{ + mbus_fmt->width = pix_mp_fmt->width; + mbus_fmt->height = pix_mp_fmt->height; + mbus_fmt->field = pix_mp_fmt->field; + mbus_fmt->colorspace = pix_mp_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_mp_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_mp_fmt->quantization; + mbus_fmt->xfer_func = pix_mp_fmt->xfer_func; +} + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-mem2mem.h b/6.17.0/include-overrides/media/v4l2-mem2mem.h new file mode 100644 index 0000000..0af330c --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-mem2mem.h @@ -0,0 +1,902 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Memory-to-memory device framework for Video for Linux 2. + * + * Helper functions for devices that use memory buffers for both source + * and destination. + * + * Copyright (c) 2009 Samsung Electronics Co., Ltd. + * Pawel Osciak, + * Marek Szyprowski, + */ + +#ifndef _MEDIA_V4L2_MEM2MEM_H +#define _MEDIA_V4L2_MEM2MEM_H + +#include + +/** + * struct v4l2_m2m_ops - mem-to-mem device driver callbacks + * @device_run: required. Begin the actual job (transaction) inside this + * callback. + * The job does NOT have to end before this callback returns + * (and it will be the usual case). When the job finishes, + * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() + * has to be called. + * @job_ready: optional. Should return 0 if the driver does not have a job + * fully prepared to run yet (i.e. it will not be able to finish a + * transaction without sleeping). If not provided, it will be + * assumed that one source and one destination buffer are all + * that is required for the driver to perform one full transaction. + * This method may not sleep. 
+ * @job_abort: optional. Informs the driver that it has to abort the currently + * running transaction as soon as possible (i.e. as soon as it can + * stop the device safely; e.g. in the next interrupt handler), + * even if the transaction would not have been finished by then. + * After the driver performs the necessary steps, it has to call + * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as + * if the transaction ended normally. + * This function does not have to (and will usually not) wait + * until the device enters a state when it can be stopped. + */ +struct v4l2_m2m_ops { + void (*device_run)(void *priv); + int (*job_ready)(void *priv); + void (*job_abort)(void *priv); +}; + +struct video_device; +struct v4l2_m2m_dev; + +/** + * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be + * processed + * + * @q: pointer to struct &vb2_queue + * @rdy_queue: List of V4L2 mem-to-mem queues + * @rdy_spinlock: spin lock to protect the struct usage + * @num_rdy: number of buffers ready to be processed + * @buffered: is the queue buffered? + * + * Queue for buffers ready to be processed as soon as this + * instance receives access to the device. + */ + +struct v4l2_m2m_queue_ctx { + struct vb2_queue q; + + struct list_head rdy_queue; + spinlock_t rdy_spinlock; + u8 num_rdy; + bool buffered; +}; + +/** + * struct v4l2_m2m_ctx - Memory to memory context structure + * + * @q_lock: struct &mutex lock + * @new_frame: valid in the device_run callback: if true, then this + * starts a new frame; if false, then this is a new slice + * for an existing frame. This is always true unless + * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which + * indicates slicing support. 
+ * @is_draining: indicates device is in draining phase + * @last_src_buf: indicate the last source buffer for draining + * @next_buf_last: next capture queud buffer will be tagged as last + * @has_stopped: indicate the device has been stopped + * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE + * queue is not streaming. This allows firmware to + * analyze the bitstream header which arrives on the + * OUTPUT queue. The driver must implement the job_ready + * callback correctly to make sure that the requirements + * for actual decoding are met. + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @cap_q_ctx: Capture (output to memory) queue context + * @out_q_ctx: Output (input from memory) queue context + * @queue: List of memory to memory contexts + * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c: + * %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT. + * @finished: Wait queue used to signalize when a job queue finished. + * @priv: Instance private data + * + * The memory to memory context is specific to a file handle, NOT to e.g. + * a device. 
+ */ +struct v4l2_m2m_ctx { + /* optional cap/out vb2 queues lock */ + struct mutex *q_lock; + + bool new_frame; + + bool is_draining; + struct vb2_v4l2_buffer *last_src_buf; + bool next_buf_last; + bool has_stopped; + bool ignore_cap_streaming; + + /* internal use only */ + struct v4l2_m2m_dev *m2m_dev; + + struct v4l2_m2m_queue_ctx cap_q_ctx; + + struct v4l2_m2m_queue_ctx out_q_ctx; + + /* For device job queue */ + struct list_head queue; + unsigned long job_flags; + wait_queue_head_t finished; + + void *priv; +}; + +/** + * struct v4l2_m2m_buffer - Memory to memory buffer + * + * @vb: pointer to struct &vb2_v4l2_buffer + * @list: list of m2m buffers + */ +struct v4l2_m2m_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +/** + * v4l2_m2m_get_curr_priv() - return driver private data for the currently + * running instance or NULL if no instance is running + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + */ +void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_get_vq() - return vb2_queue for the given type + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to + * the pending job queue and add it if so. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * There are three basic requirements an instance has to meet to be able to run: + * 1) at least one source buffer has to be queued, + * 2) at least one destination buffer has to be queued, + * 3) streaming has to be on. + * + * If a queue is buffered (for example a decoder hardware ringbuffer that has + * to be drained before doing streamoff), allow scheduling without v4l2 buffers + * on that queue. 
+ * + * There may also be additional, custom requirements. In such case the driver + * should supply a custom callback (job_ready in v4l2_m2m_ops) that should + * return 1 if the instance is ready. + * An example of the above could be an instance that requires more than one + * src/dst buffer per transaction. + */ +void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_job_finish() - inform the framework that a job has been finished + * and have it clean up + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * Called by a driver to yield back the device after it has finished with it. + * Should be called as soon as possible after reaching a state which allows + * other instances to take control of the device. + * + * This function has to be called only after &v4l2_m2m_ops->device_run + * callback has been called on the driver. To prevent recursion, it should + * not be called directly from the &v4l2_m2m_ops->device_run callback though. + */ +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with + * state and inform the framework that a job has been finished and have it + * clean up + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @state: vb2 buffer state passed to v4l2_m2m_buf_done(). + * + * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this + * function instead of job_finish() to take held buffers into account. It is + * optional for other drivers. + * + * This function removes the source buffer from the ready list and returns + * it with the given state. The same is done for the destination buffer, unless + * it is marked 'held'. In that case the buffer is kept on the ready list. 
+ * + * After that the job is finished (see job_finish()). + * + * This allows for multiple output buffers to be used to fill in a single + * capture buffer. This is typically used by stateless decoders where + * multiple e.g. H.264 slices contribute to a single decoded frame. + */ +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx, + enum vb2_buffer_state state); + +static inline void +v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) +{ + vb2_buffer_done(&buf->vb2_buf, state); +} + +/** + * v4l2_m2m_clear_state() - clear encoding/decoding state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = false; +} + +/** + * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = true; +} + +/** + * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session + * draining management state of next queued capture buffer + * + * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify + * the end of the capture session. 
+ * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->is_draining && m2m_ctx->next_buf_last; +} + +/** + * v4l2_m2m_has_stopped() - return the current encoding/decoding session + * stopped state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->has_stopped; +} + +/** + * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining + * state in the current encoding/decoding session + * + * This will identify the last output buffer queued before a session stop + * was required, leading to an actual encoding/decoding session stop state + * in the encoding/decoding process after being processed. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +static inline bool +v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf; +} + +/** + * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job + * to finish + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Called by a driver in the suspend hook. Stop new jobs from being run, and + * wait for current running job to finish. 
+ */ +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_resume() - resume job running and try to run a queued job + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Called by a driver in the resume hook. This reverts the operation of + * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if + * there is any. + */ +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @reqbufs: pointer to struct &v4l2_requestbuffers + */ +int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_requestbuffers *reqbufs); + +/** + * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + * + * See v4l2_m2m_mmap() documentation for details. 
+ */ +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_create_bufs() - create a source or destination buffer, depending + * on the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @create: pointer to struct &v4l2_create_buffers + */ +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create); + +/** + * v4l2_m2m_expbuf() - export a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @eb: pointer to struct &v4l2_exportbuffer + */ +int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_exportbuffer *eb); + +/** + * v4l2_m2m_streamon() - turn on 
streaming for a video queue + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_streamoff() - turn off streaming for a video queue + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding + * session state when a start of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding + * session state when a stop of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_encoder_cmd() - execute an encoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @ec: pointer to the encoder command + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec); + +/** + * v4l2_m2m_decoder_cmd() - execute a decoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @dc: pointer to the decoder command + */ +int v4l2_m2m_decoder_cmd(struct 
file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc); + +/** + * v4l2_m2m_poll() - poll replacement, for destination buffers only + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @wait: pointer to struct &poll_table_struct + * + * Call from the driver's poll() function. Will poll both queues. If a buffer + * is available to dequeue (with dqbuf) from the source queue, this will + * indicate that a non-blocking write can be performed, while read will be + * returned in case of the destination queue. + */ +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait); + +/** + * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vma: pointer to struct &vm_area_struct + * + * Call from driver's mmap() function. Will handle mmap() for both queues + * seamlessly for the video buffer, which will receive normal per-queue offsets + * and proper vb2 queue pointers. The differentiation is made outside + * vb2 by adding a predefined offset to buffers from one of the queues + * and subtracting it before passing it back to vb2. Only drivers (and + * thus applications) receive modified offsets. + */ +int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#endif +/** + * v4l2_m2m_init() - initialize per-driver m2m data + * + * @m2m_ops: pointer to struct v4l2_m2m_ops + * + * Usually called from driver's ``probe()`` function. 
+ * + * Return: returns an opaque pointer to the internal data to handle M2M context + */ +struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); + +#if defined(CONFIG_MEDIA_CONTROLLER) +void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev); +int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function); +#else +static inline void +v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) +{ +} + +static inline int +v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function) +{ + return 0; +} +#endif + +/** + * v4l2_m2m_release() - cleans up and frees a m2m_dev structure + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Usually called from driver's ``remove()`` function. + */ +void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_ctx_init() - allocate and initialize a m2m context + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @drv_priv: driver's instance private data + * @queue_init: a callback for queue type-specific initialization function + * to be used for initializing vb2_queues + * + * Usually called from driver's ``open()`` function. + */ +struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, + void *drv_priv, + int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); + +static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx, + bool buffered) +{ + m2m_ctx->out_q_ctx.buffered = buffered; +} + +static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx, + bool buffered) +{ + m2m_ctx->cap_q_ctx.buffered = buffered; +} + +/** + * v4l2_m2m_ctx_release() - release m2m context + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * Usually called from driver's release() function. 
+ */ +void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &vb2_v4l2_buffer + * + * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback. + */ +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for + * use + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; +} + +/** + * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers + * ready for use + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; +} + +/** + * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx 
*m2m_ctx) +{ + return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx); +} + +/** + * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_last_src_buf() - return last source buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx); +} + +/** + * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_src_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** 
+ * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) +{ + return &m2m_ctx->out_q_ctx.q; +} + +/** + * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) +{ + return &m2m_ctx->cap_q_ctx.q; +} + +/** + * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and + * return it + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready + * buffers and return it + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx); +} + +/** + * 
v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of + * ready buffers and return it + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready + * buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + * @vbuf: the buffer to be removed + */ +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list + * of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf); +} + +/** + * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the + * list of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf); +} + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx); + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx); +} + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx); +} + +/** + * 
v4l2_m2m_buf_copy_metadata() - copy buffer metadata from + * the output buffer to the capture buffer + * + * @out_vb: the output buffer that is the source of the metadata. + * @cap_vb: the capture buffer that will receive the metadata. + * @copy_frame_flags: copy the KEY/B/PFRAME flags as well. + * + * This helper function copies the timestamp, timecode (if the TIMECODE + * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME + * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb. + * + * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME + * flags are not copied. This is typically needed for encoders that + * set this bits explicitly. + */ +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags); + +/* v4l2 request helper */ + +void v4l2_m2m_request_queue(struct media_request *req); + +/* v4l2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb); +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh, + struct v4l2_create_buffers *create); +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *d); +int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh, + struct v4l2_exportbuffer *eb); +int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, + enum v4l2_buf_type type); +int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, + enum v4l2_buf_type type); +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh, + struct 
v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait); + +#endif /* _MEDIA_V4L2_MEM2MEM_H */ + diff --git a/6.17.0/include-overrides/media/v4l2-rect.h b/6.17.0/include-overrides/media/v4l2-rect.h new file mode 100644 index 0000000..bd587d0 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-rect.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-rect.h - v4l2_rect helper functions + * + * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _V4L2_RECT_H_ +#define _V4L2_RECT_H_ + +#include + +/** + * v4l2_rect_set_size_to() - copy the width/height values. + * @r: rect whose width and height fields will be set + * @size: rect containing the width and height fields you need. + */ +static inline void v4l2_rect_set_size_to(struct v4l2_rect *r, + const struct v4l2_rect *size) +{ + r->width = size->width; + r->height = size->height; +} + +/** + * v4l2_rect_set_min_size() - width and height of r should be >= min_size. 
+ * @r: rect whose width and height will be modified + * @min_size: rect containing the minimal width and height + */ +static inline void v4l2_rect_set_min_size(struct v4l2_rect *r, + const struct v4l2_rect *min_size) +{ + if (r->width < min_size->width) + r->width = min_size->width; + if (r->height < min_size->height) + r->height = min_size->height; +} + +/** + * v4l2_rect_set_max_size() - width and height of r should be <= max_size + * @r: rect whose width and height will be modified + * @max_size: rect containing the maximum width and height + */ +static inline void v4l2_rect_set_max_size(struct v4l2_rect *r, + const struct v4l2_rect *max_size) +{ + if (r->width > max_size->width) + r->width = max_size->width; + if (r->height > max_size->height) + r->height = max_size->height; +} + +/** + * v4l2_rect_map_inside()- r should be inside boundary. + * @r: rect that will be modified + * @boundary: rect containing the boundary for @r + */ +static inline void v4l2_rect_map_inside(struct v4l2_rect *r, + const struct v4l2_rect *boundary) +{ + v4l2_rect_set_max_size(r, boundary); + if (r->left < boundary->left) + r->left = boundary->left; + if (r->top < boundary->top) + r->top = boundary->top; + if (r->left + r->width > boundary->left + boundary->width) + r->left = boundary->left + boundary->width - r->width; + if (r->top + r->height > boundary->top + boundary->height) + r->top = boundary->top + boundary->height - r->height; +} + +/** + * v4l2_rect_same_size() - return true if r1 has the same size as r2 + * @r1: rectangle. + * @r2: rectangle. + * + * Return true if both rectangles have the same size. + */ +static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return r1->width == r2->width && r1->height == r2->height; +} + +/** + * v4l2_rect_same_position() - return true if r1 has the same position as r2 + * @r1: rectangle. + * @r2: rectangle. 
+ * + * Return true if both rectangles have the same position + */ +static inline bool v4l2_rect_same_position(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return r1->top == r2->top && r1->left == r2->left; +} + +/** + * v4l2_rect_equal() - return true if r1 equals r2 + * @r1: rectangle. + * @r2: rectangle. + * + * Return true if both rectangles have the same size and position. + */ +static inline bool v4l2_rect_equal(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return v4l2_rect_same_size(r1, r2) && v4l2_rect_same_position(r1, r2); +} + +/** + * v4l2_rect_intersect() - calculate the intersection of two rects. + * @r: intersection of @r1 and @r2. + * @r1: rectangle. + * @r2: rectangle. + */ +static inline void v4l2_rect_intersect(struct v4l2_rect *r, + const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + int right, bottom; + + r->top = max(r1->top, r2->top); + r->left = max(r1->left, r2->left); + bottom = min(r1->top + r1->height, r2->top + r2->height); + right = min(r1->left + r1->width, r2->left + r2->width); + r->height = max(0, bottom - r->top); + r->width = max(0, right - r->left); +} + +/** + * v4l2_rect_scale() - scale rect r by to/from + * @r: rect to be scaled. + * @from: from rectangle. + * @to: to rectangle. + * + * This scales rectangle @r horizontally by @to->width / @from->width and + * vertically by @to->height / @from->height. + * + * Typically @r is a rectangle inside @from and you want the rectangle as + * it would appear after scaling @from to @to. So the resulting @r will + * be the scaled rectangle inside @to. 
+ */ +static inline void v4l2_rect_scale(struct v4l2_rect *r, + const struct v4l2_rect *from, + const struct v4l2_rect *to) +{ + if (from->width == 0 || from->height == 0) { + r->left = r->top = r->width = r->height = 0; + return; + } + r->left = (((r->left - from->left) * to->width) / from->width) & ~1; + r->width = ((r->width * to->width) / from->width) & ~1; + r->top = ((r->top - from->top) * to->height) / from->height; + r->height = (r->height * to->height) / from->height; +} + +/** + * v4l2_rect_overlap() - do r1 and r2 overlap? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 and @r2 overlap. + */ +static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + /* + * IF the left side of r1 is to the right of the right side of r2 OR + * the left side of r2 is to the right of the right side of r1 THEN + * they do not overlap. + */ + if (r1->left >= r2->left + r2->width || + r2->left >= r1->left + r1->width) + return false; + /* + * IF the top side of r1 is below the bottom of r2 OR + * the top side of r2 is below the bottom of r1 THEN + * they do not overlap. + */ + if (r1->top >= r2->top + r2->height || + r2->top >= r1->top + r1->height) + return false; + return true; +} + +/** + * v4l2_rect_enclosed() - is r1 enclosed in r2? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 is enclosed in @r2. 
+ */ +static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1, + struct v4l2_rect *r2) +{ + if (r1->left < r2->left || r1->top < r2->top) + return false; + if (r1->left + r1->width > r2->left + r2->width) + return false; + if (r1->top + r1->height > r2->top + r2->height) + return false; + + return true; +} + +#endif diff --git a/6.17.0/include-overrides/media/v4l2-subdev.h b/6.17.0/include-overrides/media/v4l2-subdev.h new file mode 100644 index 0000000..5dcf406 --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-subdev.h @@ -0,0 +1,2017 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 sub-device support header. + * + * Copyright (C) 2008 Hans Verkuil + */ + +#ifndef _V4L2_SUBDEV_H +#define _V4L2_SUBDEV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* generic v4l2_device notify callback notification values */ +#define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32) +#define V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ 0x00000001 +#define V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED 0x00000002 +#define V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN 0x00000004 +#define V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN 0x00000008 + +#define V4L2_SUBDEV_IR_TX_NOTIFY _IOW('v', 1, u32) +#define V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ 0x00000001 + +#define V4L2_DEVICE_NOTIFY_EVENT _IOW('v', 2, struct v4l2_event) + +struct v4l2_device; +struct v4l2_ctrl_handler; +struct v4l2_event; +struct v4l2_event_subscription; +struct v4l2_fh; +struct v4l2_subdev; +struct v4l2_subdev_fh; +struct tuner_setup; +struct v4l2_mbus_frame_desc; +struct led_classdev; + +/** + * struct v4l2_decode_vbi_line - used to decode_vbi_line + * + * @is_second_field: Set to 0 for the first (odd) field; + * set to 1 for the second (even) field. + * @p: Pointer to the sliced VBI data from the decoder. On exit, points to + * the start of the payload. + * @line: Line number of the sliced VBI data (1-23) + * @type: VBI service type (V4L2_SLICED_*). 
0 if no service found + */ +struct v4l2_decode_vbi_line { + u32 is_second_field; + u8 *p; + u32 line; + u32 type; +}; + +/* + * Sub-devices are devices that are connected somehow to the main bridge + * device. These devices are usually audio/video muxers/encoders/decoders or + * sensors and webcam controllers. + * + * Usually these devices are controlled through an i2c bus, but other buses + * may also be used. + * + * The v4l2_subdev struct provides a way of accessing these devices in a + * generic manner. Most operations that these sub-devices support fall in + * a few categories: core ops, audio ops, video ops and tuner ops. + * + * More categories can be added if needed, although this should remain a + * limited set (no more than approx. 8 categories). + * + * Each category has its own set of ops that subdev drivers can implement. + * + * A subdev driver can leave the pointer to the category ops NULL if + * it does not implement them (e.g. an audio subdev will generally not + * implement the video category ops). The exception is the core category: + * this must always be present. + * + * These ops are all used internally so it is no problem to change, remove + * or add ops or move ops from one to another category. Currently these + * ops are based on the original ioctls, but since ops are not limited to + * one argument there is room for improvement here once all i2c subdev + * drivers are converted to use these ops. + */ + +/* + * Core ops: it is highly recommended to implement at least these ops: + * + * log_status + * g_register + * s_register + * + * This provides basic debugging support. + * + * The ioctl ops is meant for generic ioctl-like commands. Depending on + * the use-case it might be better to use subdev-specific ops (currently + * not yet implemented) since ops provide proper type-checking. + */ + +/** + * enum v4l2_subdev_io_pin_bits - Subdevice external IO pin configuration + * bits + * + * @V4L2_SUBDEV_IO_PIN_DISABLE: disables a pin config. 
ENABLE assumed. + * @V4L2_SUBDEV_IO_PIN_OUTPUT: set it if pin is an output. + * @V4L2_SUBDEV_IO_PIN_INPUT: set it if pin is an input. + * @V4L2_SUBDEV_IO_PIN_SET_VALUE: to set the output value via + * &struct v4l2_subdev_io_pin_config->value. + * @V4L2_SUBDEV_IO_PIN_ACTIVE_LOW: pin active is bit 0. + * Otherwise, ACTIVE HIGH is assumed. + */ +enum v4l2_subdev_io_pin_bits { + V4L2_SUBDEV_IO_PIN_DISABLE = 0, + V4L2_SUBDEV_IO_PIN_OUTPUT = 1, + V4L2_SUBDEV_IO_PIN_INPUT = 2, + V4L2_SUBDEV_IO_PIN_SET_VALUE = 3, + V4L2_SUBDEV_IO_PIN_ACTIVE_LOW = 4, +}; + +/** + * struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration + * + * @flags: bitmask with flags for this pin's config, whose bits are defined by + * &enum v4l2_subdev_io_pin_bits. + * @pin: Chip external IO pin to configure + * @function: Internal signal pad/function to route to IO pin + * @value: Initial value for pin - e.g. GPIO output value + * @strength: Pin drive strength + */ +struct v4l2_subdev_io_pin_config { + u32 flags; + u8 pin; + u8 function; + u8 value; + u8 strength; +}; + +/** + * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs + * + * @log_status: callback for VIDIOC_LOG_STATUS() ioctl handler code. + * + * @s_io_pin_config: configure one or more chip I/O pins for chips that + * multiplex different internal signal pads out to IO pins. This function + * takes a pointer to an array of 'n' pin configuration entries, one for + * each pin being configured. This function could be called at times + * other than just subdevice initialization. + * + * @init: initialize the sensor registers to some sort of reasonable default + * values. Do not use for new drivers and should be removed in existing + * drivers. + * + * @load_fw: load firmware. + * + * @reset: generic reset command. The argument selects which subsystems to + * reset. Passing 0 will always reset the whole chip. Do not use for new + * drivers without discussing this first on the linux-media mailinglist. 
+ * There should be no reason normally to reset a device. + * + * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with + * a direction argument if needed. + * + * @command: called by in-kernel drivers in order to call functions internal + * to subdev drivers driver that have a separate callback. + * + * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core. + * used to provide support for private ioctls used on the driver. + * + * @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel, + * in order to fix data passed from/to userspace. + * + * @g_register: callback for VIDIOC_DBG_G_REGISTER() ioctl handler code. + * + * @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code. + * + * @s_power: puts subdevice in power saving mode (on == 0) or normal operation + * mode (on == 1). DEPRECATED. See + * Documentation/driver-api/media/camera-sensor.rst . pre_streamon and + * post_streamoff callbacks can be used for e.g. setting the bus to LP-11 + * mode before s_stream is called. + * + * @interrupt_service_routine: Called by the bridge chip's interrupt service + * handler, when an interrupt status has be raised due to this subdev, + * so that this subdev can handle the details. It may schedule work to be + * performed later. It must not sleep. **Called from an IRQ context**. + * + * @subscribe_event: used by the drivers to request the control framework that + * for it to be warned when the value of a control changes. + * + * @unsubscribe_event: remove event subscription from the control framework. 
+ */ +struct v4l2_subdev_core_ops { + int (*log_status)(struct v4l2_subdev *sd); + int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n, + struct v4l2_subdev_io_pin_config *pincfg); + int (*init)(struct v4l2_subdev *sd, u32 val); + int (*load_fw)(struct v4l2_subdev *sd); + int (*reset)(struct v4l2_subdev *sd, u32 val); + int (*s_gpio)(struct v4l2_subdev *sd, u32 val); + long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); + long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); +#ifdef CONFIG_COMPAT + long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd, + unsigned long arg); +#endif +#ifdef CONFIG_VIDEO_ADV_DEBUG + int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg); + int (*s_register)(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg); +#endif + int (*s_power)(struct v4l2_subdev *sd, int on); + int (*interrupt_service_routine)(struct v4l2_subdev *sd, + u32 status, bool *handled); + int (*subscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); + int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +}; + +/** + * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened + * in radio mode. + * + * @standby: puts the tuner in standby mode. It will be woken up + * automatically the next time it is used. + * + * @s_radio: callback that switches the tuner to radio mode. + * drivers should explicitly call it when a tuner ops should + * operate on radio mode, before being able to handle it. + * Used on devices that have both AM/FM radio receiver and TV. + * + * @s_frequency: callback for VIDIOC_S_FREQUENCY() ioctl handler code. + * + * @g_frequency: callback for VIDIOC_G_FREQUENCY() ioctl handler code. + * freq->type must be filled in. Normally done by video_ioctl2() + * or the bridge driver. + * + * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS() ioctl handler code. 
+ * + * @g_tuner: callback for VIDIOC_G_TUNER() ioctl handler code. + * + * @s_tuner: callback for VIDIOC_S_TUNER() ioctl handler code. @vt->type must be + * filled in. Normally done by video_ioctl2 or the + * bridge driver. + * + * @g_modulator: callback for VIDIOC_G_MODULATOR() ioctl handler code. + * + * @s_modulator: callback for VIDIOC_S_MODULATOR() ioctl handler code. + * + * @s_type_addr: sets tuner type and its I2C addr. + * + * @s_config: sets tda9887 specific stuff, like port1, port2 and qss + * + * .. note:: + * + * On devices that have both AM/FM and TV, it is up to the driver + * to explicitly call s_radio when the tuner should be switched to + * radio mode, before handling other &struct v4l2_subdev_tuner_ops + * that would require it. An example of such usage is:: + * + * static void s_frequency(void *priv, const struct v4l2_frequency *f) + * { + * ... + * if (f.type == V4L2_TUNER_RADIO) + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_radio); + * ... + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency); + * } + */ +struct v4l2_subdev_tuner_ops { + int (*standby)(struct v4l2_subdev *sd); + int (*s_radio)(struct v4l2_subdev *sd); + int (*s_frequency)(struct v4l2_subdev *sd, const struct v4l2_frequency *freq); + int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq); + int (*enum_freq_bands)(struct v4l2_subdev *sd, struct v4l2_frequency_band *band); + int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); + int (*s_tuner)(struct v4l2_subdev *sd, const struct v4l2_tuner *vt); + int (*g_modulator)(struct v4l2_subdev *sd, struct v4l2_modulator *vm); + int (*s_modulator)(struct v4l2_subdev *sd, const struct v4l2_modulator *vm); + int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type); + int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); +}; + +/** + * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings + * + * @s_clock_freq: set the frequency (in Hz) of the audio 
clock output. + * Used to slave an audio processor to the video decoder, ensuring that + * audio and video remain synchronized. Usual values for the frequency + * are 48000, 44100 or 32000 Hz. If the frequency is not supported, then + * -EINVAL is returned. + * + * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard + * way to select I2S clock used by driving digital audio streams at some + * board designs. Usual values for the frequency are 1024000 and 2048000. + * If the frequency is not supported, then %-EINVAL is returned. + * + * @s_routing: used to define the input and/or output pins of an audio chip, + * and any additional configuration data. + * Never attempt to use user-level input IDs (e.g. Composite, S-Video, + * Tuner) at this level. An i2c device shouldn't know about whether an + * input pin is connected to a Composite connector, become on another + * board or platform it might be connected to something else entirely. + * The calling driver is responsible for mapping a user-level input to + * the right pins on the i2c device. + * + * @s_stream: used to notify the audio code that stream will start or has + * stopped. + */ +struct v4l2_subdev_audio_ops { + int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); + int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq); + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_stream)(struct v4l2_subdev *sd, int enable); +}; + +/** + * struct v4l2_mbus_frame_desc_entry_csi2 + * + * @vc: CSI-2 virtual channel + * @dt: CSI-2 data type ID + */ +struct v4l2_mbus_frame_desc_entry_csi2 { + u8 vc; + u8 dt; +}; + +/** + * enum v4l2_mbus_frame_desc_flags - media bus frame description flags + * + * @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX: + * Indicates that &struct v4l2_mbus_frame_desc_entry->length field + * specifies maximum data length. + * @V4L2_MBUS_FRAME_DESC_FL_BLOB: + * Indicates that the format does not have line offsets, i.e. 
+ * the receiver should use 1D DMA. + */ +enum v4l2_mbus_frame_desc_flags { + V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = BIT(0), + V4L2_MBUS_FRAME_DESC_FL_BLOB = BIT(1), +}; + +/** + * struct v4l2_mbus_frame_desc_entry - media bus frame description structure + * + * @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags. + * @stream: stream in routing configuration + * @pixelcode: media bus pixel code, valid if @flags + * %FRAME_DESC_FL_BLOB is not set. + * @length: number of octets per frame, valid if @flags + * %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set. + * @bus: Bus-specific frame descriptor parameters + * @bus.csi2: CSI-2-specific bus configuration + */ +struct v4l2_mbus_frame_desc_entry { + enum v4l2_mbus_frame_desc_flags flags; + u32 stream; + u32 pixelcode; + u32 length; + union { + struct v4l2_mbus_frame_desc_entry_csi2 csi2; + } bus; +}; + + /* + * If this number is too small, it should be dropped altogether and the + * API switched to a dynamic number of frame descriptor entries. + */ +#define V4L2_FRAME_DESC_ENTRY_MAX 8 + +/** + * enum v4l2_mbus_frame_desc_type - media bus frame description type + * + * @V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED: + * Undefined frame desc type. Drivers should not use this, it is + * for backwards compatibility. + * @V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL: + * Parallel media bus. + * @V4L2_MBUS_FRAME_DESC_TYPE_CSI2: + * CSI-2 media bus. Frame desc parameters must be set in + * &struct v4l2_mbus_frame_desc_entry->csi2. 
+ */ +enum v4l2_mbus_frame_desc_type { + V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED = 0, + V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL, + V4L2_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +/** + * struct v4l2_mbus_frame_desc - media bus data frame description + * @type: type of the bus (enum v4l2_mbus_frame_desc_type) + * @entry: frame descriptors array + * @num_entries: number of entries in @entry array + */ +struct v4l2_mbus_frame_desc { + enum v4l2_mbus_frame_desc_type type; + struct v4l2_mbus_frame_desc_entry entry[V4L2_FRAME_DESC_ENTRY_MAX]; + unsigned short num_entries; +}; + +/** + * enum v4l2_subdev_pre_streamon_flags - Flags for pre_streamon subdev core op + * + * @V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP: Set the transmitter to either LP-11 + * or LP-111 mode before call to s_stream(). + */ +enum v4l2_subdev_pre_streamon_flags { + V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP = BIT(0), +}; + +/** + * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened + * in video mode. + * + * @s_routing: see s_routing in audio_ops, except this version is for video + * devices. + * + * @s_crystal_freq: sets the frequency of the crystal used to generate the + * clocks in Hz. An extra flags field allows device specific configuration + * regarding clock frequency dividers, etc. If not used, then set flags + * to 0. If the frequency is not supported, then -EINVAL is returned. + * + * @g_std: callback for VIDIOC_G_STD() ioctl handler code. + * + * @s_std: callback for VIDIOC_S_STD() ioctl handler code. + * + * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by + * video input devices. + * + * @g_std_output: get current standard for video OUTPUT devices. This is ignored + * by video input devices. + * + * @querystd: callback for VIDIOC_QUERYSTD() ioctl handler code. + * + * @g_tvnorms: get &v4l2_std_id with all standards supported by the video + * CAPTURE device. This is ignored by video output devices. 
+ * + * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video + * OUTPUT device. This is ignored by video capture devices. + * + * @g_input_status: get input status. Same as the status field in the + * &struct v4l2_input + * + * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the + * sub-device. Failure on stop will remove any resources acquired in + * streaming start, while the error code is still returned by the driver. + * The caller shall track the subdev state, and shall not start or stop an + * already started or stopped subdev. Also see call_s_stream wrapper in + * v4l2-subdev.c. + * + * This callback is DEPRECATED. New drivers should instead implement + * &v4l2_subdev_pad_ops.enable_streams and + * &v4l2_subdev_pad_ops.disable_streams operations, and use + * v4l2_subdev_s_stream_helper for the &v4l2_subdev_video_ops.s_stream + * operation to support legacy users. + * + * Drivers should also not call the .s_stream() subdev operation directly, + * but use the v4l2_subdev_enable_streams() and + * v4l2_subdev_disable_streams() helpers. + * + * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev + * can adjust @size to a lower value and must not write more data to the + * buffer starting at @data than the original value of @size. + * + * @pre_streamon: May be called before streaming is actually started, to help + * initialising the bus. Current usage is to set a CSI-2 transmitter to + * LP-11 or LP-111 mode before streaming. See &enum + * v4l2_subdev_pre_streamon_flags. + * + * pre_streamon shall return error if it cannot perform the operation as + * indicated by the flags argument. In particular, -EACCES indicates lack + * of support for the operation. The caller shall call post_streamoff for + * each successful call of pre_streamon. + * + * @post_streamoff: Called after streaming is stopped, but if and only if + * pre_streamon was called earlier. 
+ */ +struct v4l2_subdev_video_ops { + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags); + int (*g_std)(struct v4l2_subdev *sd, v4l2_std_id *norm); + int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm); + int (*s_std_output)(struct v4l2_subdev *sd, v4l2_std_id std); + int (*g_std_output)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*querystd)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_tvnorms)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_input_status)(struct v4l2_subdev *sd, u32 *status); + int (*s_stream)(struct v4l2_subdev *sd, int enable); + int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf, + unsigned int *size); + int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags); + int (*post_streamoff)(struct v4l2_subdev *sd); +}; + +/** + * struct v4l2_subdev_vbi_ops - Callbacks used when v4l device was opened + * in video mode via the vbi device node. + * + * @decode_vbi_line: video decoders that support sliced VBI need to implement + * this ioctl. Field p of the &struct v4l2_decode_vbi_line is set to the + * start of the VBI data that was generated by the decoder. The driver + * then parses the sliced VBI data and sets the other fields in the + * struct accordingly. The pointer p is updated to point to the start of + * the payload which can be copied verbatim into the data field of the + * &struct v4l2_sliced_vbi_data. If no valid VBI data was found, then the + * type field is set to 0 on return. + * + * @s_vbi_data: used to generate VBI signals on a video signal. + * &struct v4l2_sliced_vbi_data is filled with the data packets that + * should be output. Note that if you set the line field to 0, then that + * VBI signal is disabled. If no valid VBI data was found, then the type + * field is set to 0 on return. 
+ * + * @g_vbi_data: used to obtain the sliced VBI packet from a readback register. + * Not all video decoders support this. If no data is available because + * the readback register contains invalid or erroneous data %-EIO is + * returned. Note that you must fill in the 'id' member and the 'field' + * member (to determine whether CC data from the first or second field + * should be obtained). + * + * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP() ioctl handler + * code. + * + * @s_raw_fmt: setup the video encoder/decoder for raw VBI. + * + * @g_sliced_fmt: retrieve the current sliced VBI settings. + * + * @s_sliced_fmt: setup the sliced VBI settings. + */ +struct v4l2_subdev_vbi_ops { + int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); + int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data); + int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data); + int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap); + int (*s_raw_fmt)(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt); + int (*g_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt); + int (*s_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt); +}; + +/** + * struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations + * @g_skip_top_lines: number of lines at the top of the image to be skipped. + * This is needed for some sensors, which always corrupt + * several top lines of the output image, or which send their + * metadata in them. + * @g_skip_frames: number of frames to skip at stream start. This is needed for + * buggy sensors that generate faulty frames when they are + * turned on. 
+ */ +struct v4l2_subdev_sensor_ops { + int (*g_skip_top_lines)(struct v4l2_subdev *sd, u32 *lines); + int (*g_skip_frames)(struct v4l2_subdev *sd, u32 *frames); +}; + +/** + * enum v4l2_subdev_ir_mode- describes the type of IR supported + * + * @V4L2_SUBDEV_IR_MODE_PULSE_WIDTH: IR uses struct ir_raw_event records + */ +enum v4l2_subdev_ir_mode { + V4L2_SUBDEV_IR_MODE_PULSE_WIDTH, +}; + +/** + * struct v4l2_subdev_ir_parameters - Parameters for IR TX or TX + * + * @bytes_per_data_element: bytes per data element of data in read or + * write call. + * @mode: IR mode as defined by &enum v4l2_subdev_ir_mode. + * @enable: device is active if true + * @interrupt_enable: IR interrupts are enabled if true + * @shutdown: if true: set hardware to low/no power, false: normal mode + * + * @modulation: if true, it uses carrier, if false: baseband + * @max_pulse_width: maximum pulse width in ns, valid only for baseband signal + * @carrier_freq: carrier frequency in Hz, valid only for modulated signal + * @duty_cycle: duty cycle percentage, valid only for modulated signal + * @invert_level: invert signal level + * + * @invert_carrier_sense: Send 0/space as a carrier burst. used only in TX. + * + * @noise_filter_min_width: min time of a valid pulse, in ns. Used only for RX. + * @carrier_range_lower: Lower carrier range, in Hz, valid only for modulated + * signal. Used only for RX. + * @carrier_range_upper: Upper carrier range, in Hz, valid only for modulated + * signal. Used only for RX. + * @resolution: The receive resolution, in ns . Used only for RX. 
+ */ +struct v4l2_subdev_ir_parameters { + unsigned int bytes_per_data_element; + enum v4l2_subdev_ir_mode mode; + + bool enable; + bool interrupt_enable; + bool shutdown; + + bool modulation; + u32 max_pulse_width; + unsigned int carrier_freq; + unsigned int duty_cycle; + bool invert_level; + + /* Tx only */ + bool invert_carrier_sense; + + /* Rx only */ + u32 noise_filter_min_width; + unsigned int carrier_range_lower; + unsigned int carrier_range_upper; + u32 resolution; +}; + +/** + * struct v4l2_subdev_ir_ops - operations for IR subdevices + * + * @rx_read: Reads received codes or pulse width data. + * The semantics are similar to a non-blocking read() call. + * @rx_g_parameters: Get the current operating parameters and state of + * the IR receiver. + * @rx_s_parameters: Set the current operating parameters and state of + * the IR receiver. It is recommended to call + * [rt]x_g_parameters first to fill out the current state, and only change + * the fields that need to be changed. Upon return, the actual device + * operating parameters and state will be returned. Note that hardware + * limitations may prevent the actual settings from matching the requested + * settings - e.g. an actual carrier setting of 35,904 Hz when 36,000 Hz + * was requested. An exception is when the shutdown parameter is true. + * The last used operational parameters will be returned, but the actual + * state of the hardware be different to minimize power consumption and + * processing when shutdown is true. + * + * @tx_write: Writes codes or pulse width data for transmission. + * The semantics are similar to a non-blocking write() call. + * @tx_g_parameters: Get the current operating parameters and state of + * the IR transmitter. + * @tx_s_parameters: Set the current operating parameters and state of + * the IR transmitter. It is recommended to call + * [rt]x_g_parameters first to fill out the current state, and only change + * the fields that need to be changed. 
Upon return, the actual device + * operating parameters and state will be returned. Note that hardware + * limitations may prevent the actual settings from matching the requested + * settings - e.g. an actual carrier setting of 35,904 Hz when 36,000 Hz + * was requested. An exception is when the shutdown parameter is true. + * The last used operational parameters will be returned, but the actual + * state of the hardware be different to minimize power consumption and + * processing when shutdown is true. + */ +struct v4l2_subdev_ir_ops { + /* Receiver */ + int (*rx_read)(struct v4l2_subdev *sd, u8 *buf, size_t count, + ssize_t *num); + + int (*rx_g_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); + int (*rx_s_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); + + /* Transmitter */ + int (*tx_write)(struct v4l2_subdev *sd, u8 *buf, size_t count, + ssize_t *num); + + int (*tx_g_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); + int (*tx_s_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); +}; + +/** + * struct v4l2_subdev_pad_config - Used for storing subdev pad information. + * + * @format: &struct v4l2_mbus_framefmt + * @crop: &struct v4l2_rect to be used for crop + * @compose: &struct v4l2_rect to be used for compose + * @interval: frame interval + */ +struct v4l2_subdev_pad_config { + struct v4l2_mbus_framefmt format; + struct v4l2_rect crop; + struct v4l2_rect compose; + struct v4l2_fract interval; +}; + +/** + * struct v4l2_subdev_stream_config - Used for storing stream configuration. + * + * @pad: pad number + * @stream: stream number + * @enabled: has the stream been enabled with v4l2_subdev_enable_streams() + * @fmt: &struct v4l2_mbus_framefmt + * @crop: &struct v4l2_rect to be used for crop + * @compose: &struct v4l2_rect to be used for compose + * @interval: frame interval + * + * This structure stores configuration for a stream. 
+ */ +struct v4l2_subdev_stream_config { + u32 pad; + u32 stream; + bool enabled; + + struct v4l2_mbus_framefmt fmt; + struct v4l2_rect crop; + struct v4l2_rect compose; + struct v4l2_fract interval; +}; + +/** + * struct v4l2_subdev_stream_configs - A collection of stream configs. + * + * @num_configs: number of entries in @config. + * @configs: an array of &struct v4l2_subdev_stream_configs. + */ +struct v4l2_subdev_stream_configs { + u32 num_configs; + struct v4l2_subdev_stream_config *configs; +}; + +/** + * struct v4l2_subdev_krouting - subdev routing table + * + * @len_routes: length of routes array, in routes + * @num_routes: number of routes + * @routes: &struct v4l2_subdev_route + * + * This structure contains the routing table for a subdev. + */ +struct v4l2_subdev_krouting { + unsigned int len_routes; + unsigned int num_routes; + struct v4l2_subdev_route *routes; +}; + +/** + * struct v4l2_subdev_state - Used for storing subdev state information. + * + * @_lock: default for 'lock' + * @lock: mutex for the state. May be replaced by the user. + * @sd: the sub-device which the state is related to + * @pads: &struct v4l2_subdev_pad_config array + * @routing: routing table for the subdev + * @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS) + * + * This structure only needs to be passed to the pad op if the 'which' field + * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For + * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL. + */ +struct v4l2_subdev_state { + /* lock for the struct v4l2_subdev_state fields */ + struct mutex _lock; + struct mutex *lock; + struct v4l2_subdev *sd; + struct v4l2_subdev_pad_config *pads; + struct v4l2_subdev_krouting routing; + struct v4l2_subdev_stream_configs stream_configs; +}; + +/** + * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations + * + * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler + * code. 
+ * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler + * code. + * + * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL() ioctl + * handler code. + * + * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT() ioctl handler code. + * + * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT() ioctl handler code. + * + * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION() ioctl handler code. + * + * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code. + * + * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL() + * ioctl handler code. + * + * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL() + * ioctl handler code. + * + * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code. + * + * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code. + * + * @s_dv_timings: Set custom dv timings in the sub device. This is used + * when sub device is capable of setting detailed timing information + * in the hardware to generate/detect the video signal. + * + * @g_dv_timings: Get custom dv timings in the sub device. + * + * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code. + * + * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler + * code. + * + * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS() ioctl handler + * code. + * + * @link_validate: used by the media controller code to check if the links + * that belongs to a pipeline can be used for stream. + * + * @get_frame_desc: get the current low level media bus frame parameters. + * + * @set_frame_desc: set the low level media bus frame parameters, @fd array + * may be adjusted by the subdev driver to device capabilities. + * + * @get_mbus_config: get the media bus configuration of a remote sub-device. 
+ * The media bus configuration is usually retrieved from the + * firmware interface at sub-device probe time, immediately + * applied to the hardware and eventually adjusted by the + * driver. Remote sub-devices (usually video receivers) shall + * use this operation to query the transmitting end bus + * configuration in order to adjust their own one accordingly. + * Callers should make sure they get the most up-to-date as + * possible configuration from the remote end, likely calling + * this operation as close as possible to stream on time. The + * operation shall fail if the pad index it has been called on + * is not valid or in case of unrecoverable failures. The + * config argument has been memset to 0 just before calling + * the op. + * + * @set_routing: Enable or disable data connection routes described in the + * subdevice routing table. Subdevs that implement this operation + * must set the V4L2_SUBDEV_FL_STREAMS flag. + * + * @enable_streams: Enable the streams defined in streams_mask on the given + * source pad. Subdevs that implement this operation must use the active + * state management provided by the subdev core (enabled through a call to + * v4l2_subdev_init_finalize() at initialization time). Do not call + * directly, use v4l2_subdev_enable_streams() instead. + * + * Drivers that support only a single stream without setting the + * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask + * argument. + * + * @disable_streams: Disable the streams defined in streams_mask on the given + * source pad. Subdevs that implement this operation must use the active + * state management provided by the subdev core (enabled through a call to + * v4l2_subdev_init_finalize() at initialization time). Do not call + * directly, use v4l2_subdev_disable_streams() instead. + * + * Drivers that support only a single stream without setting the + * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask + * argument. 
+ */ +struct v4l2_subdev_pad_ops { + int (*enum_mbus_code)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code); + int (*enum_frame_size)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse); + int (*enum_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie); + int (*get_fmt)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + int (*set_fmt)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + int (*get_selection)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel); + int (*set_selection)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel); + int (*get_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*set_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*s_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*g_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*query_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*dv_timings_cap)(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap); + int (*enum_dv_timings)(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *timings); +#ifdef CONFIG_MEDIA_CONTROLLER + int (*link_validate)(struct v4l2_subdev *sd, struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); +#endif /* CONFIG_MEDIA_CONTROLLER */ 
+ int (*get_frame_desc)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd); + int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd); + int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config); + int (*set_routing)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *route); + int (*enable_streams)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask); + int (*disable_streams)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask); +}; + +/** + * struct v4l2_subdev_ops - Subdev operations + * + * @core: pointer to &struct v4l2_subdev_core_ops. Can be %NULL + * @tuner: pointer to &struct v4l2_subdev_tuner_ops. Can be %NULL + * @audio: pointer to &struct v4l2_subdev_audio_ops. Can be %NULL + * @video: pointer to &struct v4l2_subdev_video_ops. Can be %NULL + * @vbi: pointer to &struct v4l2_subdev_vbi_ops. Can be %NULL + * @ir: pointer to &struct v4l2_subdev_ir_ops. Can be %NULL + * @sensor: pointer to &struct v4l2_subdev_sensor_ops. Can be %NULL + * @pad: pointer to &struct v4l2_subdev_pad_ops. Can be %NULL + */ +struct v4l2_subdev_ops { + const struct v4l2_subdev_core_ops *core; + const struct v4l2_subdev_tuner_ops *tuner; + const struct v4l2_subdev_audio_ops *audio; + const struct v4l2_subdev_video_ops *video; + const struct v4l2_subdev_vbi_ops *vbi; + const struct v4l2_subdev_ir_ops *ir; + const struct v4l2_subdev_sensor_ops *sensor; + const struct v4l2_subdev_pad_ops *pad; +}; + +/** + * struct v4l2_subdev_internal_ops - V4L2 subdev internal ops + * + * @init_state: initialize the subdev state to default values + * + * @registered: called when this subdev is registered. When called the v4l2_dev + * field is set to the correct v4l2_device. + * + * @unregistered: called when this subdev is unregistered. 
When called the + * v4l2_dev field is still set to the correct v4l2_device. + * + * @open: called when the subdev device node is opened by an application. + * + * @close: called when the subdev device node is closed. Please note that + * it is possible for @close to be called after @unregistered! + * + * @release: called when the last user of the subdev device is gone. This + * happens after the @unregistered callback and when the last open + * filehandle to the v4l-subdevX device node was closed. If no device + * node was created for this sub-device, then the @release callback + * is called right after the @unregistered callback. + * The @release callback is typically used to free the memory containing + * the v4l2_subdev structure. It is almost certainly required for any + * sub-device that sets the V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * + * .. note:: + * Never call this from drivers, only the v4l2 framework can call + * these ops. + */ +struct v4l2_subdev_internal_ops { + int (*init_state)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state); + int (*registered)(struct v4l2_subdev *sd); + void (*unregistered)(struct v4l2_subdev *sd); + int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); + int (*close)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); + void (*release)(struct v4l2_subdev *sd); +}; + +/* Set this flag if this subdev is a i2c device. */ +#define V4L2_SUBDEV_FL_IS_I2C (1U << 0) +/* Set this flag if this subdev is a spi device. */ +#define V4L2_SUBDEV_FL_IS_SPI (1U << 1) +/* Set this flag if this subdev needs a device node. */ +#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2) +/* + * Set this flag if this subdev generates events. + * Note controls can send events, thus drivers exposing controls + * should set this flag. + */ +#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3) +/* + * Set this flag if this subdev supports multiplexed streams. 
This means
+ * that the driver supports routing and handles the stream parameter in its
+ * v4l2_subdev_pad_ops handlers. More specifically, this means:
+ *
+ * - Centrally managed subdev active state is enabled
+ * - Legacy pad config is _not_ supported (state->pads is NULL)
+ * - Routing ioctls are available
+ * - Multiple streams per pad are supported
+ */
+#define V4L2_SUBDEV_FL_STREAMS (1U << 4)
+
+struct regulator_bulk_data;
+
+/**
+ * struct v4l2_subdev_platform_data - regulators config struct
+ *
+ * @regulators: Optional regulators used to power on/off the subdevice
+ * @num_regulators: Number of regulators
+ * @host_priv: Per-subdevice data, specific for a certain video host device
+ */
+struct v4l2_subdev_platform_data {
+	struct regulator_bulk_data *regulators;
+	int num_regulators;
+
+	void *host_priv;
+};
+
+/**
+ * struct v4l2_subdev - describes a V4L2 sub-device
+ *
+ * @entity: pointer to &struct media_entity
+ * @list: List of sub-devices
+ * @owner: The owner is the same as the driver's &struct device owner.
+ * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
+ * owner. Initialized by v4l2_device_register_subdev().
+ * @flags: subdev flags. Can be:
+ * %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is an i2c device;
+ * %V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is an spi device;
+ * %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if this subdev needs a
+ * device node;
+ * %V4L2_SUBDEV_FL_HAS_EVENTS - Set this flag if this subdev generates
+ * events.
+ *
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @ops: pointer to struct &v4l2_subdev_ops
+ * @internal_ops: pointer to struct &v4l2_subdev_internal_ops.
+ * Never call these internal ops from within a driver!
+ * @ctrl_handler: The control handler of this subdev. May be NULL.
+ * @name: Name of the sub-device. Please notice that the name must be unique.
+ * @grp_id: can be used to group similar subdevs.
Value is driver-specific + * @dev_priv: pointer to private data + * @host_priv: pointer to private data used by the device where the subdev + * is attached. + * @devnode: subdev device node + * @dev: pointer to the physical device, if any + * @fwnode: The fwnode_handle of the subdev, usually the same as + * either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL). + * @async_list: Links this subdev to a global subdev_list or + * @notifier->done_list list. + * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of + * &struct v4l2_async_subdev_endpoint. + * @subdev_notifier: A sub-device notifier implicitly registered for the sub- + * device using v4l2_async_register_subdev_sensor(). + * @asc_list: Async connection list, of &struct + * v4l2_async_connection.subdev_entry. + * @pdata: common part of subdevice platform data + * @state_lock: A pointer to a lock used for all the subdev's states, set by the + * driver. This is optional. If NULL, each state instance will get + * a lock of its own. + * @privacy_led: Optional pointer to a LED classdev for the privacy LED for sensors. + * @active_state: Active state for the subdev (NULL for subdevs tracking the + * state internally). Initialized by calling + * v4l2_subdev_init_finalize(). + * @enabled_pads: Bitmask of enabled pads used by v4l2_subdev_enable_streams() + * and v4l2_subdev_disable_streams() helper functions for + * fallback cases. + * @s_stream_enabled: Tracks whether streaming has been enabled with s_stream. + * This is only for call_s_stream() internal use. + * + * Each instance of a subdev driver should create this struct, either + * stand-alone or embedded in a larger struct. + * + * This structure should be initialized by v4l2_subdev_init() or one of + * its variants: v4l2_spi_subdev_init(), v4l2_i2c_subdev_init(). 
+ */ +struct v4l2_subdev { +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_entity entity; +#endif + struct list_head list; + struct module *owner; + bool owner_v4l2_dev; + u32 flags; + struct v4l2_device *v4l2_dev; + const struct v4l2_subdev_ops *ops; + const struct v4l2_subdev_internal_ops *internal_ops; + struct v4l2_ctrl_handler *ctrl_handler; + char name[52]; + u32 grp_id; + void *dev_priv; + void *host_priv; + struct video_device *devnode; + struct device *dev; + struct fwnode_handle *fwnode; + struct list_head async_list; + struct list_head async_subdev_endpoint_list; + struct v4l2_async_notifier *subdev_notifier; + struct list_head asc_list; + struct v4l2_subdev_platform_data *pdata; + struct mutex *state_lock; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + + struct led_classdev *privacy_led; + + /* + * TODO: active_state should most likely be changed from a pointer to an + * embedded field. For the time being it's kept as a pointer to more + * easily catch uses of active_state in the cases where the driver + * doesn't support it. + */ + struct v4l2_subdev_state *active_state; + u64 enabled_pads; + bool s_stream_enabled; +}; + + +/** + * media_entity_to_v4l2_subdev - Returns a &struct v4l2_subdev from + * the &struct media_entity embedded in it. + * + * @ent: pointer to &struct media_entity. + */ +#define media_entity_to_v4l2_subdev(ent) \ +({ \ + typeof(ent) __me_sd_ent = (ent); \ + \ + __me_sd_ent ? \ + container_of(__me_sd_ent, struct v4l2_subdev, entity) : \ + NULL; \ +}) + +/** + * vdev_to_v4l2_subdev - Returns a &struct v4l2_subdev from + * the &struct video_device embedded on it. 
+ * + * @vdev: pointer to &struct video_device + */ +#define vdev_to_v4l2_subdev(vdev) \ + ((struct v4l2_subdev *)video_get_drvdata(vdev)) + +/** + * struct v4l2_subdev_fh - Used for storing subdev information per file handle + * + * @vfh: pointer to &struct v4l2_fh + * @state: pointer to &struct v4l2_subdev_state + * @owner: module pointer to the owner of this file handle + * @client_caps: bitmask of ``V4L2_SUBDEV_CLIENT_CAP_*`` + */ +struct v4l2_subdev_fh { + struct v4l2_fh vfh; + struct module *owner; +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + struct v4l2_subdev_state *state; + u64 client_caps; +#endif +}; + +/** + * to_v4l2_subdev_fh - Returns a &struct v4l2_subdev_fh from + * the &struct v4l2_fh embedded on it. + * + * @fh: pointer to &struct v4l2_fh + */ +#define to_v4l2_subdev_fh(fh) \ + container_of(fh, struct v4l2_subdev_fh, vfh) + +extern const struct v4l2_file_operations v4l2_subdev_fops; + +/** + * v4l2_set_subdevdata - Sets V4L2 dev private device data + * + * @sd: pointer to &struct v4l2_subdev + * @p: pointer to the private device data to be stored. + */ +static inline void v4l2_set_subdevdata(struct v4l2_subdev *sd, void *p) +{ + sd->dev_priv = p; +} + +/** + * v4l2_get_subdevdata - Gets V4L2 dev private device data + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the pointer to the private device data to be stored. + */ +static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd) +{ + return sd->dev_priv; +} + +/** + * v4l2_set_subdev_hostdata - Sets V4L2 dev private host data + * + * @sd: pointer to &struct v4l2_subdev + * @p: pointer to the private data to be stored. + */ +static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p) +{ + sd->host_priv = p; +} + +/** + * v4l2_get_subdev_hostdata - Gets V4L2 dev private data + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the pointer to the private host data to be stored. 
+ */ +static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd) +{ + return sd->host_priv; +} + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode + * endpoint, assuming 1:1 port:pad + * + * @entity: Pointer to the subdev entity + * @endpoint: Pointer to a parsed fwnode endpoint + * + * This function can be used as the .get_fwnode_pad operation for + * subdevices that map port numbers and pad indexes 1:1. If the endpoint + * is owned by the subdevice, the function returns the endpoint port + * number. + * + * Returns the endpoint port number on success or a negative error code. + */ +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint); + +/** + * v4l2_subdev_link_validate_default - validates a media link + * + * @sd: pointer to &struct v4l2_subdev + * @link: pointer to &struct media_link + * @source_fmt: pointer to &struct v4l2_subdev_format + * @sink_fmt: pointer to &struct v4l2_subdev_format + * + * This function ensures that width, height and the media bus pixel + * code are equal on both source and sink of the link. + */ +int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); + +/** + * v4l2_subdev_link_validate - validates a media link + * + * @link: pointer to &struct media_link + * + * This function calls the subdev's link_validate ops to validate + * if a media link is valid for streaming. It also internally + * calls v4l2_subdev_link_validate_default() to ensure that + * width, height and the media bus pixel code are equal on both + * source and sink of the link. + * + * The function can be used as a drop-in &media_entity_ops.link_validate + * implementation for v4l2_subdev instances. 
It supports all links between + * subdevs, as well as links between subdevs and video devices, provided that + * the video devices also implement their &media_entity_ops.link_validate + * operation. + */ +int v4l2_subdev_link_validate(struct media_link *link); + +/** + * v4l2_subdev_has_pad_interdep - MC has_pad_interdep implementation for subdevs + * + * @entity: pointer to &struct media_entity + * @pad0: pad number for the first pad + * @pad1: pad number for the second pad + * + * This function is an implementation of the + * media_entity_operations.has_pad_interdep operation for subdevs that + * implement the multiplexed streams API (as indicated by the + * V4L2_SUBDEV_FL_STREAMS subdev flag). + * + * It considers two pads interdependent if there is an active route between pad0 + * and pad1. + */ +bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1); + +/** + * __v4l2_subdev_state_alloc - allocate v4l2_subdev_state + * + * @sd: pointer to &struct v4l2_subdev for which the state is being allocated. + * @lock_name: name of the state lock + * @key: lock_class_key for the lock + * + * Must call __v4l2_subdev_state_free() when state is no longer needed. + * + * Not to be called directly by the drivers. + */ +struct v4l2_subdev_state *__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, + const char *lock_name, + struct lock_class_key *key); + +/** + * __v4l2_subdev_state_free - free a v4l2_subdev_state + * + * @state: v4l2_subdev_state to be freed. + * + * Not to be called directly by the drivers. + */ +void __v4l2_subdev_state_free(struct v4l2_subdev_state *state); + +/** + * v4l2_subdev_init_finalize() - Finalizes the initialization of the subdevice + * @sd: The subdev + * + * This function finalizes the initialization of the subdev, including + * allocation of the active state for the subdev. 
+ * + * This function must be called by the subdev drivers that use the centralized + * active state, after the subdev struct has been initialized and + * media_entity_pads_init() has been called, but before registering the + * subdev. + * + * The user must call v4l2_subdev_cleanup() when the subdev is being removed. + */ +#define v4l2_subdev_init_finalize(sd) \ + ({ \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":sd->active_state->lock"; \ + __v4l2_subdev_init_finalize(sd, name, &__key); \ + }) + +int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, + struct lock_class_key *key); + +/** + * v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice + * @sd: The subdevice + * + * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of + * its release if resources have been associated with it using + * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize(). + */ +void v4l2_subdev_cleanup(struct v4l2_subdev *sd); + +/* + * A macro to generate the macro or function name for sub-devices state access + * wrapper macros below. + */ +#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \ + __v4l2_subdev_state_get_ ## NAME ## ARG + +/* + * A macro to constify the return value of the state accessors when the state + * parameter is const. + */ +#define __v4l2_subdev_state_constify_ret(state, value) \ + _Generic(state, \ + const struct v4l2_subdev_state *: (const typeof(*(value)) *)(value), \ + struct v4l2_subdev_state *: (value) \ + ) + +/** + * v4l2_subdev_state_get_format() - Get pointer to a stream format + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad + + * stream in the subdev state. + * + * For stream-unaware drivers the format for the corresponding pad is returned. + * If the pad does not exist, NULL is returned. 
+ */ +/* + * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with + * two or three arguments. The purpose of the __v4l2_subdev_state_gen_call() + * macro is to come up with the name of the function or macro to call, using + * the last two arguments (_stream and _pad). The selected function or macro is + * then called using the arguments specified by the caller. The + * __v4l2_subdev_state_constify_ret() macro constifies the returned pointer + * when the state is const, allowing the state accessors to guarantee + * const-correctness in all cases. + * + * A similar arrangement is used for v4l2_subdev_state_crop(), + * v4l2_subdev_state_compose() and v4l2_subdev_state_get_interval() below. + */ +#define v4l2_subdev_state_get_format(state, pad, ...) \ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_format_pad(state, pad) \ + __v4l2_subdev_state_get_format(state, pad, 0) +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +/** + * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to crop rectangle for the given pad + stream in the + * subdev state. + * + * For stream-unaware drivers the crop rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_crop(state, pad, ...) 
\ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_crop_pad(state, pad) \ + __v4l2_subdev_state_get_crop(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream); + +/** + * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to compose rectangle for the given pad + stream in the + * subdev state. + * + * For stream-unaware drivers the compose rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_compose(state, pad, ...) \ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_compose_pad(state, pad) \ + __v4l2_subdev_state_get_compose(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +/** + * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to the frame interval for the given pad + stream in + * the subdev state. + * + * For stream-unaware drivers the frame interval for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_interval(state, pad, ...) 
\ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_interval_pad(state, pad) \ + __v4l2_subdev_state_get_interval(state, pad, 0) +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +/** + * v4l2_subdev_get_fmt() - Fill format based on state + * @sd: subdevice + * @state: subdevice state + * @format: pointer to &struct v4l2_subdev_format + * + * Fill @format->format field based on the information in the @format struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_fmt if the subdev driver does not need to + * do anything special in their get_fmt op. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + +/** + * v4l2_subdev_get_frame_interval() - Fill frame interval based on state + * @sd: subdevice + * @state: subdevice state + * @fi: pointer to &struct v4l2_subdev_frame_interval + * + * Fill @fi->interval field based on the information in the @fi struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_frame_interval if the subdev driver does + * not need to do anything special in their get_frame_interval op. + * + * Returns 0 on success, error value otherwise. 
+ */ +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi); + +/** + * v4l2_subdev_set_routing() - Set given routing to subdev state + * @sd: The subdevice + * @state: The subdevice state + * @routing: Routing that will be copied to subdev state + * + * This will release old routing table (if any) from the state, allocate + * enough space for the given routing, and copy the routing. + * + * This can be used from the subdev driver's set_routing op, after validating + * the routing. + */ +int v4l2_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing); + +struct v4l2_subdev_route * +__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, + struct v4l2_subdev_route *route); + +/** + * for_each_active_route - iterate on all active routes of a routing table + * @routing: The routing table + * @route: The route iterator + */ +#define for_each_active_route(routing, route) \ + for ((route) = NULL; \ + ((route) = __v4l2_subdev_next_active_route((routing), (route)));) + +/** + * v4l2_subdev_set_routing_with_fmt() - Set given routing and format to subdev + * state + * @sd: The subdevice + * @state: The subdevice state + * @routing: Routing that will be copied to subdev state + * @fmt: Format used to initialize all the streams + * + * This is the same as v4l2_subdev_set_routing, but additionally initializes + * all the streams using the given format. 
+ */ +int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing, + const struct v4l2_mbus_framefmt *fmt); + +/** + * v4l2_subdev_routing_find_opposite_end() - Find the opposite stream + * @routing: routing used to find the opposite side + * @pad: pad id + * @stream: stream id + * @other_pad: pointer used to return the opposite pad + * @other_stream: pointer used to return the opposite stream + * + * This function uses the routing table to find the pad + stream which is + * opposite the given pad + stream. + * + * @other_pad and/or @other_stream can be NULL if the caller does not need the + * value. + * + * Returns 0 on success, or -EINVAL if no matching route is found. + */ +int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, + u32 pad, u32 stream, u32 *other_pad, + u32 *other_stream); + +/** + * v4l2_subdev_state_get_opposite_stream_format() - Get pointer to opposite + * stream format + * @state: subdevice state + * @pad: pad id + * @stream: stream id + * + * This returns a pointer to &struct v4l2_mbus_framefmt for the pad + stream + * that is opposite the given pad + stream in the subdev state. + * + * If the state does not contain the given pad + stream, NULL is returned. + */ +struct v4l2_mbus_framefmt * +v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, + u32 pad, u32 stream); + +/** + * v4l2_subdev_state_xlate_streams() - Translate streams from one pad to another + * + * @state: Subdevice state + * @pad0: The first pad + * @pad1: The second pad + * @streams: Streams bitmask on the first pad + * + * Streams on sink pads of a subdev are routed to source pads as expressed in + * the subdev state routing table. Stream numbers don't necessarily match on + * the sink and source side of a route. 
This function translates stream numbers + * on @pad0, expressed as a bitmask in @streams, to the corresponding streams + * on @pad1 using the routing table from the @state. It returns the stream mask + * on @pad1, and updates @streams with the streams that have been found in the + * routing table. + * + * @pad0 and @pad1 must be a sink and a source, in any order. + * + * Return: The bitmask of streams of @pad1 that are routed to @streams on @pad0. + */ +u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, + u32 pad0, u32 pad1, u64 *streams); + +/** + * enum v4l2_subdev_routing_restriction - Subdevice internal routing restrictions + * + * @V4L2_SUBDEV_ROUTING_NO_1_TO_N: + * an input stream shall not be routed to multiple output streams (stream + * duplication) + * @V4L2_SUBDEV_ROUTING_NO_N_TO_1: + * multiple input streams shall not be routed to the same output stream + * (stream merging) + * @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: + * all streams from a sink pad must be routed to a single source pad + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: + * all streams on a source pad must originate from a single sink pad + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: + * source pads shall not contain multiplexed streams + * @V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: + * sink pads shall not contain multiplexed streams + * @V4L2_SUBDEV_ROUTING_ONLY_1_TO_1: + * only non-overlapping 1-to-1 stream routing is allowed (a combination of + * @V4L2_SUBDEV_ROUTING_NO_1_TO_N and @V4L2_SUBDEV_ROUTING_NO_N_TO_1) + * @V4L2_SUBDEV_ROUTING_NO_STREAM_MIX: + * all streams from a sink pad must be routed to a single source pad, and + * that source pad shall not get routes from any other sink pad + * (a combination of @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX and + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) + * @V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING: + * no multiplexed streams allowed on either source or sink sides. 
+ */ +enum v4l2_subdev_routing_restriction { + V4L2_SUBDEV_ROUTING_NO_1_TO_N = BIT(0), + V4L2_SUBDEV_ROUTING_NO_N_TO_1 = BIT(1), + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX = BIT(2), + V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX = BIT(3), + V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING = BIT(4), + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING = BIT(5), + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 = + V4L2_SUBDEV_ROUTING_NO_1_TO_N | + V4L2_SUBDEV_ROUTING_NO_N_TO_1, + V4L2_SUBDEV_ROUTING_NO_STREAM_MIX = + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX | + V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX, + V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING = + V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING | + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING, +}; + +/** + * v4l2_subdev_routing_validate() - Verify that routes comply with driver + * constraints + * @sd: The subdevice + * @routing: Routing to verify + * @disallow: Restrictions on routes + * + * This verifies that the given routing complies with the @disallow constraints. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, + const struct v4l2_subdev_krouting *routing, + enum v4l2_subdev_routing_restriction disallow); + +/** + * v4l2_subdev_enable_streams() - Enable streams on a pad + * @sd: The subdevice + * @pad: The pad + * @streams_mask: Bitmask of streams to enable + * + * This function enables streams on a source @pad of a subdevice. The pad is + * identified by its index, while the streams are identified by the + * @streams_mask bitmask. This allows enabling multiple streams on a pad at + * once. + * + * Enabling a stream that is already enabled isn't allowed. If @streams_mask + * contains an already enabled stream, this function returns -EALREADY without + * performing any operation. + * + * Per-stream enable is only available for subdevs that implement the + * .enable_streams() and .disable_streams() operations. 
For other subdevs, this
+ * function implements a best-effort compatibility by calling the .s_stream()
+ * operation, limited to subdevs that have a single source pad.
+ *
+ * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0).
+ *
+ * Return:
+ * * 0: Success
+ * * -EALREADY: One of the streams in streams_mask is already enabled
+ * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad
+ * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is
+ *   impossible because the subdev has multiple source pads
+ */
+int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
+			       u64 streams_mask);
+
+/**
+ * v4l2_subdev_disable_streams() - Disable streams on a pad
+ * @sd: The subdevice
+ * @pad: The pad
+ * @streams_mask: Bitmask of streams to disable
+ *
+ * This function disables streams on a source @pad of a subdevice. The pad is
+ * identified by its index, while the streams are identified by the
+ * @streams_mask bitmask. This allows disabling multiple streams on a pad at
+ * once.
+ *
+ * Disabling a stream that is not enabled isn't allowed. If @streams_mask
+ * contains a disabled stream, this function returns -EALREADY without
+ * performing any operation.
+ *
+ * Per-stream disable is only available for subdevs that implement the
+ * .enable_streams() and .disable_streams() operations. For other subdevs, this
+ * function implements a best-effort compatibility by calling the .s_stream()
+ * operation, limited to subdevs that have a single source pad.
+ *
+ * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0).
+ * + * Return: + * * 0: Success + * * -EALREADY: One of the streams in streams_mask is not enabled + * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad + * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is + * impossible because the subdev has multiple source pads + */ +int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask); + +/** + * v4l2_subdev_s_stream_helper() - Helper to implement the subdev s_stream + * operation using enable_streams and disable_streams + * @sd: The subdevice + * @enable: Enable or disable streaming + * + * Subdevice drivers that implement the streams-aware + * &v4l2_subdev_pad_ops.enable_streams and &v4l2_subdev_pad_ops.disable_streams + * operations can use this helper to implement the legacy + * &v4l2_subdev_video_ops.s_stream operation. + * + * This helper can only be used by subdevs that have a single source pad. + * + * Return: 0 on success, or a negative error code otherwise. + */ +int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable); + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/** + * v4l2_subdev_lock_state() - Locks the subdev state + * @state: The subdevice state + * + * Locks the given subdev state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state) +{ + mutex_lock(state->lock); +} + +/** + * v4l2_subdev_unlock_state() - Unlocks the subdev state + * @state: The subdevice state + * + * Unlocks the given subdev state. + */ +static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state) +{ + mutex_unlock(state->lock); +} + +/** + * v4l2_subdev_lock_states - Lock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Locks the state of two sub-devices. + * + * The states must be unlocked with v4l2_subdev_unlock_states() after use. 
+ * + * This differs from calling v4l2_subdev_lock_state() on both states so that if + * the states share the same lock, the lock is acquired only once (so no + * deadlock occurs). The caller is responsible for ensuring the locks will + * always be acquired in the same order. + */ +static inline void v4l2_subdev_lock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_lock(state1->lock); + if (state1->lock != state2->lock) + mutex_lock(state2->lock); +} + +/** + * v4l2_subdev_unlock_states() - Unlock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Unlocks the state of two sub-devices. + * + * This differs from calling v4l2_subdev_unlock_state() on both states so that + * if the states share the same lock, the lock is released only once. + */ +static inline void v4l2_subdev_unlock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_unlock(state1->lock); + if (state1->lock != state2->lock) + mutex_unlock(state2->lock); +} + +/** + * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state + * is unlocked and returns it + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls + * lockdep_assert_not_held() to issue a warning if the state is locked. + * + * This function is to be used e.g. when getting the active state for the sole + * purpose of passing it forward, without accessing the state fields. 
+ */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_not_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state + * is locked and returns it + * + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls lockdep_assert_held() + * to issue a warning if the state is not locked. + * + * This function is to be used when the caller knows that the active state is + * already locked. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev + * state for the subdevice + * @sd: The subdevice + * + * Returns the locked active state for the subdevice, or NULL if the subdev + * does not support active state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + v4l2_subdev_lock_state(sd->active_state); + return sd->active_state; +} + +/** + * v4l2_subdev_init - initializes the sub-device struct + * + * @sd: pointer to the &struct v4l2_subdev to be initialized + * @ops: pointer to &struct v4l2_subdev_ops. + */ +void v4l2_subdev_init(struct v4l2_subdev *sd, + const struct v4l2_subdev_ops *ops); + +extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; + +/** + * v4l2_subdev_call - call an operation of a v4l2_subdev. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. 
+ * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Example: err = v4l2_subdev_call(sd, video, s_std, norm); + */ +#define v4l2_subdev_call(sd, o, f, args...) \ + ({ \ + struct v4l2_subdev *__sd = (sd); \ + int __result; \ + if (!__sd) \ + __result = -ENODEV; \ + else if (!(__sd->ops->o && __sd->ops->o->f)) \ + __result = -ENOIOCTLCMD; \ + else if (v4l2_subdev_call_wrappers.o && \ + v4l2_subdev_call_wrappers.o->f) \ + __result = v4l2_subdev_call_wrappers.o->f( \ + __sd, ##args); \ + else \ + __result = __sd->ops->o->f(__sd, ##args); \ + __result; \ + }) + +/** + * v4l2_subdev_call_state_active - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev its active state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call(), except that this version can only be + * used for ops that take a subdev state as a parameter. The macro will get the + * active state, lock it before calling the op and unlock it after the call. + */ +#define v4l2_subdev_call_state_active(sd, o, f, args...) 
\ + ({ \ + int __result; \ + struct v4l2_subdev_state *state; \ + state = v4l2_subdev_get_unlocked_active_state(sd); \ + if (state) \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + if (state) \ + v4l2_subdev_unlock_state(state); \ + __result; \ + }) + +/** + * v4l2_subdev_call_state_try - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev a newly allocated try state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call_state_active(), except that as this + * version allocates a new state, this is only usable for + * V4L2_SUBDEV_FORMAT_TRY use cases. + * + * Note: only legacy non-MC drivers may need this macro. + */ +#define v4l2_subdev_call_state_try(sd, o, f, args...) \ + ({ \ + int __result; \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":state->lock"; \ + struct v4l2_subdev_state *state = \ + __v4l2_subdev_state_alloc(sd, name, &__key); \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + v4l2_subdev_unlock_state(state); \ + __v4l2_subdev_state_free(state); \ + __result; \ + }) + +/** + * v4l2_subdev_has_op - Checks if a subdev defines a certain operation. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: The group of callback functions in &struct v4l2_subdev_ops + * which @f is a part of. + * @f: callback function to be checked for its existence. 
+ */ +#define v4l2_subdev_has_op(sd, o, f) \ + ((sd)->ops->o && (sd)->ops->o->f) + +/** + * v4l2_subdev_notify_event() - Delivers event notification for subdevice + * @sd: The subdev for which to deliver the event + * @ev: The event to deliver + * + * Will deliver the specified event to all userspace event listeners which are + * subscribed to the v4l2 subdev event queue as well as to the bridge driver + * using the notify callback. The notification type for the notify callback + * will be %V4L2_DEVICE_NOTIFY_EVENT. + */ +void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev); + +/** + * v4l2_subdev_is_streaming() - Returns if the subdevice is streaming + * @sd: The subdevice + * + * v4l2_subdev_is_streaming() tells if the subdevice is currently streaming. + * "Streaming" here means whether .s_stream() or .enable_streams() has been + * successfully called, and the streaming has not yet been disabled. + * + * If the subdevice implements .enable_streams() this function must be called + * while holding the active state lock. + */ +bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd); + +#endif /* _V4L2_SUBDEV_H */ diff --git a/6.17.0/include-overrides/media/v4l2-vp9.h b/6.17.0/include-overrides/media/v4l2-vp9.h new file mode 100644 index 0000000..05478ad --- /dev/null +++ b/6.17.0/include-overrides/media/v4l2-vp9.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Helper functions for vp9 codecs. + * + * Copyright (c) 2021 Collabora, Ltd. + * + * Author: Andrzej Pietrasiewicz + */ + +#ifndef _MEDIA_V4L2_VP9_H +#define _MEDIA_V4L2_VP9_H + +#include <media/v4l2-ctrls.h> + +/** + * struct v4l2_vp9_frame_mv_context - motion vector-related probabilities + * + * @joint: motion vector joint probabilities. + * @sign: motion vector sign probabilities. + * @classes: motion vector class probabilities. + * @class0_bit: motion vector class0 bit probabilities. + * @bits: motion vector bits probabilities.
+ * @class0_fr: motion vector class0 fractional bit probabilities. + * @fr: motion vector fractional bit probabilities. + * @class0_hp: motion vector class0 high precision fractional bit probabilities. + * @hp: motion vector high precision fractional bit probabilities. + * + * A member of v4l2_vp9_frame_context. + */ +struct v4l2_vp9_frame_mv_context { + u8 joint[3]; + u8 sign[2]; + u8 classes[2][10]; + u8 class0_bit[2]; + u8 bits[2][10]; + u8 class0_fr[2][2][3]; + u8 fr[2][3]; + u8 class0_hp[2]; + u8 hp[2]; +}; + +/** + * struct v4l2_vp9_frame_context - frame probabilities, including motion-vector related + * + * @tx8: TX 8x8 probabilities. + * @tx16: TX 16x16 probabilities. + * @tx32: TX 32x32 probabilities. + * @coef: coefficient probabilities. + * @skip: skip probabilities. + * @inter_mode: inter mode probabilities. + * @interp_filter: interpolation filter probabilities. + * @is_inter: is inter-block probabilities. + * @comp_mode: compound prediction mode probabilities. + * @single_ref: single ref probabilities. + * @comp_ref: compound ref probabilities. + * @y_mode: Y prediction mode probabilities. + * @uv_mode: UV prediction mode probabilities. + * @partition: partition probabilities. + * @mv: motion vector probabilities. + * + * Drivers which need to keep track of frame context(s) can use this struct. + * The members correspond to probability tables, which are specified only implicitly in the + * vp9 spec. Section 10.5 "Default probability tables" contains all the types of involved + * tables, i.e. the actual tables are of the same kind, and when they are reset (which is + * mandated by the spec sometimes) they are overwritten with values from the default tables. 
+ */ +struct v4l2_vp9_frame_context { + u8 tx8[2][1]; + u8 tx16[2][2]; + u8 tx32[2][3]; + u8 coef[4][2][2][6][6][3]; + u8 skip[3]; + u8 inter_mode[7][3]; + u8 interp_filter[4][2]; + u8 is_inter[4]; + u8 comp_mode[5]; + u8 single_ref[5][2]; + u8 comp_ref[5]; + u8 y_mode[4][9]; + u8 uv_mode[10][9]; + u8 partition[16][3]; + + struct v4l2_vp9_frame_mv_context mv; +}; + +/** + * struct v4l2_vp9_frame_symbol_counts - pointers to arrays of symbol counts + * + * @partition: partition counts. + * @skip: skip counts. + * @intra_inter: is inter-block counts. + * @tx32p: TX32 counts. + * @tx16p: TX16 counts. + * @tx8p: TX8 counts. + * @y_mode: Y prediction mode counts. + * @uv_mode: UV prediction mode counts. + * @comp: compound prediction mode counts. + * @comp_ref: compound ref counts. + * @single_ref: single ref counts. + * @mv_mode: inter mode counts. + * @filter: interpolation filter counts. + * @mv_joint: motion vector joint counts. + * @sign: motion vector sign counts. + * @classes: motion vector class counts. + * @class0: motion vector class0 bit counts. + * @bits: motion vector bits counts. + * @class0_fp: motion vector class0 fractional bit counts. + * @fp: motion vector fractional bit counts. + * @class0_hp: motion vector class0 high precision fractional bit counts. + * @hp: motion vector high precision fractional bit counts. + * @coeff: coefficient counts. + * @eob: eob counts + * + * The fields correspond to what is specified in section 8.3 "Clear counts process" of the spec. + * Different pieces of hardware can report the counts in different order, so we cannot rely on + * simply overlaying a struct on a relevant block of memory. Instead we provide pointers to + * arrays or array of pointers to arrays in case of coeff, or array of pointers for eob. 
+ */ +struct v4l2_vp9_frame_symbol_counts { + u32 (*partition)[16][4]; + u32 (*skip)[3][2]; + u32 (*intra_inter)[4][2]; + u32 (*tx32p)[2][4]; + u32 (*tx16p)[2][4]; + u32 (*tx8p)[2][2]; + u32 (*y_mode)[4][10]; + u32 (*uv_mode)[10][10]; + u32 (*comp)[5][2]; + u32 (*comp_ref)[5][2]; + u32 (*single_ref)[5][2][2]; + u32 (*mv_mode)[7][4]; + u32 (*filter)[4][3]; + u32 (*mv_joint)[4]; + u32 (*sign)[2][2]; + u32 (*classes)[2][11]; + u32 (*class0)[2][2]; + u32 (*bits)[2][10][2]; + u32 (*class0_fp)[2][2][4]; + u32 (*fp)[2][4]; + u32 (*class0_hp)[2][2]; + u32 (*hp)[2][2]; + u32 (*coeff[4][2][2][6][6])[3]; + u32 *eob[4][2][2][6][6][2]; +}; + +extern const u8 v4l2_vp9_kf_y_mode_prob[10][10][9]; /* Section 10.4 of the spec */ +extern const u8 v4l2_vp9_kf_partition_probs[16][3]; /* Section 10.4 of the spec */ +extern const u8 v4l2_vp9_kf_uv_mode_prob[10][9]; /* Section 10.4 of the spec */ +extern const struct v4l2_vp9_frame_context v4l2_vp9_default_probs; /* Section 10.5 of the spec */ + +/** + * v4l2_vp9_fw_update_probs() - Perform forward update of vp9 probabilities + * + * @probs: current probabilities values + * @deltas: delta values from compressed header + * @dec_params: vp9 frame decoding parameters + * + * This function performs forward updates of probabilities for the vp9 boolean decoder. + * The frame header can contain a directive to update the probabilities (deltas), if so, then + * the deltas are provided in the header, too. The userspace parses those and passes the said + * deltas struct to the kernel. + */ +void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params); + +/** + * v4l2_vp9_reset_frame_ctx() - Reset appropriate frame context + * + * @dec_params: vp9 frame decoding parameters + * @frame_context: array of the 4 frame contexts + * + * This function resets appropriate frame contexts, based on what's in dec_params. 
+ * + * Returns the frame context index after the update, which might be reset to zero if + * mandated by the spec. + */ +u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params, + struct v4l2_vp9_frame_context *frame_context); + +/** + * v4l2_vp9_adapt_coef_probs() - Perform backward update of vp9 coefficients probabilities + * + * @probs: current probabilities values + * @counts: values of symbol counts after the current frame has been decoded + * @use_128: flag to request that 128 is used as update factor if true, otherwise 112 is used + * @frame_is_intra: flag indicating that FrameIsIntra is true + * + * This function performs backward updates of coefficients probabilities for the vp9 boolean + * decoder. After a frame has been decoded the counts of how many times a given symbol has + * occurred are known and are used to update the probability of each symbol. + */ +void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + bool use_128, + bool frame_is_intra); + +/** + * v4l2_vp9_adapt_noncoef_probs() - Perform backward update of vp9 non-coefficients probabilities + * + * @probs: current probabilities values + * @counts: values of symbol counts after the current frame has been decoded + * @reference_mode: specifies the type of inter prediction to be used. See + * &v4l2_vp9_reference_mode for more details + * @interpolation_filter: specifies the filter selection used for performing inter prediction. + * See &v4l2_vp9_interpolation_filter for more details + * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details + * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags + * + * This function performs backward updates of non-coefficients probabilities for the vp9 boolean + * decoder. After a frame has been decoded the counts of how many times a given symbol has + * occurred are known and are used to update the probability of each symbol. 
+ */ +void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + u8 reference_mode, u8 interpolation_filter, u8 tx_mode, + u32 flags); + +/** + * v4l2_vp9_seg_feat_enabled() - Check if a segmentation feature is enabled + * + * @feature_enabled: array of 8-bit flags (for all segments) + * @feature: id of the feature to check + * @segid: id of the segment to look up + * + * This function returns true if a given feature is active in a given segment. + */ +bool +v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled, + unsigned int feature, + unsigned int segid); + +#endif /* _MEDIA_V4L2_VP9_H */ diff --git a/6.17.0/include-overrides/media/videobuf2-core.h b/6.17.0/include-overrides/media/videobuf2-core.h new file mode 100644 index 0000000..9b02aeb --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-core.h @@ -0,0 +1,1348 @@ +/* + * videobuf2-core.h - Video Buffer 2 Core Framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak <pawel@osciak.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_CORE_H +#define _MEDIA_VIDEOBUF2_CORE_H + +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/poll.h> +#include <linux/videodev2.h> +#include <linux/dma-buf.h> +#include <linux/bitops.h> +#include <media/media-request.h> + +#define VB2_MAX_FRAME (32) +#define VB2_MAX_PLANES (8) + +/** + * enum vb2_memory - type of memory model used to make the buffers visible + * on userspace. + * + * @VB2_MEMORY_UNKNOWN: Buffer status is unknown or it is not used yet on + * userspace. + * @VB2_MEMORY_MMAP: The buffers are allocated by the Kernel and it is + * memory mapped via mmap() ioctl. This model is + * also used when the user is using the buffers via + * read() or write() system calls. + * @VB2_MEMORY_USERPTR: The buffers was allocated in userspace and it is + * memory mapped via mmap() ioctl.
+ * @VB2_MEMORY_DMABUF: The buffers are passed to userspace via DMA buffer. + */ +enum vb2_memory { + VB2_MEMORY_UNKNOWN = 0, + VB2_MEMORY_MMAP = 1, + VB2_MEMORY_USERPTR = 2, + VB2_MEMORY_DMABUF = 4, +}; + +struct vb2_fileio_data; +struct vb2_threadio_data; +struct vb2_buffer; + +/** + * struct vb2_mem_ops - memory handling/memory allocator operations. + * @alloc: allocate video memory and, optionally, allocator private data, + * return ERR_PTR() on failure or a pointer to allocator private, + * per-buffer data on success; the returned private structure + * will then be passed as @buf_priv argument to other ops in this + * structure. The size argument to this function shall be + * *page aligned*. + * @put: inform the allocator that the buffer will no longer be used; + * usually will result in the allocator freeing the buffer (if + * no other users of this buffer are present); the @buf_priv + * argument is the allocator private per-buffer structure + * previously returned from the alloc callback. + * @get_dmabuf: acquire userspace memory for a hardware operation; used for + * DMABUF memory types. + * @get_userptr: acquire userspace memory for a hardware operation; used for + * USERPTR memory types; vaddr is the address passed to the + * videobuf2 layer when queuing a video buffer of USERPTR type; + * should return an allocator private per-buffer structure + * associated with the buffer on success, ERR_PTR() on failure; + * the returned private structure will then be passed as @buf_priv + * argument to other ops in this structure. + * @put_userptr: inform the allocator that a USERPTR buffer will no longer + * be used. + * @prepare: called every time the buffer is passed from userspace to the + * driver, useful for cache synchronisation, optional. + * @finish: called every time the buffer is passed back from the driver + * to the userspace, also optional. 
+ * @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation; + * used for DMABUF memory types; dev is the alloc device + * dbuf is the shared dma_buf; returns ERR_PTR() on failure; + * allocator private per-buffer structure on success; + * this needs to be used for further accesses to the buffer. + * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF + * buffer is no longer used; the @buf_priv argument is the + * allocator private per-buffer structure previously returned + * from the attach_dmabuf callback. + * @map_dmabuf: request for access to the dmabuf from allocator; the allocator + * of dmabuf is informed that this driver is going to use the + * dmabuf. + * @unmap_dmabuf: releases access control to the dmabuf - allocator is notified + * that this driver is done using the dmabuf for now. + * @vaddr: return a kernel virtual address to a given memory buffer + * associated with the passed private structure or NULL if no + * such mapping exists. + * @cookie: return allocator specific cookie for a given memory buffer + * associated with the passed private structure or NULL if not + * available. + * @num_users: return the current number of users of a memory buffer; + * return 1 if the videobuf2 layer (or actually the driver using + * it) is the only user. + * @mmap: setup a userspace mapping for a given memory buffer under + * the provided virtual memory region. + * + * Those operations are used by the videobuf2 core to implement the memory + * handling/memory allocators for each type of supported streaming I/O method. + * + * .. note:: + * #) Required ops for USERPTR types: get_userptr, put_userptr. + * + * #) Required ops for MMAP types: alloc, put, num_users, mmap. + * + * #) Required ops for read/write access types: alloc, put, num_users, vaddr. + * + * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, + * map_dmabuf, unmap_dmabuf. 
+ */ +struct vb2_mem_ops { + void *(*alloc)(struct vb2_buffer *vb, + struct device *dev, + unsigned long size); + void (*put)(void *buf_priv); + struct dma_buf *(*get_dmabuf)(struct vb2_buffer *vb, + void *buf_priv, + unsigned long flags); + + void *(*get_userptr)(struct vb2_buffer *vb, + struct device *dev, + unsigned long vaddr, + unsigned long size); + void (*put_userptr)(void *buf_priv); + + void (*prepare)(void *buf_priv); + void (*finish)(void *buf_priv); + + void *(*attach_dmabuf)(struct vb2_buffer *vb, + struct device *dev, + struct dma_buf *dbuf, + unsigned long size); + void (*detach_dmabuf)(void *buf_priv); + int (*map_dmabuf)(void *buf_priv); + void (*unmap_dmabuf)(void *buf_priv); + + void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv); + void *(*cookie)(struct vb2_buffer *vb, void *buf_priv); + + unsigned int (*num_users)(void *buf_priv); + + int (*mmap)(void *buf_priv, struct vm_area_struct *vma); +}; + +/** + * struct vb2_plane - plane information. + * @mem_priv: private data with this plane. + * @dbuf: dma_buf - shared buffer object. + * @dbuf_mapped: flag to show whether dbuf is mapped or not + * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a + * previous plane of the buffer. + * @bytesused: number of bytes occupied by data in the plane (payload). + * @length: size of this plane (NOT the payload) in bytes. The maximum + * valid size is MAX_UINT - PAGE_SIZE. + * @min_length: minimum required size of this plane (NOT the payload) in bytes. + * @length is always greater or equal to @min_length, and like + * @length, it is limited to MAX_UINT - PAGE_SIZE. + * @m: Union with memtype-specific data. + * @m.offset: when memory in the associated struct vb2_buffer is + * %VB2_MEMORY_MMAP, equals the offset from the start of + * the device memory for this plane (or is a "cookie" that + * should be passed to mmap() called on the video node). 
+ * @m.userptr: when memory is %VB2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane. + * @m.fd: when memory is %VB2_MEMORY_DMABUF, a userspace file + * descriptor associated with this plane. + * @data_offset: offset in the plane to the start of data; usually 0, + * unless there is a header in front of the data. + * + * Should contain enough information to be able to cover all the fields + * of &struct v4l2_plane at videodev2.h. + */ +struct vb2_plane { + void *mem_priv; + struct dma_buf *dbuf; + unsigned int dbuf_mapped; + bool dbuf_duplicated; + unsigned int bytesused; + unsigned int length; + unsigned int min_length; + union { + unsigned int offset; + unsigned long userptr; + int fd; + } m; + unsigned int data_offset; +}; + +/** + * enum vb2_io_modes - queue access methods. + * @VB2_MMAP: driver supports MMAP with streaming API. + * @VB2_USERPTR: driver supports USERPTR with streaming API. + * @VB2_READ: driver supports read() style access. + * @VB2_WRITE: driver supports write() style access. + * @VB2_DMABUF: driver supports DMABUF with streaming API. + */ +enum vb2_io_modes { + VB2_MMAP = BIT(0), + VB2_USERPTR = BIT(1), + VB2_READ = BIT(2), + VB2_WRITE = BIT(3), + VB2_DMABUF = BIT(4), +}; + +/** + * enum vb2_buffer_state - current video buffer state. + * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control. + * @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request. + * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf2. + * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf2, but not in driver. + * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used + * in a hardware operation. + * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf2, but + * not yet dequeued to userspace. + * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer + * has ended with an error, which will be reported + * to the userspace when it is dequeued. 
+ */ +enum vb2_buffer_state { + VB2_BUF_STATE_DEQUEUED, + VB2_BUF_STATE_IN_REQUEST, + VB2_BUF_STATE_PREPARING, + VB2_BUF_STATE_QUEUED, + VB2_BUF_STATE_ACTIVE, + VB2_BUF_STATE_DONE, + VB2_BUF_STATE_ERROR, +}; + +struct vb2_queue; + +/** + * struct vb2_buffer - represents a video buffer. + * @vb2_queue: pointer to &struct vb2_queue with the queue to + * which this driver belongs. + * @index: id number of the buffer. + * @type: buffer type. + * @memory: the method, in which the actual data is passed. + * @num_planes: number of planes in the buffer + * on an internal driver queue. + * @timestamp: frame timestamp in ns. + * @request: the request this buffer is associated with. + * @req_obj: used to bind this buffer to a request. This + * request object has a refcount. + */ +struct vb2_buffer { + struct vb2_queue *vb2_queue; + unsigned int index; + unsigned int type; + unsigned int memory; + unsigned int num_planes; + u64 timestamp; + struct media_request *request; + struct media_request_object req_obj; + + /* private: internal use only + * + * state: current buffer state; do not change + * synced: this buffer has been synced for DMA, i.e. the + * 'prepare' memop was called. It is cleared again + * after the 'finish' memop is called. + * prepared: this buffer has been prepared, i.e. the + * buf_prepare op was called. It is cleared again + * after the 'buf_finish' op is called. + * copied_timestamp: the timestamp of this capture buffer was copied + * from an output buffer. + * skip_cache_sync_on_prepare: when set buffer's ->prepare() function + * skips cache sync/invalidation. + * skip_cache_sync_on_finish: when set buffer's ->finish() function + * skips cache sync/invalidation. 
+ * planes: per-plane information; do not change + * queued_entry: entry on the queued buffers list, which holds + * all buffers queued from userspace + * done_entry: entry on the list that stores all buffers ready + * to be dequeued to userspace + */ + enum vb2_buffer_state state; + unsigned int synced:1; + unsigned int prepared:1; + unsigned int copied_timestamp:1; + unsigned int skip_cache_sync_on_prepare:1; + unsigned int skip_cache_sync_on_finish:1; + + struct vb2_plane planes[VB2_MAX_PLANES]; + struct list_head queued_entry; + struct list_head done_entry; +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Counters for how often these buffer-related ops are + * called. Used to check for unbalanced ops. + */ + u32 cnt_mem_alloc; + u32 cnt_mem_put; + u32 cnt_mem_get_dmabuf; + u32 cnt_mem_get_userptr; + u32 cnt_mem_put_userptr; + u32 cnt_mem_prepare; + u32 cnt_mem_finish; + u32 cnt_mem_attach_dmabuf; + u32 cnt_mem_detach_dmabuf; + u32 cnt_mem_map_dmabuf; + u32 cnt_mem_unmap_dmabuf; + u32 cnt_mem_vaddr; + u32 cnt_mem_cookie; + u32 cnt_mem_num_users; + u32 cnt_mem_mmap; + + u32 cnt_buf_out_validate; + u32 cnt_buf_init; + u32 cnt_buf_prepare; + u32 cnt_buf_finish; + u32 cnt_buf_cleanup; + u32 cnt_buf_queue; + u32 cnt_buf_request_complete; + + /* This counts the number of calls to vb2_buffer_done() */ + u32 cnt_buf_done; +#endif +}; + +/** + * struct vb2_ops - driver-specific callbacks. + * + * These operations are not called from interrupt context except where + * mentioned specifically. + * + * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS() + * handlers before memory allocation. It can be called + * twice: if the original number of requested buffers + * could not be allocated, then it will be called a + * second time with the actually allocated number of + * buffers to verify if that is OK. 
+ * The driver should return the required number of buffers + * in \*num_buffers, the required number of planes per + * buffer in \*num_planes, the size of each plane should be + * set in the sizes\[\] array and optional per-plane + * allocator specific device in the alloc_devs\[\] array. + * When called from VIDIOC_REQBUFS(), \*num_planes == 0, + * the driver has to use the currently configured format to + * determine the plane sizes and \*num_buffers is the total + * number of buffers that are being allocated. When called + * from VIDIOC_CREATE_BUFS(), \*num_planes != 0 and it + * describes the requested number of planes and sizes\[\] + * contains the requested plane sizes. In this case + * \*num_buffers are being allocated additionally to + * the buffers already allocated. If either \*num_planes + * or the requested sizes are invalid callback must return %-EINVAL. + * @wait_prepare: release any locks taken while calling vb2 functions; + * it is called before an ioctl needs to wait for a new + * buffer to arrive; required to avoid a deadlock in + * blocking access type. + * @wait_finish: reacquire all locks released in the previous callback; + * required to continue operation after sleeping while + * waiting for a new buffer to arrive. + * @buf_out_validate: called when the output buffer is prepared or queued + * to a request; drivers can use this to validate + * userspace-provided information; this is required only + * for OUTPUT queues. + * @buf_init: called once after allocating a buffer (in MMAP case) + * or after acquiring a new USERPTR buffer; drivers may + * perform additional buffer-related initialization; + * initialization failure (return != 0) will prevent + * queue setup from completing successfully; optional. 
+ * @buf_prepare: called every time the buffer is queued from userspace + * and from the VIDIOC_PREPARE_BUF() ioctl; drivers may + * perform any initialization required before each + * hardware operation in this callback; drivers can + * access/modify the buffer here as it is still synced for + * the CPU; drivers that support VIDIOC_CREATE_BUFS() must + * also validate the buffer size; if an error is returned, + * the buffer will not be queued in driver; optional. + * @buf_finish: called before every dequeue of the buffer back to + * userspace; the buffer is synced for the CPU, so drivers + * can access/modify the buffer contents; drivers may + * perform any operations required before userspace + * accesses the buffer; optional. The buffer state can be + * one of the following: %DONE and %ERROR occur while + * streaming is in progress, and the %PREPARED state occurs + * when the queue has been canceled and all pending + * buffers are being returned to their default %DEQUEUED + * state. Typically you only have to do something if the + * state is %VB2_BUF_STATE_DONE, since in all other cases + * the buffer contents will be ignored anyway. + * @buf_cleanup: called once before the buffer is freed; drivers may + * perform any additional cleanup; optional. + * @prepare_streaming: called once to prepare for 'streaming' state; this is + * where validation can be done to verify everything is + * okay and streaming resources can be claimed. It is + * called when the VIDIOC_STREAMON ioctl is called. The + * actual streaming starts when @start_streaming is called. + * Optional. 
+ * @start_streaming: called once to enter 'streaming' state; the driver may + * receive buffers with @buf_queue callback + * before @start_streaming is called; the driver gets the + * number of already queued buffers in count parameter; + * driver can return an error if hardware fails, in that + * case all buffers that have been already given by + * the @buf_queue callback are to be returned by the driver + * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED. + * If you need a minimum number of buffers before you can + * start streaming, then set + * &vb2_queue->min_queued_buffers. If that is non-zero + * then @start_streaming won't be called until at least + * that many buffers have been queued up by userspace. + * @stop_streaming: called when 'streaming' state must be disabled; driver + * should stop any DMA transactions or wait until they + * finish and give back all buffers it got from &buf_queue + * callback by calling vb2_buffer_done() with either + * %VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR; may use + * vb2_wait_for_all_buffers() function + * @unprepare_streaming:called as counterpart to @prepare_streaming; any claimed + * streaming resources can be released here. It is + * called when the VIDIOC_STREAMOFF ioctls is called or + * when the streaming filehandle is closed. Optional. + * @buf_queue: passes buffer vb to the driver; driver may start + * hardware operation on this buffer; driver should give + * the buffer back by calling vb2_buffer_done() function; + * it is always called after calling VIDIOC_STREAMON() + * ioctl; might be called before @start_streaming callback + * if user pre-queued buffers before calling + * VIDIOC_STREAMON(). + * @buf_request_complete: a buffer that was never queued to the driver but is + * associated with a queued request was canceled. + * The driver will have to mark associated objects in the + * request as completed; required if requests are + * supported. 
+ */ +struct vb2_ops { + int (*queue_setup)(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]); + + void (*wait_prepare)(struct vb2_queue *q); + void (*wait_finish)(struct vb2_queue *q); + + int (*buf_out_validate)(struct vb2_buffer *vb); + int (*buf_init)(struct vb2_buffer *vb); + int (*buf_prepare)(struct vb2_buffer *vb); + void (*buf_finish)(struct vb2_buffer *vb); + void (*buf_cleanup)(struct vb2_buffer *vb); + + int (*prepare_streaming)(struct vb2_queue *q); + int (*start_streaming)(struct vb2_queue *q, unsigned int count); + void (*stop_streaming)(struct vb2_queue *q); + void (*unprepare_streaming)(struct vb2_queue *q); + + void (*buf_queue)(struct vb2_buffer *vb); + + void (*buf_request_complete)(struct vb2_buffer *vb); +}; + +/** + * struct vb2_buf_ops - driver-specific callbacks. + * + * @verify_planes_array: Verify that a given user space structure contains + * enough planes for the buffer. This is called + * for each dequeued buffer. + * @init_buffer: given a &vb2_buffer initialize the extra data after + * struct vb2_buffer. + * For V4L2 this is a &struct vb2_v4l2_buffer. + * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure. + * For V4L2 this is a &struct v4l2_buffer. + * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer. + * If the userspace structure is invalid, then this op + * will return an error. + * @copy_timestamp: copy the timestamp from a userspace structure to + * the &struct vb2_buffer. + */ +struct vb2_buf_ops { + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); + void (*init_buffer)(struct vb2_buffer *vb); + void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); + int (*fill_vb2_buffer)(struct vb2_buffer *vb, struct vb2_plane *planes); + void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb); +}; + +/** + * struct vb2_queue - a videobuf2 queue. 
+ * + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the types defined on &enum v4l2_buf_type. + * @io_modes: supported io methods (see &enum vb2_io_modes). + * @dev: device to use for the default allocation context if the driver + * doesn't fill in the @alloc_devs array. + * @dma_attrs: DMA attributes to use for the DMA. + * @bidirectional: when this flag is set the DMA direction for the buffers of + * this queue will be overridden with %DMA_BIDIRECTIONAL direction. + * This is useful in cases where the hardware (firmware) writes to + * a buffer which is mapped as read (%DMA_TO_DEVICE), or reads from + * buffer which is mapped for write (%DMA_FROM_DEVICE) in order + * to satisfy some internal hardware restrictions or adds a padding + * needed by the processing algorithm. In case the DMA mapping is + * not bidirectional but the hardware (firmware) trying to access + * the buffer (in the opposite direction) this could lead to an + * IOMMU protection faults. + * @fileio_read_once: report EOF after reading the first buffer + * @fileio_write_immediately: queue buffer after each write() call + * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver + * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF + * has not been called. This is a vb1 idiom that has been adopted + * also by vb2. + * @supports_requests: this queue supports the Request API. + * @requires_requests: this queue requires the Request API. If this is set to 1, + * then supports_requests must be set to 1 as well. + * @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first + * time this is called. Set to 0 when the queue is canceled. + * If this is 1, then you cannot queue buffers from a request. + * @uses_requests: requests are used for this queue. Set to 1 the first time + * a request is queued. Set to 0 when the queue is canceled. 
+ * If this is 1, then you cannot queue buffers directly. + * @allow_cache_hints: when set user-space can pass cache management hints in + * order to skip cache flush/invalidation on ->prepare() or/and + * ->finish(). + * @non_coherent_mem: when set queue will attempt to allocate buffers using + * non-coherent memory. + * @lock: pointer to a mutex that protects the &struct vb2_queue. The + * driver can set this to a mutex to let the v4l2 core serialize + * the queuing ioctls. If the driver wants to handle locking + * itself, then this should be set to NULL. This lock is not used + * by the videobuf2 core API. + * @owner: The filehandle that 'owns' the buffers, i.e. the filehandle + * that called reqbufs, create_buffers or started fileio. + * This field is not used by the videobuf2 core API, but it allows + * drivers to easily associate an owner filehandle with the queue. + * @ops: driver-specific callbacks + * @mem_ops: memory allocator specific callbacks + * @buf_ops: callbacks to deliver buffer information. + * between user-space and kernel-space. + * @drv_priv: driver private data. + * @subsystem_flags: Flags specific to the subsystem (V4L2/DVB/etc.). Not used + * by the vb2 core. + * @buf_struct_size: size of the driver-specific buffer structure; + * "0" indicates the driver doesn't want to use a custom buffer + * structure type. In that case a subsystem-specific struct + * will be used (in the case of V4L2 that is + * ``sizeof(struct vb2_v4l2_buffer)``). The first field of the + * driver-specific buffer structure must be the subsystem-specific + * struct (vb2_v4l2_buffer in the case of V4L2). + * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and + * ``V4L2_BUF_FLAG_TSTAMP_SRC_*`` + * @gfp_flags: additional gfp flags used when allocating the buffers. + * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32 + * to force the buffer allocation to a specific memory zone. 
+ * @min_queued_buffers: the minimum number of queued buffers needed before + * @start_streaming can be called. Used when a DMA engine + * cannot be started unless at least this number of buffers + * have been queued into the driver. + * VIDIOC_REQBUFS will ensure at least @min_queued_buffers + 1 + * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not + * modify the requested buffer count. + * @min_reqbufs_allocation: the minimum number of buffers to be allocated when + * calling VIDIOC_REQBUFS. Note that VIDIOC_CREATE_BUFS will *not* + * modify the requested buffer count and does not use this field. + * Drivers can set this if there has to be a certain number of + * buffers available for the hardware to work effectively. + * This allows calling VIDIOC_REQBUFS with a buffer count of 1 and + * it will be automatically adjusted to a workable buffer count. + * If set, then @min_reqbufs_allocation must be larger than + * @min_queued_buffers + 1. + * If this field is > 3, then it is highly recommended that the + * driver implements the V4L2_CID_MIN_BUFFERS_FOR_CAPTURE/OUTPUT + * control. + * @alloc_devs: &struct device memory type/allocator-specific per-plane device + */ +/* + * Private elements (won't appear at the uAPI book): + * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped + * @memory: current memory type used + * @dma_dir: DMA mapping direction. + * @bufs: videobuf2 buffer structures. If it is non-NULL then + * bufs_bitmap is also non-NULL. + * @bufs_bitmap: bitmap tracking whether each bufs[] entry is used + * @max_num_buffers: upper limit of number of allocated/used buffers. + * If set to 0 v4l2 core will change it VB2_MAX_FRAME + * for backward compatibility. + * @queued_list: list of buffers currently queued from userspace + * @queued_count: number of buffers queued and ready for streaming. 
+ * @owned_by_drv_count: number of buffers owned by the driver + * @done_list: list of buffers ready to be dequeued to userspace + * @done_lock: lock to protect done_list list + * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued + * @streaming: current streaming state + * @start_streaming_called: @start_streaming was called successfully and we + * started streaming. + * @error: a fatal error occurred on the queue + * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for + * buffers. Only set for capture queues if qbuf has not yet been + * called since poll() needs to return %EPOLLERR in that situation. + * @waiting_in_dqbuf: set by the core for the duration of a blocking DQBUF, when + * it has to wait for a buffer to become available with vb2_queue->lock + * released. Used to prevent destroying the queue by other threads. + * @is_multiplanar: set if buffer type is multiplanar + * @is_output: set if buffer type is output + * @is_busy: set if at least one buffer has been allocated at some time. + * @copy_timestamp: set if vb2-core should set timestamps + * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the + * last decoded buffer was already dequeued. Set for capture queues + * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued. + * @fileio: file io emulator internal data, used only if emulator is active + * @threadio: thread io internal data, used only if thread is active + * @name: queue name, used for logging purpose. Initialized automatically + * if left empty by drivers. 
+ */ +struct vb2_queue { + unsigned int type; + unsigned int io_modes; + struct device *dev; + unsigned long dma_attrs; + unsigned int bidirectional:1; + unsigned int fileio_read_once:1; + unsigned int fileio_write_immediately:1; + unsigned int allow_zero_bytesused:1; + unsigned int quirk_poll_must_check_waiting_for_buffers:1; + unsigned int supports_requests:1; + unsigned int requires_requests:1; + unsigned int uses_qbuf:1; + unsigned int uses_requests:1; + unsigned int allow_cache_hints:1; + unsigned int non_coherent_mem:1; + + struct mutex *lock; + void *owner; + + const struct vb2_ops *ops; + const struct vb2_mem_ops *mem_ops; + const struct vb2_buf_ops *buf_ops; + + void *drv_priv; + u32 subsystem_flags; + unsigned int buf_struct_size; + u32 timestamp_flags; + gfp_t gfp_flags; + u32 min_queued_buffers; + u32 min_reqbufs_allocation; + + struct device *alloc_devs[VB2_MAX_PLANES]; + + /* private: internal use only */ + struct mutex mmap_lock; + unsigned int memory; + enum dma_data_direction dma_dir; + struct vb2_buffer **bufs; + unsigned long *bufs_bitmap; + unsigned int max_num_buffers; + + struct list_head queued_list; + unsigned int queued_count; + + atomic_t owned_by_drv_count; + struct list_head done_list; + spinlock_t done_lock; + wait_queue_head_t done_wq; + + unsigned int streaming:1; + unsigned int start_streaming_called:1; + unsigned int error:1; + unsigned int waiting_for_buffers:1; + unsigned int waiting_in_dqbuf:1; + unsigned int is_multiplanar:1; + unsigned int is_output:1; + unsigned int is_busy:1; + unsigned int copy_timestamp:1; + unsigned int last_buffer_dequeued:1; + + struct vb2_fileio_data *fileio; + struct vb2_threadio_data *threadio; + + char name[32]; + +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Counters for how often these queue-related ops are + * called. Used to check for unbalanced ops. 
+ */ + u32 cnt_queue_setup; + u32 cnt_wait_prepare; + u32 cnt_wait_finish; + u32 cnt_prepare_streaming; + u32 cnt_start_streaming; + u32 cnt_stop_streaming; + u32 cnt_unprepare_streaming; +#endif +}; + +/** + * vb2_queue_allows_cache_hints() - Return true if the queue allows cache + * and memory consistency hints. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + */ +static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q) +{ + return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP; +} + +/** + * vb2_plane_vaddr() - Return a kernel virtual address of a given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the address is to be returned. + * + * This function returns a kernel virtual address of a given plane if + * such a mapping exist, NULL otherwise. + */ +void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); + +/** + * vb2_plane_cookie() - Return allocator specific cookie for the given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the cookie is to be returned. + * + * This function returns an allocator specific cookie for a given plane if + * available, NULL otherwise. The allocator should provide some simple static + * inline function, which would convert this cookie to the allocator specific + * type that can be used directly by the driver to access the buffer. This can + * be for example physical address, pointer to scatter list or IOMMU mapping. + */ +void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); + +/** + * vb2_buffer_done() - inform videobuf2 that an operation on a buffer + * is finished. + * @vb: pointer to &struct vb2_buffer to be used. + * @state: state of the buffer, as defined by &enum vb2_buffer_state. 
+ * Either %VB2_BUF_STATE_DONE if the operation finished + * successfully, %VB2_BUF_STATE_ERROR if the operation finished + * with an error or %VB2_BUF_STATE_QUEUED. + * + * This function should be called by the driver after a hardware operation on + * a buffer is finished and the buffer may be returned to userspace. The driver + * cannot use this buffer anymore until it is queued back to it by videobuf + * by the means of &vb2_ops->buf_queue callback. Only buffers previously queued + * to the driver by &vb2_ops->buf_queue can be passed to this function. + * + * While streaming a buffer can only be returned in state DONE or ERROR. + * The &vb2_ops->start_streaming op can also return them in case the DMA engine + * cannot be started for some reason. In that case the buffers should be + * returned with state QUEUED to put them back into the queue. + */ +void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); + +/** + * vb2_discard_done() - discard all buffers marked as DONE. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function is intended to be used with suspend/resume operations. It + * discards all 'done' buffers as they would be too old to be requested after + * resume. + * + * Drivers must stop the hardware and synchronize with interrupt handlers and/or + * delayed works before calling this function to make sure no buffer will be + * touched by the driver and/or hardware. + */ +void vb2_discard_done(struct vb2_queue *q); + +/** + * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function will wait until all buffers that have been given to the driver + * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It + * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair. + * It is intended to be called with all locks taken, for example from + * &vb2_ops->stop_streaming callback. 
+ */
+int vb2_wait_for_all_buffers(struct vb2_queue *q);
+
+/**
+ * vb2_core_querybuf() - query video buffer information.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @vb: pointer to struct &vb2_buffer.
+ * @pb: buffer struct passed from userspace.
+ *
+ * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called
+ * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
+ *
+ * The passed buffer should have been verified.
+ *
+ * This function fills the relevant information for the userspace.
+ *
+ * Return: none; this function cannot fail and fills in @pb directly.
+ */
+void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
+
+/**
+ * vb2_core_reqbufs() - Initiate streaming.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags. Currently, the only
+ * used flag is %V4L2_MEMORY_FLAG_NON_COHERENT.
+ * @count: requested buffer count.
+ *
+ * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called
+ * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
+ *
+ * This function:
+ *
+ * #) verifies streaming parameters passed from the userspace;
+ * #) sets up the queue;
+ * #) negotiates number of buffers and planes per buffer with the driver
+ * to be used during streaming;
+ * #) allocates internal buffer structures (&struct vb2_buffer), according to
+ * the agreed parameters;
+ * #) for MMAP memory type, allocates actual video memory, using the
+ * memory handling/allocation routines provided during queue initialization.
+ *
+ * If req->count is 0, all the memory will be freed instead.
+ *
+ * If the queue has been allocated previously by a previous vb2_core_reqbufs()
+ * call and the queue is not busy, memory will be reallocated.
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */ +int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count); + +/** + * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. + * @count: requested buffer count. + * @requested_planes: number of planes requested. + * @requested_sizes: array with the size of the planes. + * @first_index: index of the first created buffer, all allocated buffers have + * indices in the range [first_index..first_index+count-1] + * + * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * This function: + * + * #) verifies parameter sanity; + * #) calls the &vb2_ops->queue_setup queue operation; + * #) performs any necessary memory allocations. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count, + unsigned int requested_planes, + const unsigned int requested_sizes[], + unsigned int *first_index); + +/** + * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace + * to the kernel. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + * + * Videobuf2 core helper to implement VIDIOC_PREPARE_BUF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * The passed buffer should have been verified. + * + * This function calls vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed. 
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */
+int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
+
+/**
+ * vb2_core_remove_bufs() - Remove a range of buffers from the queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @start: first index of the range of buffers to remove.
+ * @count: number of buffers to remove.
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */
+int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count);
+
+/**
+ * vb2_core_qbuf() - Queue a buffer from userspace
+ *
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @vb: pointer to struct &vb2_buffer.
+ * @pb: buffer structure passed from userspace to
+ * v4l2_ioctl_ops->vidioc_qbuf handler in driver
+ * @req: pointer to &struct media_request, may be NULL.
+ *
+ * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called
+ * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
+ *
+ * This function:
+ *
+ * #) If @req is non-NULL, then the buffer will be bound to this
+ * media request and it returns. The buffer will be prepared and
+ * queued to the driver (i.e. the next two steps) when the request
+ * itself is queued.
+ * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
+ * (if provided), in which driver-specific buffer initialization can
+ * be performed;
+ * #) if streaming is on, queues the buffer in driver by the means of
+ * &vb2_ops->buf_queue callback for processing.
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */
+int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
+ struct media_request *req);
+
+/**
+ * vb2_core_dqbuf() - Dequeue a buffer to the userspace
+ * @q: pointer to &struct vb2_queue with videobuf2 queue
+ * @pindex: pointer to the buffer index. May be NULL
+ * @pb: buffer structure passed from userspace to
+ * v4l2_ioctl_ops->vidioc_dqbuf handler in driver.
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no + * buffers ready for dequeuing are present. Normally the driver + * would be passing (file->f_flags & O_NONBLOCK) here. + * + * Videobuf2 core helper to implement VIDIOC_DQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) calls buf_finish callback in the driver (if provided), in which + * driver can perform any additional operations that may be required before + * returning the buffer to userspace, such as cache sync, + * #) the buffer struct members are filled with relevant information for + * the userspace. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, + bool nonblocking); + +/** + * vb2_core_streamon() - Implements VB2 stream ON logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMON() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_streamon(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_streamoff() - Implements VB2 stream OFF logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMOFF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_expbuf() - Export a buffer as a file descriptor. 
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @fd: pointer to the file descriptor associated with DMABUF
+ * (set by driver).
+ * @type: buffer type.
+ * @vb: pointer to struct &vb2_buffer.
+ * @plane: index of the plane to be exported, 0 for single plane queues
+ * @flags: file flags for newly created file, as defined at
+ * include/uapi/asm-generic/fcntl.h.
+ * Currently, the only flag that is supported is %O_CLOEXEC;
+ * refer to the manual of the open syscall for more details.
+ *
+ *
+ * Videobuf2 core helper to implement VIDIOC_EXPBUF() operation. It is called
+ * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */
+int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
+ struct vb2_buffer *vb, unsigned int plane, unsigned int flags);
+
+/**
+ * vb2_core_queue_init() - initialize a videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * This structure should be allocated in driver
+ *
+ * The &vb2_queue structure should be allocated by the driver. The driver is
+ * responsible for clearing its content and setting initial values for some
+ * required entries before calling this function.
+ *
+ * .. note::
+ *
+ * The following fields at @q should be set before calling this function:
+ * &vb2_queue->ops, &vb2_queue->mem_ops, &vb2_queue->type.
+ */
+int vb2_core_queue_init(struct vb2_queue *q);
+
+/**
+ * vb2_core_queue_release() - stop streaming, release the queue and free memory
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the &struct vb2_queue itself.
+ */
+void vb2_core_queue_release(struct vb2_queue *q);
+
+/**
+ * vb2_queue_error() - signal a fatal error on the queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * + * Flag that a fatal unrecoverable error has occurred and wake up all processes + * waiting on the queue. Polling will now set %EPOLLERR and queuing and dequeuing + * buffers will return %-EIO. + * + * The error flag will be cleared when canceling the queue, either from + * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this + * function before starting the stream, otherwise the error flag will remain set + * until the queue is released when closing the device node. + */ +void vb2_queue_error(struct vb2_queue *q); + +/** + * vb2_mmap() - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * Should be called from mmap file operation handler of a driver. + * This function maps one plane of one of the available video buffers to + * userspace. To map whole video memory allocated on reqbufs, this function + * has to be called once per each plane per each buffer previously allocated. + * + * When the userspace application calls mmap, it passes to it an offset returned + * to it earlier by the means of &v4l2_ioctl_ops->vidioc_querybuf handler. + * That offset acts as a "cookie", which is then used to identify the plane + * to be mapped. + * + * This function finds a plane with a matching offset and a mapping is performed + * by the means of a provided memory operation. + * + * The return values from this function are intended to be directly returned + * from the mmap handler in driver. + */ +int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +/** + * vb2_get_unmapped_area - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @addr: memory address. + * @len: buffer size. + * @pgoff: page offset. + * @flags: memory flags. 
+ *
+ * This function is used in noMMU platforms to propose address mapping
+ * for a given buffer. It's intended to be used as a handler for the
+ * &file_operations->get_unmapped_area operation.
+ *
+ * The mmap() syscall routines will call this function
+ * to get a proposed address for the mapping, when ``!CONFIG_MMU``.
+ */
+unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#endif
+
+/**
+ * vb2_core_poll() - implements poll syscall() logic.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @file: &struct file argument passed to the poll
+ * file operation handler.
+ * @wait: &poll_table wait argument passed to the poll
+ * file operation handler.
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
+ poll_table *wait);
+
+/**
+ * vb2_read() - implements read() syscall logic.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @data: pointed to target userspace buffer
+ * @count: number of bytes to read
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ */
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock);
+/**
+ * vb2_write() - implements write() syscall logic.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @data: pointed to target userspace buffer
+ * @count: number of bytes to write
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ */
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
+ loff_t *ppos, int nonblock);
+
+/**
+ * typedef vb2_thread_fnc - callback function for use with vb2_thread.
+ *
+ * @vb: pointer to struct &vb2_buffer.
+ * @priv: pointer to a private data.
+ *
+ * This is called whenever a buffer is dequeued in the thread.
+ */
+typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv);
+
+/**
+ * vb2_thread_start() - start a thread for the given queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @fnc: &vb2_thread_fnc callback function.
+ * @priv: priv pointer passed to the callback function.
+ * @thread_name:the name of the thread. This will be prefixed with "vb2-".
+ *
+ * This starts a thread that will queue and dequeue until an error occurs
+ * or vb2_thread_stop() is called.
+ *
+ * .. attention::
+ *
+ * This function should not be used for anything else but the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailing list first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+ const char *thread_name);
+
+/**
+ * vb2_thread_stop() - stop the thread for the given queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ */
+int vb2_thread_stop(struct vb2_queue *q);
+
+/**
+ * vb2_is_streaming() - return streaming status of the queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ */
+static inline bool vb2_is_streaming(struct vb2_queue *q)
+{
+ return q->streaming;
+}
+
+/**
+ * vb2_fileio_is_active() - return true if fileio is active.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * + * This returns true if read() or write() is used to stream the data + * as opposed to stream I/O. This is almost never an important distinction, + * except in rare cases. One such case is that using read() or write() to + * stream a format using %V4L2_FIELD_ALTERNATE is not allowed since there + * is no way you can pass the field information of each buffer to/from + * userspace. A driver that supports this field format should check for + * this in the &vb2_ops->queue_setup op and reject it if this function returns + * true. + */ +static inline bool vb2_fileio_is_active(struct vb2_queue *q) +{ + return q->fileio; +} + +/** + * vb2_get_num_buffers() - get the number of buffer in a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q) +{ + if (q->bufs_bitmap) + return bitmap_weight(q->bufs_bitmap, q->max_num_buffers); + + return 0; +} + +/** + * vb2_is_busy() - return busy status of the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function checks if queue has any buffers allocated. + */ +static inline bool vb2_is_busy(struct vb2_queue *q) +{ + return !!q->is_busy; +} + +/** + * vb2_get_drv_priv() - return driver private data associated with the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline void *vb2_get_drv_priv(struct vb2_queue *q) +{ + return q->drv_priv; +} + +/** + * vb2_set_plane_payload() - set bytesused for the plane @plane_no. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + * @size: payload in bytes. + */ +static inline void vb2_set_plane_payload(struct vb2_buffer *vb, + unsigned int plane_no, unsigned long size) +{ + /* + * size must never be larger than the buffer length, so + * warn and clamp to the buffer length if that's the case. 
+ */ + if (plane_no < vb->num_planes) { + if (WARN_ON_ONCE(size > vb->planes[plane_no].length)) + size = vb->planes[plane_no].length; + vb->planes[plane_no].bytesused = size; + } +} + +/** + * vb2_get_plane_payload() - get bytesused for the plane plane_no + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + */ +static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, + unsigned int plane_no) +{ + if (plane_no < vb->num_planes) + return vb->planes[plane_no].bytesused; + return 0; +} + +/** + * vb2_plane_size() - return plane size in bytes. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which size should be returned. + */ +static inline unsigned long +vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) +{ + if (plane_no < vb->num_planes) + return vb->planes[plane_no].length; + return 0; +} + +/** + * vb2_start_streaming_called() - return streaming status of driver. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline bool vb2_start_streaming_called(struct vb2_queue *q) +{ + return q->start_streaming_called; +} + +/** + * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) +{ + q->last_buffer_dequeued = false; +} + +/** + * vb2_get_buffer() - get a buffer from a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @index: buffer index + * + * This function obtains a buffer from a queue, by its index. + * Keep in mind that there is no refcounting involved in this + * operation, so the buffer lifetime should be taken into + * consideration. 
+ */ +static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, + unsigned int index) +{ + if (!q->bufs) + return NULL; + + if (index >= q->max_num_buffers) + return NULL; + + if (test_bit(index, q->bufs_bitmap)) + return q->bufs[index]; + return NULL; +} + +/* + * The following functions are not part of the vb2 core API, but are useful + * functions for videobuf2-*. + */ + +/** + * vb2_buffer_in_use() - return true if the buffer is in use and + * the queue cannot be freed (by the means of VIDIOC_REQBUFS(0)) call. + * + * @vb: buffer for which plane size should be returned. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); + +/** + * vb2_verify_memory_type() - Check whether the memory type and buffer type + * passed to a buffer operation are compatible with the queue. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory model, as defined by enum &vb2_memory. + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the types defined on enum &v4l2_buf_type. + */ +int vb2_verify_memory_type(struct vb2_queue *q, + enum vb2_memory memory, unsigned int type); + +/** + * vb2_request_object_is_buffer() - return true if the object is a buffer + * + * @obj: the request object. + */ +bool vb2_request_object_is_buffer(struct media_request_object *obj); + +/** + * vb2_request_buffer_cnt() - return the number of buffers in the request + * + * @req: the request. 
+ */ +unsigned int vb2_request_buffer_cnt(struct media_request *req); + +#endif /* _MEDIA_VIDEOBUF2_CORE_H */ diff --git a/6.17.0/include-overrides/media/videobuf2-dma-contig.h b/6.17.0/include-overrides/media/videobuf2-dma-contig.h new file mode 100644 index 0000000..5be313c --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-dma-contig.h @@ -0,0 +1,32 @@ +/* + * videobuf2-dma-contig.h - DMA contig memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _MEDIA_VIDEOBUF2_DMA_CONTIG_H +#define _MEDIA_VIDEOBUF2_DMA_CONTIG_H + +#include +#include + +static inline dma_addr_t +vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) +{ + dma_addr_t *addr = vb2_plane_cookie(vb, plane_no); + + return *addr; +} + +int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size); +static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { } + +extern const struct vb2_mem_ops vb2_dma_contig_memops; + +#endif diff --git a/6.17.0/include-overrides/media/videobuf2-dma-sg.h b/6.17.0/include-overrides/media/videobuf2-dma-sg.h new file mode 100644 index 0000000..f28fcb0 --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-dma-sg.h @@ -0,0 +1,26 @@ +/* + * videobuf2-dma-sg.h - DMA scatter/gather memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _MEDIA_VIDEOBUF2_DMA_SG_H +#define _MEDIA_VIDEOBUF2_DMA_SG_H + +#include + +static inline struct sg_table *vb2_dma_sg_plane_desc( + struct vb2_buffer *vb, unsigned int plane_no) +{ + return (struct sg_table *)vb2_plane_cookie(vb, plane_no); +} + +extern const struct vb2_mem_ops vb2_dma_sg_memops; + +#endif diff --git a/6.17.0/include-overrides/media/videobuf2-dvb.h b/6.17.0/include-overrides/media/videobuf2-dvb.h new file mode 100644 index 0000000..2d577b9 --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-dvb.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VIDEOBUF2_DVB_H_ +#define _VIDEOBUF2_DVB_H_ + +#include +#include +#include +#include +#include +#include + +/* We don't actually need to include media-device.h here */ +struct media_device; + +/* + * TODO: This header file should be replaced with videobuf2-core.h + * Currently, vb2_thread is not a stuff of videobuf2-core, + * since vb2_thread has many dependencies on videobuf2-v4l2. + */ + +struct vb2_dvb { + /* filling that the job of the driver */ + char *name; + struct dvb_frontend *frontend; + struct vb2_queue dvbq; + + /* vb2-dvb state info */ + struct mutex lock; + int nfeeds; + + /* vb2_dvb_(un)register manages this */ + struct dvb_demux demux; + struct dmxdev dmxdev; + struct dmx_frontend fe_hw; + struct dmx_frontend fe_mem; + struct dvb_net net; +}; + +struct vb2_dvb_frontend { + struct list_head felist; + int id; + struct vb2_dvb dvb; +}; + +struct vb2_dvb_frontends { + struct list_head felist; + struct mutex lock; + struct dvb_adapter adapter; + int active_fe_id; /* Indicates which frontend in the felist is in use */ + int gate; /* Frontend with gate control 0=!MFE,1=fe0,2=fe1 etc */ +}; + +int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, + struct module *module, + void *adapter_priv, + struct device *device, + struct media_device *mdev, + short *adapter_nr, + int mfe_shared); + +void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f); + +struct 
vb2_dvb_frontend *vb2_dvb_alloc_frontend(struct vb2_dvb_frontends *f, int id); +void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f); + +struct vb2_dvb_frontend *vb2_dvb_get_frontend(struct vb2_dvb_frontends *f, int id); +int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f, struct dvb_frontend *p); + +#endif /* _VIDEOBUF2_DVB_H_ */ diff --git a/6.17.0/include-overrides/media/videobuf2-memops.h b/6.17.0/include-overrides/media/videobuf2-memops.h new file mode 100644 index 0000000..4b5b84f --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-memops.h @@ -0,0 +1,41 @@ +/* + * videobuf2-memops.h - generic memory handling routines for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _MEDIA_VIDEOBUF2_MEMOPS_H +#define _MEDIA_VIDEOBUF2_MEMOPS_H + +#include +#include +#include + +/** + * struct vb2_vmarea_handler - common vma refcount tracking handler. + * + * @refcount: pointer to &refcount_t entry in the buffer. + * @put: callback to function that decreases buffer refcount. + * @arg: argument for @put callback. 
+ */ +struct vb2_vmarea_handler { + refcount_t *refcount; + void (*put)(void *arg); + void *arg; +}; + +extern const struct vm_operations_struct vb2_common_vm_ops; + +struct frame_vector *vb2_create_framevec(unsigned long start, + unsigned long length, + bool write); +void vb2_destroy_framevec(struct frame_vector *vec); + +#endif diff --git a/6.17.0/include-overrides/media/videobuf2-v4l2.h b/6.17.0/include-overrides/media/videobuf2-v4l2.h new file mode 100644 index 0000000..77ce823 --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-v4l2.h @@ -0,0 +1,392 @@ +/* + * videobuf2-v4l2.h - V4L2 driver helper framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_V4L2_H +#define _MEDIA_VIDEOBUF2_V4L2_H + +#include +#include + +#if VB2_MAX_FRAME != VIDEO_MAX_FRAME +#error VB2_MAX_FRAME != VIDEO_MAX_FRAME +#endif + +#if VB2_MAX_PLANES != VIDEO_MAX_PLANES +#error VB2_MAX_PLANES != VIDEO_MAX_PLANES +#endif + +struct video_device; + +/** + * struct vb2_v4l2_buffer - video buffer information for v4l2. + * + * @vb2_buf: embedded struct &vb2_buffer. + * @flags: buffer informational flags. + * @field: field order of the image in the buffer, as defined by + * &enum v4l2_field. + * @timecode: frame timecode. + * @sequence: sequence count of this frame. + * @request_fd: the request_fd associated with this buffer + * @is_held: if true, then this capture buffer was held + * @planes: plane information (userptr/fd, length, bytesused, data_offset). + * + * Should contain enough information to be able to cover all the fields + * of &struct v4l2_buffer at ``videodev2.h``. 
+ */ +struct vb2_v4l2_buffer { + struct vb2_buffer vb2_buf; + + __u32 flags; + __u32 field; + struct v4l2_timecode timecode; + __u32 sequence; + __s32 request_fd; + bool is_held; + struct vb2_plane planes[VB2_MAX_PLANES]; +}; + +/* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */ +#define VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 0) + +/* + * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer * + */ +#define to_vb2_v4l2_buffer(vb) \ + container_of(vb, struct vb2_v4l2_buffer, vb2_buf) + +/** + * vb2_find_buffer() - Find a buffer with given timestamp + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @timestamp: the timestamp to find. + * + * Returns the buffer with the given @timestamp, or NULL if not found. + */ +struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp); + +int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); + +/** + * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies + * the memory and type values. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @req: &struct v4l2_requestbuffers passed from userspace to + * &v4l2_ioctl_ops->vidioc_reqbufs handler in driver. + */ +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); + +/** + * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies + * the memory and type values. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @create: creation parameters, passed from userspace to + * &v4l2_ioctl_ops->vidioc_create_bufs handler in driver + */ +int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); + +/** + * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @mdev: pointer to &struct media_device, may be NULL. 
+ * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_prepare_buf ioctl handler + * of a driver. + * + * This function: + * + * #) verifies the passed buffer, + * #) calls &vb2_ops->buf_prepare callback in the driver (if provided), + * in which driver-specific buffer initialization can be performed. + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, + * then bind the prepared buffer to the request. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + */ +int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); + +/** + * vb2_qbuf() - Queue a buffer from userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @mdev: pointer to &struct media_device, may be NULL. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_qbuf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_qbuf handler of a driver. + * + * This function: + * + * #) verifies the passed buffer; + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, + * then bind the buffer to the request. + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; + * #) if streaming is on, queues the buffer in driver by the means of + * &vb2_ops->buf_queue callback for processing. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver. + */ +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); + +/** + * vb2_expbuf() - Export a buffer as a file descriptor + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * @eb: export buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_expbuf handler in driver + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_expbuf handler in driver. + */ +int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); + +/** + * vb2_dqbuf() - Dequeue a buffer to the userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver + * @nonblocking: if true, this call will not sleep waiting for a buffer if no + * buffers ready for dequeuing are present. Normally the driver + * would be passing (&file->f_flags & %O_NONBLOCK) here + * + * Should be called from &v4l2_ioctl_ops->vidioc_dqbuf ioctl handler + * of a driver. + * + * This function: + * + * #) verifies the passed buffer; + * #) calls &vb2_ops->buf_finish callback in the driver (if provided), in which + * driver can perform any additional operations that may be required before + * returning the buffer to userspace, such as cache sync; + * #) the buffer struct members are filled with relevant information for + * the userspace. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_dqbuf handler in driver. + */ +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); + +/** + * vb2_streamon - start streaming + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamon handler, + * as defined by &enum v4l2_buf_type. + * + * Should be called from &v4l2_ioctl_ops->vidioc_streamon handler of a driver. 
+ * + * This function: + * + * 1) verifies current state + * 2) passes any previously queued buffers to the driver and starts streaming + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_streamon handler in the driver. + */ +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); + +/** + * vb2_streamoff - stop streaming + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamoff handler + * + * Should be called from vidioc_streamoff handler of a driver. + * + * This function: + * + * #) verifies current state, + * #) stop streaming and dequeues any queued buffers, including those previously + * passed to the driver (after waiting for the driver to finish). + * + * This call can be used for pausing playback. + * The return values from this function are intended to be directly returned + * from vidioc_streamoff handler in the driver + */ +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); + +/** + * vb2_queue_init() - initialize a videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * The vb2_queue structure should be allocated by the driver. The driver is + * responsible of clearing it's content and setting initial values for some + * required entries before calling this function. + * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer + * to the struct vb2_queue description in include/media/videobuf2-core.h + * for more information. + */ +int __must_check vb2_queue_init(struct vb2_queue *q); + +/** + * vb2_queue_init_name() - initialize a videobuf2 queue with a name + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @name: the queue name + * + * This function initializes the vb2_queue exactly like vb2_queue_init(), + * and additionally sets the queue name. 
The queue name is used for logging + * purpose, and should uniquely identify the queue within the context of the + * device it belongs to. This is useful to attribute kernel log messages to the + * right queue for m2m devices or other devices that handle multiple queues. + */ +int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name); + +/** + * vb2_queue_release() - stop streaming, release the queue and free memory + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function stops streaming and performs necessary clean ups, including + * freeing video buffer memory. The driver is responsible for freeing + * the vb2_queue structure itself. + */ +void vb2_queue_release(struct vb2_queue *q); + +/** + * vb2_queue_change_type() - change the type of an inactive vb2_queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*) + * + * This function changes the type of the vb2_queue. This is only possible + * if the queue is not busy (i.e. no buffers have been allocated). + * + * vb2_queue_change_type() can be used to support multiple buffer types using + * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and + * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type() + * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus + * "lock" the buffer type until the buffers have been released. + */ +int vb2_queue_change_type(struct vb2_queue *q, unsigned int type); + +/** + * vb2_poll() - implements poll userspace operation + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: file argument passed to the poll file operation handler + * @wait: wait argument passed to the poll file operation handler + * + * This function implements poll file operation handler for a driver. 
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will + * be informed that the file descriptor of a video device is available for + * reading. + * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor + * will be reported as available for writing. + * + * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any + * pending events. + * + * The return values from this function are intended to be directly returned + * from poll handler in driver. + */ +__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); + +/* + * The following functions are not part of the vb2 core API, but are simple + * helper functions that you can use in your struct v4l2_file_operations, + * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock + * or video_device->lock is set, and they will set and test the queue owner + * (vb2_queue->owner) to check if the calling filehandle is permitted to do the + * queuing operation. + */ + +/** + * vb2_queue_is_busy() - check if the queue is busy + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: file through which the vb2 queue access is performed + * + * The queue is considered busy if it has an owner and the owner is not the + * @file. + * + * Queue ownership is acquired and checked by some of the v4l2_ioctl_ops helpers + * below. Drivers can also use this function directly when they need to + * open-code ioctl handlers, for instance to add additional checks between the + * queue ownership test and the call to the corresponding vb2 operation. 
+ */ +static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file) +{ + return q->owner && q->owner != file->private_data; +} + +/* struct v4l2_ioctl_ops helpers */ + +int vb2_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p); +int vb2_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p); +int vb2_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *p); +int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *p); +int vb2_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *p); + +/* struct v4l2_file_operations helpers */ + +int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); +int vb2_fop_release(struct file *file); +int _vb2_fop_release(struct file *file, struct mutex *lock); +ssize_t vb2_fop_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); +ssize_t vb2_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); +__poll_t vb2_fop_poll(struct file *file, poll_table *wait); +#ifndef CONFIG_MMU +unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); +#endif + +/** + * vb2_video_unregister_device - unregister the video device and release queue + * + * @vdev: pointer to &struct video_device + * + * If the driver uses vb2_fop_release()/_vb2_fop_release(), then it should use + * vb2_video_unregister_device() instead of video_unregister_device(). 
+ * + * This function will call video_unregister_device() and then release the + * vb2_queue if streaming is in progress. This will stop streaming and + * this will simplify the unbind sequence since after this call all subdevs + * will have stopped streaming as well. + */ +void vb2_video_unregister_device(struct video_device *vdev); + +/** + * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue + * + * @vq: pointer to &struct vb2_queue + * + * ..note:: only use if vq->lock is non-NULL. + */ +void vb2_ops_wait_prepare(struct vb2_queue *vq); + +/** + * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue + * + * @vq: pointer to &struct vb2_queue + * + * ..note:: only use if vq->lock is non-NULL. + */ +void vb2_ops_wait_finish(struct vb2_queue *vq); + +struct media_request; +int vb2_request_validate(struct media_request *req); +void vb2_request_queue(struct media_request *req); + +#endif /* _MEDIA_VIDEOBUF2_V4L2_H */ diff --git a/6.17.0/include-overrides/media/videobuf2-vmalloc.h b/6.17.0/include-overrides/media/videobuf2-vmalloc.h new file mode 100644 index 0000000..a63fe66 --- /dev/null +++ b/6.17.0/include-overrides/media/videobuf2-vmalloc.h @@ -0,0 +1,20 @@ +/* + * videobuf2-vmalloc.h - vmalloc memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _MEDIA_VIDEOBUF2_VMALLOC_H +#define _MEDIA_VIDEOBUF2_VMALLOC_H + +#include + +extern const struct vb2_mem_ops vb2_vmalloc_memops; + +#endif diff --git a/6.17.0/include-overrides/media/vsp1.h b/6.17.0/include-overrides/media/vsp1.h new file mode 100644 index 0000000..d9b91ff --- /dev/null +++ b/6.17.0/include-overrides/media/vsp1.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * vsp1.h -- R-Car VSP1 API + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ +#ifndef __MEDIA_VSP1_H__ +#define __MEDIA_VSP1_H__ + +#include +#include +#include + +struct device; +struct vsp1_dl_list; + +/* ----------------------------------------------------------------------------- + * VSP1 DU interface + */ + +int vsp1_du_init(struct device *dev); + +#define VSP1_DU_STATUS_COMPLETE BIT(0) +#define VSP1_DU_STATUS_WRITEBACK BIT(1) + +/** + * struct vsp1_du_lif_config - VSP LIF configuration + * @width: output frame width + * @height: output frame height + * @interlaced: true for interlaced pipelines + * @callback: frame completion callback function (optional). When a callback + * is provided, the VSP driver guarantees that it will be called once + * and only once for each vsp1_du_atomic_flush() call. 
+ * @callback_data: data to be passed to the frame completion callback + */ +struct vsp1_du_lif_config { + unsigned int width; + unsigned int height; + bool interlaced; + + void (*callback)(void *data, unsigned int status, u32 crc); + void *callback_data; +}; + +int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_lif_config *cfg); + +/** + * struct vsp1_du_atomic_config - VSP atomic configuration parameters + * @pixelformat: plane pixel format (V4L2 4CC) + * @pitch: line pitch in bytes for the first plane + * @mem: DMA memory address for each plane of the frame buffer + * @src: source rectangle in the frame buffer (integer coordinates) + * @dst: destination rectangle on the display (integer coordinates) + * @alpha: alpha value (0: fully transparent, 255: fully opaque) + * @zpos: Z position of the plane (from 0 to number of planes minus 1) + * @premult: true for premultiplied alpha + * @color_encoding: color encoding (valid for YUV formats only) + * @color_range: color range (valid for YUV formats only) + */ +struct vsp1_du_atomic_config { + u32 pixelformat; + unsigned int pitch; + dma_addr_t mem[3]; + struct v4l2_rect src; + struct v4l2_rect dst; + unsigned int alpha; + unsigned int zpos; + bool premult; + enum v4l2_ycbcr_encoding color_encoding; + enum v4l2_quantization color_range; +}; + +/** + * enum vsp1_du_crc_source - Source used for CRC calculation + * @VSP1_DU_CRC_NONE: CRC calculation disabled + * @VSP1_DU_CRC_PLANE: Perform CRC calculation on an input plane + * @VSP1_DU_CRC_OUTPUT: Perform CRC calculation on the composed output + */ +enum vsp1_du_crc_source { + VSP1_DU_CRC_NONE, + VSP1_DU_CRC_PLANE, + VSP1_DU_CRC_OUTPUT, +}; + +/** + * struct vsp1_du_crc_config - VSP CRC computation configuration parameters + * @source: source for CRC calculation + * @index: index of the CRC source plane (when source is set to plane) + */ +struct vsp1_du_crc_config { + enum vsp1_du_crc_source source; + unsigned int index; +}; + +/** + 
* struct vsp1_du_writeback_config - VSP writeback configuration parameters + * @pixelformat: plane pixel format (V4L2 4CC) + * @pitch: line pitch in bytes for the first plane + * @mem: DMA memory address for each plane of the frame buffer + */ +struct vsp1_du_writeback_config { + u32 pixelformat; + unsigned int pitch; + dma_addr_t mem[3]; +}; + +/** + * struct vsp1_du_atomic_pipe_config - VSP atomic pipe configuration parameters + * @crc: CRC computation configuration + * @writeback: writeback configuration + */ +struct vsp1_du_atomic_pipe_config { + struct vsp1_du_crc_config crc; + struct vsp1_du_writeback_config writeback; +}; + +void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index); +int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, + unsigned int rpf, + const struct vsp1_du_atomic_config *cfg); +void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_atomic_pipe_config *cfg); +int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt); +void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt); + +/* ----------------------------------------------------------------------------- + * VSP1 ISP interface + */ + +/** + * struct vsp1_isp_buffer_desc - Describe a buffer allocated by VSPX + * @size: Byte size of the buffer allocated by VSPX + * @cpu_addr: CPU-mapped address of a buffer allocated by VSPX + * @dma_addr: bus address of a buffer allocated by VSPX + */ +struct vsp1_isp_buffer_desc { + size_t size; + void *cpu_addr; + dma_addr_t dma_addr; +}; + +/** + * struct vsp1_isp_job_desc - Describe a VSPX buffer transfer request + * @config: ConfigDMA buffer descriptor + * @config.pairs: number of reg-value pairs in the ConfigDMA buffer + * @config.mem: bus address of the ConfigDMA buffer + * @img: RAW image buffer descriptor + * @img.fmt: RAW image format + * @img.mem: bus address of the RAW image buffer + * @dl: pointer to the display list populated by the VSPX driver in the + * 
vsp1_isp_job_prepare() function + * + * Describe a transfer request for the VSPX to perform on behalf of the ISP. + * The job descriptor contains an optional ConfigDMA buffer and one RAW image + * buffer. Set config.pairs to 0 if no ConfigDMA buffer should be transferred. + * The minimum number of config.pairs that can be written using ConfigDMA is 17. + * A number of pairs < 16 corrupts the output image. A number of pairs == 16 + * freezes the VSPX operation. If the ISP driver has to write less than 17 pairs + * it shall pad the buffer with writes directed to registers that have no effect + * or avoid using ConfigDMA at all for such small write sequences. + * + * The ISP driver shall pass an instance this type to the vsp1_isp_job_prepare() + * function that will populate the display list pointer @dl using the @config + * and @img descriptors. When the job has to be run on the VSPX, the descriptor + * shall be passed to vsp1_isp_job_run() which consumes the display list. + * + * Job descriptors not yet run shall be released with a call to + * vsp1_isp_job_release() when stopping the streaming in order to properly + * release the resources acquired by vsp1_isp_job_prepare(). + */ +struct vsp1_isp_job_desc { + struct { + unsigned int pairs; + dma_addr_t mem; + } config; + struct { + struct v4l2_pix_format_mplane fmt; + dma_addr_t mem; + } img; + struct vsp1_dl_list *dl; +}; + +/** + * struct vsp1_vspx_frame_end - VSPX frame end callback data + * @vspx_frame_end: Frame end callback. Called after a transfer job has been + * completed. 
If the job includes both a ConfigDMA and a + * RAW image, the callback is called after both have been + * transferred + * @frame_end_data: Frame end callback data, passed to vspx_frame_end + */ +struct vsp1_vspx_frame_end { + void (*vspx_frame_end)(void *data); + void *frame_end_data; +}; + +int vsp1_isp_init(struct device *dev); +struct device *vsp1_isp_get_bus_master(struct device *dev); +int vsp1_isp_alloc_buffer(struct device *dev, size_t size, + struct vsp1_isp_buffer_desc *buffer_desc); +void vsp1_isp_free_buffer(struct device *dev, + struct vsp1_isp_buffer_desc *buffer_desc); +int vsp1_isp_start_streaming(struct device *dev, + struct vsp1_vspx_frame_end *frame_end); +void vsp1_isp_stop_streaming(struct device *dev); +int vsp1_isp_job_prepare(struct device *dev, + struct vsp1_isp_job_desc *job); +int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job); +void vsp1_isp_job_release(struct device *dev, struct vsp1_isp_job_desc *job); + +#endif /* __MEDIA_VSP1_H__ */ diff --git a/dkms.conf b/dkms.conf index 347c190..87055af 100644 --- a/dkms.conf +++ b/dkms.conf @@ -1,16 +1,48 @@ -PACKAGE_NAME="ipu-camera-sensor" -PACKAGE_VERSION="0.1" +PACKAGE_NAME="intel-mipi-gmsl-dkms" +PACKAGE_VERSION="#MODULE_VERSION#" +BUILD_EXCLUSIVE_KERNEL="^(6\.(1[7])\.)" +VERSION_SUFFIX="${PACKAGE_VERSION}" + +KBASE=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build +#Extract kernel version substring that is used to identify source directory for kernel modules +# i.e 4.4.0-116-generic ==> 4.4.0 +SELECTED_KERNEL=$(echo $kernelver | cut -d- -f1 | sed -r 's/(.*?)\..*/\1.0/g') +SELECTED_KERNEL_PATCH=$(echo $kernelver | cut -d- -f2) +VERSION=$(echo $kernelver | sed -ne 's/\([0-9]\+\)\.\([0-9]\+\)\.\([0-9]\+\)\-\(.\+\)/\1/p') +PATCHLEVEL=$(echo $kernelver | sed -ne 's/\([0-9]\+\)\.\([0-9]\+\)\.\([0-9]\+\)\-\(.\+\)/\2/p') +SUBLEVEL=$(echo $kernelver | sed -ne 's/\([0-9]\+\)\.\([0-9]\+\)\.\([0-9]\+\)\-\(.\+\)/\3/p') + +#This variable is required to address includes from 
videodev.ko +srctree=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build + +#We will invoke multiple make directives to build all the required kernel modules, then manually copy them to the staged directory for dkms install rule +MAKE="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir && \ + if [ -d ${KBASE}/drivers/media/v4l2-core ]; then rmdir ${KBASE}/drivers/media/v4l2-core ; fi &&\ + make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core videodev.ko DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' &&\ + mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/" + +MAKE_MATCH[1]="^(6.1[7])" + +CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && \ + make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean" -MAKE="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir" -CLEAN="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean" AUTOINSTALL="yes" -MODULE_PATH="drivers/media/i2c" -MODULE_DEST="/kernel/drivers/media/i2c/" +#Patches for Kernel 6.17 +PATCH[0]="0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch" +PATCH[1]="0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[0]="^(6.1[7])" +PATCH_MATCH[1]="^(6.1[7])" i=0 -BUILT_MODULE_NAME[$i]="ipu-acpi" +BUILT_MODULE_NAME[$i]="videodev" +BUILT_MODULE_LOCATION[$i]="drivers/media/v4l2-core/" +DEST_MODULE_NAME[$i]="videodev" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="ipu-acpi" BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no @@ -30,6 +62,11 @@ BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no +BUILT_MODULE_NAME[$((++i))]="ar0234" +BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + 
BUILT_MODULE_NAME[$((++i))]="ar0820" BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" DEST_MODULE_LOCATION[$i]="/updates" diff --git a/patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch b/patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch new file mode 100644 index 0000000..44cff75 --- /dev/null +++ b/patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch @@ -0,0 +1,37 @@ +From 459bc453270ba6fd0a77ff8d53a0f22641a1c55f Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Mon, 17 Nov 2025 07:44:33 -0700 +Subject: [PATCH 1/2] ipu7 dkms : v4l2-core makefile adaptation 6.17 + +Signed-off-by: florent pirou +--- + 6.17.0/drivers/media/v4l2-core/Makefile | 3 +++ + 6.17.0/drivers/media/v4l2-core/v4l2-dev.c | 1 + + 2 files changed, 4 insertions(+) + +diff --git a/6.17.0/drivers/media/v4l2-core/Makefile b/6.17.0/drivers/media/v4l2-core/Makefile +index 2177b9d63..82c235f19 100644 +--- a/6.17.0/drivers/media/v4l2-core/Makefile ++++ b/6.17.0/drivers/media/v4l2-core/Makefile +@@ -2,6 +2,9 @@ + # + # Makefile for the V4L2 core + # ++# ipu7-dkms appendix ++CC := ${CC} -I ${M}/../../../include-overrides -I ${M}/../tuners -I ${M}/../dvb-core -I ${M}/../dvb-frontends ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + + ccflags-y += -I$(srctree)/drivers/media/dvb-frontends + ccflags-y += -I$(srctree)/drivers/media/tuners +diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c +index c36923511..199737025 100644 +--- a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c ++++ b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c +@@ -1245,3 +1245,4 @@ MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, + MODULE_DESCRIPTION("Video4Linux2 core driver"); + MODULE_LICENSE("GPL"); + MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.25.1 + diff --git a/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch 
b/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch new file mode 100644 index 0000000..427599d --- /dev/null +++ b/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -0,0 +1,28 @@ +From 20ee554282631a6b12eba6ebb60bdb7bf9fc9c35 Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Mon, 17 Nov 2025 07:45:23 -0700 +Subject: [PATCH 2/2] drivers: media: set v4l2_subdev_enable_streams_api=true + for WA + +Signed-off-by: hepengpx +Signed-off-by: florent pirou +--- + 6.17.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c +index 4fd25fea3..60a05561f 100644 +--- a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c +@@ -32,7 +32,7 @@ + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +-static bool v4l2_subdev_enable_streams_api; ++static bool v4l2_subdev_enable_streams_api = true; + #endif + + /* +-- +2.25.1 + From fdff2bd373bea04783d27ad8708504934f7e2770 Mon Sep 17 00:00:00 2001 From: florent pirou Date: Thu, 4 Dec 2025 00:45:39 -0700 Subject: [PATCH 04/14] ipu6-isys : add v6.17 v4l2_subdev_enable_streams_api support * dkms : build both with linux intel and canonical headers Signed-off-by: florent pirou --- 6.17.0/drivers/media/pci/intel/ipu6/Kconfig | 18 + 6.17.0/drivers/media/pci/intel/ipu6/Makefile | 23 + .../drivers/media/pci/intel/ipu6/ipu6-bus.c | 159 ++ .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 55 + .../media/pci/intel/ipu6/ipu6-buttress.c | 910 +++++++++++ .../media/pci/intel/ipu6/ipu6-buttress.h | 85 + .../drivers/media/pci/intel/ipu6/ipu6-cpd.c | 362 +++++ .../drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 ++ .../drivers/media/pci/intel/ipu6/ipu6-dma.c | 459 ++++++ .../drivers/media/pci/intel/ipu6/ipu6-dma.h | 43 + .../media/pci/intel/ipu6/ipu6-fw-com.c | 413 +++++ .../media/pci/intel/ipu6/ipu6-fw-com.h | 47 + 
.../media/pci/intel/ipu6/ipu6-fw-isys.c | 487 ++++++ .../media/pci/intel/ipu6/ipu6-fw-isys.h | 596 +++++++ .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 643 ++++++++ .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 78 + .../media/pci/intel/ipu6/ipu6-isys-dwc-phy.c | 536 +++++++ .../media/pci/intel/ipu6/ipu6-isys-jsl-phy.c | 242 +++ .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 720 +++++++++ .../media/pci/intel/ipu6/ipu6-isys-queue.c | 850 ++++++++++ .../media/pci/intel/ipu6/ipu6-isys-queue.h | 71 + .../media/pci/intel/ipu6/ipu6-isys-subdev.c | 403 +++++ .../media/pci/intel/ipu6/ipu6-isys-subdev.h | 55 + .../media/pci/intel/ipu6/ipu6-isys-video.c | 1391 +++++++++++++++++ .../media/pci/intel/ipu6/ipu6-isys-video.h | 135 ++ .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 1380 ++++++++++++++++ .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 202 +++ .../drivers/media/pci/intel/ipu6/ipu6-mmu.c | 806 ++++++++++ .../drivers/media/pci/intel/ipu6/ipu6-mmu.h | 73 + .../intel/ipu6/ipu6-platform-buttress-regs.h | 224 +++ .../intel/ipu6/ipu6-platform-isys-csi2-reg.h | 172 ++ .../media/pci/intel/ipu6/ipu6-platform-regs.h | 179 +++ 6.17.0/drivers/media/pci/intel/ipu6/ipu6.c | 846 ++++++++++ 6.17.0/drivers/media/pci/intel/ipu6/ipu6.h | 342 ++++ dkms.conf | 24 +- ...ms-add-isys-makefile-adaptation-6.17.patch | 48 + 36 files changed, 13178 insertions(+), 4 deletions(-) create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/Kconfig create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/Makefile create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.c create mode 100644 
6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6.c create mode 100644 6.17.0/drivers/media/pci/intel/ipu6/ipu6.h create mode 100644 patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch diff --git a/6.17.0/drivers/media/pci/intel/ipu6/Kconfig b/6.17.0/drivers/media/pci/intel/ipu6/Kconfig new file mode 100644 index 0000000..1129e2b 
--- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/Kconfig @@ -0,0 +1,18 @@ +config VIDEO_INTEL_IPU6 + tristate "Intel IPU6 driver" + depends on ACPI || COMPILE_TEST + depends on VIDEO_DEV + depends on X86 && HAS_DMA + depends on IPU_BRIDGE || !IPU_BRIDGE + select AUXILIARY_BUS + select IOMMU_IOVA + select VIDEO_V4L2_SUBDEV_API + select MEDIA_CONTROLLER + select VIDEOBUF2_DMA_SG + select V4L2_FWNODE + help + This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs + and used for capturing images and video from camera sensors. + + To compile this driver, say Y here! It contains 2 modules - + intel_ipu6 and intel_ipu6_isys. diff --git a/6.17.0/drivers/media/pci/intel/ipu6/Makefile b/6.17.0/drivers/media/pci/intel/ipu6/Makefile new file mode 100644 index 0000000..a821b0a --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-only + +intel-ipu6-y := ipu6.o \ + ipu6-bus.o \ + ipu6-dma.o \ + ipu6-mmu.o \ + ipu6-buttress.o \ + ipu6-cpd.o \ + ipu6-fw-com.o + +obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o + +intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-csi2.o \ + ipu6-fw-isys.o \ + ipu6-isys-video.o \ + ipu6-isys-queue.o \ + ipu6-isys-subdev.o \ + ipu6-isys-mcd-phy.o \ + ipu6-isys-jsl-phy.o \ + ipu6-isys-dwc-phy.o + +obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.c new file mode 100644 index 0000000..5cee274 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 - 2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-buttress.h" +#include "ipu6-dma.h" + +static int bus_pm_runtime_suspend(struct device *dev) +{ + struct ipu6_bus_device *adev = 
to_ipu6_bus_device(dev); + int ret; + + ret = pm_generic_runtime_suspend(dev); + if (ret) + return ret; + + ret = ipu6_buttress_power(dev, adev->ctrl, false); + if (!ret) + return 0; + + dev_err(dev, "power down failed!\n"); + + /* Powering down failed, attempt to resume device now */ + ret = pm_generic_runtime_resume(dev); + if (!ret) + return -EBUSY; + + return -EIO; +} + +static int bus_pm_runtime_resume(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + int ret; + + ret = ipu6_buttress_power(dev, adev->ctrl, true); + if (ret) + return ret; + + ret = pm_generic_runtime_resume(dev); + if (ret) + goto out_err; + + return 0; + +out_err: + ipu6_buttress_power(dev, adev->ctrl, false); + + return -EBUSY; +} + +static struct dev_pm_domain ipu6_bus_pm_domain = { + .ops = { + .runtime_suspend = bus_pm_runtime_suspend, + .runtime_resume = bus_pm_runtime_resume, + }, +}; + +static DEFINE_MUTEX(ipu6_bus_mutex); + +static void ipu6_bus_release(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + + kfree(adev->pdata); + kfree(adev); +} + +struct ipu6_bus_device * +ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, + void *pdata, const struct ipu6_buttress_ctrl *ctrl, + char *name) +{ + struct auxiliary_device *auxdev; + struct ipu6_bus_device *adev; + struct ipu6_device *isp = pci_get_drvdata(pdev); + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + adev->isp = isp; + adev->ctrl = ctrl; + adev->pdata = pdata; + auxdev = &adev->auxdev; + auxdev->name = name; + auxdev->id = (pci_domain_nr(pdev->bus) << 16) | + PCI_DEVID(pdev->bus->number, pdev->devfn); + + auxdev->dev.parent = parent; + auxdev->dev.release = ipu6_bus_release; + + ret = auxiliary_device_init(auxdev); + if (ret < 0) { + dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n", + ret); + kfree(adev); + return ERR_PTR(ret); + } + + dev_pm_domain_set(&auxdev->dev, 
&ipu6_bus_pm_domain); + + pm_runtime_forbid(&adev->auxdev.dev); + pm_runtime_enable(&adev->auxdev.dev); + + return adev; +} + +int ipu6_bus_add_device(struct ipu6_bus_device *adev) +{ + struct auxiliary_device *auxdev = &adev->auxdev; + int ret; + + ret = auxiliary_device_add(auxdev); + if (ret) { + auxiliary_device_uninit(auxdev); + return ret; + } + + mutex_lock(&ipu6_bus_mutex); + list_add(&adev->list, &adev->isp->devices); + mutex_unlock(&ipu6_bus_mutex); + + pm_runtime_allow(&auxdev->dev); + + return 0; +} + +void ipu6_bus_del_devices(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + struct ipu6_bus_device *adev, *save; + + mutex_lock(&ipu6_bus_mutex); + + list_for_each_entry_safe(adev, save, &isp->devices, list) { + pm_runtime_disable(&adev->auxdev.dev); + list_del(&adev->list); + auxiliary_device_delete(&adev->auxdev); + auxiliary_device_uninit(&adev->auxdev); + } + + mutex_unlock(&ipu6_bus_mutex); +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.h new file mode 100644 index 0000000..a08c546 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013 - 2024 Intel Corporation */ + +#ifndef IPU6_BUS_H +#define IPU6_BUS_H + +#include +#include +#include +#include +#include +#include +#include + +struct firmware; +struct pci_dev; + +struct ipu6_buttress_ctrl; + +struct ipu6_bus_device { + struct auxiliary_device auxdev; + const struct auxiliary_driver *auxdrv; + const struct ipu6_auxdrv_data *auxdrv_data; + struct list_head list; + void *pdata; + struct ipu6_mmu *mmu; + struct ipu6_device *isp; + const struct ipu6_buttress_ctrl *ctrl; + const struct firmware *fw; + struct sg_table fw_sgt; + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; +}; + +struct ipu6_auxdrv_data { + irqreturn_t (*isr)(struct ipu6_bus_device *adev); + irqreturn_t (*isr_threaded)(struct 
ipu6_bus_device *adev); + bool wake_isr_thread; +}; + +#define to_ipu6_bus_device(_dev) \ + container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev) +#define auxdev_to_adev(_auxdev) \ + container_of(_auxdev, struct ipu6_bus_device, auxdev) +#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) + +struct ipu6_bus_device * +ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, + void *pdata, const struct ipu6_buttress_ctrl *ctrl, + char *name); +int ipu6_bus_add_device(struct ipu6_bus_device *adev); +void ipu6_bus_del_devices(struct pci_dev *pdev); + +#endif diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c new file mode 100644 index 0000000..103386c --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-buttress.h" +#include "ipu6-platform-buttress-regs.h" + +#define BOOTLOADER_STATUS_OFFSET 0x15c + +#define BOOTLOADER_MAGIC_KEY 0xb00710ad + +#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 +#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 +#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE + +#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 + +#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC) + +#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC) +#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC) +#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC) + +#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC +#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC +#define 
BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC) +#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC) + +#define BUTTRESS_IPC_RESET_RETRY 2000 +#define BUTTRESS_CSE_IPC_RESET_RETRY 4 +#define BUTTRESS_IPC_CMD_SEND_RETRY 1 + +#define BUTTRESS_MAX_CONSECUTIVE_IRQS 100 + +static const u32 ipu6_adev_irq_mask[2] = { + BUTTRESS_ISR_IS_IRQ, + BUTTRESS_ISR_PS_IRQ +}; + +int ipu6_buttress_ipc_reset(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc) +{ + unsigned int retries = BUTTRESS_IPC_RESET_RETRY; + struct ipu6_buttress *b = &isp->buttress; + u32 val = 0, csr_in_clr; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode"); + return 0; + } + + mutex_lock(&b->ipc_mutex); + + /* Clear-by-1 CSR (all bits), corresponding internal states. */ + val = readl(isp->base + ipc->csr_in); + writel(val, isp->base + ipc->csr_in); + + /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ + writel(ENTRY, isp->base + ipc->csr_out); + /* + * Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + */ + csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | + BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | + BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; + + do { + usleep_range(400, 500); + val = readl(isp->base + ipc->csr_in); + switch (val) { + case ENTRY | EXIT: + case ENTRY | EXIT | QUERY: + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_COMP_ACTIONS_RST_PHASE2). + * 2) Set peer CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. + */ + writel(ENTRY | EXIT, isp->base + ipc->csr_in); + writel(QUERY, isp->base + ipc->csr_out); + break; + case ENTRY: + case ENTRY | QUERY: + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). 
+ * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1. + */ + writel(ENTRY | QUERY, isp->base + ipc->csr_in); + writel(ENTRY, isp->base + ipc->csr_out); + break; + case EXIT: + case EXIT | QUERY: + /* + * Clear-by-1 CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * 1) Clear incoming doorbell. + * 2) Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + * 3) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + */ + writel(EXIT, isp->base + ipc->csr_in); + writel(0, isp->base + ipc->db0_in); + writel(csr_in_clr, isp->base + ipc->csr_in); + writel(EXIT, isp->base + ipc->csr_out); + + /* + * Read csr_in again to make sure if RST_PHASE2 is done. + * If csr_in is QUERY, it should be handled again. + */ + usleep_range(200, 300); + val = readl(isp->base + ipc->csr_in); + if (val & QUERY) { + dev_dbg(&isp->pdev->dev, + "RST_PHASE2 retry csr_in = %x\n", val); + break; + } + mutex_unlock(&b->ipc_mutex); + return 0; + case QUERY: + /* + * 1) Clear-by-1 CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. 
+ * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1 + */ + writel(QUERY, isp->base + ipc->csr_in); + writel(ENTRY, isp->base + ipc->csr_out); + break; + default: + dev_dbg_ratelimited(&isp->pdev->dev, + "Unexpected CSR 0x%x\n", val); + break; + } + } while (retries--); + + mutex_unlock(&b->ipc_mutex); + dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n"); + + return -ETIMEDOUT; +} + +static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc) +{ + writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); +} + +static int +ipu6_buttress_ipc_validity_open(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc) +{ + unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; + void __iomem *addr; + int ret; + u32 val; + + writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); + + addr = isp->base + ipc->csr_in; + ret = readl_poll_timeout(addr, val, val & mask, 200, + BUTTRESS_IPC_VALIDITY_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val); + ipu6_buttress_ipc_validity_close(isp, ipc); + } + + return ret; +} + +static void ipu6_buttress_ipc_recv(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc, u32 *ipc_msg) +{ + if (ipc_msg) + *ipc_msg = readl(isp->base + ipc->data0_in); + writel(0, isp->base + ipc->db0_in); +} + +static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp, + struct ipu6_ipc_buttress_bulk_msg *msgs, + u32 size) +{ + unsigned long tx_timeout_jiffies, rx_timeout_jiffies; + unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; + struct ipu6_buttress *b = &isp->buttress; + struct ipu6_buttress_ipc *ipc = &b->cse; + u32 val; + int ret; + int tout; + + mutex_lock(&b->ipc_mutex); + + ret = ipu6_buttress_ipc_validity_open(isp, ipc); + if (ret) { + dev_err(&isp->pdev->dev, "IPC validity open failed\n"); + goto out; + } + + tx_timeout_jiffies = 
msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS); + rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS); + + for (i = 0; i < size; i++) { + reinit_completion(&ipc->send_complete); + if (msgs[i].require_resp) + reinit_completion(&ipc->recv_complete); + + dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", + msgs[i].cmd); + writel(msgs[i].cmd, isp->base + ipc->data0_out); + val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size; + writel(val, isp->base + ipc->db0_out); + + tout = wait_for_completion_timeout(&ipc->send_complete, + tx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "send IPC response timeout\n"); + if (!retry--) { + ret = -ETIMEDOUT; + goto out; + } + + /* Try again if CSE is not responding on first try */ + writel(0, isp->base + ipc->db0_out); + i--; + continue; + } + + retry = BUTTRESS_IPC_CMD_SEND_RETRY; + + if (!msgs[i].require_resp) + continue; + + tout = wait_for_completion_timeout(&ipc->recv_complete, + rx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + if (ipc->nack_mask && + (ipc->recv_data & ipc->nack_mask) == ipc->nack) { + dev_err(&isp->pdev->dev, + "IPC NACK for cmd 0x%x\n", msgs[i].cmd); + ret = -EIO; + goto out; + } + + if (ipc->recv_data != msgs[i].expected_resp) { + dev_err(&isp->pdev->dev, + "expected resp: 0x%x, IPC response: 0x%x ", + msgs[i].expected_resp, ipc->recv_data); + ret = -EIO; + goto out; + } + } + + dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n"); + +out: + ipu6_buttress_ipc_validity_close(isp, ipc); + mutex_unlock(&b->ipc_mutex); + return ret; +} + +static int +ipu6_buttress_ipc_send(struct ipu6_device *isp, + u32 ipc_msg, u32 size, bool require_resp, + u32 expected_resp) +{ + struct ipu6_ipc_buttress_bulk_msg msg = { + .cmd = ipc_msg, + .cmd_size = size, + .require_resp = require_resp, + .expected_resp = expected_resp, + }; + + return ipu6_buttress_ipc_send_bulk(isp, &msg, 1); +} + +static irqreturn_t 
ipu6_buttress_call_isr(struct ipu6_bus_device *adev) +{ + irqreturn_t ret = IRQ_WAKE_THREAD; + + if (!adev || !adev->auxdrv || !adev->auxdrv_data) + return IRQ_NONE; + + if (adev->auxdrv_data->isr) + ret = adev->auxdrv_data->isr(adev); + + if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded) + ret = IRQ_NONE; + + return ret; +} + +irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr) +{ + struct ipu6_device *isp = isp_ptr; + struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; + struct ipu6_buttress *b = &isp->buttress; + u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; + irqreturn_t ret = IRQ_NONE; + u32 disable_irqs = 0; + u32 irq_status; + u32 i, count = 0; + int active; + + active = pm_runtime_get_if_active(&isp->pdev->dev); + if (!active) + return IRQ_NONE; + + irq_status = readl(isp->base + reg_irq_sts); + if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) { + if (active > 0) + pm_runtime_put_noidle(&isp->pdev->dev); + return IRQ_NONE; + } + + do { + writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); + + for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) { + irqreturn_t r = ipu6_buttress_call_isr(adev[i]); + + if (!(irq_status & ipu6_adev_irq_mask[i])) + continue; + + if (r == IRQ_WAKE_THREAD) { + ret = IRQ_WAKE_THREAD; + disable_irqs |= ipu6_adev_irq_mask[i]; + } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { + ret = IRQ_HANDLED; + } + } + + if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE) + ret = IRQ_HANDLED; + + if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n"); + ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); + complete(&b->cse.recv_complete); + } + + if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); + complete(&b->cse.send_complete); + } + + if (irq_status & BUTTRESS_ISR_SAI_VIOLATION && + ipu6_buttress_get_secure_mode(isp)) + dev_err(&isp->pdev->dev, + 
"BUTTRESS_ISR_SAI_VIOLATION\n"); + + if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR | + BUTTRESS_ISR_PS_FATAL_MEM_ERR)) + dev_err(&isp->pdev->dev, + "BUTTRESS_ISR_FATAL_MEM_ERR\n"); + + if (irq_status & BUTTRESS_ISR_UFI_ERROR) + dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n"); + + if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) { + dev_err(&isp->pdev->dev, "too many consecutive IRQs\n"); + ret = IRQ_NONE; + break; + } + + irq_status = readl(isp->base + reg_irq_sts); + } while (irq_status); + + if (disable_irqs) + writel(BUTTRESS_IRQS & ~disable_irqs, + isp->base + BUTTRESS_REG_ISR_ENABLE); + + if (active > 0) + pm_runtime_put(&isp->pdev->dev); + + return ret; +} + +irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) +{ + struct ipu6_device *isp = isp_ptr; + struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; + const struct ipu6_auxdrv_data *drv_data = NULL; + irqreturn_t ret = IRQ_NONE; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) { + drv_data = adev[i]->auxdrv_data; + if (!drv_data) + continue; + + if (drv_data->wake_isr_thread && + drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + + return ret; +} + +int ipu6_buttress_power(struct device *dev, + const struct ipu6_buttress_ctrl *ctrl, bool on) +{ + struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; + u32 pwr_sts, val; + int ret; + + if (!ctrl) + return 0; + + mutex_lock(&isp->buttress.power_mutex); + + if (!on) { + val = 0; + pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; + } else { + val = BUTTRESS_FREQ_CTL_START | + FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK, + ctrl->ratio) | + FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK, + ctrl->qos_floor) | + BUTTRESS_FREQ_CTL_ICCMAX_LEVEL; + + pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; + } + + writel(val, isp->base + ctrl->freq_ctl); + + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, + val, (val & 
ctrl->pwr_sts_mask) == pwr_sts, + 100, BUTTRESS_POWER_TIMEOUT_US); + if (ret) + dev_err(&isp->pdev->dev, + "Change power status timeout with 0x%x\n", val); + + mutex_unlock(&isp->buttress.power_mutex); + + return ret; +} + +bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp) +{ + u32 val; + + val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + + return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; +} + +bool ipu6_buttress_auth_done(struct ipu6_device *isp) +{ + u32 val; + + if (!isp->secure_mode) + return true; + + val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); + + return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, "INTEL_IPU6"); + +int ipu6_buttress_reset_authentication(struct ipu6_device *isp) +{ + int ret; + u32 val; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); + return 0; + } + + writel(BUTTRESS_FW_RESET_CTL_START, isp->base + + BUTTRESS_REG_FW_RESET_CTL); + + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, + val & BUTTRESS_FW_RESET_CTL_DONE, 500, + BUTTRESS_CSE_FWRESET_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, + "Time out while resetting authentication state\n"); + return ret; + } + + dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n"); + writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); + /* leave some time for HW restore */ + usleep_range(800, 1000); + + return 0; +} + +int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, + const struct firmware *fw, struct sg_table *sgt) +{ + bool is_vmalloc = is_vmalloc_addr(fw->data); + struct pci_dev *pdev = sys->isp->pdev; + struct page **pages; + const void *addr; + unsigned long n_pages; + unsigned int i; + int ret; + + if (!is_vmalloc && !virt_addr_valid(fw->data)) + return -EDOM; + + n_pages = PFN_UP(fw->size); + + pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return 
-ENOMEM; + + addr = fw->data; + for (i = 0; i < n_pages; i++) { + struct page *p = is_vmalloc ? + vmalloc_to_page(addr) : virt_to_page(addr); + + if (!p) { + ret = -ENOMEM; + goto out; + } + pages[i] = p; + addr += PAGE_SIZE; + } + + ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, + GFP_KERNEL); + if (ret) { + ret = -ENOMEM; + goto out; + } + + ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + if (ret) { + sg_free_table(sgt); + goto out; + } + + ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0); + if (ret) { + dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + sg_free_table(sgt); + goto out; + } + + ipu6_dma_sync_sgtable(sys, sgt); + +out: + kfree(pages); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, "INTEL_IPU6"); + +void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt) +{ + struct pci_dev *pdev = sys->isp->pdev; + + ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0); + dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + sg_free_table(sgt); +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, "INTEL_IPU6"); + +int ipu6_buttress_authenticate(struct ipu6_device *isp) +{ + struct ipu6_buttress *b = &isp->buttress; + struct ipu6_psys_pdata *psys_pdata; + u32 data, mask, done, fail; + int ret; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); + return 0; + } + + psys_pdata = isp->psys->pdata; + + mutex_lock(&b->auth_mutex); + + if (ipu6_buttress_auth_done(isp)) { + ret = 0; + goto out_unlock; + } + + /* + * Write address of FIT table to FW_SOURCE register + * Let's use fw address. I.e. 
not using FIT table yet + */ + data = lower_32_bits(isp->psys->pkg_dir_dma_addr); + writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); + + data = upper_32_bits(isp->psys->pkg_dir_dma_addr); + writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); + + /* + * Write boot_load into IU2CSEDATA0 + * Write sizeof(boot_load) | 0x2 << CLIENT_ID to + * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); + + ret = ipu6_buttress_ipc_send(isp, + BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, + 1, true, + BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); + if (ret) { + dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); + goto out_unlock; + } + + mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; + done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; + fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, + ((data & mask) == done || + (data & mask) == fail), 500, + BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); + goto out_unlock; + } + + if ((data & mask) == fail) { + dev_err(&isp->pdev->dev, "CSE auth failed\n"); + ret = -EINVAL; + goto out_unlock; + } + + ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET, + data, data == BOOTLOADER_MAGIC_KEY, 500, + BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n", + data); + goto out_unlock; + } + + /* + * Write authenticate_run into IU2CSEDATA0 + * Write sizeof(boot_load) | 0x2 << CLIENT_ID to + * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); + ret = ipu6_buttress_ipc_send(isp, + BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, + 1, true, + BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); + if (ret) { + dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); + goto out_unlock; + } + + done = BUTTRESS_SECURITY_CTL_AUTH_DONE; + ret = readl_poll_timeout(isp->base 
+ BUTTRESS_REG_SECURITY_CTL, data, + ((data & mask) == done || + (data & mask) == fail), 500, + BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); + goto out_unlock; + } + + if ((data & mask) == fail) { + dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); + ret = -EINVAL; + goto out_unlock; + } + + dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); + +out_unlock: + mutex_unlock(&b->auth_mutex); + + return ret; +} + +static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp) +{ + u32 val, mask, done; + int ret; + + mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK; + + writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, + isp->base + BUTTRESS_REG_FABRIC_CMD); + + val = readl(isp->base + BUTTRESS_REG_PWR_STATE); + val = FIELD_GET(mask, val); + if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) { + dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); + return -EINVAL; + } + + done = BUTTRESS_PWR_STATE_HH_STATE_DONE; + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, + FIELD_GET(mask, val) == done, 500, + BUTTRESS_TSC_SYNC_TIMEOUT_US); + if (ret) + dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); + + return ret; +} + +int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) +{ + unsigned int i; + + for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { + u32 val; + int ret; + + ret = ipu6_buttress_send_tsc_request(isp); + if (ret != -ETIMEDOUT) + return ret; + + val = readl(isp->base + BUTTRESS_REG_TSW_CTL); + val = val | BUTTRESS_TSW_CTL_SOFT_RESET; + writel(val, isp->base + BUTTRESS_REG_TSW_CTL); + val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET; + writel(val, isp->base + BUTTRESS_REG_TSW_CTL); + } + + dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); + + return -ETIMEDOUT; +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); + +void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) +{ + u32 tsc_hi_1, tsc_hi_2, tsc_lo; + unsigned long flags; + + local_irq_save(flags); 
+ tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI); + tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI); + if (tsc_hi_1 == tsc_hi_2) { + *val = (u64)tsc_hi_1 << 32 | tsc_lo; + } else { + /* Check if TSC has rolled over */ + if (tsc_lo & BIT(31)) + *val = (u64)tsc_hi_1 << 32 | tsc_lo; + else + *val = (u64)tsc_hi_2 << 32 | tsc_lo; + } + local_irq_restore(flags); +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, "INTEL_IPU6"); + +u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) +{ + u64 ns = ticks * 10000; + + /* + * converting TSC tick count to ns is calculated by: + * Example (TSC clock frequency is 19.2MHz): + * ns = ticks * 1000 000 000 / 19.2Mhz + * = ticks * 1000 000 000 / 19200000Hz + * = ticks * 10000 / 192 ns + */ + return div_u64(ns, isp->buttress.ref_clk); +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, "INTEL_IPU6"); + +void ipu6_buttress_restore(struct ipu6_device *isp) +{ + struct ipu6_buttress *b = &isp->buttress; + + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); +} + +int ipu6_buttress_init(struct ipu6_device *isp) +{ + int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; + struct ipu6_buttress *b = &isp->buttress; + u32 val; + + mutex_init(&b->power_mutex); + mutex_init(&b->auth_mutex); + mutex_init(&b->cons_mutex); + mutex_init(&b->ipc_mutex); + init_completion(&b->cse.send_complete); + init_completion(&b->cse.recv_complete); + + b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; + b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; + b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; + b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; + b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; + b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; + b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; + b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; + + INIT_LIST_HEAD(&b->constraints); + + 
isp->secure_mode = ipu6_buttress_get_secure_mode(isp); + dev_dbg(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n", + isp->secure_mode ? "secure" : "non-secure", + readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH), + readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); + + b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + + /* get ref_clk frequency by reading the indication in btrs control */ + val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); + val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val); + + switch (val) { + case 0x0: + b->ref_clk = 240; + break; + case 0x1: + b->ref_clk = 192; + break; + case 0x2: + b->ref_clk = 384; + break; + default: + dev_warn(&isp->pdev->dev, + "Unsupported ref clock, use 19.2Mhz by default.\n"); + b->ref_clk = 192; + break; + } + + /* Retry couple of times in case of CSE initialization is delayed */ + do { + ret = ipu6_buttress_ipc_reset(isp, &b->cse); + if (ret) { + dev_warn(&isp->pdev->dev, + "IPC reset protocol failed, retrying\n"); + } else { + dev_dbg(&isp->pdev->dev, "IPC reset done\n"); + return 0; + } + } while (ipc_reset_retry--); + + dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); + + mutex_destroy(&b->power_mutex); + mutex_destroy(&b->auth_mutex); + mutex_destroy(&b->cons_mutex); + mutex_destroy(&b->ipc_mutex); + + return ret; +} + +void ipu6_buttress_exit(struct ipu6_device *isp) +{ + struct ipu6_buttress *b = &isp->buttress; + + writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); + + mutex_destroy(&b->power_mutex); + mutex_destroy(&b->auth_mutex); + mutex_destroy(&b->cons_mutex); + mutex_destroy(&b->ipc_mutex); +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h new file mode 100644 index 0000000..51e5ad4 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h @@ -0,0 +1,85 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_BUTTRESS_H +#define IPU6_BUTTRESS_H + +#include +#include +#include +#include + +struct device; +struct firmware; +struct ipu6_device; +struct ipu6_bus_device; + +#define BUTTRESS_PS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32) + +#define BUTTRESS_IS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22) + +struct ipu6_buttress_ctrl { + u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; + unsigned int ratio; + unsigned int qos_floor; +}; + +struct ipu6_buttress_ipc { + struct completion send_complete; + struct completion recv_complete; + u32 nack; + u32 nack_mask; + u32 recv_data; + u32 csr_out; + u32 csr_in; + u32 db0_in; + u32 db0_out; + u32 data0_out; + u32 data0_in; +}; + +struct ipu6_buttress { + struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; + struct ipu6_buttress_ipc cse; + struct list_head constraints; + u32 wdt_cached_value; + bool force_suspend; + u32 ref_clk; +}; + +struct ipu6_ipc_buttress_bulk_msg { + u32 cmd; + u32 expected_resp; + bool require_resp; + u8 cmd_size; +}; + +int ipu6_buttress_ipc_reset(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc); +int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, + const struct firmware *fw, + struct sg_table *sgt); +void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt); +int ipu6_buttress_power(struct device *dev, + const struct ipu6_buttress_ctrl *ctrl, bool on); +bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp); +int ipu6_buttress_authenticate(struct ipu6_device *isp); +int ipu6_buttress_reset_authentication(struct ipu6_device *isp); +bool ipu6_buttress_auth_done(struct ipu6_device *isp); +int ipu6_buttress_start_tsc_sync(struct 
ipu6_device *isp); +void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val); +u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp); + +irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr); +irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr); +int ipu6_buttress_init(struct ipu6_device *isp); +void ipu6_buttress_exit(struct ipu6_device *isp); +void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); +void ipu6_buttress_restore(struct ipu6_device *isp); +#endif /* IPU6_BUTTRESS_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c new file mode 100644 index 0000000..b7013f6 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-cpd.h" +#include "ipu6-dma.h" + +/* 15 entries + header*/ +#define MAX_PKG_DIR_ENT_CNT 16 +/* 2 qword per entry/header */ +#define PKG_DIR_ENT_LEN 2 +/* PKG_DIR size in bytes */ +#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ + (PKG_DIR_ENT_LEN) * sizeof(u64)) +/* _IUPKDR_ */ +#define PKG_DIR_HDR_MARK 0x5f4955504b44525fULL + +/* $CPD */ +#define CPD_HDR_MARK 0x44504324 + +#define MAX_MANIFEST_SIZE (SZ_2K * sizeof(u32)) +#define MAX_METADATA_SIZE SZ_64K + +#define MAX_COMPONENT_ID 127 +#define MAX_COMPONENT_VERSION 0xffff + +#define MANIFEST_IDX 0 +#define METADATA_IDX 1 +#define MODULEDATA_IDX 2 +/* + * PKG_DIR Entry (type == id) + * 63:56 55 54:48 47:32 31:24 23:0 + * Rsvd Rsvd Type Version Rsvd Size + */ +#define PKG_DIR_SIZE_MASK GENMASK_ULL(23, 0) +#define PKG_DIR_VERSION_MASK GENMASK_ULL(47, 32) +#define PKG_DIR_TYPE_MASK GENMASK_ULL(54, 48) + +static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd, + u8 idx) +{ + const 
struct ipu6_cpd_hdr *cpd_hdr = cpd; + const struct ipu6_cpd_ent *ent; + + ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len); + return ent + idx; +} + +#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX) +#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX) +#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX) + +static const struct ipu6_cpd_metadata_cmpnt_hdr * +ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata, + unsigned int metadata_size, u8 idx) +{ + size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn); + size_t cmpnt_count = metadata_size - extn_size; + + cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size); + + if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { + dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", + idx); + return ERR_PTR(-EINVAL); + } + + return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size; +} + +static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; + + cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->ver; +} + +static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; + + cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->id; +} + +static int ipu6_cpd_parse_module_data(struct ipu6_device *isp, + const void *module_data, + unsigned int module_data_size, + dma_addr_t dma_addr_module_data, + u64 *pkg_dir, const void *metadata, + unsigned int metadata_size) +{ + const struct ipu6_cpd_module_data_hdr *module_data_hdr; + const struct ipu6_cpd_hdr *dir_hdr; + const struct 
ipu6_cpd_ent *dir_ent; + unsigned int i; + u8 len; + + if (!module_data) + return -EINVAL; + + module_data_hdr = module_data; + dir_hdr = module_data + module_data_hdr->hdr_len; + len = dir_hdr->hdr_len; + dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len); + + pkg_dir[0] = PKG_DIR_HDR_MARK; + /* pkg_dir entry count = component count + pkg_dir header */ + pkg_dir[1] = dir_hdr->ent_cnt + 1; + + for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { + u64 *p = &pkg_dir[PKG_DIR_ENT_LEN * (1 + i)]; + int ver, id; + + *p++ = dma_addr_module_data + dir_ent->offset; + id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata, + metadata_size, i); + if (id < 0 || id > MAX_COMPONENT_ID) { + dev_err(&isp->pdev->dev, "Invalid CPD component id\n"); + return -EINVAL; + } + + ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata, + metadata_size, i); + if (ver < 0 || ver > MAX_COMPONENT_VERSION) { + dev_err(&isp->pdev->dev, + "Invalid CPD component version\n"); + return -EINVAL; + } + + *p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) | + FIELD_PREP(PKG_DIR_TYPE_MASK, id) | + FIELD_PREP(PKG_DIR_VERSION_MASK, ver); + } + + return 0; +} + +int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src) +{ + dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl); + const struct ipu6_cpd_ent *ent, *man_ent, *met_ent; + struct ipu6_device *isp = adev->isp; + unsigned int man_sz, met_sz; + void *pkg_dir_pos; + int ret; + + man_ent = ipu6_cpd_get_manifest(src); + man_sz = man_ent->len; + + met_ent = ipu6_cpd_get_metadata(src); + met_sz = met_ent->len; + + adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; + adev->pkg_dir = ipu6_dma_alloc(adev, adev->pkg_dir_size, + &adev->pkg_dir_dma_addr, GFP_KERNEL, 0); + if (!adev->pkg_dir) + return -ENOMEM; + + /* + * pkg_dir entry/header: + * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 + * N Address/Offset/"_IUPKDR_" + * N + 1 | rsvd | rsvd | type | ver | rsvd | size + * + * We can ignore other fields that size in N + 1 qword 
as they + * are 0 anyway. Just setting size for now. + */ + + ent = ipu6_cpd_get_moduledata(src); + + ret = ipu6_cpd_parse_module_data(isp, src + ent->offset, + ent->len, dma_addr_src + ent->offset, + adev->pkg_dir, src + met_ent->offset, + met_ent->len); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to parse module data\n"); + ipu6_dma_free(adev, adev->pkg_dir_size, + adev->pkg_dir, adev->pkg_dir_dma_addr, 0); + return ret; + } + + /* Copy manifest after pkg_dir */ + pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; + memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); + + /* Copy metadata after manifest */ + pkg_dir_pos += man_sz; + memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); + + ipu6_dma_sync_single(adev, adev->pkg_dir_dma_addr, + adev->pkg_dir_size); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, "INTEL_IPU6"); + +void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev) +{ + ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir, + adev->pkg_dir_dma_addr, 0); +} +EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, "INTEL_IPU6"); + +static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd, + unsigned long cpd_size, + unsigned long data_size) +{ + const struct ipu6_cpd_hdr *cpd_hdr = cpd; + const struct ipu6_cpd_ent *ent; + unsigned int i; + u8 len; + + len = cpd_hdr->hdr_len; + + /* Ensure cpd hdr is within moduledata */ + if (cpd_size < len) { + dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); + return -EINVAL; + } + + /* Sanity check for CPD header */ + if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Ensure that all entries are within moduledata */ + ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len); + for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { + if (data_size < ent->offset || + data_size - ent->offset < ent->len) { + dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); + return 
-EINVAL; + } + } + + return 0; +} + +static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp, + const void *moduledata, + u32 moduledata_size) +{ + const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata; + int ret; + + /* Ensure moduledata hdr is within moduledata */ + if (moduledata_size < sizeof(*mod_hdr) || + moduledata_size < mod_hdr->hdr_len) { + dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); + return -EINVAL; + } + + dev_dbg(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date); + ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len, + moduledata_size - mod_hdr->hdr_len, + moduledata_size); + if (ret) { + dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n"); + return ret; + } + + return 0; +} + +static int ipu6_cpd_validate_metadata(struct ipu6_device *isp, + const void *metadata, u32 meta_size) +{ + const struct ipu6_cpd_metadata_extn *extn = metadata; + + /* Sanity check for metadata size */ + if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { + dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); + return -EINVAL; + } + + /* Validate extension and image types */ + if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT || + extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { + dev_err(&isp->pdev->dev, + "Invalid CPD metadata descriptor img_type (%d)\n", + extn->img_type); + return -EINVAL; + } + + /* Validate metadata size multiple of metadata components */ + if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) { + dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n"); + return -EINVAL; + } + + return 0; +} + +int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, + unsigned long cpd_file_size) +{ + const struct ipu6_cpd_hdr *hdr = cpd_file; + const struct ipu6_cpd_ent *ent; + int ret; + + ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size, + cpd_file_size); + if (ret) { + dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); + return ret; + } + + 
/* Check for CPD file marker */ + if (hdr->hdr_mark != CPD_HDR_MARK) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Sanity check for manifest size */ + ent = ipu6_cpd_get_manifest(cpd_file); + if (ent->len > MAX_MANIFEST_SIZE) { + dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n"); + return -EINVAL; + } + + /* Validate metadata */ + ent = ipu6_cpd_get_metadata(cpd_file); + ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); + if (ret) { + dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); + return ret; + } + + /* Validate moduledata */ + ent = ipu6_cpd_get_moduledata(cpd_file); + ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset, + ent->len); + if (ret) + dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n"); + + return ret; +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h new file mode 100644 index 0000000..e0e4fde --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2015--2024 Intel Corporation */ + +#ifndef IPU6_CPD_H +#define IPU6_CPD_H + +struct ipu6_device; +struct ipu6_bus_device; + +#define IPU6_CPD_SIZE_OF_FW_ARCH_VERSION 7 +#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION 11 +#define IPU6_CPD_SIZE_OF_COMPONENT_NAME 12 + +#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 + +#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED 0 +#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 +#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 + +#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX 0 +#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX 1 + +#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE 3 + +#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48 +#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE 32 + +struct ipu6_cpd_module_data_hdr { + u32 hdr_len; + u32 endian; + u32 fw_pkg_date; + u32 hive_sdk_date; + u32 compiler_date; + u32 target_platform_type; + u8 
sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION]; + u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION]; + u8 rsvd[2]; +} __packed; + +/* + * ipu6_cpd_hdr structure updated as the chksum and + * sub_partition_name is unused on host side + * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10) + * CSE layout version 1.7 for IPU6 (hdr_len = 0x14) + */ +struct ipu6_cpd_hdr { + u32 hdr_mark; + u32 ent_cnt; + u8 hdr_ver; + u8 ent_ver; + u8 hdr_len; +} __packed; + +struct ipu6_cpd_ent { + u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME]; + u32 offset; + u32 len; + u8 rsvd[4]; +} __packed; + +struct ipu6_cpd_metadata_cmpnt_hdr { + u32 id; + u32 size; + u32 ver; +} __packed; + +struct ipu6_cpd_metadata_cmpnt { + struct ipu6_cpd_metadata_cmpnt_hdr hdr; + u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +} __packed; + +struct ipu6se_cpd_metadata_cmpnt { + struct ipu6_cpd_metadata_cmpnt_hdr hdr; + u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +} __packed; + +struct ipu6_cpd_metadata_extn { + u32 extn_type; + u32 len; + u32 img_type; + u8 rsvd[16]; +} __packed; + +struct ipu6_cpd_client_pkg_hdr { + u32 prog_list_offs; + u32 prog_list_size; + u32 prog_desc_offs; + u32 prog_desc_size; + u32 pg_manifest_offs; + u32 pg_manifest_size; + u32 prog_bin_offs; + u32 prog_bin_size; +} __packed; + +int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src); +void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev); +int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, + unsigned long cpd_file_size); +#endif /* IPU6_CPD_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.c new file mode 100644 index 0000000..7296373 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-mmu.h" + +struct vm_info { + struct list_head list; + struct page **pages; + dma_addr_t ipu6_iova; + void *vaddr; + unsigned long size; +}; + +static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova) +{ + struct vm_info *info, *save; + + list_for_each_entry_safe(info, save, &mmu->vma_list, list) { + if (iova >= info->ipu6_iova && + iova < (info->ipu6_iova + info->size)) + return info; + } + + return NULL; +} + +static void __clear_buffer(struct page *page, size_t size, unsigned long attrs) +{ + void *ptr; + + if (!page) + return; + /* + * Ensure that the allocated pages are zeroed, and that any data + * lurking in the kernel direct-mapped region is invalidated. + */ + ptr = page_address(page); + memset(ptr, 0, size); + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) + clflush_cache_range(ptr, size); +} + +static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs) +{ + int count = PHYS_PFN(size); + int array_size = count * sizeof(struct page *); + struct page **pages; + int i = 0; + + pages = kvzalloc(array_size, GFP_KERNEL); + if (!pages) + return NULL; + + gfp |= __GFP_NOWARN; + + while (count) { + int j, order = __fls(count); + + pages[i] = alloc_pages(gfp, order); + while (!pages[i] && order) + pages[i] = alloc_pages(gfp, --order); + if (!pages[i]) + goto error; + + if (order) { + split_page(pages[i], order); + j = 1 << order; + while (j--) + pages[i + j] = pages[i] + j; + } + + __clear_buffer(pages[i], PAGE_SIZE << order, attrs); + i += 1 << order; + count -= 1 << order; + } + + return pages; +error: + while (i--) + if (pages[i]) + __free_pages(pages[i], 0); + kvfree(pages); + return NULL; +} + +static void __free_buffer(struct page **pages, size_t size, unsigned long attrs) +{ + int count = PHYS_PFN(size); + unsigned int i; + + for (i = 0; i < count && 
pages[i]; i++) { + __clear_buffer(pages[i], PAGE_SIZE, attrs); + __free_pages(pages[i], 0); + } + + kvfree(pages); +} + +void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, + size_t size) +{ + void *vaddr; + u32 offset; + struct vm_info *info; + struct ipu6_mmu *mmu = sys->mmu; + + info = get_vm_info(mmu, dma_handle); + if (WARN_ON(!info)) + return; + + offset = dma_handle - info->ipu6_iova; + if (WARN_ON(size > (info->size - offset))) + return; + + vaddr = info->vaddr + offset; + clflush_cache_range(vaddr, size); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, "INTEL_IPU6"); + +void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nents, i) + clflush_cache_range(sg_virt(sg), sg->length); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, "INTEL_IPU6"); + +void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt) +{ + ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, "INTEL_IPU6"); + +void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct pci_dev *pdev = sys->isp->pdev; + dma_addr_t pci_dma_addr, ipu6_iova; + struct ipu6_mmu *mmu = sys->mmu; + struct vm_info *info; + unsigned long count; + struct page **pages; + struct iova *iova; + unsigned int i; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return NULL; + + size = PAGE_ALIGN(size); + count = PHYS_PFN(size); + + iova = alloc_iova(&mmu->dmap->iovad, count, + PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); + if (!iova) + goto out_kfree; + + pages = __alloc_buffer(size, gfp, attrs); + if (!pages) + goto out_free_iova; + + dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n", + size, iova->pfn_lo, iova->pfn_hi); + for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { + 
pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, + PAGE_SIZE, DMA_BIDIRECTIONAL, + attrs); + dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n", + &pci_dma_addr); + if (dma_mapping_error(&pdev->dev, pci_dma_addr)) { + dev_err(dev, "pci_dma_mapping for page[%d] failed", i); + goto out_unmap; + } + + ret = ipu6_mmu_map(mmu->dmap->mmu_info, + PFN_PHYS(iova->pfn_lo + i), pci_dma_addr, + PAGE_SIZE); + if (ret) { + dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed", + i, &pci_dma_addr); + dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, + PAGE_SIZE, DMA_BIDIRECTIONAL, + attrs); + goto out_unmap; + } + } + + info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL); + if (!info->vaddr) + goto out_unmap; + + *dma_handle = PFN_PHYS(iova->pfn_lo); + + info->pages = pages; + info->ipu6_iova = *dma_handle; + info->size = size; + list_add(&info->list, &mmu->vma_list); + + return info->vaddr; + +out_unmap: + while (i--) { + ipu6_iova = PFN_PHYS(iova->pfn_lo + i); + pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, + ipu6_iova); + dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, attrs); + + ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE); + } + + __free_buffer(pages, size, attrs); + +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); +out_kfree: + kfree(info); + + return NULL; +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, "INTEL_IPU6"); + +void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct ipu6_mmu *mmu = sys->mmu; + struct pci_dev *pdev = sys->isp->pdev; + struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle)); + dma_addr_t pci_dma_addr, ipu6_iova; + struct vm_info *info; + struct page **pages; + unsigned int i; + + if (WARN_ON(!iova)) + return; + + info = get_vm_info(mmu, dma_handle); + if (WARN_ON(!info)) + return; + + if (WARN_ON(!info->vaddr)) + return; + + if (WARN_ON(!info->pages)) + return; + + 
list_del(&info->list); + + size = PAGE_ALIGN(size); + + pages = info->pages; + + vunmap(vaddr); + + for (i = 0; i < PHYS_PFN(size); i++) { + ipu6_iova = PFN_PHYS(iova->pfn_lo + i); + pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, + ipu6_iova); + dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, attrs); + } + + ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + + __free_buffer(pages, size, attrs); + + mmu->tlb_invalidate(mmu); + + __free_iova(&mmu->dmap->iovad, iova); + + kfree(info); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, "INTEL_IPU6"); + +int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, + unsigned long attrs) +{ + struct ipu6_mmu *mmu = sys->mmu; + size_t count = PFN_UP(size); + struct vm_info *info; + size_t i; + int ret; + + info = get_vm_info(mmu, iova); + if (!info) + return -EFAULT; + + if (!info->vaddr) + return -EFAULT; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + if (size > info->size) + return -EFAULT; + + for (i = 0; i < count; i++) { + ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i), + info->pages[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct ipu6_mmu *mmu = sys->mmu; + struct iova *iova = find_iova(&mmu->dmap->iovad, + PHYS_PFN(sg_dma_address(sglist))); + struct scatterlist *sg; + dma_addr_t pci_dma_addr; + unsigned int i; + + if (!nents) + return; + + if (WARN_ON(!iova)) + return; + + /* + * Before IPU6 mmu unmap, return the pci dma address back to sg + * assume the nents is less than orig_nents as the least granule + * is 1 SZ_4K page + */ + dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents); + for_each_sg(sglist, sg, nents, i) { + dev_dbg(dev, "unmap sg[%d] %pad 
size %u\n", i, + &sg_dma_address(sg), sg_dma_len(sg)); + pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, + sg_dma_address(sg)); + dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n", + &pci_dma_addr, i); + sg_dma_address(sg) = pci_dma_addr; + } + + dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n", + iova->pfn_lo, iova->pfn_hi); + ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + + mmu->tlb_invalidate(mmu); + __free_iova(&mmu->dmap->iovad, iova); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, "INTEL_IPU6"); + +int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct ipu6_mmu *mmu = sys->mmu; + struct scatterlist *sg; + struct iova *iova; + size_t npages = 0; + unsigned long iova_addr; + int i; + + for_each_sg(sglist, sg, nents, i) { + if (sg->offset) { + dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n", + i, sg->offset); + return -EFAULT; + } + } + + for_each_sg(sglist, sg, nents, i) + npages += PFN_UP(sg_dma_len(sg)); + + dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n", + nents, npages); + + iova = alloc_iova(&mmu->dmap->iovad, npages, + PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); + if (!iova) + return 0; + + dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, + iova->pfn_hi); + + iova_addr = iova->pfn_lo; + for_each_sg(sglist, sg, nents, i) { + phys_addr_t iova_pa; + int ret; + + iova_pa = PFN_PHYS(iova_addr); + dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n", + i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg)); + + ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), + sg_dma_address(sg), + PAGE_ALIGN(sg_dma_len(sg))); + if (ret) + goto out_fail; + + sg_dma_address(sg) = PFN_PHYS(iova_addr); + + iova_addr += PFN_UP(sg_dma_len(sg)); + } + + dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages); + + 
return nents; + +out_fail: + ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, "INTEL_IPU6"); + +int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + int nents; + + nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs); + if (nents < 0) + return nents; + + sgt->nents = nents; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, "INTEL_IPU6"); + +void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6"); diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.h new file mode 100644 index 0000000..ae9b9a5 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_DMA_H +#define IPU6_DMA_H + +#include +#include +#include + +#include "ipu6-bus.h" + +struct ipu6_mmu_info; + +struct ipu6_dma_mapping { + struct ipu6_mmu_info *mmu_info; + struct iova_domain iovad; +}; + +void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, + size_t size); +void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents); +void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt); +void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); +void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs); +int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, + unsigned long attrs); +int ipu6_dma_map_sg(struct 
ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs); +void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs); +int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); +void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); +#endif /* IPU6_DMA_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c new file mode 100644 index 0000000..40d8ce1 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-fw-com.h" + +/* + * FWCOM layer is a shared resource between FW and driver. It consist + * of token queues to both send and receive directions. Queue is simply + * an array of structures with read and write indexes to the queue. + * There are 1...n queues to both directions. Queues locates in + * system RAM and are mapped to ISP MMU so that both CPU and ISP can + * see the same buffer. Indexes are located in ISP DMEM so that FW code + * can poll those with very low latency and cost. CPU access to indexes is + * more costly but that happens only at message sending time and + * interrupt triggered message handling. CPU doesn't need to poll indexes. + * wr_reg / rd_reg are offsets to those dmem location. They are not + * the indexes itself. 
+ */ + +/* Shared structure between driver and FW - do not modify */ +struct ipu6_fw_sys_queue { + u64 host_address; + u32 vied_address; + u32 size; + u32 token_size; + u32 wr_reg; /* reg number in subsystem's regmem */ + u32 rd_reg; + u32 _align; +} __packed; + +struct ipu6_fw_sys_queue_res { + u64 host_address; + u32 vied_address; + u32 reg; +} __packed; + +enum syscom_state { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57a7e000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57a7e001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57a7e002, +}; + +enum syscom_cmd { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57a7f000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57a7f001, +}; + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ipu6_fw_syscom_config { + u32 firmware_address; + + u32 num_input_queues; + u32 num_output_queues; + + /* ISP pointers to an array of ipu6_fw_sys_queue structures */ + u32 input_queue; + u32 output_queue; + + /* ISYS / PSYS private data */ + u32 specific_addr; + u32 specific_size; +}; + +struct ipu6_fw_com_context { + struct ipu6_bus_device *adev; + void __iomem *dmem_addr; + int (*cell_ready)(struct ipu6_bus_device *adev); + void (*cell_start)(struct ipu6_bus_device *adev); + + void *dma_buffer; + dma_addr_t dma_addr; + unsigned int dma_size; + + struct ipu6_fw_sys_queue *input_queue; /* array of host to SP queues */ + struct ipu6_fw_sys_queue *output_queue; /* array of SP to host */ + + u32 config_vied_addr; + + unsigned int buttress_boot_offset; + void __iomem *base_addr; +}; + +#define FW_COM_WR_REG 0 +#define FW_COM_RD_REG 4 + +#define REGMEM_OFFSET 0 +#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a + +enum regmem_id { + /* pass pkg_dir address to SPC in 
non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* Tunit CFG blob for secure - provided by host.*/ + TUNIT_CFG_DWR_REG = 1, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 2, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 3, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 4 +}; + +#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000 +#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) \ + ((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4) + +enum buttress_syscom_id { + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_ID = 0, + /* syscom state - modified by SP */ + SYSCOM_STATE_ID = 1, + /* syscom vtl0 addr mask */ + SYSCOM_VTL0_ADDR_MASK_ID = 2, + SYSCOM_ID_MAX +}; + +static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size, + unsigned int token_size, + struct ipu6_fw_sys_queue_res *res) +{ + unsigned int buf_size = (size + 1) * token_size; + + q->size = size + 1; + q->token_size = token_size; + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; +} + +void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, + struct ipu6_bus_device *adev, void __iomem *base) +{ + size_t conf_size, inq_size, outq_size, specific_size; + struct ipu6_fw_syscom_config *config_host_addr; + unsigned int sizeinput = 0, sizeoutput = 0; + struct ipu6_fw_sys_queue_res res; + struct ipu6_fw_com_context *ctx; + struct device *dev = &adev->auxdev.dev; + size_t sizeall, offset; + void *specific_host_addr; + unsigned int i; + + if (!cfg || !cfg->cell_start || !cfg->cell_ready) + return NULL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; + ctx->adev = adev; + ctx->cell_start = 
cfg->cell_start; + ctx->cell_ready = cfg->cell_ready; + ctx->buttress_boot_offset = cfg->buttress_boot_offset; + ctx->base_addr = base; + + /* + * Allocate DMA mapped memory. Allocate one big chunk. + */ + /* Base cfg for FW */ + conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8); + /* Descriptions of the queues */ + inq_size = size_mul(cfg->num_input_queues, + sizeof(struct ipu6_fw_sys_queue)); + outq_size = size_mul(cfg->num_output_queues, + sizeof(struct ipu6_fw_sys_queue)); + /* FW specific information structure */ + specific_size = roundup(cfg->specific_size, 8); + + sizeall = conf_size + inq_size + outq_size + specific_size; + + for (i = 0; i < cfg->num_input_queues; i++) + sizeinput += size_mul(cfg->input[i].queue_size + 1, + cfg->input[i].token_size); + + for (i = 0; i < cfg->num_output_queues; i++) + sizeoutput += size_mul(cfg->output[i].queue_size + 1, + cfg->output[i].token_size); + + sizeall += sizeinput + sizeoutput; + + ctx->dma_buffer = ipu6_dma_alloc(adev, sizeall, &ctx->dma_addr, + GFP_KERNEL, 0); + if (!ctx->dma_buffer) { + dev_err(dev, "failed to allocate dma memory\n"); + kfree(ctx); + return NULL; + } + + ctx->dma_size = sizeall; + + config_host_addr = ctx->dma_buffer; + ctx->config_vied_addr = ctx->dma_addr; + + offset = conf_size; + ctx->input_queue = ctx->dma_buffer + offset; + config_host_addr->input_queue = ctx->dma_addr + offset; + config_host_addr->num_input_queues = cfg->num_input_queues; + + offset += inq_size; + ctx->output_queue = ctx->dma_buffer + offset; + config_host_addr->output_queue = ctx->dma_addr + offset; + config_host_addr->num_output_queues = cfg->num_output_queues; + + /* copy firmware specific data */ + offset += outq_size; + specific_host_addr = ctx->dma_buffer + offset; + config_host_addr->specific_addr = ctx->dma_addr + offset; + config_host_addr->specific_size = cfg->specific_size; + if (cfg->specific_addr && cfg->specific_size) + memcpy(specific_host_addr, cfg->specific_addr, + cfg->specific_size); + + 
ipu6_dma_sync_single(adev, ctx->config_vied_addr, sizeall); + + /* initialize input queues */ + offset += specific_size; + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = (uintptr_t)(ctx->dma_buffer + offset); + res.vied_address = ctx->dma_addr + offset; + for (i = 0; i < cfg->num_input_queues; i++) + ipu6_sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + + /* initialize output queues */ + offset += sizeinput; + res.host_address = (uintptr_t)(ctx->dma_buffer + offset); + res.vied_address = ctx->dma_addr + offset; + for (i = 0; i < cfg->num_output_queues; i++) { + ipu6_sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + return ctx; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, "INTEL_IPU6"); + +int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) +{ + /* write magic pattern to disable the tunit trace */ + writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); + /* Check if SP is in valid state */ + if (!ctx->cell_ready(ctx->adev)) + return -EIO; + + /* store syscom uninitialized command */ + writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); + + /* store syscom uninitialized state */ + writel(SYSCOM_STATE_UNINIT, + BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_STATE_ID)); + + /* store firmware configuration address */ + writel(ctx->config_vied_addr, + BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_CONFIG_ID)); + ctx->cell_start(ctx->adev); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, "INTEL_IPU6"); + +int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx) +{ + int state; + + state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_STATE_ID)); + if (state != SYSCOM_STATE_READY) + return -EBUSY; + + /* set close request flag */ + writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + + SYSCOM_COMMAND_REG * 
4); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, "INTEL_IPU6"); + +int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force) +{ + /* check if release is forced, an verify cell state if it is not */ + if (!force && !ctx->cell_ready(ctx->adev)) + return -EBUSY; + + ipu6_dma_free(ctx->adev, ctx->dma_size, + ctx->dma_buffer, ctx->dma_addr, 0); + kfree(ctx); + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, "INTEL_IPU6"); + +bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx) +{ + int state; + + state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_STATE_ID)); + + return state == SYSCOM_STATE_READY; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, "INTEL_IPU6"); + +void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + unsigned int index; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) + return NULL; + + if (wr < rd) + packets = rd - wr - 1; + else + packets = q->size - (wr - rd + 1); + + if (!packets) + return NULL; + + index = readl(q_dmem + FW_COM_WR_REG); + + return (void *)((uintptr_t)q->host_address + index * q->token_size); +} +EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, "INTEL_IPU6"); + +void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1; + + if (wr >= q->size) + wr = 0; + + writel(wr, q_dmem + FW_COM_WR_REG); +} +EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, "INTEL_IPU6"); + +void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = 
ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) + return NULL; + + if (wr < rd) + wr += q->size; + + packets = wr - rd; + if (!packets) + return NULL; + + return (void *)((uintptr_t)q->host_address + rd * q->token_size); +} +EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, "INTEL_IPU6"); + +void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1; + + if (rd >= q->size) + rd = 0; + + writel(rd, q_dmem + FW_COM_RD_REG); +} +EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, "INTEL_IPU6"); diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h new file mode 100644 index 0000000..b02285a --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_FW_COM_H +#define IPU6_FW_COM_H + +struct ipu6_fw_com_context; +struct ipu6_bus_device; + +struct ipu6_fw_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0 + +struct ipu6_fw_com_cfg { + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ipu6_fw_syscom_queue_config *input; + struct ipu6_fw_syscom_queue_config *output; + + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + int (*cell_ready)(struct ipu6_bus_device *adev); + void (*cell_start)(struct ipu6_bus_device *adev); + + unsigned int buttress_boot_offset; +}; + +void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, + struct 
ipu6_bus_device *adev, void __iomem *base); + +int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx); +bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx); +int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx); +int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force); + +void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); + +#endif diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c new file mode 100644 index 0000000..62ed92f --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-fw-com.h" +#include "ipu6-isys.h" +#include "ipu6-platform-isys-csi2-reg.h" +#include "ipu6-platform-regs.h" + +static const char send_msg_types[N_IPU6_FW_ISYS_SEND_TYPE][32] = { + "STREAM_OPEN", + "STREAM_START", + "STREAM_START_AND_CAPTURE", + "STREAM_CAPTURE", + "STREAM_STOP", + "STREAM_FLUSH", + "STREAM_CLOSE" +}; + +static int handle_proxy_response(struct ipu6_isys *isys, unsigned int req_id) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_fw_isys_proxy_resp_info_abi *resp; + int ret; + + resp = ipu6_recv_get_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); + if (!resp) + return 1; + + dev_dbg(dev, "Proxy response: id %u, error %u, details %u\n", + resp->request_id, resp->error_info.error, + resp->error_info.error_details); + + ret = req_id == resp->request_id ? 
0 : -EIO; + + ipu6_recv_put_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); + + return ret; +} + +int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned int offset, u32 value) +{ + struct ipu6_fw_com_context *ctx = isys->fwcom; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_fw_proxy_send_queue_token *token; + unsigned int timeout = 1000; + int ret; + + dev_dbg(dev, + "proxy send: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n", + req_id, index, offset, value); + + token = ipu6_send_get_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); + if (!token) + return -EBUSY; + + token->request_id = req_id; + token->region_index = index; + token->offset = offset; + token->value = value; + ipu6_send_put_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); + + do { + usleep_range(100, 110); + ret = handle_proxy_response(isys, req_id); + if (!ret) + break; + if (ret == -EIO) { + dev_err(dev, "Proxy respond with unexpected id\n"); + break; + } + timeout--; + } while (ret && timeout); + + if (!timeout) + dev_err(dev, "Proxy response timed out\n"); + + return ret; +} + +int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, u16 send_type) +{ + struct ipu6_fw_com_context *ctx = isys->fwcom; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_fw_send_queue_token *token; + + if (send_type >= N_IPU6_FW_ISYS_SEND_TYPE) + return -EINVAL; + + dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]); + + /* + * Time to flush cache in case we have some payload. 
Not all messages + * have that + */ + if (cpu_mapped_buf) + clflush_cache_range(cpu_mapped_buf, size); + + token = ipu6_send_get_token(ctx, + stream_handle + IPU6_BASE_MSG_SEND_QUEUES); + if (!token) + return -EBUSY; + + token->payload = dma_mapped_buf; + token->buf_handle = (unsigned long)cpu_mapped_buf; + token->send_type = send_type; + + ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES); + + return 0; +} + +int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, u16 send_type) +{ + return ipu6_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, + send_type); +} + +int ipu6_fw_isys_close(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + int retry = IPU6_ISYS_CLOSE_RETRY; + unsigned long flags; + void *fwcom; + int ret; + + /* + * Stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + * spinlock to wait the interrupt handler to be finished + */ + spin_lock_irqsave(&isys->power_lock, flags); + ret = ipu6_fw_com_close(isys->fwcom); + fwcom = isys->fwcom; + isys->fwcom = NULL; + spin_unlock_irqrestore(&isys->power_lock, flags); + if (ret) + dev_err(dev, "Device close failure: %d\n", ret); + + /* release probably fails if the close failed. 
Let's try still */ + do { + usleep_range(400, 500); + ret = ipu6_fw_com_release(fwcom, 0); + retry--; + } while (ret && retry); + + if (ret) { + dev_err(dev, "Device release time out %d\n", ret); + spin_lock_irqsave(&isys->power_lock, flags); + isys->fwcom = fwcom; + spin_unlock_irqrestore(&isys->power_lock, flags); + } + + return ret; +} + +void ipu6_fw_isys_cleanup(struct ipu6_isys *isys) +{ + int ret; + + ret = ipu6_fw_com_release(isys->fwcom, 1); + if (ret < 0) + dev_warn(&isys->adev->auxdev.dev, + "Device busy, fw_com release failed."); + isys->fwcom = NULL; +} + +static void start_sp(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + void __iomem *spc_regs_base = isys->pdata->base + + isys->pdata->ipdata->hw_variant.spc_offset; + u32 val = IPU6_ISYS_SPC_STATUS_START | + IPU6_ISYS_SPC_STATUS_RUN | + IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + + val |= isys->icache_prefetch ? IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; + + writel(val, spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); +} + +static int query_sp(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + void __iomem *spc_regs_base = isys->pdata->base + + isys->pdata->ipdata->hw_variant.spc_offset; + u32 val; + + val = readl(spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); + /* return true when READY == 1, START == 0 */ + val &= IPU6_ISYS_SPC_STATUS_READY | IPU6_ISYS_SPC_STATUS_START; + + return val == IPU6_ISYS_SPC_STATUS_READY; +} + +static int ipu6_isys_fwcom_cfg_init(struct ipu6_isys *isys, + struct ipu6_fw_com_cfg *fwcom, + unsigned int num_streams) +{ + unsigned int max_send_queues, max_sram_blocks, max_devq_size; + struct ipu6_fw_syscom_queue_config *input_queue_cfg; + struct ipu6_fw_syscom_queue_config *output_queue_cfg; + struct device *dev = &isys->adev->auxdev.dev; + int type_proxy = IPU6_FW_ISYS_QUEUE_TYPE_PROXY; + int type_dev = IPU6_FW_ISYS_QUEUE_TYPE_DEV; + int type_msg = IPU6_FW_ISYS_QUEUE_TYPE_MSG; + int 
base_dev_send = IPU6_BASE_DEV_SEND_QUEUES; + int base_msg_send = IPU6_BASE_MSG_SEND_QUEUES; + int base_msg_recv = IPU6_BASE_MSG_RECV_QUEUES; + struct ipu6_fw_isys_fw_config *isys_fw_cfg; + u32 num_in_message_queues; + unsigned int max_streams; + unsigned int size; + unsigned int i; + + max_streams = isys->pdata->ipdata->max_streams; + max_send_queues = isys->pdata->ipdata->max_send_queues; + max_sram_blocks = isys->pdata->ipdata->max_sram_blocks; + max_devq_size = isys->pdata->ipdata->max_devq_size; + num_in_message_queues = clamp(num_streams, 1U, max_streams); + isys_fw_cfg = devm_kzalloc(dev, sizeof(*isys_fw_cfg), GFP_KERNEL); + if (!isys_fw_cfg) + return -ENOMEM; + + isys_fw_cfg->num_send_queues[type_proxy] = IPU6_N_MAX_PROXY_SEND_QUEUES; + isys_fw_cfg->num_send_queues[type_dev] = IPU6_N_MAX_DEV_SEND_QUEUES; + isys_fw_cfg->num_send_queues[type_msg] = num_in_message_queues; + isys_fw_cfg->num_recv_queues[type_proxy] = IPU6_N_MAX_PROXY_RECV_QUEUES; + /* Common msg/dev return queue */ + isys_fw_cfg->num_recv_queues[type_dev] = 0; + isys_fw_cfg->num_recv_queues[type_msg] = 1; + + size = sizeof(*input_queue_cfg) * max_send_queues; + input_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); + if (!input_queue_cfg) + return -ENOMEM; + + size = sizeof(*output_queue_cfg) * IPU6_N_MAX_RECV_QUEUES; + output_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); + if (!output_queue_cfg) + return -ENOMEM; + + fwcom->input = input_queue_cfg; + fwcom->output = output_queue_cfg; + + fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] + + isys_fw_cfg->num_send_queues[type_dev] + + isys_fw_cfg->num_send_queues[type_msg]; + + fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] + + isys_fw_cfg->num_recv_queues[type_dev] + + isys_fw_cfg->num_recv_queues[type_msg]; + + /* SRAM partitioning. Equal partitioning is set. 
*/ + for (i = 0; i < max_sram_blocks; i++) { + if (i < num_in_message_queues) + isys_fw_cfg->buffer_partition.num_gda_pages[i] = + (IPU6_DEVICE_GDA_NR_PAGES * + IPU6_DEVICE_GDA_VIRT_FACTOR) / + num_in_message_queues; + else + isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0; + } + + /* FW assumes proxy interface at fwcom queue 0 */ + for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) { + input_queue_cfg[i].token_size = + sizeof(struct ipu6_fw_proxy_send_queue_token); + input_queue_cfg[i].queue_size = IPU6_ISYS_SIZE_PROXY_SEND_QUEUE; + } + + for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) { + input_queue_cfg[base_dev_send + i].token_size = + sizeof(struct ipu6_fw_send_queue_token); + input_queue_cfg[base_dev_send + i].queue_size = max_devq_size; + } + + for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) { + input_queue_cfg[base_msg_send + i].token_size = + sizeof(struct ipu6_fw_send_queue_token); + input_queue_cfg[base_msg_send + i].queue_size = + IPU6_ISYS_SIZE_SEND_QUEUE; + } + + for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) { + output_queue_cfg[i].token_size = + sizeof(struct ipu6_fw_proxy_resp_queue_token); + output_queue_cfg[i].queue_size = + IPU6_ISYS_SIZE_PROXY_RECV_QUEUE; + } + /* There is no recv DEV queue */ + for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) { + output_queue_cfg[base_msg_recv + i].token_size = + sizeof(struct ipu6_fw_resp_queue_token); + output_queue_cfg[base_msg_recv + i].queue_size = + IPU6_ISYS_SIZE_RECV_QUEUE; + } + + fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset; + fwcom->specific_addr = isys_fw_cfg; + fwcom->specific_size = sizeof(*isys_fw_cfg); + + return 0; +} + +int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams) +{ + struct device *dev = &isys->adev->auxdev.dev; + int retry = IPU6_ISYS_OPEN_RETRY; + struct ipu6_fw_com_cfg fwcom = { + .cell_start = start_sp, + .cell_ready = query_sp, + .buttress_boot_offset = 
SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET, + }; + int ret; + + ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams); + + isys->fwcom = ipu6_fw_com_prepare(&fwcom, isys->adev, + isys->pdata->base); + if (!isys->fwcom) { + dev_err(dev, "isys fw com prepare failed\n"); + return -EIO; + } + + ret = ipu6_fw_com_open(isys->fwcom); + if (ret) { + dev_err(dev, "isys fw com open failed %d\n", ret); + return ret; + } + + do { + usleep_range(400, 500); + if (ipu6_fw_com_ready(isys->fwcom)) + break; + retry--; + } while (retry > 0); + + if (!retry) { + dev_err(dev, "isys port open ready failed %d\n", ret); + ipu6_fw_isys_close(isys); + ret = -EIO; + } + + return ret; +} + +struct ipu6_fw_isys_resp_info_abi * +ipu6_fw_isys_get_resp(void *context, unsigned int queue) +{ + return ipu6_recv_get_token(context, queue); +} + +void ipu6_fw_isys_put_resp(void *context, unsigned int queue) +{ + ipu6_recv_put_token(context, queue); +} + +void ipu6_fw_isys_dump_stream_cfg(struct device *dev, + struct ipu6_fw_isys_stream_cfg_data_abi *cfg) +{ + unsigned int i; + + dev_dbg(dev, "-----------------------------------------------------\n"); + dev_dbg(dev, "IPU6_FW_ISYS_STREAM_CFG_DATA\n"); + + dev_dbg(dev, "compfmt = %d\n", cfg->vc); + dev_dbg(dev, "src = %d\n", cfg->src); + dev_dbg(dev, "vc = %d\n", cfg->vc); + dev_dbg(dev, "isl_use = %d\n", cfg->isl_use); + dev_dbg(dev, "sensor_type = %d\n", cfg->sensor_type); + + dev_dbg(dev, "send_irq_sof_discarded = %d\n", + cfg->send_irq_sof_discarded); + dev_dbg(dev, "send_irq_eof_discarded = %d\n", + cfg->send_irq_eof_discarded); + dev_dbg(dev, "send_resp_sof_discarded = %d\n", + cfg->send_resp_sof_discarded); + dev_dbg(dev, "send_resp_eof_discarded = %d\n", + cfg->send_resp_eof_discarded); + + dev_dbg(dev, "crop:\n"); + dev_dbg(dev, "\t.left_top = [%d, %d]\n", cfg->crop.left_offset, + cfg->crop.top_offset); + dev_dbg(dev, "\t.right_bottom = [%d, %d]\n", cfg->crop.right_offset, + cfg->crop.bottom_offset); + + dev_dbg(dev, "nof_input_pins = %d\n", 
cfg->nof_input_pins); + for (i = 0; i < cfg->nof_input_pins; i++) { + dev_dbg(dev, "input pin[%d]:\n", i); + dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt); + dev_dbg(dev, "\t.mipi_store_mode = %d\n", + cfg->input_pins[i].mipi_store_mode); + dev_dbg(dev, "\t.bits_per_pix = %d\n", + cfg->input_pins[i].bits_per_pix); + dev_dbg(dev, "\t.mapped_dt = 0x%0x\n", + cfg->input_pins[i].mapped_dt); + dev_dbg(dev, "\t.input_res = %dx%d\n", + cfg->input_pins[i].input_res.width, + cfg->input_pins[i].input_res.height); + dev_dbg(dev, "\t.mipi_decompression = %d\n", + cfg->input_pins[i].mipi_decompression); + dev_dbg(dev, "\t.capture_mode = %d\n", + cfg->input_pins[i].capture_mode); + } + + dev_dbg(dev, "nof_output_pins = %d\n", cfg->nof_output_pins); + for (i = 0; i < cfg->nof_output_pins; i++) { + dev_dbg(dev, "output_pin[%d]:\n", i); + dev_dbg(dev, "\t.input_pin_id = %d\n", + cfg->output_pins[i].input_pin_id); + dev_dbg(dev, "\t.output_res = %dx%d\n", + cfg->output_pins[i].output_res.width, + cfg->output_pins[i].output_res.height); + dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride); + dev_dbg(dev, "\t.pt = %d\n", cfg->output_pins[i].pt); + dev_dbg(dev, "\t.payload_buf_size = %d\n", + cfg->output_pins[i].payload_buf_size); + dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft); + dev_dbg(dev, "\t.watermark_in_lines = %d\n", + cfg->output_pins[i].watermark_in_lines); + dev_dbg(dev, "\t.send_irq = %d\n", + cfg->output_pins[i].send_irq); + dev_dbg(dev, "\t.reserve_compression = %d\n", + cfg->output_pins[i].reserve_compression); + dev_dbg(dev, "\t.snoopable = %d\n", + cfg->output_pins[i].snoopable); + dev_dbg(dev, "\t.error_handling_enable = %d\n", + cfg->output_pins[i].error_handling_enable); + dev_dbg(dev, "\t.sensor_type = %d\n", + cfg->output_pins[i].sensor_type); + } + dev_dbg(dev, "-----------------------------------------------------\n"); +} + +void +ipu6_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu6_fw_isys_frame_buff_set_abi *buf, + 
unsigned int outputs) +{ + unsigned int i; + + dev_dbg(dev, "-----------------------------------------------------\n"); + dev_dbg(dev, "IPU6_FW_ISYS_FRAME_BUFF_SET\n"); + + for (i = 0; i < outputs; i++) { + dev_dbg(dev, "output_pin[%d]:\n", i); + dev_dbg(dev, "\t.out_buf_id = %llu\n", + buf->output_pins[i].out_buf_id); + dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr); + dev_dbg(dev, "\t.compress = %d\n", + buf->output_pins[i].compress); + } + + dev_dbg(dev, "send_irq_sof = 0x%x\n", buf->send_irq_sof); + dev_dbg(dev, "send_irq_eof = 0x%x\n", buf->send_irq_eof); + dev_dbg(dev, "send_resp_sof = 0x%x\n", buf->send_resp_sof); + dev_dbg(dev, "send_resp_eof = 0x%x\n", buf->send_resp_eof); + dev_dbg(dev, "send_irq_capture_ack = 0x%x\n", + buf->send_irq_capture_ack); + dev_dbg(dev, "send_irq_capture_done = 0x%x\n", + buf->send_irq_capture_done); + dev_dbg(dev, "send_resp_capture_ack = 0x%x\n", + buf->send_resp_capture_ack); + dev_dbg(dev, "send_resp_capture_done = 0x%x\n", + buf->send_resp_capture_done); + + dev_dbg(dev, "-----------------------------------------------------\n"); +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h new file mode 100644 index 0000000..b60f020 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h @@ -0,0 +1,596 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_FW_ISYS_H +#define IPU6_FW_ISYS_H + +#include + +struct device; +struct ipu6_isys; + +/* Max number of Input/Output Pins */ +#define IPU6_MAX_IPINS 4 + +#define IPU6_MAX_OPINS ((IPU6_MAX_IPINS) + 1) + +#define IPU6_STREAM_ID_MAX 16 +#define IPU6_NONSECURE_STREAM_ID_MAX 12 +#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX) +#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX) +#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX) +#define IPU6SE_STREAM_ID_MAX 8 +#define IPU6SE_NONSECURE_STREAM_ID_MAX 4 +#define 
IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX) +#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX) +#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX) + +/* Single return queue for all streams/commands type */ +#define IPU6_N_MAX_MSG_RECV_QUEUES 1 +/* Single device queue for high priority commands (bypass in-order queue) */ +#define IPU6_N_MAX_DEV_SEND_QUEUES 1 +/* Single dedicated send queue for proxy interface */ +#define IPU6_N_MAX_PROXY_SEND_QUEUES 1 +/* Single dedicated recv queue for proxy interface */ +#define IPU6_N_MAX_PROXY_RECV_QUEUES 1 +/* Send queues layout */ +#define IPU6_BASE_PROXY_SEND_QUEUES 0 +#define IPU6_BASE_DEV_SEND_QUEUES \ + (IPU6_BASE_PROXY_SEND_QUEUES + IPU6_N_MAX_PROXY_SEND_QUEUES) +#define IPU6_BASE_MSG_SEND_QUEUES \ + (IPU6_BASE_DEV_SEND_QUEUES + IPU6_N_MAX_DEV_SEND_QUEUES) +/* Recv queues layout */ +#define IPU6_BASE_PROXY_RECV_QUEUES 0 +#define IPU6_BASE_MSG_RECV_QUEUES \ + (IPU6_BASE_PROXY_RECV_QUEUES + IPU6_N_MAX_PROXY_RECV_QUEUES) +#define IPU6_N_MAX_RECV_QUEUES \ + (IPU6_BASE_MSG_RECV_QUEUES + IPU6_N_MAX_MSG_RECV_QUEUES) + +#define IPU6_N_MAX_SEND_QUEUES \ + (IPU6_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES) +#define IPU6SE_N_MAX_SEND_QUEUES \ + (IPU6_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES) + +/* Max number of planes for frame formats supported by the FW */ +#define IPU6_PIN_PLANES_MAX 4 + +#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 +#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 +#define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 +#define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 +/* + * Device close takes some time from last ack message to actual stopping + * of the SP processor. As long as the SP processor runs we can't proceed with + * clean up of resources. 
+ */ +#define IPU6_ISYS_OPEN_RETRY 2000 +#define IPU6_ISYS_CLOSE_RETRY 2000 +#define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) + +enum ipu6_fw_isys_resp_type { + IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, + IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, + IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, + IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, + IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, + IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, + IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, + IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, + IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, + IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, + IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, + N_IPU6_FW_ISYS_RESP_TYPE +}; + +enum ipu6_fw_isys_send_type { + IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, + IPU6_FW_ISYS_SEND_TYPE_STREAM_START, + IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, + IPU6_FW_ISYS_SEND_TYPE_STREAM_STOP, + IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE, + N_IPU6_FW_ISYS_SEND_TYPE +}; + +enum ipu6_fw_isys_queue_type { + IPU6_FW_ISYS_QUEUE_TYPE_PROXY = 0, + IPU6_FW_ISYS_QUEUE_TYPE_DEV, + IPU6_FW_ISYS_QUEUE_TYPE_MSG, + N_IPU6_FW_ISYS_QUEUE_TYPE +}; + +enum ipu6_fw_isys_stream_source { + IPU6_FW_ISYS_STREAM_SRC_PORT_0 = 0, + IPU6_FW_ISYS_STREAM_SRC_PORT_1, + IPU6_FW_ISYS_STREAM_SRC_PORT_2, + IPU6_FW_ISYS_STREAM_SRC_PORT_3, + IPU6_FW_ISYS_STREAM_SRC_PORT_4, + IPU6_FW_ISYS_STREAM_SRC_PORT_5, + IPU6_FW_ISYS_STREAM_SRC_PORT_6, + IPU6_FW_ISYS_STREAM_SRC_PORT_7, + IPU6_FW_ISYS_STREAM_SRC_PORT_8, + IPU6_FW_ISYS_STREAM_SRC_PORT_9, + IPU6_FW_ISYS_STREAM_SRC_PORT_10, + IPU6_FW_ISYS_STREAM_SRC_PORT_11, + 
IPU6_FW_ISYS_STREAM_SRC_PORT_12, + IPU6_FW_ISYS_STREAM_SRC_PORT_13, + IPU6_FW_ISYS_STREAM_SRC_PORT_14, + IPU6_FW_ISYS_STREAM_SRC_PORT_15, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_2, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_3, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_4, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_5, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_6, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_7, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_8, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_9, + N_IPU6_FW_ISYS_STREAM_SRC +}; + +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU6_FW_ISYS_STREAM_SRC_PORT_0 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU6_FW_ISYS_STREAM_SRC_PORT_1 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU6_FW_ISYS_STREAM_SRC_PORT_2 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU6_FW_ISYS_STREAM_SRC_PORT_3 + +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU6_FW_ISYS_STREAM_SRC_PORT_4 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU6_FW_ISYS_STREAM_SRC_PORT_5 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_6 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_7 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_8 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_9 + +#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0 +#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1 + +/* + * enum ipu6_fw_isys_mipi_vc: MIPI csi2 spec + * supports up to 4 virtual per physical channel + */ +enum ipu6_fw_isys_mipi_vc { + IPU6_FW_ISYS_MIPI_VC_0 = 0, + IPU6_FW_ISYS_MIPI_VC_1, + IPU6_FW_ISYS_MIPI_VC_2, + IPU6_FW_ISYS_MIPI_VC_3, + N_IPU6_FW_ISYS_MIPI_VC +}; + +enum ipu6_fw_isys_frame_format_type { + IPU6_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ + IPU6_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ + 
IPU6_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ + /* 12 bit YUV 420, Intel proprietary tiled format */ + IPU6_FW_ISYS_FRAME_FORMAT_NV12_TILEY, + + IPU6_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ + IPU6_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ + IPU6_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ + IPU6_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ + /* Internal format, 2 y lines followed by a uvinterleaved line */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV_LINE, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + /** + * 16 bit RGB, 1 plane. Each 3 sub pixels are packed into one 16 bit + * value, 5 bits for R, 6 bits for G and 5 bits for B. 
+ */ + IPU6_FW_ISYS_FRAME_FORMAT_RGB565, + IPU6_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IPU6_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha */ + IPU6_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ + IPU6_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ + N_IPU6_FW_ISYS_FRAME_FORMAT +}; + +enum ipu6_fw_isys_pin_type { + /* captured as MIPI packets */ + IPU6_FW_ISYS_PIN_TYPE_MIPI = 0, + /* captured through the SoC path */ + IPU6_FW_ISYS_PIN_TYPE_RAW_SOC = 3, +}; + +/* + * enum ipu6_fw_isys_mipi_store_mode. Describes if long MIPI packets reach + * MIPI SRAM with the long packet header or + * if not, then only option is to capture it with pin type MIPI. + */ +enum ipu6_fw_isys_mipi_store_mode { + IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, + IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, + N_IPU6_FW_ISYS_MIPI_STORE_MODE +}; + +enum ipu6_fw_isys_capture_mode { + IPU6_FW_ISYS_CAPTURE_MODE_REGULAR = 0, + IPU6_FW_ISYS_CAPTURE_MODE_BURST, + N_IPU6_FW_ISYS_CAPTURE_MODE, +}; + +enum ipu6_fw_isys_sensor_mode { + IPU6_FW_ISYS_SENSOR_MODE_NORMAL = 0, + IPU6_FW_ISYS_SENSOR_MODE_TOBII, + N_IPU6_FW_ISYS_SENSOR_MODE, +}; + +enum ipu6_fw_isys_error { + IPU6_FW_ISYS_ERROR_NONE = 0, + IPU6_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, + IPU6_FW_ISYS_ERROR_HW_CONSISTENCY, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, + IPU6_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, + IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, + IPU6_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, + IPU6_FW_ISYS_ERROR_SENSOR_FW_SYNC, + IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, + IPU6_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, + N_IPU6_FW_ISYS_ERROR +}; + +enum ipu6_fw_proxy_error { + IPU6_FW_PROXY_ERROR_NONE = 0, + IPU6_FW_PROXY_ERROR_INVALID_WRITE_REGION, + IPU6_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, + 
N_IPU6_FW_PROXY_ERROR +}; + +/* firmware ABI structure below are aligned in firmware, no need pack */ +struct ipu6_fw_isys_buffer_partition_abi { + u32 num_gda_pages[IPU6_STREAM_ID_MAX]; +}; + +struct ipu6_fw_isys_fw_config { + struct ipu6_fw_isys_buffer_partition_abi buffer_partition; + u32 num_send_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; + u32 num_recv_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; +}; + +/* + * struct ipu6_fw_isys_resolution_abi: Generic resolution structure. + */ +struct ipu6_fw_isys_resolution_abi { + u32 width; + u32 height; +}; + +/** + * struct ipu6_fw_isys_output_pin_payload_abi - ISYS output pin buffer + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + */ +struct ipu6_fw_isys_output_pin_payload_abi { + u64 out_buf_id; + u32 addr; + u32 compress; +}; + +/** + * struct ipu6_fw_isys_output_pin_info_abi - ISYS output pin info + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: minimum size in Bytes of all buffers that will be + * supplied for capture on this pin + * @ts_offsets: ts_offsets + * @s2m_pixel_soc_pixel_remapping: pixel soc remapping (see the definition of + * S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING) + * @csi_be_soc_pixel_remapping: see s2m_pixel_soc_pixel_remapping + * @send_irq: assert if pin event should trigger irq + * @input_pin_id: related input pin id + * @pt: pin type -real format "enum ipu6_fw_isys_pin_type" + * @ft: frame format type -real format "enum ipu6_fw_isys_frame_format_type" + * @reserved: a reserved field + * @reserve_compression: reserve compression resources for pin + * @snoopable: snoopable + * @error_handling_enable: enable error handling + * @sensor_type: sensor_type + */ +struct ipu6_fw_isys_output_pin_info_abi { + struct ipu6_fw_isys_resolution_abi 
output_res; + u32 stride; + u32 watermark_in_lines; + u32 payload_buf_size; + u32 ts_offsets[IPU6_PIN_PLANES_MAX]; + u32 s2m_pixel_soc_pixel_remapping; + u32 csi_be_soc_pixel_remapping; + u8 send_irq; + u8 input_pin_id; + u8 pt; + u8 ft; + u8 reserved; + u8 reserve_compression; + u8 snoopable; + u8 error_handling_enable; + u32 sensor_type; +}; + +/** + * struct ipu6_fw_isys_input_pin_info_abi - ISYS input pin info + * @input_res: input resolution + * @dt: mipi data type ((enum ipu6_fw_isys_mipi_data_type) + * @mipi_store_mode: defines if legacy long packet header will be stored or + * discarded if discarded, output pin type for this + * input pin can only be MIPI + * (enum ipu6_fw_isys_mipi_store_mode) + * @bits_per_pix: native bits per pixel + * @mapped_dt: actual data type from sensor + * @mipi_decompression: defines which compression will be in mipi backend + * @crop_first_and_last_lines: Control whether to crop the first and last line + * of the input image. Crop done by HW device. + * @capture_mode: mode of capture, regular or burst, default value is regular + * @reserved: a reserved field + */ +struct ipu6_fw_isys_input_pin_info_abi { + struct ipu6_fw_isys_resolution_abi input_res; + u8 dt; + u8 mipi_store_mode; + u8 bits_per_pix; + u8 mapped_dt; + u8 mipi_decompression; + u8 crop_first_and_last_lines; + u8 capture_mode; + u8 reserved; +}; + +/** + * struct ipu6_fw_isys_cropping_abi - ISYS cropping coordinates + * @top_offset: Top offset + * @left_offset: Left offset + * @bottom_offset: Bottom offset + * @right_offset: Right offset + */ +struct ipu6_fw_isys_cropping_abi { + s32 top_offset; + s32 left_offset; + s32 bottom_offset; + s32 right_offset; +}; + +/** + * struct ipu6_fw_isys_stream_cfg_data_abi - ISYS stream configuration data + * ISYS stream configuration data structure + * @crop: for extended use and is not used in FW currently + * @input_pins: input pin descriptors + * @output_pins: output pin descriptors + * @compfmt: de-compression setting for 
User Defined Data + * @nof_input_pins: number of input pins + * @nof_output_pins: number of output pins + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded + * and send the response + * - if '0' the send_resp_sof_discarded will determine + * whether to send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded + * and send the response + * - if '0' the send_resp_eof_discarded will determine + * whether to send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + * @sensor_type: type of connected sensor, tobii or others, default is 0 + * @reserved: a reserved field + * @reserved2: a reserved field + */ +struct ipu6_fw_isys_stream_cfg_data_abi { + struct ipu6_fw_isys_cropping_abi crop; + struct ipu6_fw_isys_input_pin_info_abi input_pins[IPU6_MAX_IPINS]; + struct ipu6_fw_isys_output_pin_info_abi output_pins[IPU6_MAX_OPINS]; + u32 compfmt; + u8 nof_input_pins; + u8 nof_output_pins; + u8 send_irq_sof_discarded; + u8 send_irq_eof_discarded; + u8 send_resp_sof_discarded; + u8 send_resp_eof_discarded; + u8 src; + u8 vc; + u8 isl_use; + u8 sensor_type; + u8 reserved; + u8 reserved2; +}; + +/** + * struct ipu6_fw_isys_frame_buff_set_abi - ISYS frame buffer set (request) + * @output_pins: output pin addresses + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and + * send the response + * - if '0' the send_resp_sof will determine whether to + * send the response + * 
@send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and + * send the response + * - if '0' the send_resp_eof will determine whether to + * send the response + * @send_irq_capture_ack: send irq on capture ack + * @send_irq_capture_done: send irq on capture done + * @send_resp_sof: send response for frame sof detected, + * used only when send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, + * used only when send_irq_eof is '0' + * @send_resp_capture_ack: send response for capture ack event + * @send_resp_capture_done: send response for capture done event + * @reserved: a reserved field + */ +struct ipu6_fw_isys_frame_buff_set_abi { + struct ipu6_fw_isys_output_pin_payload_abi output_pins[IPU6_MAX_OPINS]; + u8 send_irq_sof; + u8 send_irq_eof; + u8 send_irq_capture_ack; + u8 send_irq_capture_done; + u8 send_resp_sof; + u8 send_resp_eof; + u8 send_resp_capture_ack; + u8 send_resp_capture_done; + u8 reserved[8]; +}; + +/** + * struct ipu6_fw_isys_error_info_abi - ISYS error information + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional error info + */ +struct ipu6_fw_isys_error_info_abi { + u32 error; + u32 error_details; +}; + +/** + * struct ipu6_fw_isys_resp_info_abi - ISYS firmware response + * @buf_id: buffer ID + * @pin: this var is only valid for pin event related responses, + * contains pin addresses + * @error_info: error information from the FW + * @timestamp: Time information for event if available + * @stream_handle: stream id the response corresponds to + * @type: response type (enum ipu6_fw_isys_resp_type) + * @pin_id: pin id that the pin payload corresponds to + * @reserved: a reserved field + * @reserved2: a reserved field + */ +struct ipu6_fw_isys_resp_info_abi { + u64 buf_id; + struct ipu6_fw_isys_output_pin_payload_abi pin; + struct ipu6_fw_isys_error_info_abi error_info; + u32 timestamp[2]; + u8 stream_handle; + u8 
type; + u8 pin_id; + u8 reserved; + u32 reserved2; +}; + +/** + * struct ipu6_fw_isys_proxy_error_info_abi - ISYS proxy error + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional error info + */ +struct ipu6_fw_isys_proxy_error_info_abi { + u32 error; + u32 error_details; +}; + +struct ipu6_fw_isys_proxy_resp_info_abi { + u32 request_id; + struct ipu6_fw_isys_proxy_error_info_abi error_info; +}; + +/** + * struct ipu6_fw_proxy_write_queue_token - ISYS proxy write queue token + * @request_id: update id for the specific proxy write request + * @region_index: Region id for the proxy write request + * @offset: Offset of the write request according to the base address + * of the region + * @value: Value that is requested to be written with the proxy write request + */ +struct ipu6_fw_proxy_write_queue_token { + u32 request_id; + u32 region_index; + u32 offset; + u32 value; +}; + +/** + * struct ipu6_fw_resp_queue_token - ISYS response queue token + * @resp_info: response info + */ +struct ipu6_fw_resp_queue_token { + struct ipu6_fw_isys_resp_info_abi resp_info; +}; + +/** + * struct ipu6_fw_send_queue_token - ISYS send queue token + * @buf_handle: buffer handle + * @payload: payload + * @send_type: send_type + * @stream_id: stream_id + */ +struct ipu6_fw_send_queue_token { + u64 buf_handle; + u32 payload; + u16 send_type; + u16 stream_id; +}; + +/** + * struct ipu6_fw_proxy_resp_queue_token - ISYS proxy response queue token + * @proxy_resp_info: proxy response info + */ +struct ipu6_fw_proxy_resp_queue_token { + struct ipu6_fw_isys_proxy_resp_info_abi proxy_resp_info; +}; + +/** + * struct ipu6_fw_proxy_send_queue_token - SYS proxy send queue token + * @request_id: request_id + * @region_index: region_index + * @offset: offset + * @value: value + */ +struct ipu6_fw_proxy_send_queue_token { + u32 request_id; + u32 region_index; + u32 offset; + u32 value; +}; + +void +ipu6_fw_isys_dump_stream_cfg(struct 
device *dev, + struct ipu6_fw_isys_stream_cfg_data_abi *cfg); +void +ipu6_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu6_fw_isys_frame_buff_set_abi *buf, + unsigned int outputs); +int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams); +int ipu6_fw_isys_close(struct ipu6_isys *isys); +int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, u16 send_type); +int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, + size_t size, u16 send_type); +int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned int offset, u32 value); +void ipu6_fw_isys_cleanup(struct ipu6_isys *isys); +struct ipu6_fw_isys_resp_info_abi * +ipu6_fw_isys_get_resp(void *context, unsigned int queue); +void ipu6_fw_isys_put_resp(void *context, unsigned int queue); +#endif diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c new file mode 100644 index 0000000..6030bd2 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-isys-subdev.h" +#include "ipu6-platform-isys-csi2-reg.h" + +static const u32 csi2_supported_codes[] = { + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + 
MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_META_8, + MEDIA_BUS_FMT_META_10, + MEDIA_BUS_FMT_META_12, + MEDIA_BUS_FMT_META_16, + MEDIA_BUS_FMT_META_24, + 0 +}; + +/* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. + */ +static const struct ipu6_csi2_error dphy_rx_errors[] = { + { "Single packet header error corrected", true }, + { "Multiple packet header errors detected", true }, + { "Payload checksum (CRC) error", true }, + { "Transfer FIFO overflow", false }, + { "Reserved short packet data type detected", true }, + { "Reserved long packet data type detected", true }, + { "Incomplete long packet detected", false }, + { "Frame sync error", false }, + { "Line sync error", false }, + { "DPHY recoverable synchronization error", true }, + { "DPHY fatal error", false }, + { "DPHY elastic FIFO overflow", false }, + { "Inter-frame short packet discarded", true }, + { "Inter-frame long packet discarded", true }, + { "MIPI pktgen overflow", false }, + { "MIPI pktgen data loss", false }, + { "FIFO overflow", false }, + { "Lane deskew", false }, + { "SOT sync error", false }, + { "HSIDLE detected", false } +}; + +s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2) +{ + struct media_pad *src_pad; + + if (!csi2) + return -EINVAL; + + src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); + if (IS_ERR(src_pad)) { + dev_err(&csi2->isys->adev->auxdev.dev, + "can't get source pad of %s (%ld)\n", + csi2->asd.sd.name, PTR_ERR(src_pad)); + return PTR_ERR(src_pad); + } + + return v4l2_get_link_freq(src_pad, 0, 0); +} + +static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct device *dev = 
&csi2->isys->adev->auxdev.dev; + + dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n", + sub->type, sub->id); + + switch (sub->type) { + case V4L2_EVENT_FRAME_SYNC: + return v4l2_event_subscribe(fh, sub, 10, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { + .subscribe_event = csi2_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +/* + * The input system CSI2+ receiver has several + * parameters affecting the receiver timings. These depend + * on the MIPI bus frequency F in Hz (sensor transmitter rate) + * as follows: + * register value = (A/1e9 + B * UI) / COUNT_ACC + * where + * UI = 1 / (2 * F) in seconds + * COUNT_ACC = counter accuracy in seconds + * COUNT_ACC = 0.125 ns = 1 / 8 ns, ACCINV = 8. + * + * A and B are coefficients from the table below, + * depending whether the register minimum or maximum value is + * calculated. + * Minimum Maximum + * Clock lane A B A B + * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 + * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 + * Data lanes + * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 + * + * We use the minimum values of both A and B. 
+ */ + +#define DIV_SHIFT 8 +#define CSI2_ACCINV 8 + +static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv) +{ + return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) + / (s32)(link_freq >> DIV_SHIFT)); +} + +static int +ipu6_isys_csi2_calc_timing(struct ipu6_isys_csi2 *csi2, + struct ipu6_isys_csi2_timing *timing, s32 accinv) +{ + struct device *dev = &csi2->isys->adev->auxdev.dev; + s64 link_freq; + + link_freq = ipu6_isys_csi2_get_link_freq(csi2); + if (link_freq < 0) + return link_freq; + + timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, + link_freq, accinv); + timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, + link_freq, accinv); + timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, + link_freq, accinv); + timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, + link_freq, accinv); + + dev_dbg(dev, "ctermen %u csettle %u dtermen %u dsettle %u\n", + timing->ctermen, timing->csettle, + timing->dtermen, timing->dsettle); + + return 0; +} + +void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2) +{ + u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + struct ipu6_isys *isys = csi2->isys; + u32 mask; + + mask = isys->pdata->ipdata->csi2.irq_mask; + writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + csi2->receiver_errors |= irq & mask; +} + +void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2) +{ + struct device *dev = &csi2->isys->adev->auxdev.dev; + const struct ipu6_csi2_error *errors; + u32 status; + u32 i; + + /* register errors once more in case of interrupts are disabled */ + ipu6_isys_register_errors(csi2); + status = csi2->receiver_errors; + csi2->receiver_errors = 0; + errors = dphy_rx_errors; + + for (i = 0; i < 
CSI_RX_NUM_ERRORS_IN_IRQ; i++) { + if (status & BIT(i)) + dev_err_ratelimited(dev, "csi2-%i error: %s\n", + csi2->port, errors[i].error_string); + } +} + +static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, + const struct ipu6_isys_csi2_timing *timing, + unsigned int nlanes, int enable) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct ipu6_isys *isys = csi2->isys; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_isys_csi2_config cfg; + unsigned int nports; + int ret = 0; + u32 mask = 0; + u32 i; + + dev_dbg(dev, "stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off", + csi2->port, nlanes); + + cfg.port = csi2->port; + cfg.nlanes = nlanes; + + mask = isys->pdata->ipdata->csi2.irq_mask; + nports = isys->pdata->ipdata->csi2.nports; + + if (!enable) { + writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE); + writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE); + + writel(0, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + writel(0, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + writel(0xffffffff, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + + isys->phy_set_power(isys, &cfg, timing, false); + + writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT + (isys->pdata->ipdata->csi2.fw_access_port_ofs, + csi2->port)); + writel(0, isys->pdata->base + + CSI_REG_HUB_DRV_ACCESS_PORT(csi2->port)); + + return ret; + } + + /* reset port reset */ + writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST); + usleep_range(100, 200); + writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST); + + /* enable port clock */ + for (i = 0; i < nports; i++) { + writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i)); + writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT + 
(isys->pdata->ipdata->csi2.fw_access_port_ofs, i)); + } + + /* enable all error related irq */ + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + + /* + * Using event from firmware instead of irq to handle CSI2 sync event + * which can reduce system wakeups. If CSI2 sync irq enabled, we need + * disable the firmware CSI2 sync event to avoid duplicate handling. + */ + writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); + writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); + writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + + /* configure to enable FE and PPI2CSI */ + writel(0, csi2->base + CSI_REG_CSI_FE_MODE); + writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL); + writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID, + csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL); + writel(FIELD_PREP(PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK, nlanes - 1), + csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF); + + writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE); + writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE); + + ret = isys->phy_set_power(isys, &cfg, timing, true); + if (ret) + dev_err(dev, "csi-%d phy power up failed %d\n", csi2->port, + ret); + + return ret; +} + +static int 
ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct ipu6_isys_csi2_timing timing = { }; + struct v4l2_subdev *remote_sd; + struct media_pad *remote_pad; + u64 sink_streams; + int ret; + + remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + + sink_streams = + v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, + &streams_mask); + + ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); + if (ret) + return ret; + + ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); + if (ret) + return ret; + + ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, + sink_streams); + if (ret) { + ipu6_isys_csi2_set_stream(sd, NULL, 0, false); + return ret; + } + + return 0; +} + +static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + struct v4l2_subdev *remote_sd; + struct media_pad *remote_pad; + u64 sink_streams; + + sink_streams = + v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, + &streams_mask); + + remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + + ipu6_isys_csi2_set_stream(sd, NULL, 0, false); + + v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); + + return 0; +} + +static int ipu6_isys_csi2_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct device *dev = &asd->isys->adev->auxdev.dev; + struct v4l2_mbus_framefmt *sink_ffmt; + struct v4l2_mbus_framefmt *src_ffmt; + struct v4l2_rect *crop; + + if (sel->pad == CSI2_PAD_SINK || sel->target != 
V4L2_SEL_TGT_CROP) + return -EINVAL; + + sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, + sel->pad, + sel->stream); + if (!sink_ffmt) + return -EINVAL; + + src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream); + if (!src_ffmt) + return -EINVAL; + + crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); + if (!crop) + return -EINVAL; + + /* Only vertical cropping is supported */ + sel->r.left = 0; + sel->r.width = sink_ffmt->width; + /* Non-bayer formats can't be single line cropped */ + if (!ipu6_isys_is_bayer_format(sink_ffmt->code)) + sel->r.top &= ~1; + sel->r.height = clamp(sel->r.height & ~1, IPU6_ISYS_MIN_HEIGHT, + sink_ffmt->height - sel->r.top); + *crop = sel->r; + + /* update source pad format */ + src_ffmt->width = sel->r.width; + src_ffmt->height = sel->r.height; + if (ipu6_isys_is_bayer_format(sink_ffmt->code)) + src_ffmt->code = ipu6_isys_convert_bayer_order(sink_ffmt->code, + sel->r.left, + sel->r.top); + dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n", + sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height, + src_ffmt->code); + + return 0; +} + +static int ipu6_isys_csi2_get_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + struct v4l2_mbus_framefmt *sink_ffmt; + struct v4l2_rect *crop; + int ret = 0; + + if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK) + return -EINVAL; + + sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, + sel->pad, + sel->stream); + if (!sink_ffmt) + return -EINVAL; + + crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); + if (!crop) + return -EINVAL; + + switch (sel->target) { + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + sel->r.left = 0; + sel->r.top = 0; + sel->r.width = sink_ffmt->width; + sel->r.height = sink_ffmt->height; + break; + case V4L2_SEL_TGT_CROP: + sel->r = *crop; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static 
const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { + .get_fmt = v4l2_subdev_get_fmt, + .set_fmt = ipu6_isys_subdev_set_fmt, + .get_selection = ipu6_isys_csi2_get_sel, + .set_selection = ipu6_isys_csi2_set_sel, + .enum_mbus_code = ipu6_isys_subdev_enum_mbus_code, + .set_routing = ipu6_isys_subdev_set_routing, + .enable_streams = ipu6_isys_csi2_enable_streams, + .disable_streams = ipu6_isys_csi2_disable_streams, +}; + +static const struct v4l2_subdev_ops csi2_sd_ops = { + .core = &csi2_sd_core_ops, + .pad = &csi2_sd_pad_ops, +}; + +static const struct media_entity_operations csi2_entity_ops = { + .link_validate = v4l2_subdev_link_validate, + .has_pad_interdep = v4l2_subdev_has_pad_interdep, +}; + +void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2) +{ + if (!csi2->isys) + return; + + v4l2_device_unregister_subdev(&csi2->asd.sd); + v4l2_subdev_cleanup(&csi2->asd.sd); + ipu6_isys_subdev_cleanup(&csi2->asd); + csi2->isys = NULL; +} + +int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, + struct ipu6_isys *isys, + void __iomem *base, unsigned int index) +{ + struct device *dev = &isys->adev->auxdev.dev; + int ret; + + csi2->isys = isys; + csi2->base = base; + csi2->port = index; + + csi2->asd.sd.entity.ops = &csi2_entity_ops; + csi2->asd.isys = isys; + ret = ipu6_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, + NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS); + if (ret) + goto fail; + + csi2->asd.source = IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index; + csi2->asd.supported_codes = csi2_supported_codes; + snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), + IPU6_ISYS_ENTITY_PREFIX " CSI2 %u", index); + v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); + ret = v4l2_subdev_init_finalize(&csi2->asd.sd); + if (ret) { + dev_err(dev, "failed to init v4l2 subdev\n"); + goto fail; + } + + ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); + if (ret) { + dev_err(dev, "failed to register v4l2 subdev\n"); + goto fail; + } + + return 0; + +fail: + 
ipu6_isys_csi2_cleanup(csi2); + + return ret; +} + +void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) +{ + struct video_device *vdev = stream->asd->sd.devnode; + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + struct v4l2_event ev = { + .type = V4L2_EVENT_FRAME_SYNC, + }; + + ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); + v4l2_event_queue(vdev, &ev); + + dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n", + csi2->port, ev.u.frame_sync.frame_sequence, stream->vc); +} + +void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) +{ + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + u32 frame_sequence = atomic_read(&stream->sequence); + + dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n", + csi2->port, frame_sequence); +} + +int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, + struct v4l2_mbus_frame_desc_entry *entry) +{ + struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; + struct device *dev = &csi2->isys->adev->auxdev.dev; + struct v4l2_mbus_frame_desc desc; + struct v4l2_subdev *source; + struct media_pad *pad; + unsigned int i; + int ret; + + source = media_entity_to_v4l2_subdev(source_entity); + if (!source) + return -EPIPE; + + pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]); + if (!pad) + return -EPIPE; + + ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc); + if (ret) + return ret; + + if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { + dev_err(dev, "Unsupported frame descriptor type\n"); + return -EINVAL; + } + + for (i = 0; i < desc.num_entries; i++) { + if (source_stream == desc.entry[i].stream) { + desc_entry = &desc.entry[i]; + break; + } + } + + if (!desc_entry) { + dev_err(dev, "Failed to find stream %u from remote 
subdev\n", + source_stream); + return -EINVAL; + } + + if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) { + dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc); + return -EINVAL; + } + + *entry = *desc_entry; + + return 0; +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h new file mode 100644 index 0000000..ce8eed9 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_CSI2_H +#define IPU6_ISYS_CSI2_H + +#include + +#include "ipu6-isys-subdev.h" +#include "ipu6-isys-video.h" + +struct media_entity; +struct v4l2_mbus_frame_desc_entry; + +struct ipu6_isys_video; +struct ipu6_isys; +struct ipu6_isys_stream; + +#define NR_OF_CSI2_VC 16 +#define INVALID_VC_ID -1 +#define NR_OF_CSI2_SINK_PADS 1 +#define CSI2_PAD_SINK 0 +#define NR_OF_CSI2_SRC_PADS 8 +#define CSI2_PAD_SRC 1 +#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS) + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 + +struct ipu6_isys_csi2 { + struct ipu6_isys_subdev asd; + struct ipu6_isys *isys; + struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS]; + + void __iomem *base; + u32 receiver_errors; + unsigned int nlanes; + unsigned int port; +}; + +struct ipu6_isys_csi2_timing { + u32 ctermen; + u32 csettle; + u32 dtermen; + u32 dsettle; +}; + +struct ipu6_csi2_error { + const char *error_string; + bool is_info_only; +}; + +#define ipu6_isys_subdev_to_csi2(__sd) \ + container_of(__sd, struct ipu6_isys_csi2, asd) + +#define to_ipu6_isys_csi2(__asd) 
container_of(__asd, struct ipu6_isys_csi2, asd) + +s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2); +int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, struct ipu6_isys *isys, + void __iomem *base, unsigned int index); +void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2); +void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream); +void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream); +void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2); +void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); +int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, + struct v4l2_mbus_frame_desc_entry *entry); + +#endif /* IPU6_ISYS_CSI2_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c new file mode 100644 index 0000000..db28748 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-platform-isys-csi2-reg.h" + +#define IPU6_DWC_DPHY_BASE(i) (0x238038 + 0x34 * (i)) +#define IPU6_DWC_DPHY_RSTZ 0x00 +#define IPU6_DWC_DPHY_SHUTDOWNZ 0x04 +#define IPU6_DWC_DPHY_HSFREQRANGE 0x08 +#define IPU6_DWC_DPHY_CFGCLKFREQRANGE 0x0c +#define IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE 0x10 +#define IPU6_DWC_DPHY_TEST_IFC_REQ 0x14 +#define IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION 0x18 +#define IPU6_DWC_DPHY_DFT_CTRL0 0x28 +#define IPU6_DWC_DPHY_DFT_CTRL1 0x2c +#define IPU6_DWC_DPHY_DFT_CTRL2 0x30 + +/* + * test IFC request definition: + * - req: 0 for read, 1 for write + * - 12 bits address + * - 8bits data (will ignore for read) + * --24----16------4-----0 + * --|-data-|-addr-|-req-| + */ +#define IFC_REQ(req, addr, data) 
(FIELD_PREP(GENMASK(23, 16), data) | \ + FIELD_PREP(GENMASK(15, 4), addr) | \ + FIELD_PREP(GENMASK(1, 0), req)) + +#define TEST_IFC_REQ_READ 0 +#define TEST_IFC_REQ_WRITE 1 +#define TEST_IFC_REQ_RESET 2 + +#define TEST_IFC_ACCESS_MODE_FSM 0 +#define TEST_IFC_ACCESS_MODE_IFC_CTL 1 + +enum phy_fsm_state { + PHY_FSM_STATE_POWERON = 0, + PHY_FSM_STATE_BGPON = 1, + PHY_FSM_STATE_CAL_TYPE = 2, + PHY_FSM_STATE_BURNIN_CAL = 3, + PHY_FSM_STATE_TERMCAL = 4, + PHY_FSM_STATE_OFFSETCAL = 5, + PHY_FSM_STATE_OFFSET_LANE = 6, + PHY_FSM_STATE_IDLE = 7, + PHY_FSM_STATE_ULP = 8, + PHY_FSM_STATE_DDLTUNNING = 9, + PHY_FSM_STATE_SKEW_BACKWARD = 10, + PHY_FSM_STATE_INVALID, +}; + +static void dwc_dphy_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 data) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + + dev_dbg(dev, "write: reg 0x%zx = data 0x%x", base + addr - isys_base, + data); + writel(data, base + addr); +} + +static u32 dwc_dphy_read(struct ipu6_isys *isys, u32 phy_id, u32 addr) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + u32 data; + + data = readl(base + addr); + dev_dbg(dev, "read: reg 0x%zx = data 0x%x", base + addr - isys_base, + data); + + return data; +} + +static void dwc_dphy_write_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 data, u8 shift, u8 width) +{ + u32 temp; + u32 mask; + + mask = (1 << width) - 1; + temp = dwc_dphy_read(isys, phy_id, addr); + temp &= ~(mask << shift); + temp |= (data & mask) << shift; + dwc_dphy_write(isys, phy_id, addr, temp); +} + +static u32 __maybe_unused dwc_dphy_read_mask(struct ipu6_isys *isys, u32 phy_id, + u32 addr, u8 shift, u8 width) +{ + u32 val; + + val = dwc_dphy_read(isys, phy_id, addr) >> shift; + return val & ((1 << width) - 1); +} + +#define DWC_DPHY_TIMEOUT (5 * USEC_PER_SEC) 
+static int dwc_dphy_ifc_read(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 *val) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + void __iomem *reg; + u32 completion; + int ret; + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, + IFC_REQ(TEST_IFC_REQ_READ, addr, 0)); + reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; + ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), + 10, DWC_DPHY_TIMEOUT); + if (ret) { + dev_err(dev, "DWC ifc request read timeout\n"); + return ret; + } + + *val = completion >> 8 & 0xff; + *val = FIELD_GET(GENMASK(15, 8), completion); + dev_dbg(dev, "DWC ifc read 0x%x = 0x%x", addr, *val); + + return 0; +} + +static int dwc_dphy_ifc_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 data) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + void __iomem *reg; + u32 completion; + int ret; + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, + IFC_REQ(TEST_IFC_REQ_WRITE, addr, data)); + completion = readl(base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION); + reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; + ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), + 10, DWC_DPHY_TIMEOUT); + if (ret) + dev_err(dev, "DWC ifc request write timeout\n"); + + return ret; +} + +static void dwc_dphy_ifc_write_mask(struct ipu6_isys *isys, u32 phy_id, + u32 addr, u32 data, u8 shift, u8 width) +{ + u32 temp, mask; + int ret; + + ret = dwc_dphy_ifc_read(isys, phy_id, addr, &temp); + if (ret) + return; + + mask = (1 << width) - 1; + temp &= ~(mask << shift); + temp |= (data & mask) << shift; + dwc_dphy_ifc_write(isys, phy_id, addr, temp); +} + +static u32 dwc_dphy_ifc_read_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u8 shift, u8 width) +{ + int ret; + u32 val; + + ret = 
dwc_dphy_ifc_read(isys, phy_id, addr, &val); + if (ret) + return 0; + + return ((val >> shift) & ((1 << width) - 1)); +} + +static int dwc_dphy_pwr_up(struct ipu6_isys *isys, u32 phy_id) +{ + struct device *dev = &isys->adev->auxdev.dev; + u32 fsm_state; + int ret; + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 1); + usleep_range(10, 20); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 1); + + ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state, + (fsm_state == PHY_FSM_STATE_IDLE || + fsm_state == PHY_FSM_STATE_ULP), + 100, DWC_DPHY_TIMEOUT, false, isys, + phy_id, 0x1e, 0, 4); + + if (ret) + dev_err(dev, "Dphy %d power up failed, state 0x%x", phy_id, + fsm_state); + + return ret; +} + +struct dwc_dphy_freq_range { + u8 hsfreq; + u16 min; + u16 max; + u16 default_mbps; + u16 osc_freq_target; +}; + +#define DPHY_FREQ_RANGE_NUM (63) +#define DPHY_FREQ_RANGE_INVALID_INDEX (0xff) +static const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = { + {0x00, 80, 97, 80, 335}, + {0x10, 80, 107, 90, 335}, + {0x20, 84, 118, 100, 335}, + {0x30, 93, 128, 110, 335}, + {0x01, 103, 139, 120, 335}, + {0x11, 112, 149, 130, 335}, + {0x21, 122, 160, 140, 335}, + {0x31, 131, 170, 150, 335}, + {0x02, 141, 181, 160, 335}, + {0x12, 150, 191, 170, 335}, + {0x22, 160, 202, 180, 335}, + {0x32, 169, 212, 190, 335}, + {0x03, 183, 228, 205, 335}, + {0x13, 198, 244, 220, 335}, + {0x23, 212, 259, 235, 335}, + {0x33, 226, 275, 250, 335}, + {0x04, 250, 301, 275, 335}, + {0x14, 274, 328, 300, 335}, + {0x25, 297, 354, 325, 335}, + {0x35, 321, 380, 350, 335}, + {0x05, 369, 433, 400, 335}, + {0x16, 416, 485, 450, 335}, + {0x26, 464, 538, 500, 335}, + {0x37, 511, 590, 550, 335}, + {0x07, 559, 643, 600, 335}, + {0x18, 606, 695, 650, 335}, + {0x28, 654, 748, 700, 335}, + {0x39, 701, 800, 750, 335}, + {0x09, 749, 853, 800, 335}, + {0x19, 796, 905, 850, 335}, + {0x29, 844, 958, 900, 335}, + {0x3a, 891, 1010, 950, 335}, + {0x0a, 939, 1063, 1000, 335}, + {0x1a, 986, 1115, 1050, 
335}, + {0x2a, 1034, 1168, 1100, 335}, + {0x3b, 1081, 1220, 1150, 335}, + {0x0b, 1129, 1273, 1200, 335}, + {0x1b, 1176, 1325, 1250, 335}, + {0x2b, 1224, 1378, 1300, 335}, + {0x3c, 1271, 1430, 1350, 335}, + {0x0c, 1319, 1483, 1400, 335}, + {0x1c, 1366, 1535, 1450, 335}, + {0x2c, 1414, 1588, 1500, 335}, + {0x3d, 1461, 1640, 1550, 208}, + {0x0d, 1509, 1693, 1600, 214}, + {0x1d, 1556, 1745, 1650, 221}, + {0x2e, 1604, 1798, 1700, 228}, + {0x3e, 1651, 1850, 1750, 234}, + {0x0e, 1699, 1903, 1800, 241}, + {0x1e, 1746, 1955, 1850, 248}, + {0x2f, 1794, 2008, 1900, 255}, + {0x3f, 1841, 2060, 1950, 261}, + {0x0f, 1889, 2113, 2000, 268}, + {0x40, 1936, 2165, 2050, 275}, + {0x41, 1984, 2218, 2100, 281}, + {0x42, 2031, 2270, 2150, 288}, + {0x43, 2079, 2323, 2200, 294}, + {0x44, 2126, 2375, 2250, 302}, + {0x45, 2174, 2428, 2300, 308}, + {0x46, 2221, 2480, 2350, 315}, + {0x47, 2269, 2500, 2400, 321}, + {0x48, 2316, 2500, 2450, 328}, + {0x49, 2364, 2500, 2500, 335} +}; + +static u16 get_hsfreq_by_mbps(u32 mbps) +{ + unsigned int i = DPHY_FREQ_RANGE_NUM; + + while (i--) { + if (freqranges[i].default_mbps == mbps || + (mbps >= freqranges[i].min && mbps <= freqranges[i].max)) + return i; + } + + return DPHY_FREQ_RANGE_INVALID_INDEX; +} + +static int ipu6_isys_dwc_phy_config(struct ipu6_isys *isys, + u32 phy_id, u32 mbps) +{ + struct ipu6_bus_device *adev = isys->adev; + struct device *dev = &adev->auxdev.dev; + struct ipu6_device *isp = adev->isp; + u32 cfg_clk_freqrange; + u32 osc_freq_target; + u32 index; + + dev_dbg(dev, "config Dphy %u with %u mbps", phy_id, mbps); + + index = get_hsfreq_by_mbps(mbps); + if (index == DPHY_FREQ_RANGE_INVALID_INDEX) { + dev_err(dev, "link freq not found for mbps %u", mbps); + return -EINVAL; + } + + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_HSFREQRANGE, + freqranges[index].hsfreq, 0, 7); + + /* Force termination Calibration */ + if (isys->phy_termcal_val) { + dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 1); + 
dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2); + dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, + isys->phy_termcal_val, 4, 4); + } + + /* + * Enable override to configure the DDL target oscillation + * frequency on bit 0 of register 0xe4 + */ + dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1); + /* + * configure registers 0xe2, 0xe3 with the + * appropriate DDL target oscillation frequency + * 0x1cc(460) + */ + osc_freq_target = freqranges[index].osc_freq_target; + dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2, + osc_freq_target & 0xff, 0, 8); + dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3, + (osc_freq_target >> 8) & 0xf, 0, 4); + + if (mbps < 1500) { + /* deskew_polarity_rw, for < 1.5Gbps */ + dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1); + } + + /* + * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4] + * (38.4 - 17) * 4 = ~85 (0x55) + */ + cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10; + dev_dbg(dev, "ref_clk = %u clk_freqrange = %u", + isp->buttress.ref_clk, cfg_clk_freqrange); + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_CFGCLKFREQRANGE, + cfg_clk_freqrange, 0, 8); + + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1); + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1); + + return 0; +} + +static void ipu6_isys_dwc_phy_aggr_setup(struct ipu6_isys *isys, u32 master, + u32 slave, u32 mbps) +{ + /* Config mastermacro */ + dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1); + + /* Config master PHY clk lane to drive long channel clk */ + dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1); + + /* Config both PHYs data lanes to get clk from long channel */ + dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1); + dwc_dphy_ifc_write_mask(isys, master, 0x708, 0x1, 5, 1); + dwc_dphy_ifc_write_mask(isys, 
slave, 0x708, 0x1, 5, 1); + + /* Config slave PHY clk lane to bypass long channel clk to DDR clk */ + dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1); + + /* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */ + dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2); + + /* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */ + dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1); + + /* Turn off slave PHY LP-RX clk lane */ + dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5); +} + +#define PHY_E 4 +static int ipu6_isys_dwc_phy_powerup_ack(struct ipu6_isys *isys, u32 phy_id) +{ + struct device *dev = &isys->adev->auxdev.dev; + u32 rescal_done; + int ret; + + ret = dwc_dphy_pwr_up(isys, phy_id); + if (ret != 0) { + dev_err(dev, "Dphy %u power up failed(%d)", phy_id, ret); + return ret; + } + + /* reset forcerxmode */ + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 4, 1); + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 8, 1); + + dev_dbg(dev, "Dphy %u is ready!", phy_id); + + if (phy_id != PHY_E || isys->phy_termcal_val) + return 0; + + usleep_range(100, 200); + rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1); + if (rescal_done) { + isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id, + 0x220, 2, 4); + dev_dbg(dev, "termcal done with value = %u", + isys->phy_termcal_val); + } + + return 0; +} + +static void ipu6_isys_dwc_phy_reset(struct ipu6_isys *isys, u32 phy_id) +{ + dev_dbg(&isys->adev->auxdev.dev, "Reset phy %u", phy_id); + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 0); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 0); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE, + TEST_IFC_ACCESS_MODE_FSM); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, + 
TEST_IFC_REQ_RESET); +} + +int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 phy_id, primary, secondary; + u32 nlanes, port, mbps; + s64 link_freq; + int ret; + + port = cfg->port; + + if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { + dev_warn(dev, "invalid port ID %d\n", port); + return -EINVAL; + } + + nlanes = cfg->nlanes; + /* only port 0, 2 and 4 support 4 lanes */ + if (nlanes == 4 && port % 2) { + dev_err(dev, "invalid csi-port %u with %u lanes\n", port, + nlanes); + return -EINVAL; + } + + link_freq = ipu6_isys_csi2_get_link_freq(&isys->csi2[port]); + if (link_freq < 0) { + dev_err(dev, "get link freq failed(%lld).\n", link_freq); + return link_freq; + } + + mbps = div_u64(link_freq, 500000); + + phy_id = port; + primary = port & ~1; + secondary = primary + 1; + if (on) { + if (nlanes == 4) { + dev_dbg(dev, "config phy %u and %u in aggr mode\n", + primary, secondary); + + ipu6_isys_dwc_phy_reset(isys, primary); + ipu6_isys_dwc_phy_reset(isys, secondary); + ipu6_isys_dwc_phy_aggr_setup(isys, primary, + secondary, mbps); + + ret = ipu6_isys_dwc_phy_config(isys, primary, mbps); + if (ret) + return ret; + ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps); + if (ret) + return ret; + + ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary); + if (ret) + return ret; + + ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary); + return ret; + } + + dev_dbg(dev, "config phy %u with %u lanes in non-aggr mode\n", + phy_id, nlanes); + + ipu6_isys_dwc_phy_reset(isys, phy_id); + ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps); + if (ret) + return ret; + + ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id); + return ret; + } + + if (nlanes == 4) { + dev_dbg(dev, "Power down phy %u and phy %u for port %u\n", + primary, secondary, port); + 
ipu6_isys_dwc_phy_reset(isys, secondary); + ipu6_isys_dwc_phy_reset(isys, primary); + + return 0; + } + + dev_dbg(dev, "Powerdown phy %u with %u lanes\n", phy_id, nlanes); + + ipu6_isys_dwc_phy_reset(isys, phy_id); + + return 0; +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c new file mode 100644 index 0000000..c804291 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-platform-isys-csi2-reg.h" + +/* only use BB0, BB2, BB4, and BB6 on PHY0 */ +#define IPU6SE_ISYS_PHY_BB_NUM 4 +#define IPU6SE_ISYS_PHY_0_BASE 0x10000 + +#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) +#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) +#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) +#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) + +/* + * use port_cfg to configure that which data lanes used + * +---------+ +------+ +-----+ + * | port0 x4<-----| | | | + * | | | port | | | + * | port1 x2<-----| | | | + * | | | <-| PHY | + * | port2 x4<-----| | | | + * | | |config| | | + * | port3 x2<-----| | | | + * +---------+ +------+ +-----+ + */ +static const unsigned int csi2_port_cfg[][3] = { + {0, 0, 0x1f}, /* no link */ + {4, 0, 0x10}, /* x4 + x4 config */ + {2, 0, 0x12}, /* x2 + x2 config */ + {1, 0, 0x13}, /* x1 + x1 config */ + {2, 1, 0x15}, /* x2x1 + x2x1 config */ + {1, 1, 0x16}, /* x1x1 + x1x1 config */ + {2, 2, 0x18}, /* x2x2 + x2x2 config */ + {1, 2, 0x19} /* x1x2 + x1x2 config */ +}; + +/* port, nlanes, bbindex, portcfg */ +static const unsigned int phy_port_cfg[][4] = { + /* sip0 */ + {0, 1, 0, 0x15}, + {0, 2, 0, 0x15}, + {0, 4, 0, 0x15}, + {0, 4, 2, 0x22}, + /* sip1 */ + {2, 1, 4, 0x15}, + {2, 2, 4, 0x15}, + {2, 4, 4, 
0x15}, + {2, 4, 6, 0x22} +}; + +static void ipu6_isys_csi2_phy_config_by_port(struct ipu6_isys *isys, + unsigned int port, + unsigned int nlanes) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *base = isys->adev->isp->base; + unsigned int bbnum; + u32 val, reg, i; + + dev_dbg(dev, "port %u with %u lanes", port, nlanes); + + /* only support <1.5Gbps */ + for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) { + /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); + val = readl(base + reg); + val |= FIELD_PREP(GENMASK(6, 1), 13); + writel(val, base + reg); + + /* cphy_rx_control1.en_crc1 = 1 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i); + val = readl(base + reg); + val |= BIT(31); + writel(val, base + reg); + + /* dphy_cfg.reserved = 1, .lden_from_dll_ovrd_0 = 1 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i); + val = readl(base + reg); + val |= BIT(25) | BIT(26); + writel(val, base + reg); + + /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); + val = readl(base + reg); + val |= BIT(0); + writel(val, base + reg); + } + + /* Front end config, use minimal channel loss */ + for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) { + if (phy_port_cfg[i][0] == port && + phy_port_cfg[i][1] == nlanes) { + bbnum = phy_port_cfg[i][2] / 2; + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum); + val = readl(base + reg); + val |= phy_port_cfg[i][3]; + writel(val, base + reg); + } + } +} + +static void ipu6_isys_csi2_rx_control(struct ipu6_isys *isys) +{ + void __iomem *base = isys->adev->isp->base; + u32 val, reg; + + reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL); + + reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL); + + reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL; + 
val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL); + + reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL); +} + +static int ipu6_isys_csi2_set_port_cfg(struct ipu6_isys *isys, + unsigned int port, unsigned int nlanes) +{ + struct device *dev = &isys->adev->auxdev.dev; + unsigned int sip = port / 2; + unsigned int index; + + switch (nlanes) { + case 1: + index = 5; + break; + case 2: + index = 6; + break; + case 4: + index = 1; + break; + default: + dev_err(dev, "lanes nr %u is unsupported\n", nlanes); + return -EINVAL; + } + + dev_dbg(dev, "port config for port %u with %u lanes\n", port, nlanes); + + writel(csi2_port_cfg[index][2], + isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)); + + return 0; +} + +static void +ipu6_isys_csi2_set_timing(struct ipu6_isys *isys, + const struct ipu6_isys_csi2_timing *timing, + unsigned int port, unsigned int nlanes) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *reg; + u32 port_base; + u32 i; + + port_base = (port % 2) ? 
CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) : + CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port); + + dev_dbg(dev, "set timing for port %u with %u lanes\n", port, nlanes); + + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE; + + writel(timing->ctermen, reg); + + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE; + writel(timing->csettle, reg); + + for (i = 0; i < nlanes; i++) { + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i); + writel(timing->dtermen, reg); + + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i); + writel(timing->dsettle, reg); + } +} + +#define DPHY_TIMER_INCR 0x28 +int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + int ret = 0; + u32 nlanes; + u32 port; + + if (!on) + return 0; + + port = cfg->port; + nlanes = cfg->nlanes; + + if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { + dev_warn(dev, "invalid port ID %d\n", port); + return -EINVAL; + } + + ipu6_isys_csi2_phy_config_by_port(isys, port, nlanes); + + writel(DPHY_TIMER_INCR, + isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR); + + /* set port cfg and rx timing */ + ipu6_isys_csi2_set_timing(isys, timing, port, nlanes); + + ret = ipu6_isys_csi2_set_port_cfg(isys, port, nlanes); + if (ret) + return ret; + + ipu6_isys_csi2_rx_control(isys); + + return 0; +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c new file mode 100644 index 0000000..71aa500 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c @@ -0,0 +1,720 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include 
+#include +#include +#include +#include + +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-platform-isys-csi2-reg.h" + +#define CSI_REG_HUB_GPREG_PHY_CTL(id) (CSI_REG_BASE + 0x18008 + (id) * 0x8) +#define CSI_REG_HUB_GPREG_PHY_CTL_RESET BIT(4) +#define CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN BIT(0) +#define CSI_REG_HUB_GPREG_PHY_STATUS(id) (CSI_REG_BASE + 0x1800c + (id) * 0x8) +#define CSI_REG_HUB_GPREG_PHY_POWER_ACK BIT(0) +#define CSI_REG_HUB_GPREG_PHY_READY BIT(4) + +#define MCD_PHY_POWER_STATUS_TIMEOUT (200 * USEC_PER_MSEC) + +/* + * bridge to phy in buttress reg map, each phy has 16 kbytes + * only 2 phys for TGL U and Y + */ +#define IPU6_ISYS_MCD_PHY_BASE(i) (0x10000 + (i) * 0x4000) + +/* + * There are 2 MCD DPHY instances on TGL and 1 MCD DPHY instance on ADL. + * Each MCD PHY has 12-lanes which has 8 data lanes and 4 clock lanes. + * CSI port 1, 3 (5, 7) can support max 2 data lanes. + * CSI port 0, 2 (4, 6) can support max 4 data lanes. + * PHY configurations are PPI based instead of port. 
+ * Left: + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | PPI | PPI5 | PPI4 | PPI3 | PPI2 | PPI1 | PPI0 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x4 | unused | D3 | D2 | C0 | D0 | D1 | + * |---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x2 | C1 | D0 | D1 | C0 | D0 | D1 | + * ----------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x1 | C1 | D0 | unused | C0 | D0 | D1 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x1 | C1 | D0 | unused | C0 | D0 | unused | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x2 | C1 | D0 | D1 | C0 | D0 | unused | + * +---------+---------+---------+---------+--------+---------+----------+ + * + * Right: + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | PPI | PPI6 | PPI7 | PPI8 | PPI9 | PPI10 | PPI11 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x4 | D1 | D0 | C2 | D2 | D3 | unused | + * |---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x2 | D1 | D0 | C2 | D1 | D0 | C3 | + * ----------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x1 | D1 | D0 | C2 | unused | D0 | C3 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x1 | unused | D0 | C2 | unused | D0 | C3 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x2 | unused | D0 | C2 | D1 | D0 | C3 | + * +---------+---------+---------+---------+--------+---------+----------+ + * + * ppi mapping per phy : + * + * x4 + x4: + * Left : port0 - PPI range {0, 1, 2, 3, 4} + 
* Right: port2 - PPI range {6, 7, 8, 9, 10} + * + * x4 + x2x2: + * Left: port0 - PPI range {0, 1, 2, 3, 4} + * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} + * + * x2x2 + x4: + * Left: port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} + * Right: port2 - PPI range {6, 7, 8, 9, 10} + * + * x2x2 + x2x2: + * Left : port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} + * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} + */ + +struct phy_reg { + u32 reg; + u32 val; +}; + +static const struct phy_reg common_init_regs[] = { + /* for TGL-U, use 0x80000000 */ + {0x00000040, 0x80000000}, + {0x00000044, 0x00a80880}, + {0x00000044, 0x00b80880}, + {0x00000010, 0x0000078c}, + {0x00000344, 0x2f4401e2}, + {0x00000544, 0x924401e2}, + {0x00000744, 0x594401e2}, + {0x00000944, 0x624401e2}, + {0x00000b44, 0xfc4401e2}, + {0x00000d44, 0xc54401e2}, + {0x00000f44, 0x034401e2}, + {0x00001144, 0x8f4401e2}, + {0x00001344, 0x754401e2}, + {0x00001544, 0xe94401e2}, + {0x00001744, 0xcb4401e2}, + {0x00001944, 0xfa4401e2} +}; + +static const struct phy_reg x1_port0_config_regs[] = { + {0x00000694, 0xc80060fa}, + {0x00000680, 0x3d4f78ea}, + {0x00000690, 0x10a0140b}, + {0x000006a8, 0xdf04010a}, + {0x00000700, 0x57050060}, + {0x00000710, 0x0030001c}, + {0x00000738, 0x5f004444}, + {0x0000073c, 0x78464204}, + {0x00000748, 0x7821f940}, + {0x0000074c, 0xb2000433}, + {0x00000494, 0xfe6030fa}, + {0x00000480, 0x29ef5ed0}, + {0x00000490, 0x10a0540b}, + {0x000004a8, 0x7a01010a}, + {0x00000500, 0xef053460}, + {0x00000510, 0xe030101c}, + {0x00000538, 0xdf808444}, + {0x0000053c, 0xc8422204}, + {0x00000540, 0x0180088c}, + {0x00000574, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x1_port1_config_regs[] = { + {0x00000c94, 0xc80060fa}, + {0x00000c80, 0xcf47abea}, + {0x00000c90, 0x10a0840b}, + {0x00000ca8, 0xdf04010a}, + {0x00000d00, 0x57050060}, + {0x00000d10, 0x0030001c}, + {0x00000d38, 0x5f004444}, + {0x00000d3c, 0x78464204}, + 
{0x00000d48, 0x7821f940}, + {0x00000d4c, 0xb2000433}, + {0x00000a94, 0xc91030fa}, + {0x00000a80, 0x5a166ed0}, + {0x00000a90, 0x10a0540b}, + {0x00000aa8, 0x5d060100}, + {0x00000b00, 0xef053460}, + {0x00000b10, 0xa030101c}, + {0x00000b38, 0xdf808444}, + {0x00000b3c, 0xc8422204}, + {0x00000b40, 0x0180088c}, + {0x00000b74, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x1_port2_config_regs[] = { + {0x00001294, 0x28f000fa}, + {0x00001280, 0x08130cea}, + {0x00001290, 0x10a0140b}, + {0x000012a8, 0xd704010a}, + {0x00001300, 0x8d050060}, + {0x00001310, 0x0030001c}, + {0x00001338, 0xdf008444}, + {0x0000133c, 0x78422204}, + {0x00001348, 0x7821f940}, + {0x0000134c, 0x5a000433}, + {0x00001094, 0x2d20b0fa}, + {0x00001080, 0xade75dd0}, + {0x00001090, 0x10a0540b}, + {0x000010a8, 0xb101010a}, + {0x00001100, 0x33053460}, + {0x00001110, 0x0030101c}, + {0x00001138, 0xdf808444}, + {0x0000113c, 0xc8422204}, + {0x00001140, 0x8180088c}, + {0x00001174, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x1_port3_config_regs[] = { + {0x00001894, 0xc80060fa}, + {0x00001880, 0x0f90fd6a}, + {0x00001890, 0x10a0840b}, + {0x000018a8, 0xdf04010a}, + {0x00001900, 0x57050060}, + {0x00001910, 0x0030001c}, + {0x00001938, 0x5f004444}, + {0x0000193c, 0x78464204}, + {0x00001948, 0x7821f940}, + {0x0000194c, 0xb2000433}, + {0x00001694, 0x3050d0fa}, + {0x00001680, 0x0ef6d050}, + {0x00001690, 0x10a0540b}, + {0x000016a8, 0xe301010a}, + {0x00001700, 0x69053460}, + {0x00001710, 0xa030101c}, + {0x00001738, 0xdf808444}, + {0x0000173c, 0xc8422204}, + {0x00001740, 0x0180088c}, + {0x00001774, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port0_config_regs[] = { + {0x00000694, 0xc80060fa}, + {0x00000680, 0x3d4f78ea}, + {0x00000690, 0x10a0140b}, + {0x000006a8, 0xdf04010a}, + {0x00000700, 0x57050060}, + {0x00000710, 0x0030001c}, + {0x00000738, 0x5f004444}, + {0x0000073c, 0x78464204}, + {0x00000748, 0x7821f940}, + {0x0000074c, 
0xb2000433}, + {0x00000494, 0xc80060fa}, + {0x00000480, 0x29ef5ed8}, + {0x00000490, 0x10a0540b}, + {0x000004a8, 0x7a01010a}, + {0x00000500, 0xef053460}, + {0x00000510, 0xe030101c}, + {0x00000538, 0xdf808444}, + {0x0000053c, 0xc8422204}, + {0x00000540, 0x0180088c}, + {0x00000574, 0x00000000}, + {0x00000294, 0xc80060fa}, + {0x00000280, 0xcb45b950}, + {0x00000290, 0x10a0540b}, + {0x000002a8, 0x8c01010a}, + {0x00000300, 0xef053460}, + {0x00000310, 0x8030101c}, + {0x00000338, 0x41808444}, + {0x0000033c, 0x32422204}, + {0x00000340, 0x0180088c}, + {0x00000374, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port1_config_regs[] = { + {0x00000c94, 0xc80060fa}, + {0x00000c80, 0xcf47abea}, + {0x00000c90, 0x10a0840b}, + {0x00000ca8, 0xdf04010a}, + {0x00000d00, 0x57050060}, + {0x00000d10, 0x0030001c}, + {0x00000d38, 0x5f004444}, + {0x00000d3c, 0x78464204}, + {0x00000d48, 0x7821f940}, + {0x00000d4c, 0xb2000433}, + {0x00000a94, 0xc80060fa}, + {0x00000a80, 0x5a166ed8}, + {0x00000a90, 0x10a0540b}, + {0x00000aa8, 0x7a01010a}, + {0x00000b00, 0xef053460}, + {0x00000b10, 0xa030101c}, + {0x00000b38, 0xdf808444}, + {0x00000b3c, 0xc8422204}, + {0x00000b40, 0x0180088c}, + {0x00000b74, 0x00000000}, + {0x00000894, 0xc80060fa}, + {0x00000880, 0x4d4f21d0}, + {0x00000890, 0x10a0540b}, + {0x000008a8, 0x5601010a}, + {0x00000900, 0xef053460}, + {0x00000910, 0x8030101c}, + {0x00000938, 0xdf808444}, + {0x0000093c, 0xc8422204}, + {0x00000940, 0x0180088c}, + {0x00000974, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port2_config_regs[] = { + {0x00001294, 0xc80060fa}, + {0x00001280, 0x08130cea}, + {0x00001290, 0x10a0140b}, + {0x000012a8, 0xd704010a}, + {0x00001300, 0x8d050060}, + {0x00001310, 0x0030001c}, + {0x00001338, 0xdf008444}, + {0x0000133c, 0x78422204}, + {0x00001348, 0x7821f940}, + {0x0000134c, 0x5a000433}, + {0x00001094, 0xc80060fa}, + {0x00001080, 0xade75dd8}, + {0x00001090, 0x10a0540b}, + {0x000010a8, 0xb101010a}, + {0x00001100, 
0x33053460}, + {0x00001110, 0x0030101c}, + {0x00001138, 0xdf808444}, + {0x0000113c, 0xc8422204}, + {0x00001140, 0x8180088c}, + {0x00001174, 0x00000000}, + {0x00000e94, 0xc80060fa}, + {0x00000e80, 0x0fbf16d0}, + {0x00000e90, 0x10a0540b}, + {0x00000ea8, 0x7a01010a}, + {0x00000f00, 0xf5053460}, + {0x00000f10, 0xc030101c}, + {0x00000f38, 0xdf808444}, + {0x00000f3c, 0xc8422204}, + {0x00000f40, 0x8180088c}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port3_config_regs[] = { + {0x00001894, 0xc80060fa}, + {0x00001880, 0x0f90fd6a}, + {0x00001890, 0x10a0840b}, + {0x000018a8, 0xdf04010a}, + {0x00001900, 0x57050060}, + {0x00001910, 0x0030001c}, + {0x00001938, 0x5f004444}, + {0x0000193c, 0x78464204}, + {0x00001948, 0x7821f940}, + {0x0000194c, 0xb2000433}, + {0x00001694, 0xc80060fa}, + {0x00001680, 0x0ef6d058}, + {0x00001690, 0x10a0540b}, + {0x000016a8, 0x7a01010a}, + {0x00001700, 0x69053460}, + {0x00001710, 0xa030101c}, + {0x00001738, 0xdf808444}, + {0x0000173c, 0xc8422204}, + {0x00001740, 0x0180088c}, + {0x00001774, 0x00000000}, + {0x00001494, 0xc80060fa}, + {0x00001480, 0xf9d34bd0}, + {0x00001490, 0x10a0540b}, + {0x000014a8, 0x7a01010a}, + {0x00001500, 0x1b053460}, + {0x00001510, 0x0030101c}, + {0x00001538, 0xdf808444}, + {0x0000153c, 0xc8422204}, + {0x00001540, 0x8180088c}, + {0x00001574, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port0_config_regs[] = { + {0x00000694, 0xc80060fa}, + {0x00000680, 0x3d4f78fa}, + {0x00000690, 0x10a0140b}, + {0x000006a8, 0xdf04010a}, + {0x00000700, 0x57050060}, + {0x00000710, 0x0030001c}, + {0x00000738, 0x5f004444}, + {0x0000073c, 0x78464204}, + {0x00000748, 0x7821f940}, + {0x0000074c, 0xb2000433}, + {0x00000494, 0xfe6030fa}, + {0x00000480, 0x29ef5ed8}, + {0x00000490, 0x10a0540b}, + {0x000004a8, 0x7a01010a}, + {0x00000500, 0xef053460}, + {0x00000510, 0xe030101c}, + {0x00000538, 0xdf808444}, + {0x0000053c, 0xc8422204}, + {0x00000540, 0x0180088c}, + {0x00000574, 0x00000004}, + {0x00000294, 
0x23e030fa}, + {0x00000280, 0xcb45b950}, + {0x00000290, 0x10a0540b}, + {0x000002a8, 0x8c01010a}, + {0x00000300, 0xef053460}, + {0x00000310, 0x8030101c}, + {0x00000338, 0x41808444}, + {0x0000033c, 0x32422204}, + {0x00000340, 0x0180088c}, + {0x00000374, 0x00000004}, + {0x00000894, 0x5620b0fa}, + {0x00000880, 0x4d4f21dc}, + {0x00000890, 0x10a0540b}, + {0x000008a8, 0x5601010a}, + {0x00000900, 0xef053460}, + {0x00000910, 0x8030101c}, + {0x00000938, 0xdf808444}, + {0x0000093c, 0xc8422204}, + {0x00000940, 0x0180088c}, + {0x00000974, 0x00000004}, + {0x00000a94, 0xc91030fa}, + {0x00000a80, 0x5a166ecc}, + {0x00000a90, 0x10a0540b}, + {0x00000aa8, 0x5d01010a}, + {0x00000b00, 0xef053460}, + {0x00000b10, 0xa030101c}, + {0x00000b38, 0xdf808444}, + {0x00000b3c, 0xc8422204}, + {0x00000b40, 0x0180088c}, + {0x00000b74, 0x00000004}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port1_config_regs[] = { + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port2_config_regs[] = { + {0x00001294, 0x28f000fa}, + {0x00001280, 0x08130cfa}, + {0x00001290, 0x10c0140b}, + {0x000012a8, 0xd704010a}, + {0x00001300, 0x8d050060}, + {0x00001310, 0x0030001c}, + {0x00001338, 0xdf008444}, + {0x0000133c, 0x78422204}, + {0x00001348, 0x7821f940}, + {0x0000134c, 0x5a000433}, + {0x00001094, 0x2d20b0fa}, + {0x00001080, 0xade75dd8}, + {0x00001090, 0x10a0540b}, + {0x000010a8, 0xb101010a}, + {0x00001100, 0x33053460}, + {0x00001110, 0x0030101c}, + {0x00001138, 0xdf808444}, + {0x0000113c, 0xc8422204}, + {0x00001140, 0x8180088c}, + {0x00001174, 0x00000004}, + {0x00000e94, 0xd308d0fa}, + {0x00000e80, 0x0fbf16d0}, + {0x00000e90, 0x10a0540b}, + {0x00000ea8, 0x2c01010a}, + {0x00000f00, 0xf5053460}, + {0x00000f10, 0xc030101c}, + {0x00000f38, 0xdf808444}, + {0x00000f3c, 0xc8422204}, + {0x00000f40, 0x8180088c}, + {0x00000f74, 0x00000004}, + {0x00001494, 0x136850fa}, + {0x00001480, 0xf9d34bdc}, + {0x00001490, 0x10a0540b}, + {0x000014a8, 0x5a01010a}, + {0x00001500, 0x1b053460}, + {0x00001510, 
0x0030101c}, + {0x00001538, 0xdf808444}, + {0x0000153c, 0xc8422204}, + {0x00001540, 0x8180088c}, + {0x00001574, 0x00000004}, + {0x00001694, 0x3050d0fa}, + {0x00001680, 0x0ef6d04c}, + {0x00001690, 0x10a0540b}, + {0x000016a8, 0xe301010a}, + {0x00001700, 0x69053460}, + {0x00001710, 0xa030101c}, + {0x00001738, 0xdf808444}, + {0x0000173c, 0xc8422204}, + {0x00001740, 0x0180088c}, + {0x00001774, 0x00000004}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port3_config_regs[] = { + {0x00000000, 0x00000000} +}; + +static const struct phy_reg *x1_config_regs[4] = { + x1_port0_config_regs, + x1_port1_config_regs, + x1_port2_config_regs, + x1_port3_config_regs +}; + +static const struct phy_reg *x2_config_regs[4] = { + x2_port0_config_regs, + x2_port1_config_regs, + x2_port2_config_regs, + x2_port3_config_regs +}; + +static const struct phy_reg *x4_config_regs[4] = { + x4_port0_config_regs, + x4_port1_config_regs, + x4_port2_config_regs, + x4_port3_config_regs +}; + +static const struct phy_reg **config_regs[3] = { + x1_config_regs, + x2_config_regs, + x4_config_regs +}; + +static int ipu6_isys_mcd_phy_powerup_ack(struct ipu6_isys *isys, u8 id) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 val; + int ret; + + val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + val |= CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN; + writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + + ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), + val, val & CSI_REG_HUB_GPREG_PHY_POWER_ACK, + 200, MCD_PHY_POWER_STATUS_TIMEOUT); + if (ret) + dev_err(dev, "PHY%d powerup ack timeout", id); + + return ret; +} + +static int ipu6_isys_mcd_phy_powerdown_ack(struct ipu6_isys *isys, u8 id) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 val; + int ret; + + writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + ret = readl_poll_timeout(isys_base + 
CSI_REG_HUB_GPREG_PHY_STATUS(id), + val, !(val & CSI_REG_HUB_GPREG_PHY_POWER_ACK), + 200, MCD_PHY_POWER_STATUS_TIMEOUT); + if (ret) + dev_err(dev, "PHY%d powerdown ack timeout", id); + + return ret; +} + +static void ipu6_isys_mcd_phy_reset(struct ipu6_isys *isys, u8 id, bool assert) +{ + void __iomem *isys_base = isys->pdata->base; + u32 val; + + val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + if (assert) + val |= CSI_REG_HUB_GPREG_PHY_CTL_RESET; + else + val &= ~(CSI_REG_HUB_GPREG_PHY_CTL_RESET); + + writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); +} + +static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 val; + int ret; + + ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), + val, val & CSI_REG_HUB_GPREG_PHY_READY, + 200, MCD_PHY_POWER_STATUS_TIMEOUT); + if (ret) + dev_err(dev, "PHY%d ready ack timeout", id); + + return ret; +} + +static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) +{ + struct ipu6_bus_device *adev = isys->adev; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct v4l2_async_connection *asc; + void __iomem *phy_base; + unsigned int i; + u8 phy_id; + + list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { + s_asd = container_of(asc, struct sensor_async_sd, asc); + phy_id = s_asd->csi2.port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); + + for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) + writel(common_init_regs[i].val, + phy_base + common_init_regs[i].reg); + } +} + +static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) +{ + int phy_port; + int ret; + + if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1)) + return -EINVAL; + + /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C4 */ + /* normalize driver port number */ + phy_port = cfg->port % 4; + + /* swap 
port number only for A and B */ + if (phy_port == 0) + phy_port = 1; + else if (phy_port == 1) + phy_port = 0; + + ret = phy_port; + + /* check validity per lane configuration */ + if (cfg->nlanes == 4 && !(phy_port == 0 || phy_port == 2)) + ret = -EINVAL; + else if ((cfg->nlanes == 2 || cfg->nlanes == 1) && + !(phy_port >= 0 && phy_port <= 3)) + ret = -EINVAL; + + return ret; +} + +static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_bus_device *adev = isys->adev; + const struct phy_reg **phy_config_regs; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct ipu6_isys_csi2_config cfg; + struct v4l2_async_connection *asc; + int phy_port, phy_id; + unsigned int i; + void __iomem *phy_base; + + list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { + s_asd = container_of(asc, struct sensor_async_sd, asc); + cfg.port = s_asd->csi2.port; + cfg.nlanes = s_asd->csi2.nlanes; + phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); + if (phy_port < 0) { + dev_err(dev, "invalid port %d for lane %d", cfg.port, + cfg.nlanes); + return -ENXIO; + } + + phy_id = cfg.port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); + dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, + cfg.nlanes); + + phy_config_regs = config_regs[cfg.nlanes / 2]; + cfg.port = phy_port; + for (i = 0; phy_config_regs[cfg.port][i].reg; i++) + writel(phy_config_regs[cfg.port][i].val, + phy_base + phy_config_regs[cfg.port][i].reg); + } + + return 0; +} + +#define CSI_MCD_PHY_NUM 2 +static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; + +int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u8 port, phy_id; + refcount_t *ref; + int ret; + + port = 
cfg->port; + phy_id = port / 4; + + ref = &phy_power_ref_count[phy_id]; + + dev_dbg(dev, "for phy %d port %d, lanes: %d\n", phy_id, port, + cfg->nlanes); + + if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { + dev_warn(dev, "invalid port ID %d\n", port); + return -EINVAL; + } + + if (on) { + if (refcount_read(ref)) { + dev_dbg(dev, "for phy %d is already UP", phy_id); + refcount_inc(ref); + return 0; + } + + ret = ipu6_isys_mcd_phy_powerup_ack(isys, phy_id); + if (ret) + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 0); + ipu6_isys_mcd_phy_common_init(isys); + + ret = ipu6_isys_mcd_phy_config(isys); + if (ret) + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 1); + ret = ipu6_isys_mcd_phy_ready(isys, phy_id); + if (ret) + return ret; + + refcount_set(ref, 1); + return 0; + } + + if (!refcount_dec_and_test(ref)) + return 0; + + return ipu6_isys_mcd_phy_powerdown_ack(isys, phy_id); +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c new file mode 100644 index 0000000..aa2cf72 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c @@ -0,0 +1,850 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-fw-isys.h" +#include "ipu6-isys.h" +#include "ipu6-isys-video.h" + +static int ipu6_isys_buf_init(struct vb2_buffer *vb) +{ + struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); + struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + int ret; + + ret = ipu6_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); + if (ret) + return ret; + + ivb->dma_addr = sg_dma_address(sg->sgl); + + 
return 0; +} + +static void ipu6_isys_buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); + struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + + ivb->dma_addr = 0; + ipu6_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); +} + +static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, + unsigned int *num_planes, unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + u32 size = ipu6_isys_get_data_size(av); + + /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ + if (!*num_planes) { + sizes[0] = size; + } else if (sizes[0] < size) { + dev_dbg(dev, "%s: queue setup: size %u < %u\n", + av->vdev.name, sizes[0], size); + return -EINVAL; + } + + *num_planes = 1; + + return 0; +} + +static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + u32 bytesperline = ipu6_isys_get_bytes_per_line(av); + u32 height = ipu6_isys_get_frame_height(av); + u32 size = ipu6_isys_get_data_size(av); + + dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n", + av->vdev.name, size, vb2_plane_size(vb, 0)); + + if (size > vb2_plane_size(vb, 0)) + return -EINVAL; + + vb2_set_plane_payload(vb, 0, bytesperline * height); + + return 0; +} + +/* + * Queue a buffer list back to incoming or active queues. The buffers + * are removed from the buffer list. 
+ */ +void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state) +{ + struct ipu6_isys_buffer *ib, *ib_safe; + unsigned long flags; + bool first = true; + + if (!bl) + return; + + WARN_ON_ONCE(!bl->nbufs); + WARN_ON_ONCE(op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE && + op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING); + + list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { + struct ipu6_isys_video *av; + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + struct ipu6_isys_queue *aq = + vb2_queue_to_isys_queue(vb->vb2_queue); + struct device *dev; + + av = ipu6_isys_queue_to_video(aq); + dev = &av->isys->adev->auxdev.dev; + spin_lock_irqsave(&aq->lock, flags); + list_del(&ib->head); + if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE) + list_add(&ib->head, &aq->active); + else if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING) + list_add_tail(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_SET_STATE) + vb2_buffer_done(vb, state); + + if (first) { + dev_dbg(dev, + "queue buf list %p flags %lx, s %d, %d bufs\n", + bl, op_flags, state, bl->nbufs); + first = false; + } + + bl->nbufs--; + } + + WARN_ON(bl->nbufs); +} + +/* + * flush_firmware_streamon_fail() - Flush in cases where requests may + * have been queued to firmware and the *firmware streamon fails for a + * reason or another. 
+ */ +static void flush_firmware_streamon_fail(struct ipu6_isys_stream *stream) +{ + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_queue *aq; + unsigned long flags; + + lockdep_assert_held(&stream->mutex); + + list_for_each_entry(aq, &stream->queues, node) { + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_buffer *ib, *ib_safe; + + spin_lock_irqsave(&aq->lock, flags); + list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { + struct vb2_buffer *vb = + ipu6_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + if (av->streaming) { + dev_dbg(dev, + "%s: queue buffer %u back to incoming\n", + av->vdev.name, vb->index); + /* Queue already streaming, return to driver. */ + list_add(&ib->head, &aq->incoming); + continue; + } + /* Queue not yet streaming, return to user. */ + dev_dbg(dev, "%s: return %u back to videobuf2\n", + av->vdev.name, vb->index); + vb2_buffer_done(ipu6_isys_buffer_to_vb2_buffer(ib), + VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&aq->lock, flags); + } +} + +/* + * Attempt obtaining a buffer list from the incoming queues, a list of buffers + * that contains one entry from each video buffer queue. If a buffer can't be + * obtained from every queue, the buffers are returned back to the queue. 
+ */ +static int buffer_list_get(struct ipu6_isys_stream *stream, + struct ipu6_isys_buffer_list *bl) +{ + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_queue *aq; + unsigned long flags; + unsigned long buf_flag = IPU6_ISYS_BUFFER_LIST_FL_INCOMING; + + bl->nbufs = 0; + INIT_LIST_HEAD(&bl->head); + + list_for_each_entry(aq, &stream->queues, node) { + struct ipu6_isys_buffer *ib; + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->incoming)) { + spin_unlock_irqrestore(&aq->lock, flags); + if (!list_empty(&bl->head)) + ipu6_isys_buffer_list_queue(bl, buf_flag, 0); + return -ENODATA; + } + + ib = list_last_entry(&aq->incoming, + struct ipu6_isys_buffer, head); + + dev_dbg(dev, "buffer: %s: buffer %u\n", + ipu6_isys_queue_to_video(aq)->vdev.name, + ipu6_isys_buffer_to_vb2_buffer(ib)->index); + list_del(&ib->head); + list_add(&ib->head, &bl->head); + spin_unlock_irqrestore(&aq->lock, flags); + + bl->nbufs++; + } + + dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); + + return 0; +} + +static void +ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb, + struct ipu6_fw_isys_frame_buff_set_abi *set) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + + set->output_pins[aq->fw_output].addr = ivb->dma_addr; + set->output_pins[aq->fw_output].out_buf_id = vb->index + 1; +} + +/* + * Convert a buffer list to a isys fw ABI framebuffer set. The + * buffer list is not modified. 
+ */ +#define IPU6_ISYS_FRAME_NUM_THRESHOLD (30) +void +ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, + struct ipu6_isys_stream *stream, + struct ipu6_isys_buffer_list *bl) +{ + struct ipu6_isys_buffer *ib; + + WARN_ON(!bl->nbufs); + + set->send_irq_sof = 1; + set->send_resp_sof = 1; + set->send_irq_eof = 0; + set->send_resp_eof = 0; + + if (stream->streaming) + set->send_irq_capture_ack = 0; + else + set->send_irq_capture_ack = 1; + set->send_irq_capture_done = 0; + + set->send_resp_capture_ack = 1; + set->send_resp_capture_done = 1; + if (atomic_read(&stream->sequence) >= IPU6_ISYS_FRAME_NUM_THRESHOLD) { + set->send_resp_capture_ack = 0; + set->send_resp_capture_done = 0; + } + + list_for_each_entry(ib, &bl->head, head) { + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + + ipu6_isys_buf_to_fw_frame_buf_pin(vb, set); + } +} + +/* Start streaming for real. The buffer list must be available. */ +static int ipu6_isys_stream_start(struct ipu6_isys_video *av, + struct ipu6_isys_buffer_list *bl, bool error) +{ + struct ipu6_isys_stream *stream = av->stream; + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_buffer_list __bl; + int ret; + + mutex_lock(&stream->isys->stream_mutex); + ret = ipu6_isys_video_set_streaming(av, 1, bl); + mutex_unlock(&stream->isys->stream_mutex); + if (ret) + goto out_requeue; + + stream->streaming = 1; + + bl = &__bl; + + do { + struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; + struct isys_fw_msgs *msg; + u16 send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; + + ret = buffer_list_get(stream, bl); + if (ret < 0) + break; + + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) + return -ENOMEM; + + buf = &msg->fw_msg.frame; + ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); + ipu6_fw_isys_dump_frame_buff_set(dev, buf, + stream->nr_output_pins); + ipu6_isys_buffer_list_queue(bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, + 0); + ret = ipu6_fw_isys_complex_cmd(stream->isys, + 
stream->stream_handle, buf, + msg->dma_addr, sizeof(*buf), + send_type); + } while (!WARN_ON(ret)); + + return 0; + +out_requeue: + if (bl && bl->nbufs) + ipu6_isys_buffer_list_queue(bl, + IPU6_ISYS_BUFFER_LIST_FL_INCOMING | + (error ? + IPU6_ISYS_BUFFER_LIST_FL_SET_STATE : + 0), error ? VB2_BUF_STATE_ERROR : + VB2_BUF_STATE_QUEUED); + flush_firmware_streamon_fail(stream); + + return ret; +} + +static void buf_queue(struct vb2_buffer *vb) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + struct ipu6_isys_buffer *ib = &ivb->ib; + struct device *dev = &av->isys->adev->auxdev.dev; + struct media_pipeline *media_pipe = + media_entity_pipeline(&av->vdev.entity); + struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; + struct ipu6_isys_stream *stream = av->stream; + struct ipu6_isys_buffer_list bl; + struct isys_fw_msgs *msg; + unsigned long flags; + dma_addr_t dma; + int ret; + + dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name); + + dma = ivb->dma_addr; + dev_dbg(dev, "iova: iova %pad\n", &dma); + + spin_lock_irqsave(&aq->lock, flags); + list_add(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (!media_pipe || !vb->vb2_queue->start_streaming_called) { + dev_dbg(dev, "media pipeline is not ready for %s\n", + av->vdev.name); + return; + } + + mutex_lock(&stream->mutex); + + if (stream->nr_streaming != stream->nr_queues) { + dev_dbg(dev, "not streaming yet, adding to incoming\n"); + goto out; + } + + /* + * We just put one buffer to the incoming list of this queue + * (above). Let's see whether all queues in the pipeline would + * have a buffer. 
+ */ + ret = buffer_list_get(stream, &bl); + if (ret < 0) { + dev_dbg(dev, "No buffers available\n"); + goto out; + } + + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) { + ret = -ENOMEM; + goto out; + } + + buf = &msg->fw_msg.frame; + ipu6_isys_buf_to_fw_frame_buf(buf, stream, &bl); + ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); + + if (!stream->streaming) { + ret = ipu6_isys_stream_start(av, &bl, true); + if (ret) + dev_err(dev, "stream start failed.\n"); + goto out; + } + + /* + * We must queue the buffers in the buffer list to the + * appropriate video buffer queues BEFORE passing them to the + * firmware since we could get a buffer event back before we + * have queued them ourselves to the active queue. + */ + ipu6_isys_buffer_list_queue(&bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + + ret = ipu6_fw_isys_complex_cmd(stream->isys, stream->stream_handle, + buf, msg->dma_addr, sizeof(*buf), + IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + if (ret < 0) + dev_err(dev, "send stream capture failed\n"); + +out: + mutex_unlock(&stream->mutex); +} + +static int ipu6_isys_link_fmt_validate(struct ipu6_isys_queue *aq) +{ + struct v4l2_mbus_framefmt format; + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + struct media_pad *remote_pad = + media_pad_remote_pad_first(av->vdev.entity.pads); + struct v4l2_subdev *sd; + u32 r_stream, code; + int ret; + + if (!remote_pad) + return -ENOTCONN; + + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, remote_pad->index); + + ret = ipu6_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream, + &format); + + if (ret) { + dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n", + sd->entity.name, remote_pad->index, r_stream); + return ret; + } + + if (format.width != ipu6_isys_get_frame_width(av) || + format.height != ipu6_isys_get_frame_height(av)) { + dev_dbg(dev, "wrong width or height %ux%u 
(%ux%u expected)\n", + ipu6_isys_get_frame_width(av), + ipu6_isys_get_frame_height(av), format.width, + format.height); + return -EINVAL; + } + + code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; + if (format.code != code) { + dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n", + code, format.code); + return -EINVAL; + } + + return 0; +} + +static void return_buffers(struct ipu6_isys_queue *aq, + enum vb2_buffer_state state) +{ + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_buffer *ib; + bool need_reset = false; + unsigned long flags; + + spin_lock_irqsave(&aq->lock, flags); + while (!list_empty(&aq->incoming)) { + struct vb2_buffer *vb; + + ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, + head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); + } + + /* + * Something went wrong (FW crash / HW hang / not all buffers + * returned from isys) if there are still buffers queued in active + * queue. We have to clean up places a bit. 
+ */ + while (!list_empty(&aq->active)) { + struct vb2_buffer *vb; + + ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, + head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); + need_reset = true; + } + + spin_unlock_irqrestore(&aq->lock, flags); + + if (need_reset) { + mutex_lock(&av->isys->mutex); + av->isys->need_reset = true; + mutex_unlock(&av->isys->mutex); + } +} + +static void ipu6_isys_stream_cleanup(struct ipu6_isys_video *av) +{ + video_device_pipeline_stop(&av->vdev); + ipu6_isys_put_stream(av->stream); + av->stream = NULL; +} + +static int start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + struct ipu6_isys_buffer_list __bl, *bl = NULL; + struct ipu6_isys_stream *stream; + struct media_entity *source_entity = NULL; + int nr_queues, ret; + + dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n", + av->vdev.name, ipu6_isys_get_frame_width(av), + ipu6_isys_get_frame_height(av), pfmt->css_pixelformat); + + ret = ipu6_isys_setup_video(av, &source_entity, &nr_queues); + if (ret < 0) { + dev_dbg(dev, "failed to setup video\n"); + goto out_return_buffers; + } + + ret = ipu6_isys_link_fmt_validate(aq); + if (ret) { + dev_dbg(dev, + "%s: link format validation failed (%d)\n", + av->vdev.name, ret); + goto out_pipeline_stop; + } + + ret = ipu6_isys_fw_open(av->isys); + if (ret) + goto out_pipeline_stop; + + stream = av->stream; + mutex_lock(&stream->mutex); + if (!stream->nr_streaming) { + ret = ipu6_isys_video_prepare_stream(av, source_entity, + nr_queues); + if (ret) + goto out_fw_close; + } + + stream->nr_streaming++; + 
dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming, + stream->nr_queues); + + list_add(&aq->node, &stream->queues); + ipu6_isys_configure_stream_watermark(av, true); + ipu6_isys_update_stream_watermark(av, true); + + if (stream->nr_streaming != stream->nr_queues) + goto out; + + bl = &__bl; + ret = buffer_list_get(stream, bl); + if (ret < 0) { + dev_warn(dev, "no buffer available, DRIVER BUG?\n"); + goto out; + } + + ret = ipu6_isys_stream_start(av, bl, false); + if (ret) + goto out_stream_start; + +out: + mutex_unlock(&stream->mutex); + + return 0; + +out_stream_start: + ipu6_isys_update_stream_watermark(av, false); + list_del(&aq->node); + stream->nr_streaming--; + +out_fw_close: + mutex_unlock(&stream->mutex); + ipu6_isys_fw_close(av->isys); + +out_pipeline_stop: + ipu6_isys_stream_cleanup(av); + +out_return_buffers: + return_buffers(aq, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void stop_streaming(struct vb2_queue *q) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_stream *stream = av->stream; + + mutex_lock(&stream->mutex); + + ipu6_isys_update_stream_watermark(av, false); + + mutex_lock(&av->isys->stream_mutex); + if (stream->nr_streaming == stream->nr_queues && stream->streaming) + ipu6_isys_video_set_streaming(av, 0, NULL); + mutex_unlock(&av->isys->stream_mutex); + + stream->nr_streaming--; + list_del(&aq->node); + stream->streaming = 0; + mutex_unlock(&stream->mutex); + + ipu6_isys_stream_cleanup(av); + + return_buffers(aq, VB2_BUF_STATE_ERROR); + + ipu6_isys_fw_close(av->isys); +} + +static unsigned int +get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream, u64 time) +{ + struct ipu6_isys *isys = stream->isys; + struct device *dev = &isys->adev->auxdev.dev; + unsigned int i; + + /* + * The timestamp is invalid as no TSC in some FPGA platform, + * so get the sequence from pipeline directly in this case. 
+ */ + if (time == 0) + return atomic_read(&stream->sequence) - 1; + + for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) + if (time == stream->seq[i].timestamp) { + dev_dbg(dev, "sof: using seq nr %u for ts %llu\n", + stream->seq[i].sequence, time); + return stream->seq[i].sequence; + } + + for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) + dev_dbg(dev, "sof: sequence %u, timestamp value %llu\n", + stream->seq[i].sequence, stream->seq[i].timestamp); + + return 0; +} + +static u64 get_sof_ns_delta(struct ipu6_isys_video *av, u64 timestamp) +{ + struct ipu6_bus_device *adev = av->isys->adev; + struct ipu6_device *isp = adev->isp; + u64 delta, tsc_now; + + ipu6_buttress_tsc_read(isp, &tsc_now); + if (!tsc_now) + return 0; + + delta = tsc_now - timestamp; + + return ipu6_buttress_tsc_ticks_to_ns(delta, isp); +} + +static void +ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, u64 time) +{ + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + struct ipu6_isys_stream *stream = av->stream; + u64 ns; + u32 sequence; + + ns = ktime_get_ns() - get_sof_ns_delta(av, time); + sequence = get_sof_sequence_by_timestamp(stream, time); + + vbuf->vb2_buf.timestamp = ns; + vbuf->sequence = sequence; + + dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", + av->vdev.name, ktime_get_ns(), sequence); + dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index, + vbuf->vb2_buf.timestamp); +} + +static void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) +{ + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + + if (atomic_read(&ib->str2mmio_flag)) { + vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + /* + * Operation on buffer is ended with error and will be reported + * to the userspace when it is 
de-queued + */ + atomic_set(&ib->str2mmio_flag, 0); + } else { + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } +} + +static void +ipu6_stream_buf_ready(struct ipu6_isys_stream *stream, u8 pin_id, u32 pin_addr, + u64 time, bool error_check) +{ + struct ipu6_isys_queue *aq = stream->output_pins_queue[pin_id]; + struct ipu6_isys *isys = stream->isys; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_isys_buffer *ib; + struct vb2_buffer *vb; + unsigned long flags; + bool first = true; + struct vb2_v4l2_buffer *buf; + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->active)) { + spin_unlock_irqrestore(&aq->lock, flags); + dev_err(dev, "active queue empty\n"); + return; + } + + list_for_each_entry_reverse(ib, &aq->active, head) { + struct ipu6_isys_video_buffer *ivb; + struct vb2_v4l2_buffer *vvb; + dma_addr_t addr; + + vb = ipu6_isys_buffer_to_vb2_buffer(ib); + vvb = to_vb2_v4l2_buffer(vb); + ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb); + addr = ivb->dma_addr; + + if (pin_addr != addr) { + if (first) + dev_err(dev, "Unexpected buffer address %pad\n", + &addr); + first = false; + continue; + } + + if (error_check) { + /* + * Check for error message: + * 'IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' + */ + atomic_set(&ib->str2mmio_flag, 1); + } + dev_dbg(dev, "buffer: found buffer %pad\n", &addr); + + buf = to_vb2_v4l2_buffer(vb); + buf->field = V4L2_FIELD_NONE; + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu6_isys_buf_calc_sequence_time(ib, time); + + ipu6_isys_queue_buf_done(ib); + + return; + } + + dev_err(dev, "Failed to find a matching video buffer\n"); + + spin_unlock_irqrestore(&aq->lock, flags); +} + +void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, + struct ipu6_fw_isys_resp_info_abi *info) +{ + u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0]; + bool err = info->error_info.error == IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO; + + ipu6_stream_buf_ready(stream, info->pin_id, 
info->pin.addr, time, err); +} + +static const struct vb2_ops ipu6_isys_queue_ops = { + .queue_setup = ipu6_isys_queue_setup, + .buf_init = ipu6_isys_buf_init, + .buf_prepare = ipu6_isys_buf_prepare, + .buf_cleanup = ipu6_isys_buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .buf_queue = buf_queue, +}; + +int ipu6_isys_queue_init(struct ipu6_isys_queue *aq) +{ + struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys; + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_bus_device *adev = isys->adev; + int ret; + + /* no support for userptr */ + if (!aq->vbq.io_modes) + aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF; + + aq->vbq.drv_priv = isys; + aq->vbq.ops = &ipu6_isys_queue_ops; + aq->vbq.lock = &av->mutex; + aq->vbq.mem_ops = &vb2_dma_sg_memops; + aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + aq->vbq.min_queued_buffers = 1; + aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + + ret = vb2_queue_init(&aq->vbq); + if (ret) + return ret; + + aq->vbq.dev = &adev->isp->pdev->dev; + spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->active); + INIT_LIST_HEAD(&aq->incoming); + + return 0; +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h new file mode 100644 index 0000000..844dfda --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_QUEUE_H +#define IPU6_ISYS_QUEUE_H + +#include +#include +#include +#include +#include + +#include + +#include "ipu6-fw-isys.h" +#include "ipu6-isys-video.h" + +struct ipu6_isys_stream; + +struct ipu6_isys_queue { + struct vb2_queue vbq; + struct list_head node; + spinlock_t lock; /* Protects active and incoming lists */ + struct list_head active; + struct list_head incoming; + unsigned int fw_output; +}; + +struct ipu6_isys_buffer { + struct 
list_head head; + atomic_t str2mmio_flag; +}; + +struct ipu6_isys_video_buffer { + struct vb2_v4l2_buffer vb_v4l2; + struct ipu6_isys_buffer ib; + dma_addr_t dma_addr; +}; + +#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) +#define IPU6_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) +#define IPU6_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) + +struct ipu6_isys_buffer_list { + struct list_head head; + unsigned int nbufs; +}; + +#define vb2_queue_to_isys_queue(__vb2) \ + container_of(__vb2, struct ipu6_isys_queue, vbq) + +#define ipu6_isys_to_isys_video_buffer(__ib) \ + container_of(__ib, struct ipu6_isys_video_buffer, ib) + +#define vb2_buffer_to_ipu6_isys_video_buffer(__vvb) \ + container_of(__vvb, struct ipu6_isys_video_buffer, vb_v4l2) + +#define ipu6_isys_buffer_to_vb2_buffer(__ib) \ + (&ipu6_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) + +void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state); +void +ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, + struct ipu6_isys_stream *stream, + struct ipu6_isys_buffer_list *bl); +void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, + struct ipu6_fw_isys_resp_info_abi *info); +int ipu6_isys_queue_init(struct ipu6_isys_queue *aq); +#endif /* IPU6_ISYS_QUEUE_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c new file mode 100644 index 0000000..0a06de5 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-subdev.h" + +unsigned int ipu6_isys_mbus_code_to_bpp(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_META_24: + return 24; + case 
MEDIA_BUS_FMT_RGB565_1X16: + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + case MEDIA_BUS_FMT_META_16: + return 16; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_META_12: + return 12; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_META_10: + return 10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_META_8: + return 8; + default: + WARN_ON(1); + return 8; + } +} + +unsigned int ipu6_isys_mbus_code_to_mipi(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB565_1X16: + return MIPI_CSI2_DT_RGB565; + case MEDIA_BUS_FMT_RGB888_1X24: + return MIPI_CSI2_DT_RGB888; + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + return MIPI_CSI2_DT_YUV422_8B; + case MEDIA_BUS_FMT_SBGGR16_1X16: + case MEDIA_BUS_FMT_SGBRG16_1X16: + case MEDIA_BUS_FMT_SGRBG16_1X16: + case MEDIA_BUS_FMT_SRGGB16_1X16: + return MIPI_CSI2_DT_RAW16; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return MIPI_CSI2_DT_RAW12; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return MIPI_CSI2_DT_RAW10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + return MIPI_CSI2_DT_RAW8; + default: + /* return unavailable MIPI data type - 0x3f */ + WARN_ON(1); + return 0x3f; + } +} + +bool ipu6_isys_is_bayer_format(u32 code) +{ + switch (ipu6_isys_mbus_code_to_mipi(code)) { + case MIPI_CSI2_DT_RAW8: + case MIPI_CSI2_DT_RAW10: + case MIPI_CSI2_DT_RAW12: + case MIPI_CSI2_DT_RAW14: + case 
MIPI_CSI2_DT_RAW16: + case MIPI_CSI2_DT_RAW20: + case MIPI_CSI2_DT_RAW24: + case MIPI_CSI2_DT_RAW28: + return true; + default: + return false; + } +} + +u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y) +{ + static const u32 code_map[] = { + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SRGGB16_1X16, + MEDIA_BUS_FMT_SGRBG16_1X16, + MEDIA_BUS_FMT_SGBRG16_1X16, + MEDIA_BUS_FMT_SBGGR16_1X16, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(code_map); i++) + if (code_map[i] == code) + break; + + if (WARN_ON(i == ARRAY_SIZE(code_map))) + return code; + + return code_map[i ^ (((y & 1) << 1) | (x & 1))]; +} + +int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct v4l2_mbus_framefmt *fmt; + struct v4l2_rect *crop; + u32 code = asd->supported_codes[0]; + u32 other_pad, other_stream; + unsigned int i; + int ret; + + /* No transcoding, source and sink formats must match. */ + if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) && + sd->entity.num_pads > 1) + return v4l2_subdev_get_fmt(sd, state, format); + + format->format.width = clamp(format->format.width, IPU6_ISYS_MIN_WIDTH, + IPU6_ISYS_MAX_WIDTH); + format->format.height = clamp(format->format.height, + IPU6_ISYS_MIN_HEIGHT, + IPU6_ISYS_MAX_HEIGHT); + + for (i = 0; asd->supported_codes[i]; i++) { + if (asd->supported_codes[i] == format->format.code) { + code = asd->supported_codes[i]; + break; + } + } + format->format.code = code; + format->format.field = V4L2_FIELD_NONE; + + /* Store the format and propagate it to the source pad. 
*/ + fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); + if (!fmt) + return -EINVAL; + + *fmt = format->format; + + if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK)) + return 0; + + /* propagate format to following source pad */ + fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad, + format->stream); + if (!fmt) + return -EINVAL; + + *fmt = format->format; + + ret = v4l2_subdev_routing_find_opposite_end(&state->routing, + format->pad, + format->stream, + &other_pad, + &other_stream); + if (ret) + return -EINVAL; + + crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream); + /* reset crop */ + crop->left = 0; + crop->top = 0; + crop->width = fmt->width; + crop->height = fmt->height; + + return 0; +} + +int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + const u32 *supported_codes = asd->supported_codes; + u32 index; + + for (index = 0; supported_codes[index]; index++) { + if (index == code->index) { + code->code = supported_codes[index]; + return 0; + } + } + + return -EINVAL; +} + +static int subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_krouting *routing) +{ + static const struct v4l2_mbus_framefmt format = { + .width = 4096, + .height = 3072, + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .field = V4L2_FIELD_NONE, + }; + int ret; + + ret = v4l2_subdev_routing_validate(sd, routing, + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1); + if (ret) + return ret; + + return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format); +} + +int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_mbus_framefmt *format) +{ + struct v4l2_mbus_framefmt *fmt; + struct v4l2_subdev_state *state; + + if (!sd || !format) + return -EINVAL; + + state = v4l2_subdev_lock_and_get_active_state(sd); + fmt = 
v4l2_subdev_state_get_format(state, pad, stream); + if (fmt) + *format = *fmt; + v4l2_subdev_unlock_state(state); + + return fmt ? 0 : -EINVAL; +} + +int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_rect *crop) +{ + struct v4l2_subdev_state *state; + struct v4l2_rect *rect; + + if (!sd || !crop) + return -EINVAL; + + state = v4l2_subdev_lock_and_get_active_state(sd); + rect = v4l2_subdev_state_get_crop(state, pad, stream); + if (rect) + *crop = *rect; + v4l2_subdev_unlock_state(state); + + return rect ? 0 : -EINVAL; +} + +u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev_route *routes; + unsigned int i; + u32 source_stream = 0; + + state = v4l2_subdev_lock_and_get_active_state(sd); + if (!state) + return 0; + + routes = state->routing.routes; + for (i = 0; i < state->routing.num_routes; i++) { + if (routes[i].source_pad == pad) { + source_stream = routes[i].source_stream; + break; + } + } + + v4l2_subdev_unlock_state(state); + + return source_stream; +} + +static int ipu6_isys_subdev_init_state(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state) +{ + struct v4l2_subdev_route route = { + .sink_pad = 0, + .sink_stream = 0, + .source_pad = 1, + .source_stream = 0, + .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, + }; + struct v4l2_subdev_krouting routing = { + .num_routes = 1, + .routes = &route, + }; + + return subdev_set_routing(sd, state, &routing); +} + +int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *routing) +{ + return subdev_set_routing(sd, state, routing); +} + +static const struct v4l2_subdev_internal_ops ipu6_isys_subdev_internal_ops = { + .init_state = ipu6_isys_subdev_init_state, +}; + +int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, + const struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int 
num_sink_pads, + unsigned int num_source_pads) +{ + unsigned int num_pads = num_sink_pads + num_source_pads; + unsigned int i; + int ret; + + v4l2_subdev_init(&asd->sd, ops); + + asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS | + V4L2_SUBDEV_FL_STREAMS; + asd->sd.owner = THIS_MODULE; + asd->sd.dev = &asd->isys->adev->auxdev.dev; + asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + asd->sd.internal_ops = &ipu6_isys_subdev_internal_ops; + + asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, + sizeof(*asd->pad), GFP_KERNEL); + if (!asd->pad) + return -ENOMEM; + + for (i = 0; i < num_sink_pads; i++) + asd->pad[i].flags = MEDIA_PAD_FL_SINK | + MEDIA_PAD_FL_MUST_CONNECT; + + for (i = num_sink_pads; i < num_pads; i++) + asd->pad[i].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); + if (ret) + return ret; + + if (asd->ctrl_init) { + ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); + if (ret) + goto out_media_entity_cleanup; + + asd->ctrl_init(&asd->sd); + if (asd->ctrl_handler.error) { + ret = asd->ctrl_handler.error; + goto out_v4l2_ctrl_handler_free; + } + + asd->sd.ctrl_handler = &asd->ctrl_handler; + } + + asd->source = -1; + + return 0; + +out_v4l2_ctrl_handler_free: + v4l2_ctrl_handler_free(&asd->ctrl_handler); + +out_media_entity_cleanup: + media_entity_cleanup(&asd->sd.entity); + + return ret; +} + +void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd) +{ + media_entity_cleanup(&asd->sd.entity); + v4l2_ctrl_handler_free(&asd->ctrl_handler); +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h new file mode 100644 index 0000000..268dfa0 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_SUBDEV_H +#define IPU6_ISYS_SUBDEV_H + 
+#include + +#include +#include +#include + +struct ipu6_isys; + +struct ipu6_isys_subdev { + struct v4l2_subdev sd; + struct ipu6_isys *isys; + u32 const *supported_codes; + struct media_pad *pad; + struct v4l2_ctrl_handler ctrl_handler; + void (*ctrl_init)(struct v4l2_subdev *sd); + int source; /* SSI stream source; -1 if unset */ +}; + +#define to_ipu6_isys_subdev(__sd) \ + container_of(__sd, struct ipu6_isys_subdev, sd) + +unsigned int ipu6_isys_mbus_code_to_bpp(u32 code); +unsigned int ipu6_isys_mbus_code_to_mipi(u32 code); +bool ipu6_isys_is_bayer_format(u32 code); +u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y); + +int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *fmt); +int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum + *code); +u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad); +int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_mbus_framefmt *format); +int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_rect *crop); +int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *routing); +int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, + const struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_sink_pads, + unsigned int num_source_pads); +void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd); +#endif /* IPU6_ISYS_SUBDEV_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c new file mode 100644 index 0000000..24a2ef9 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c @@ -0,0 +1,1391 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel 
Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-cpd.h" +#include "ipu6-fw-isys.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-isys-queue.h" +#include "ipu6-isys-video.h" +#include "ipu6-platform-regs.h" + +const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = { + { V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { 
V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, + IPU6_FW_ISYS_FRAME_FORMAT_UYVY}, + { V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, + IPU6_FW_ISYS_FRAME_FORMAT_YUYV}, + { V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, + IPU6_FW_ISYS_FRAME_FORMAT_RGB565 }, + { V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, + IPU6_FW_ISYS_FRAME_FORMAT_RGBA888 }, + { V4L2_META_FMT_GENERIC_8, 8, 8, MEDIA_BUS_FMT_META_8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8, true }, + { V4L2_META_FMT_GENERIC_CSI2_10, 10, 10, MEDIA_BUS_FMT_META_10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10, true }, + { V4L2_META_FMT_GENERIC_CSI2_12, 12, 12, MEDIA_BUS_FMT_META_12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12, true }, + { V4L2_META_FMT_GENERIC_CSI2_16, 16, 16, MEDIA_BUS_FMT_META_16, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16, true }, +}; + +static int video_open(struct file *file) +{ + struct ipu6_isys_video *av = video_drvdata(file); + struct ipu6_isys *isys = av->isys; + struct ipu6_bus_device *adev = isys->adev; + + mutex_lock(&isys->mutex); + if (isys->need_reset) { + mutex_unlock(&isys->mutex); + dev_warn(&adev->auxdev.dev, "isys power cycle required\n"); + return -EIO; + } + mutex_unlock(&isys->mutex); + + return v4l2_fh_open(file); +} + +const struct ipu6_isys_pixelformat * +ipu6_isys_get_isys_format(u32 pixelformat, u32 type) +{ + const struct ipu6_isys_pixelformat *default_pfmt = NULL; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { + const struct ipu6_isys_pixelformat *pfmt = 
&ipu6_isys_pfmts[i]; + + if (type && ((!pfmt->is_meta && + type != V4L2_BUF_TYPE_VIDEO_CAPTURE) || + (pfmt->is_meta && + type != V4L2_BUF_TYPE_META_CAPTURE))) + continue; + + if (!default_pfmt) + default_pfmt = pfmt; + + if (pfmt->pixelformat != pixelformat) + continue; + + return pfmt; + } + + return default_pfmt; +} + +static int ipu6_isys_vidioc_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + strscpy(cap->driver, IPU6_ISYS_NAME, sizeof(cap->driver)); + strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); + + return 0; +} + +static int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f) +{ + unsigned int i, num_found; + + for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { + if ((ipu6_isys_pfmts[i].is_meta && + f->type != V4L2_BUF_TYPE_META_CAPTURE) || + (!ipu6_isys_pfmts[i].is_meta && + f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) + continue; + + if (f->mbus_code && f->mbus_code != ipu6_isys_pfmts[i].code) + continue; + + if (num_found < f->index) { + num_found++; + continue; + } + + f->flags = 0; + f->pixelformat = ipu6_isys_pfmts[i].pixelformat; + + return 0; + } + + return -EINVAL; +} + +static int ipu6_isys_vidioc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + unsigned int i; + + if (fsize->index > 0) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { + if (fsize->pixel_format != ipu6_isys_pfmts[i].pixelformat) + continue; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = IPU6_ISYS_MIN_WIDTH; + fsize->stepwise.max_width = IPU6_ISYS_MAX_WIDTH; + fsize->stepwise.min_height = IPU6_ISYS_MIN_HEIGHT; + fsize->stepwise.max_height = IPU6_ISYS_MAX_HEIGHT; + fsize->stepwise.step_width = 2; + fsize->stepwise.step_height = 2; + + return 0; + } + + return -EINVAL; +} + +static int ipu6_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, + struct 
v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + f->fmt.pix = av->pix_fmt; + + return 0; +} + +static int ipu6_isys_vidioc_g_fmt_meta_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + f->fmt.meta = av->meta_fmt; + + return 0; +} + +static void ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type, + u32 *format, u32 *width, u32 *height, + u32 *bytesperline, u32 *sizeimage) +{ + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(*format, type); + + *format = pfmt->pixelformat; + *width = clamp(*width, IPU6_ISYS_MIN_WIDTH, IPU6_ISYS_MAX_WIDTH); + *height = clamp(*height, IPU6_ISYS_MIN_HEIGHT, IPU6_ISYS_MAX_HEIGHT); + + if (pfmt->bpp != pfmt->bpp_packed) + *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE); + else + *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE); + + *bytesperline = ALIGN(*bytesperline, 64); + + /* + * (height + 1) * bytesperline due to a hardware issue: the DMA unit + * is a power of two, and a line should be transferred as few units + * as possible. The result is that up to line length more data than + * the image size may be transferred to memory after the image. + * Another limitation is the GDA allocation unit size. For low + * resolution it gives a bigger number. Use larger one to avoid + * memory corruption. 
+ */ + *sizeimage = *bytesperline * *height + + max(*bytesperline, + av->isys->pdata->ipdata->isys_dma_overshoot); +} + +static void __ipu6_isys_vidioc_try_fmt_vid_cap(struct ipu6_isys_video *av, + struct v4l2_format *f) +{ + ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat, + &f->fmt.pix.width, &f->fmt.pix.height, + &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage); + + f->fmt.pix.field = V4L2_FIELD_NONE; + f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; + f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT; + f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT; +} + +static int ipu6_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + if (vb2_is_busy(&av->aq.vbq)) + return -EBUSY; + + __ipu6_isys_vidioc_try_fmt_vid_cap(av, f); + + return 0; +} + +static int __ipu6_isys_vidioc_try_fmt_meta_cap(struct ipu6_isys_video *av, + struct v4l2_format *f) +{ + ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.meta.dataformat, + &f->fmt.meta.width, &f->fmt.meta.height, + &f->fmt.meta.bytesperline, + &f->fmt.meta.buffersize); + + return 0; +} + +static int ipu6_isys_vidioc_try_fmt_meta_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + __ipu6_isys_vidioc_try_fmt_meta_cap(av, f); + + return 0; +} + +static int ipu6_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + ipu6_isys_vidioc_try_fmt_vid_cap(file, fh, f); + av->pix_fmt = f->fmt.pix; + + return 0; +} + +static int ipu6_isys_vidioc_s_fmt_meta_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + if (vb2_is_busy(&av->aq.vbq)) + return -EBUSY; + + ipu6_isys_vidioc_try_fmt_meta_cap(file, fh, f); + av->meta_fmt = f->fmt.meta; + + return 0; +} + +static int 
ipu6_isys_vidioc_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p) +{ + struct ipu6_isys_video *av = video_drvdata(file); + int ret; + + av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type); + av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type); + + ret = vb2_queue_change_type(&av->aq.vbq, p->type); + if (ret) + return ret; + + return vb2_ioctl_reqbufs(file, priv, p); +} + +static int ipu6_isys_vidioc_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p) +{ + struct ipu6_isys_video *av = video_drvdata(file); + int ret; + + av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type); + av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type); + + ret = vb2_queue_change_type(&av->aq.vbq, p->format.type); + if (ret) + return ret; + + return vb2_ioctl_create_bufs(file, priv, p); +} + +static int link_validate(struct media_link *link) +{ + struct ipu6_isys_video *av = + container_of(link->sink, struct ipu6_isys_video, pad); + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_subdev_state *s_state; + struct v4l2_subdev *s_sd; + struct v4l2_mbus_framefmt *s_fmt; + struct media_pad *s_pad; + u32 s_stream, code; + int ret = -EPIPE; + + if (!link->source->entity) + return ret; + + s_sd = media_entity_to_v4l2_subdev(link->source->entity); + s_state = v4l2_subdev_get_unlocked_active_state(s_sd); + if (!s_state) + return ret; + + dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n", + link->source->entity->name, link->source->index, + link->sink->entity->name); + + s_pad = media_pad_remote_pad_first(&av->pad); + s_stream = ipu6_isys_get_src_stream_by_src_pad(s_sd, s_pad->index); + + v4l2_subdev_lock_state(s_state); + + s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream); + if (!s_fmt) { + dev_err(dev, "failed to get source pad format\n"); + goto unlock; + } + + code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; + + if (s_fmt->width != ipu6_isys_get_frame_width(av) 
|| + s_fmt->height != ipu6_isys_get_frame_height(av) || + s_fmt->code != code) { + dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n", + s_fmt->width, s_fmt->height, s_fmt->code, + ipu6_isys_get_frame_width(av), + ipu6_isys_get_frame_height(av), code); + goto unlock; + } + + v4l2_subdev_unlock_state(s_state); + + return 0; +unlock: + v4l2_subdev_unlock_state(s_state); + + return ret; +} + +static void get_stream_opened(struct ipu6_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->streams_lock, flags); + av->isys->stream_opened++; + spin_unlock_irqrestore(&av->isys->streams_lock, flags); +} + +static void put_stream_opened(struct ipu6_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->streams_lock, flags); + av->isys->stream_opened--; + spin_unlock_irqrestore(&av->isys->streams_lock, flags); +} + +static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, + struct ipu6_fw_isys_stream_cfg_data_abi *cfg) +{ + struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad); + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity); + struct ipu6_fw_isys_input_pin_info_abi *input_pin; + struct ipu6_fw_isys_output_pin_info_abi *output_pin; + struct ipu6_isys_stream *stream = av->stream; + struct ipu6_isys_queue *aq = &av->aq; + struct v4l2_mbus_framefmt fmt; + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + struct v4l2_rect v4l2_crop; + struct ipu6_isys *isys = av->isys; + struct device *dev = &isys->adev->auxdev.dev; + int input_pins = cfg->nof_input_pins++; + int output_pins; + u32 src_stream; + int ret; + + src_stream = ipu6_isys_get_src_stream_by_src_pad(sd, src_pad->index); + ret = ipu6_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream, + &fmt); + if (ret < 0) { + dev_err(dev, "can't get stream format (%d)\n", ret); + return ret; + } + + ret = ipu6_isys_get_stream_pad_crop(sd, src_pad->index, src_stream, + &v4l2_crop); + if (ret < 0) { + 
dev_err(dev, "can't get stream crop (%d)\n", ret); + return ret; + } + + input_pin = &cfg->input_pins[input_pins]; + input_pin->input_res.width = fmt.width; + input_pin->input_res.height = fmt.height; + input_pin->dt = av->dt; + input_pin->bits_per_pix = pfmt->bpp_packed; + input_pin->mapped_dt = 0x40; /* invalid mipi data type */ + input_pin->mipi_decompression = 0; + input_pin->capture_mode = IPU6_FW_ISYS_CAPTURE_MODE_REGULAR; + input_pin->mipi_store_mode = pfmt->bpp == pfmt->bpp_packed ? + IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER : + IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL; + input_pin->crop_first_and_last_lines = v4l2_crop.top & 1; + + output_pins = cfg->nof_output_pins++; + aq->fw_output = output_pins; + stream->output_pins_queue[output_pins] = aq; + + output_pin = &cfg->output_pins[output_pins]; + output_pin->input_pin_id = input_pins; + output_pin->output_res.width = ipu6_isys_get_frame_width(av); + output_pin->output_res.height = ipu6_isys_get_frame_height(av); + + output_pin->stride = ipu6_isys_get_bytes_per_line(av); + if (pfmt->bpp != pfmt->bpp_packed) + output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_RAW_SOC; + else + output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_MIPI; + output_pin->ft = pfmt->css_pixelformat; + output_pin->send_irq = 1; + memset(output_pin->ts_offsets, 0, sizeof(output_pin->ts_offsets)); + output_pin->s2m_pixel_soc_pixel_remapping = + S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; + output_pin->csi_be_soc_pixel_remapping = + CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; + + output_pin->snoopable = true; + output_pin->error_handling_enable = false; + output_pin->sensor_type = isys->sensor_type++; + if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) + isys->sensor_type = isys->pdata->ipdata->sensor_type_start; + + return 0; +} + +static int start_stream_firmware(struct ipu6_isys_video *av, + struct ipu6_isys_buffer_list *bl) +{ + struct ipu6_fw_isys_stream_cfg_data_abi *stream_cfg; + struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; + 
struct ipu6_isys_stream *stream = av->stream; + struct device *dev = &av->isys->adev->auxdev.dev; + struct isys_fw_msgs *msg = NULL; + struct ipu6_isys_queue *aq; + int ret, retout, tout; + u16 send_type; + + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) + return -ENOMEM; + + stream_cfg = &msg->fw_msg.stream; + stream_cfg->src = stream->stream_source; + stream_cfg->vc = stream->vc; + stream_cfg->isl_use = 0; + stream_cfg->sensor_type = IPU6_FW_ISYS_SENSOR_MODE_NORMAL; + + list_for_each_entry(aq, &stream->queues, node) { + struct ipu6_isys_video *__av = ipu6_isys_queue_to_video(aq); + + ret = ipu6_isys_fw_pin_cfg(__av, stream_cfg); + if (ret < 0) { + ipu6_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); + return ret; + } + } + + ipu6_fw_isys_dump_stream_cfg(dev, stream_cfg); + + stream->nr_output_pins = stream_cfg->nof_output_pins; + + reinit_completion(&stream->stream_open_completion); + + ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, + stream_cfg, msg->dma_addr, + sizeof(*stream_cfg), + IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN); + if (ret < 0) { + dev_err(dev, "can't open stream (%d)\n", ret); + ipu6_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); + return ret; + } + + get_stream_opened(av); + + tout = wait_for_completion_timeout(&stream->stream_open_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + + ipu6_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); + + if (!tout) { + dev_err(dev, "stream open time out\n"); + ret = -ETIMEDOUT; + goto out_put_stream_opened; + } + if (stream->error) { + dev_err(dev, "stream open error: %d\n", stream->error); + ret = -EIO; + goto out_put_stream_opened; + } + dev_dbg(dev, "start stream: open complete\n"); + + if (bl) { + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) { + ret = -ENOMEM; + goto out_put_stream_opened; + } + buf = &msg->fw_msg.frame; + ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); + ipu6_isys_buffer_list_queue(bl, + IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + } + + 
reinit_completion(&stream->stream_start_completion); + + if (bl) { + send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + ipu6_fw_isys_dump_frame_buff_set(dev, buf, + stream_cfg->nof_output_pins); + ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, + buf, msg->dma_addr, + sizeof(*buf), send_type); + } else { + send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + send_type); + } + + if (ret < 0) { + dev_err(dev, "can't start streaming (%d)\n", ret); + goto out_stream_close; + } + + tout = wait_for_completion_timeout(&stream->stream_start_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream start time out\n"); + ret = -ETIMEDOUT; + goto out_stream_close; + } + if (stream->error) { + dev_err(dev, "stream start error: %d\n", stream->error); + ret = -EIO; + goto out_stream_close; + } + dev_dbg(dev, "start stream: complete\n"); + + return 0; + +out_stream_close: + reinit_completion(&stream->stream_close_completion); + + retout = ipu6_fw_isys_simple_cmd(av->isys, + stream->stream_handle, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (retout < 0) { + dev_dbg(dev, "can't close stream (%d)\n", retout); + goto out_put_stream_opened; + } + + tout = wait_for_completion_timeout(&stream->stream_close_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (stream->error) + dev_err(dev, "stream close error: %d\n", stream->error); + else + dev_dbg(dev, "stream close complete\n"); + +out_put_stream_opened: + put_stream_opened(av); + + return ret; +} + +static void stop_streaming_firmware(struct ipu6_isys_video *av) +{ + struct device *dev = &av->isys->adev->auxdev.dev; + struct ipu6_isys_stream *stream = av->stream; + int ret, tout; + + reinit_completion(&stream->stream_stop_completion); + + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH); + + if (ret < 0) { + 
dev_err(dev, "can't stop stream (%d)\n", ret); + return; + } + + tout = wait_for_completion_timeout(&stream->stream_stop_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_warn(dev, "stream stop time out\n"); + else if (stream->error) + dev_warn(dev, "stream stop error: %d\n", stream->error); + else + dev_dbg(dev, "stop stream: complete\n"); +} + +static void close_streaming_firmware(struct ipu6_isys_video *av) +{ + struct ipu6_isys_stream *stream = av->stream; + struct device *dev = &av->isys->adev->auxdev.dev; + int ret, tout; + + reinit_completion(&stream->stream_close_completion); + + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (ret < 0) { + dev_err(dev, "can't close stream (%d)\n", ret); + return; + } + + tout = wait_for_completion_timeout(&stream->stream_close_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_warn(dev, "stream close time out\n"); + else if (stream->error) + dev_warn(dev, "stream close error: %d\n", stream->error); + else + dev_dbg(dev, "close stream: complete\n"); + + put_stream_opened(av); +} + +int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, + struct media_entity *source_entity, + int nr_queues) +{ + struct ipu6_isys_stream *stream = av->stream; + struct ipu6_isys_csi2 *csi2; + + if (WARN_ON(stream->nr_streaming)) + return -EINVAL; + + stream->nr_queues = nr_queues; + atomic_set(&stream->sequence, 0); + + stream->seq_index = 0; + memset(stream->seq, 0, sizeof(stream->seq)); + + if (WARN_ON(!list_empty(&stream->queues))) + return -EINVAL; + + stream->stream_source = stream->asd->source; + csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + csi2->receiver_errors = 0; + stream->source_entity = source_entity; + + dev_dbg(&av->isys->adev->auxdev.dev, + "prepare stream: external entity %s\n", + stream->source_entity->name); + + return 0; +} + +void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, + bool state) +{ + struct 
ipu6_isys *isys = av->isys; + struct ipu6_isys_csi2 *csi2 = NULL; + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + struct device *dev = &isys->adev->auxdev.dev; + struct v4l2_mbus_framefmt format; + struct v4l2_subdev *esd; + struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 }; + unsigned int bpp, lanes; + s64 link_freq = 0; + u64 pixel_rate = 0; + int ret; + + if (!state) + return; + + esd = media_entity_to_v4l2_subdev(av->stream->source_entity); + + av->watermark.width = ipu6_isys_get_frame_width(av); + av->watermark.height = ipu6_isys_get_frame_height(av); + av->watermark.sram_gran_shift = isys->pdata->ipdata->sram_gran_shift; + av->watermark.sram_gran_size = isys->pdata->ipdata->sram_gran_size; + + ret = v4l2_g_ctrl(esd->ctrl_handler, &hb); + if (!ret && hb.value >= 0) + av->watermark.hblank = hb.value; + else + av->watermark.hblank = 0; + + csi2 = ipu6_isys_subdev_to_csi2(av->stream->asd); + link_freq = ipu6_isys_csi2_get_link_freq(csi2); + if (link_freq > 0) { + lanes = csi2->nlanes; + ret = ipu6_isys_get_stream_pad_fmt(&csi2->asd.sd, 0, + av->source_stream, &format); + if (!ret) { + bpp = ipu6_isys_mbus_code_to_bpp(format.code); + pixel_rate = mul_u64_u32_div(link_freq, lanes * 2, bpp); + } + } + + av->watermark.pixel_rate = pixel_rate; + + if (!pixel_rate) { + mutex_lock(&iwake_watermark->mutex); + iwake_watermark->force_iwake_disable = true; + mutex_unlock(&iwake_watermark->mutex); + dev_warn(dev, "unexpected pixel_rate from %s, disable iwake.\n", + av->stream->source_entity->name); + } +} + +static void calculate_stream_datarate(struct ipu6_isys_video *av) +{ + struct video_stream_watermark *watermark = &av->watermark; + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + u32 pages_per_line, pb_bytes_per_line, pixels_per_line, bytes_per_line; + u64 line_time_ns, stream_data_rate; + u16 shift, size; + + shift = watermark->sram_gran_shift; + size = 
watermark->sram_gran_size; + + pixels_per_line = watermark->width + watermark->hblank; + line_time_ns = div_u64(pixels_per_line * NSEC_PER_SEC, + watermark->pixel_rate); + bytes_per_line = watermark->width * pfmt->bpp / 8; + pages_per_line = DIV_ROUND_UP(bytes_per_line, size); + pb_bytes_per_line = pages_per_line << shift; + stream_data_rate = div64_u64(pb_bytes_per_line * 1000, line_time_ns); + + watermark->stream_data_rate = stream_data_rate; +} + +void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state) +{ + struct isys_iwake_watermark *iwake_watermark = + &av->isys->iwake_watermark; + + if (!av->watermark.pixel_rate) + return; + + if (state) { + calculate_stream_datarate(av); + mutex_lock(&iwake_watermark->mutex); + list_add(&av->watermark.stream_node, + &iwake_watermark->video_list); + mutex_unlock(&iwake_watermark->mutex); + } else { + av->watermark.stream_data_rate = 0; + mutex_lock(&iwake_watermark->mutex); + list_del(&av->watermark.stream_node); + mutex_unlock(&iwake_watermark->mutex); + } + + update_watermark_setting(av->isys); +} + +void ipu6_isys_put_stream(struct ipu6_isys_stream *stream) +{ + struct device *dev; + unsigned int i; + unsigned long flags; + + if (!stream) { + pr_err("ipu6-isys: no available stream\n"); + return; + } + + dev = &stream->isys->adev->auxdev.dev; + + spin_lock_irqsave(&stream->isys->streams_lock, flags); + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (&stream->isys->streams[i] == stream) { + if (stream->isys->streams_ref_count[i] > 0) + stream->isys->streams_ref_count[i]--; + else + dev_warn(dev, "invalid stream %d\n", i); + + break; + } + } + spin_unlock_irqrestore(&stream->isys->streams_lock, flags); +} + +static struct ipu6_isys_stream * +ipu6_isys_get_stream(struct ipu6_isys_video *av, struct ipu6_isys_subdev *asd) +{ + struct ipu6_isys_stream *stream = NULL; + struct ipu6_isys *isys = av->isys; + unsigned long flags; + unsigned int i; + u8 vc = av->vc; + + if (!isys) + return NULL; + + 
spin_lock_irqsave(&isys->streams_lock, flags); + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (isys->streams_ref_count[i] && isys->streams[i].vc == vc && + isys->streams[i].asd == asd) { + isys->streams_ref_count[i]++; + stream = &isys->streams[i]; + break; + } + } + + if (!stream) { + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (!isys->streams_ref_count[i]) { + isys->streams_ref_count[i]++; + stream = &isys->streams[i]; + stream->vc = vc; + stream->asd = asd; + break; + } + } + } + spin_unlock_irqrestore(&isys->streams_lock, flags); + + return stream; +} + +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle) +{ + unsigned long flags; + struct ipu6_isys_stream *stream = NULL; + + if (!isys) + return NULL; + + if (stream_handle >= IPU6_ISYS_MAX_STREAMS) { + dev_err(&isys->adev->auxdev.dev, + "stream_handle %d is invalid\n", stream_handle); + return NULL; + } + + spin_lock_irqsave(&isys->streams_lock, flags); + if (isys->streams_ref_count[stream_handle] > 0) { + isys->streams_ref_count[stream_handle]++; + stream = &isys->streams[stream_handle]; + } + spin_unlock_irqrestore(&isys->streams_lock, flags); + + return stream; +} + +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) +{ + struct ipu6_isys_stream *stream = NULL; + unsigned long flags; + unsigned int i; + + if (!isys) + return NULL; + + if (source < 0) { + dev_err(&isys->adev->auxdev.dev, + "query stream with invalid port number\n"); + return NULL; + } + + spin_lock_irqsave(&isys->streams_lock, flags); + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (!isys->streams_ref_count[i]) + continue; + + if (isys->streams[i].stream_source == source && + isys->streams[i].vc == vc) { + stream = &isys->streams[i]; + isys->streams_ref_count[i]++; + break; + } + } + spin_unlock_irqrestore(&isys->streams_lock, flags); + + return stream; +} + +static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video 
*__av) +{ + struct media_pipeline *pipeline = + media_entity_pipeline(&__av->vdev.entity); + unsigned int i; + u64 stream_mask = 0; + + for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { + struct ipu6_isys_video *av = &__av->csi2->av[i]; + + if (pipeline == media_entity_pipeline(&av->vdev.entity)) + stream_mask |= BIT_ULL(av->source_stream); + } + + return stream_mask; +} + +int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + struct ipu6_isys_buffer_list *bl) +{ + struct v4l2_subdev_krouting *routing; + struct ipu6_isys_stream *stream = av->stream; + struct v4l2_subdev_state *subdev_state; + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_subdev *sd; + struct media_pad *r_pad; + u32 sink_pad, sink_stream; + u64 r_stream; + u64 stream_mask = 0; + int ret = 0; + + dev_dbg(dev, "set stream: %d\n", state); + + if (WARN(!stream->source_entity, "No source entity for stream\n")) + return -ENODEV; + + sd = &stream->asd->sd; + r_pad = media_pad_remote_pad_first(&av->pad); + r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); + + subdev_state = v4l2_subdev_lock_and_get_active_state(sd); + routing = &subdev_state->routing; + ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, + r_stream, &sink_pad, + &sink_stream); + v4l2_subdev_unlock_state(subdev_state); + if (ret) + return ret; + + stream_mask = get_stream_mask_by_pipeline(av); + if (!state) { + stop_streaming_firmware(av); + + /* stop sub-device which connects with video */ + dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", + sd->name, r_pad->index, stream_mask); + ret = v4l2_subdev_disable_streams(sd, r_pad->index, + stream_mask); + if (ret) { + dev_err(dev, "stream off %s failed with %d\n", sd->name, + ret); + return ret; + } + close_streaming_firmware(av); + } else { + ret = start_stream_firmware(av, bl); + if (ret) { + dev_err(dev, "start stream of firmware failed\n"); + return ret; + } + + /* start sub-device which connects with video */ + 
dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, + r_pad->index, stream_mask); + ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); + if (ret) { + dev_err(dev, "stream on %s failed with %d\n", sd->name, + ret); + goto out_media_entity_stop_streaming_firmware; + } + } + + av->streaming = state; + + return 0; + +out_media_entity_stop_streaming_firmware: + stop_streaming_firmware(av); + + return ret; +} + +static const struct v4l2_ioctl_ops ipu6_v4l2_ioctl_ops = { + .vidioc_querycap = ipu6_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = ipu6_isys_vidioc_enum_fmt, + .vidioc_enum_fmt_meta_cap = ipu6_isys_vidioc_enum_fmt, + .vidioc_enum_framesizes = ipu6_isys_vidioc_enum_framesizes, + .vidioc_g_fmt_vid_cap = ipu6_isys_vidioc_g_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = ipu6_isys_vidioc_s_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = ipu6_isys_vidioc_try_fmt_vid_cap, + .vidioc_g_fmt_meta_cap = ipu6_isys_vidioc_g_fmt_meta_cap, + .vidioc_s_fmt_meta_cap = ipu6_isys_vidioc_s_fmt_meta_cap, + .vidioc_try_fmt_meta_cap = ipu6_isys_vidioc_try_fmt_meta_cap, + .vidioc_reqbufs = ipu6_isys_vidioc_reqbufs, + .vidioc_create_bufs = ipu6_isys_vidioc_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, +}; + +static const struct media_entity_operations entity_ops = { + .link_validate = link_validate, +}; + +static const struct v4l2_file_operations isys_fops = { + .owner = THIS_MODULE, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .open = video_open, + .release = vb2_fop_release, +}; + +int ipu6_isys_fw_open(struct ipu6_isys *isys) +{ + struct ipu6_bus_device *adev = isys->adev; + const struct ipu6_isys_internal_pdata *ipdata = isys->pdata->ipdata; + int ret; + + ret = 
pm_runtime_resume_and_get(&adev->auxdev.dev); + if (ret < 0) + return ret; + + mutex_lock(&isys->mutex); + + if (isys->ref_count++) + goto unlock; + + ipu6_configure_spc(adev->isp, &ipdata->hw_variant, + IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base, + adev->pkg_dir, adev->pkg_dir_dma_addr); + + /* + * Buffers could have been left to wrong queue at last closure. + * Move them now back to empty buffer queue. + */ + ipu6_cleanup_fw_msg_bufs(isys); + + if (isys->fwcom) { + /* + * Something went wrong in previous shutdown. As we are now + * restarting isys we can safely delete old context. + */ + dev_warn(&adev->auxdev.dev, "clearing old context\n"); + ipu6_fw_isys_cleanup(isys); + } + + ret = ipu6_fw_isys_init(isys, ipdata->num_parallel_streams); + if (ret < 0) + goto out; + +unlock: + mutex_unlock(&isys->mutex); + + return 0; + +out: + isys->ref_count--; + mutex_unlock(&isys->mutex); + pm_runtime_put(&adev->auxdev.dev); + + return ret; +} + +void ipu6_isys_fw_close(struct ipu6_isys *isys) +{ + mutex_lock(&isys->mutex); + + isys->ref_count--; + if (!isys->ref_count) { + ipu6_fw_isys_close(isys); + if (isys->fwcom) { + isys->need_reset = true; + dev_warn(&isys->adev->auxdev.dev, + "failed to close fw isys\n"); + } + } + + mutex_unlock(&isys->mutex); + + if (isys->need_reset) + pm_runtime_put_sync(&isys->adev->auxdev.dev); + else + pm_runtime_put(&isys->adev->auxdev.dev); +} + +int ipu6_isys_setup_video(struct ipu6_isys_video *av, + struct media_entity **source_entity, int *nr_queues) +{ + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_mbus_frame_desc_entry entry; + struct v4l2_subdev_route *route = NULL; + struct v4l2_subdev_route *r; + struct v4l2_subdev_state *state; + struct ipu6_isys_subdev *asd; + struct v4l2_subdev *remote_sd; + struct media_pipeline *pipeline; + struct media_pad *source_pad, *remote_pad; + int ret = -EINVAL; + + 
*nr_queues = 0; + + remote_pad = media_pad_remote_pad_unique(&av->pad); + if (IS_ERR(remote_pad)) { + dev_dbg(dev, "failed to get remote pad\n"); + return PTR_ERR(remote_pad); + } + + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + asd = to_ipu6_isys_subdev(remote_sd); + source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]); + if (!source_pad) { + dev_dbg(dev, "No external source entity\n"); + return -ENODEV; + } + + *source_entity = source_pad->entity; + + /* Find the root */ + state = v4l2_subdev_lock_and_get_active_state(remote_sd); + for_each_active_route(&state->routing, r) { + (*nr_queues)++; + + if (r->source_pad == remote_pad->index) + route = r; + } + + if (!route) { + v4l2_subdev_unlock_state(state); + dev_dbg(dev, "Failed to find route\n"); + return -ENODEV; + } + av->source_stream = route->sink_stream; + v4l2_subdev_unlock_state(state); + + ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, + to_ipu6_isys_csi2(asd), + *source_entity, &entry); + if (ret == -ENOIOCTLCMD) { + av->vc = 0; + av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); + } else if (!ret) { + dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", + entry.stream, entry.length, entry.bus.csi2.vc, + entry.bus.csi2.dt); + + av->vc = entry.bus.csi2.vc; + av->dt = entry.bus.csi2.dt; + } else { + dev_err(dev, "failed to get remote frame desc\n"); + return ret; + } + + pipeline = media_entity_pipeline(&av->vdev.entity); + if (!pipeline) + ret = video_device_pipeline_alloc_start(&av->vdev); + else + ret = video_device_pipeline_start(&av->vdev, pipeline); + if (ret < 0) { + dev_dbg(dev, "media pipeline start failed\n"); + return ret; + } + + av->stream = ipu6_isys_get_stream(av, asd); + if (!av->stream) { + video_device_pipeline_stop(&av->vdev); + dev_err(dev, "no available stream for firmware\n"); + return -EINVAL; + } + + return 0; +} + +/* + * Do everything that's needed to initialise things related to video + * buffer queue, video node, and the 
related media entity. The caller + * is expected to assign isys field and set the name of the video + * device. + */ +int ipu6_isys_video_init(struct ipu6_isys_video *av) +{ + struct v4l2_format format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .fmt.pix = { + .width = 1920, + .height = 1080, + }, + }; + struct v4l2_format format_meta = { + .type = V4L2_BUF_TYPE_META_CAPTURE, + .fmt.meta = { + .width = 1920, + .height = 4, + }, + }; + int ret; + + mutex_init(&av->mutex); + av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | + V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE; + av->vdev.vfl_dir = VFL_DIR_RX; + + ret = ipu6_isys_queue_init(&av->aq); + if (ret) + goto out_free_watermark; + + av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; + ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); + if (ret) + goto out_vb2_queue_release; + + av->vdev.entity.ops = &entity_ops; + av->vdev.release = video_device_release_empty; + av->vdev.fops = &isys_fops; + av->vdev.v4l2_dev = &av->isys->v4l2_dev; + av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev; + if (!av->vdev.ioctl_ops) + av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops; + av->vdev.queue = &av->aq.vbq; + av->vdev.lock = &av->mutex; + + __ipu6_isys_vidioc_try_fmt_vid_cap(av, &format); + av->pix_fmt = format.fmt.pix; + __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); + av->meta_fmt = format_meta.fmt.meta; + + set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); + video_set_drvdata(&av->vdev, av); + + ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); + if (ret) + goto out_media_entity_cleanup; + + return ret; + +out_media_entity_cleanup: + vb2_video_unregister_device(&av->vdev); + media_entity_cleanup(&av->vdev.entity); + +out_vb2_queue_release: + vb2_queue_release(&av->aq.vbq); + +out_free_watermark: + mutex_destroy(&av->mutex); + + return ret; +} + +void ipu6_isys_video_cleanup(struct ipu6_isys_video *av) +{ + vb2_video_unregister_device(&av->vdev); + 
media_entity_cleanup(&av->vdev.entity); + mutex_destroy(&av->mutex); +} + +u32 ipu6_isys_get_format(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.pixelformat; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.dataformat; + + return 0; +} + +u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.sizeimage; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.buffersize; + + return 0; +} + +u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.bytesperline; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.bytesperline; + + return 0; +} + +u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.width; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.width; + + return 0; +} + +u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.height; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.height; + + return 0; +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h new file mode 100644 index 0000000..1dd36f2 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_VIDEO_H +#define IPU6_ISYS_VIDEO_H + +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu6-isys-queue.h" + +#define IPU6_ISYS_OUTPUT_PINS 11 +#define IPU6_ISYS_MAX_PARALLEL_SOF 2 + +struct file; +struct ipu6_isys; +struct ipu6_isys_csi2; +struct 
ipu6_isys_subdev; + +struct ipu6_isys_pixelformat { + u32 pixelformat; + u32 bpp; + u32 bpp_packed; + u32 code; + u32 css_pixelformat; + bool is_meta; +}; + +struct sequence_info { + unsigned int sequence; + u64 timestamp; +}; + +/* + * Align with firmware stream. Each stream represents a CSI virtual channel. + * May map to multiple video devices + */ +struct ipu6_isys_stream { + struct mutex mutex; + struct media_entity *source_entity; + atomic_t sequence; + unsigned int seq_index; + struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; + int stream_source; + int stream_handle; + unsigned int nr_output_pins; + struct ipu6_isys_subdev *asd; + + int nr_queues; /* Number of capture queues */ + int nr_streaming; + int streaming; /* Has streaming been really started? */ + struct list_head queues; + struct completion stream_open_completion; + struct completion stream_close_completion; + struct completion stream_start_completion; + struct completion stream_stop_completion; + struct ipu6_isys *isys; + + struct ipu6_isys_queue *output_pins_queue[IPU6_ISYS_OUTPUT_PINS]; + int error; + u8 vc; +}; + +struct video_stream_watermark { + u32 width; + u32 height; + u32 hblank; + u32 frame_rate; + u64 pixel_rate; + u64 stream_data_rate; + u16 sram_gran_shift; + u16 sram_gran_size; + struct list_head stream_node; +}; + +struct ipu6_isys_video { + struct ipu6_isys_queue aq; + /* Serialise access to other fields in the struct. 
*/ + struct mutex mutex; + struct media_pad pad; + struct video_device vdev; + struct v4l2_pix_format pix_fmt; + struct v4l2_meta_format meta_fmt; + struct ipu6_isys *isys; + struct ipu6_isys_csi2 *csi2; + struct ipu6_isys_stream *stream; + unsigned int streaming; + struct video_stream_watermark watermark; + u32 source_stream; + u8 vc; + u8 dt; +}; + +#define ipu6_isys_queue_to_video(__aq) \ + container_of(__aq, struct ipu6_isys_video, aq) + +extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts[]; +extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[]; + +const struct ipu6_isys_pixelformat * +ipu6_isys_get_isys_format(u32 pixelformat, u32 code); +int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, + struct media_entity *source_entity, + int nr_queues); +int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + struct ipu6_isys_buffer_list *bl); +int ipu6_isys_fw_open(struct ipu6_isys *isys); +void ipu6_isys_fw_close(struct ipu6_isys *isys); +int ipu6_isys_setup_video(struct ipu6_isys_video *av, + struct media_entity **source_entity, int *nr_queues); +int ipu6_isys_video_init(struct ipu6_isys_video *av); +void ipu6_isys_video_cleanup(struct ipu6_isys_video *av); +void ipu6_isys_put_stream(struct ipu6_isys_stream *stream); +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle); +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc); + +void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, + bool state); +void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state); + +u32 ipu6_isys_get_format(struct ipu6_isys_video *av); +u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av); +u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av); +u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av); +u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av); + +#endif /* IPU6_ISYS_VIDEO_H */ 
diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c new file mode 100644 index 0000000..fc0ec0a --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -0,0 +1,1380 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-cpd.h" +#include "ipu6-dma.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-mmu.h" +#include "ipu6-platform-buttress-regs.h" +#include "ipu6-platform-isys-csi2-reg.h" +#include "ipu6-platform-regs.h" + +#define IPU6_BUTTRESS_FABIC_CONTROL 0x68 +#define GDA_ENABLE_IWAKE_INDEX 2 +#define GDA_IWAKE_THRESHOLD_INDEX 1 +#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0 +#define GDA_MEMOPEN_THRESHOLD_INDEX 3 +#define DEFAULT_DID_RATIO 90 +#define DEFAULT_IWAKE_THRESHOLD 0x42 +#define DEFAULT_MEM_OPEN_TIME 10 +#define ONE_THOUSAND_MICROSECOND 1000 +/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */ +#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE 0x800 + +/* LTR & DID value are 10 bit at most */ +#define LTR_DID_VAL_MAX 1023 +#define LTR_DEFAULT_VALUE 0x70503c19 +#define FILL_TIME_DEFAULT_VALUE 0xfff0783c +#define LTR_DID_PKGC_2R 20 +#define LTR_SCALE_DEFAULT 5 +#define LTR_SCALE_1024NS 2 +#define DID_SCALE_1US 2 +#define DID_SCALE_32US 3 +#define REG_PKGC_PMON_CFG 0xb00 + +#define VAL_PKGC_PMON_CFG_RESET 0x38 +#define VAL_PKGC_PMON_CFG_START 0x7 + +#define IS_PIXEL_BUFFER_PAGES 0x80 +/* + * when iwake mode is disabled, the critical threshold is statically set + * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4 + */ +#define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4) + +union 
fabric_ctrl { + struct { + u16 ltr_val : 10; + u16 ltr_scale : 3; + u16 reserved : 3; + u16 did_val : 10; + u16 did_scale : 3; + u16 reserved2 : 1; + u16 keep_power_in_D0 : 1; + u16 keep_power_override : 1; + } bits; + u32 value; +}; + +enum ltr_did_type { + LTR_IWAKE_ON, + LTR_IWAKE_OFF, + LTR_ISYS_ON, + LTR_ISYS_OFF, + LTR_ENHANNCE_IWAKE, + LTR_TYPE_MAX +}; + +#define ISYS_PM_QOS_VALUE 300 + +static int isys_isr_one(struct ipu6_bus_device *adev); + +static int +isys_complete_ext_device_registration(struct ipu6_isys *isys, + struct v4l2_subdev *sd, + struct ipu6_isys_csi2_config *csi2) +{ + struct device *dev = &isys->adev->auxdev.dev; + unsigned int i; + int ret; + + for (i = 0; i < sd->entity.num_pads; i++) { + if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + + if (i == sd->entity.num_pads) { + dev_warn(dev, "no src pad in external entity\n"); + ret = -ENOENT; + goto unregister_subdev; + } + + ret = media_create_pad_link(&sd->entity, i, + &isys->csi2[csi2->port].asd.sd.entity, + 0, MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (ret) { + dev_warn(dev, "can't create link\n"); + goto unregister_subdev; + } + + isys->csi2[csi2->port].nlanes = csi2->nlanes; + + return 0; + +unregister_subdev: + v4l2_device_unregister_subdev(sd); + + return ret; +} + +static void isys_stream_init(struct ipu6_isys *isys) +{ + u32 i; + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + mutex_init(&isys->streams[i].mutex); + init_completion(&isys->streams[i].stream_open_completion); + init_completion(&isys->streams[i].stream_close_completion); + init_completion(&isys->streams[i].stream_start_completion); + init_completion(&isys->streams[i].stream_stop_completion); + INIT_LIST_HEAD(&isys->streams[i].queues); + isys->streams[i].isys = isys; + isys->streams[i].stream_handle = i; + isys->streams[i].vc = INVALID_VC_ID; + } +} + +static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2 = + 
&isys->pdata->ipdata->csi2; + unsigned int i; + + for (i = 0; i < csi2->nports; i++) + ipu6_isys_csi2_cleanup(&isys->csi2[i]); +} + +static int isys_csi2_register_subdevices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + unsigned int i; + int ret; + + for (i = 0; i < csi2_pdata->nports; i++) { + ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, + isys->pdata->base + + CSI_REG_PORT_BASE(i), i); + if (ret) + goto fail; + + isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); + } + + return 0; + +fail: + while (i--) + ipu6_isys_csi2_cleanup(&isys->csi2[i]); + + return ret; +} + +static int isys_csi2_create_media_links(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; + unsigned int i, j; + int ret; + + for (i = 0; i < csi2_pdata->nports; i++) { + struct media_entity *sd = &isys->csi2[i].asd.sd.entity; + + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { + struct ipu6_isys_video *av = &isys->csi2[i].av[j]; + + ret = media_create_pad_link(sd, CSI2_PAD_SRC + j, + &av->vdev.entity, 0, 0); + if (ret) { + dev_err(dev, "CSI2 can't create link\n"); + return ret; + } + + av->csi2 = &isys->csi2[i]; + } + } + + return 0; +} + +static void isys_unregister_video_devices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + unsigned int i, j; + + for (i = 0; i < csi2_pdata->nports; i++) + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) + ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); +} + +static int isys_register_video_devices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + unsigned int i, j; + int ret; + + for (i = 0; i < csi2_pdata->nports; i++) { + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { + struct ipu6_isys_video *av = &isys->csi2[i].av[j]; + + snprintf(av->vdev.name, 
sizeof(av->vdev.name), + IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u", + i * NR_OF_CSI2_SRC_PADS + j); + av->isys = isys; + av->aq.vbq.buf_struct_size = + sizeof(struct ipu6_isys_video_buffer); + + ret = ipu6_isys_video_init(av); + if (ret) + goto fail; + } + } + + return 0; + +fail: + while (i--) { + while (j--) + ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); + j = NR_OF_CSI2_SRC_PADS; + } + + return ret; +} + +void isys_setup_hw(struct ipu6_isys *isys) +{ + void __iomem *base = isys->pdata->base; + const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; + u32 irqs = 0; + unsigned int i, nports; + + nports = isys->pdata->ipdata->csi2.nports; + + /* Enable irqs for all MIPI ports */ + for (i = 0; i < nports; i++) + irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); + + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge); + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp); + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask); + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable); + writel(GENMASK(19, 0), + base + isys->pdata->ipdata->csi2.ctrl0_irq_clear); + + irqs = ISYS_UNISPART_IRQS; + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE); + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); + writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE); + + writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); + + /* Write CDC FIFO threshold values for isys */ + for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i)); +} + +static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2) +{ + struct ipu6_isys_stream *stream; + unsigned int i; + u32 status; + int source; + + ipu6_isys_register_errors(csi2); + + status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + 
CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + + writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + + source = csi2->asd.source; + for (i = 0; i < NR_OF_CSI2_VC; i++) { + if (status & IPU_CSI_RX_IRQ_FS_VC(i)) { + stream = ipu6_isys_query_stream_by_source(csi2->isys, + source, i); + if (stream) { + ipu6_isys_csi2_sof_event_by_stream(stream); + ipu6_isys_put_stream(stream); + } + } + + if (status & IPU_CSI_RX_IRQ_FE_VC(i)) { + stream = ipu6_isys_query_stream_by_source(csi2->isys, + source, i); + if (stream) { + ipu6_isys_csi2_eof_event_by_stream(stream); + ipu6_isys_put_stream(stream); + } + } + } +} + +irqreturn_t isys_isr(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status_sw, status_csi; + u32 ctrl0_status, ctrl0_clear; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status; + ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear; + + status_csi = readl(isys->pdata->base + ctrl0_status); + status_sw = readl(isys->pdata->base + + IPU6_REG_ISYS_UNISPART_IRQ_STATUS); + + writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW, + base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); + + do { + writel(status_csi, isys->pdata->base + ctrl0_clear); + + writel(status_sw, isys->pdata->base + + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); + + if (isys->isr_csi2_bits & status_csi) { + unsigned int i; + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + /* irq from not enabled port */ + if (!isys->csi2[i].base) + continue; + if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i)) + ipu6_isys_csi2_isr(&isys->csi2[i]); + } + } + + writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); + + if (!isys_isr_one(adev)) + status_sw = IPU6_ISYS_UNISPART_IRQ_SW; + else + status_sw = 0; + + status_csi = readl(isys->pdata->base + ctrl0_status); + status_sw |= 
readl(isys->pdata->base + + IPU6_REG_ISYS_UNISPART_IRQ_STATUS); + } while ((status_csi & isys->isr_csi2_bits) || + (status_sw & IPU6_ISYS_UNISPART_IRQ_SW)); + + writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); + + spin_unlock(&isys->power_lock); + + return IRQ_HANDLED; +} + +static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + struct ltr_did ltrdid_default; + + ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE; + ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE; + + if (iwake_watermark->ltrdid.lut_ltr.value) + *pltr_did = iwake_watermark->ltrdid; + else + *pltr_did = ltrdid_default; +} + +static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value) +{ + struct device *dev = &isys->adev->auxdev.dev; + u32 req_id = index; + u32 offset = 0; + int ret; + + ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value); + if (ret) + dev_err(dev, "write %d failed %d", index, ret); + + return ret; +} + +/* + * When input system is powered up and before enabling any new sensor capture, + * or after disabling any sensor capture the following values need to be set: + * LTR_value = LTR(usec) from calculation; + * LTR_scale = 2; + * DID_value = DID(usec) from calculation; + * DID_scale = 2; + * + * When input system is powered down, the LTR and DID values + * must be returned to the default values: + * LTR_value = 1023; + * LTR_scale = 5; + * DID_value = 1023; + * DID_scale = 2; + */ +static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did, + enum ltr_did_type use) +{ + struct device *dev = &isys->adev->auxdev.dev; + u16 ltr_val, ltr_scale = LTR_SCALE_1024NS; + u16 did_val, did_scale = DID_SCALE_1US; + struct ipu6_device *isp = isys->adev->isp; + union fabric_ctrl fc; + + switch (use) { + case LTR_IWAKE_ON: + ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX); + did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX); + 
ltr_scale = (ltr == LTR_DID_VAL_MAX && + did == LTR_DID_VAL_MAX) ? + LTR_SCALE_DEFAULT : LTR_SCALE_1024NS; + break; + case LTR_ISYS_ON: + case LTR_IWAKE_OFF: + ltr_val = LTR_DID_PKGC_2R; + did_val = LTR_DID_PKGC_2R; + break; + case LTR_ISYS_OFF: + ltr_val = LTR_DID_VAL_MAX; + did_val = LTR_DID_VAL_MAX; + ltr_scale = LTR_SCALE_DEFAULT; + break; + case LTR_ENHANNCE_IWAKE: + if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) { + ltr_val = LTR_DID_VAL_MAX; + did_val = LTR_DID_VAL_MAX; + ltr_scale = LTR_SCALE_DEFAULT; + } else if (did < ONE_THOUSAND_MICROSECOND) { + ltr_val = ltr; + did_val = did; + } else { + ltr_val = ltr; + /* div 90% value by 32 to account for scale change */ + did_val = did / 32; + did_scale = DID_SCALE_32US; + } + break; + default: + ltr_val = LTR_DID_VAL_MAX; + did_val = LTR_DID_VAL_MAX; + ltr_scale = LTR_SCALE_DEFAULT; + break; + } + + fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL); + fc.bits.ltr_val = ltr_val; + fc.bits.ltr_scale = ltr_scale; + fc.bits.did_val = did_val; + fc.bits.did_scale = did_scale; + + dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n", + ltr_val, ltr_scale, did_val, did_scale); + writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL); +} + +/* + * Driver may clear register GDA_ENABLE_IWAKE before FW configures the + * stream for debug purpose. Otherwise driver should not access this register. 
+ */ +static void enable_iwake(struct ipu6_isys *isys, bool enable) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + int ret; + + mutex_lock(&iwake_watermark->mutex); + + if (iwake_watermark->iwake_enabled == enable) { + mutex_unlock(&iwake_watermark->mutex); + return; + } + + ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable); + if (!ret) + iwake_watermark->iwake_enabled = enable; + + mutex_unlock(&iwake_watermark->mutex); +} + +void update_watermark_setting(struct ipu6_isys *isys) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + u32 iwake_threshold, iwake_critical_threshold, page_num; + struct device *dev = &isys->adev->auxdev.dev; + u32 calc_fill_time_us = 0, ltr = 0, did = 0; + struct video_stream_watermark *p_watermark; + enum ltr_did_type ltr_did_type; + struct list_head *stream_node; + u64 isys_pb_datarate_mbs = 0; + u32 mem_open_threshold = 0; + struct ltr_did ltrdid; + u64 threshold_bytes; + u32 max_sram_size; + u32 shift; + + shift = isys->pdata->ipdata->sram_gran_shift; + max_sram_size = isys->pdata->ipdata->max_sram_size; + + mutex_lock(&iwake_watermark->mutex); + if (iwake_watermark->force_iwake_disable) { + set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); + set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, + CRITICAL_THRESHOLD_IWAKE_DISABLE); + goto unlock_exit; + } + + if (list_empty(&iwake_watermark->video_list)) { + isys_pb_datarate_mbs = 0; + } else { + list_for_each(stream_node, &iwake_watermark->video_list) { + p_watermark = list_entry(stream_node, + struct video_stream_watermark, + stream_node); + isys_pb_datarate_mbs += p_watermark->stream_data_rate; + } + } + mutex_unlock(&iwake_watermark->mutex); + + if (!isys_pb_datarate_mbs) { + enable_iwake(isys, false); + set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); + mutex_lock(&iwake_watermark->mutex); + set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, + CRITICAL_THRESHOLD_IWAKE_DISABLE); + goto unlock_exit; + } + + 
enable_iwake(isys, true); + calc_fill_time_us = div64_u64(max_sram_size, isys_pb_datarate_mbs); + + if (isys->pdata->ipdata->enhanced_iwake) { + ltr = isys->pdata->ipdata->ltr; + did = calc_fill_time_us * DEFAULT_DID_RATIO / 100; + ltr_did_type = LTR_ENHANNCE_IWAKE; + } else { + get_lut_ltrdid(isys, <rdid); + + if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0) + ltr = 0; + else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1) + ltr = ltrdid.lut_ltr.bits.val0; + else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2) + ltr = ltrdid.lut_ltr.bits.val1; + else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3) + ltr = ltrdid.lut_ltr.bits.val2; + else + ltr = ltrdid.lut_ltr.bits.val3; + + did = calc_fill_time_us - ltr; + ltr_did_type = LTR_IWAKE_ON; + } + + set_iwake_ltrdid(isys, ltr, did, ltr_did_type); + + /* calculate iwake threshold with 2KB granularity pages */ + threshold_bytes = did * isys_pb_datarate_mbs; + iwake_threshold = max_t(u32, 1, threshold_bytes >> shift); + iwake_threshold = min_t(u32, iwake_threshold, max_sram_size); + + mutex_lock(&iwake_watermark->mutex); + if (isys->pdata->ipdata->enhanced_iwake) { + set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, + DEFAULT_IWAKE_THRESHOLD); + /* calculate number of pages that will be filled in 10 usec */ + page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) / + ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE; + page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) % + ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 
1 : 0; + mem_open_threshold = isys->pdata->ipdata->memopen_threshold; + mem_open_threshold = max_t(u32, mem_open_threshold, page_num); + dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold); + set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX, + mem_open_threshold); + } else { + set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, + iwake_threshold); + } + + iwake_critical_threshold = iwake_threshold + + (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2; + + dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold, + iwake_critical_threshold); + + set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, + iwake_critical_threshold); + + writel(VAL_PKGC_PMON_CFG_RESET, + isys->adev->isp->base + REG_PKGC_PMON_CFG); + writel(VAL_PKGC_PMON_CFG_START, + isys->adev->isp->base + REG_PKGC_PMON_CFG); +unlock_exit: + mutex_unlock(&iwake_watermark->mutex); +} + +static void isys_iwake_watermark_init(struct ipu6_isys *isys) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + + INIT_LIST_HEAD(&iwake_watermark->video_list); + mutex_init(&iwake_watermark->mutex); + + iwake_watermark->ltrdid.lut_ltr.value = 0; + iwake_watermark->isys = isys; + iwake_watermark->iwake_enabled = false; + iwake_watermark->force_iwake_disable = false; +} + +static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + + mutex_lock(&iwake_watermark->mutex); + list_del(&iwake_watermark->video_list); + mutex_unlock(&iwake_watermark->mutex); + + mutex_destroy(&iwake_watermark->mutex); +} + +/* The .bound() notifier callback when a match is found */ +static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_connection *asc) +{ + struct ipu6_isys *isys = + container_of(notifier, struct ipu6_isys, notifier); + struct sensor_async_sd *s_asd = + container_of(asc, struct sensor_async_sd, asc); + int ret; + + if (s_asd->csi2.port >= 
isys->pdata->ipdata->csi2.nports) { + dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", + s_asd->csi2.port); + return -EINVAL; + } + + ret = ipu_bridge_instantiate_vcm(sd->dev); + if (ret) { + dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n"); + return ret; + } + + dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n", + sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); + ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); + if (ret) + return ret; + + return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); +} + +static int isys_notifier_complete(struct v4l2_async_notifier *notifier) +{ + struct ipu6_isys *isys = + container_of(notifier, struct ipu6_isys, notifier); + + return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); +} + +static const struct v4l2_async_notifier_operations isys_async_ops = { + .bound = isys_notifier_bound, + .complete = isys_notifier_complete, +}; + +#define ISYS_MAX_PORTS 8 +static int isys_notifier_init(struct ipu6_isys *isys) +{ + struct ipu6_device *isp = isys->adev->isp; + struct device *dev = &isp->pdev->dev; + unsigned int i; + int ret; + + v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); + + for (i = 0; i < ISYS_MAX_PORTS; i++) { + struct v4l2_fwnode_endpoint vep = { + .bus_type = V4L2_MBUS_CSI2_DPHY + }; + struct sensor_async_sd *s_asd; + struct fwnode_handle *ep; + + ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, + FWNODE_GRAPH_ENDPOINT_NEXT); + if (!ep) + continue; + + ret = v4l2_fwnode_endpoint_parse(ep, &vep); + if (ret) { + dev_err(dev, "fwnode endpoint parse failed: %d\n", ret); + goto err_parse; + } + + s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, + struct sensor_async_sd); + if (IS_ERR(s_asd)) { + ret = PTR_ERR(s_asd); + dev_err(dev, "add remove fwnode failed: %d\n", ret); + goto err_parse; + } + + s_asd->csi2.port = vep.base.port; + s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; + + dev_dbg(dev, "remote endpoint port %d with 
%d lanes added\n", + s_asd->csi2.port, s_asd->csi2.nlanes); + + fwnode_handle_put(ep); + + continue; + +err_parse: + fwnode_handle_put(ep); + return ret; + } + + isys->notifier.ops = &isys_async_ops; + ret = v4l2_async_nf_register(&isys->notifier); + if (ret) { + dev_err(dev, "failed to register async notifier : %d\n", ret); + v4l2_async_nf_cleanup(&isys->notifier); + } + + return ret; +} + +static void isys_notifier_cleanup(struct ipu6_isys *isys) +{ + v4l2_async_nf_unregister(&isys->notifier); + v4l2_async_nf_cleanup(&isys->notifier); +} + +static int isys_register_devices(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct pci_dev *pdev = isys->adev->isp->pdev; + int ret; + + isys->media_dev.dev = dev; + media_device_pci_init(&isys->media_dev, + pdev, IPU6_MEDIA_DEV_MODEL_NAME); + + strscpy(isys->v4l2_dev.name, isys->media_dev.model, + sizeof(isys->v4l2_dev.name)); + + ret = media_device_register(&isys->media_dev); + if (ret < 0) + goto out_media_device_unregister; + + isys->v4l2_dev.mdev = &isys->media_dev; + isys->v4l2_dev.ctrl_handler = NULL; + + ret = v4l2_device_register(dev, &isys->v4l2_dev); + if (ret < 0) + goto out_media_device_unregister; + + ret = isys_register_video_devices(isys); + if (ret) + goto out_v4l2_device_unregister; + + ret = isys_csi2_register_subdevices(isys); + if (ret) + goto out_isys_unregister_video_device; + + ret = isys_csi2_create_media_links(isys); + if (ret) + goto out_isys_unregister_subdevices; + + ret = isys_notifier_init(isys); + if (ret) + goto out_isys_unregister_subdevices; + + return 0; + +out_isys_unregister_subdevices: + isys_csi2_unregister_subdevices(isys); + +out_isys_unregister_video_device: + isys_unregister_video_devices(isys); + +out_v4l2_device_unregister: + v4l2_device_unregister(&isys->v4l2_dev); + +out_media_device_unregister: + media_device_unregister(&isys->media_dev); + media_device_cleanup(&isys->media_dev); + + dev_err(dev, "failed to register isys devices\n"); + + return 
ret; +} + +static void isys_unregister_devices(struct ipu6_isys *isys) +{ + isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); + media_device_cleanup(&isys->media_dev); +} + +static int isys_runtime_pm_resume(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + struct ipu6_device *isp = adev->isp; + unsigned long flags; + int ret; + + if (!isys) + return 0; + + ret = ipu6_mmu_hw_init(adev->mmu); + if (ret) + return ret; + + cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); + + ret = ipu6_buttress_start_tsc_sync(isp); + if (ret) + return ret; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 1; + spin_unlock_irqrestore(&isys->power_lock, flags); + + isys_setup_hw(isys); + + set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON); + + return 0; +} + +static int isys_runtime_pm_suspend(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + struct ipu6_isys *isys; + unsigned long flags; + + isys = dev_get_drvdata(dev); + if (!isys) + return 0; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 0; + spin_unlock_irqrestore(&isys->power_lock, flags); + + mutex_lock(&isys->mutex); + isys->need_reset = false; + mutex_unlock(&isys->mutex); + + isys->phy_termcal_val = 0; + cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF); + + ipu6_mmu_hw_cleanup(adev->mmu); + + return 0; +} + +static int isys_suspend(struct device *dev) +{ + struct ipu6_isys *isys = dev_get_drvdata(dev); + + /* If stream is open, refuse to suspend */ + if (isys->stream_opened) + return -EBUSY; + + return 0; +} + +static int isys_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops isys_pm_ops = { + .runtime_suspend = isys_runtime_pm_suspend, + .runtime_resume = 
isys_runtime_pm_resume, + .suspend = isys_suspend, + .resume = isys_resume, +}; + +static void free_fw_msg_bufs(struct ipu6_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *safe; + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) + ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) + ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); +} + +static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) +{ + struct isys_fw_msgs *addr; + dma_addr_t dma_addr; + unsigned long flags; + unsigned int i; + + for (i = 0; i < amount; i++) { + addr = ipu6_dma_alloc(isys->adev, sizeof(*addr), + &dma_addr, GFP_KERNEL, 0); + if (!addr) + break; + addr->dma_addr = dma_addr; + + spin_lock_irqsave(&isys->listlock, flags); + list_add(&addr->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); + } + + if (i == amount) + return 0; + + spin_lock_irqsave(&isys->listlock, flags); + while (!list_empty(&isys->framebuflist)) { + addr = list_first_entry(&isys->framebuflist, + struct isys_fw_msgs, head); + list_del(&addr->head); + spin_unlock_irqrestore(&isys->listlock, flags); + ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), addr, + addr->dma_addr, 0); + spin_lock_irqsave(&isys->listlock, flags); + } + spin_unlock_irqrestore(&isys->listlock, flags); + + return -ENOMEM; +} + +struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream) +{ + struct ipu6_isys *isys = stream->isys; + struct device *dev = &isys->adev->auxdev.dev; + struct isys_fw_msgs *msg; + unsigned long flags; + int ret; + + spin_lock_irqsave(&isys->listlock, flags); + if (list_empty(&isys->framebuflist)) { + spin_unlock_irqrestore(&isys->listlock, flags); + dev_dbg(dev, "Frame list empty\n"); + + ret = alloc_fw_msg_bufs(isys, 5); + if (ret < 0) + return NULL; + + spin_lock_irqsave(&isys->listlock, flags); + if 
(list_empty(&isys->framebuflist)) { + spin_unlock_irqrestore(&isys->listlock, flags); + dev_err(dev, "Frame list empty\n"); + return NULL; + } + } + msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); + list_move(&msg->head, &isys->framebuflist_fw); + spin_unlock_irqrestore(&isys->listlock, flags); + memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); + + return msg; +} + +void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *fwmsg0; + unsigned long flags; + + spin_lock_irqsave(&isys->listlock, flags); + list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) + list_move(&fwmsg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data) +{ + struct isys_fw_msgs *msg; + unsigned long flags; + void *ptr = (void *)data; + + if (!ptr) + return; + + spin_lock_irqsave(&isys->listlock, flags); + msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); + list_move(&msg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +static int isys_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata; + struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); + struct ipu6_device *isp = adev->isp; + const struct firmware *fw; + struct ipu6_isys *isys; + unsigned int i; + int ret; + + if (!isp->bus_ready_to_probe) + return -EPROBE_DEFER; + + isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL); + if (!isys) + return -ENOMEM; + + adev->auxdrv_data = + (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; + adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver); + isys->adev = adev; + isys->pdata = adev->pdata; + csi2_pdata = &isys->pdata->ipdata->csi2; + + isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports, + sizeof(*isys->csi2), GFP_KERNEL); + if (!isys->csi2) + return -ENOMEM; + + ret = 
ipu6_mmu_hw_init(adev->mmu); + if (ret) + return ret; + + /* initial sensor type */ + isys->sensor_type = isys->pdata->ipdata->sensor_type_start; + + spin_lock_init(&isys->streams_lock); + spin_lock_init(&isys->power_lock); + isys->power = 0; + isys->phy_termcal_val = 0; + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); + INIT_LIST_HEAD(&isys->framebuflist_fw); + + isys->icache_prefetch = 0; + + dev_set_drvdata(&auxdev->dev, isys); + + isys_stream_init(isys); + + if (!isp->secure_mode) { + fw = isp->cpd_fw; + ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt); + if (ret) + goto release_firmware; + + ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data); + if (ret) + goto remove_shared_buffer; + } + + cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + ret = alloc_fw_msg_bufs(isys, 20); + if (ret < 0) + goto out_remove_pkg_dir_shared_buffer; + + isys_iwake_watermark_init(isys); + + if (is_ipu6se(adev->isp->hw_ver)) + isys->phy_set_power = ipu6_isys_jsl_phy_set_power; + else if (is_ipu6ep_mtl(adev->isp->hw_ver)) + isys->phy_set_power = ipu6_isys_dwc_phy_set_power; + else + isys->phy_set_power = ipu6_isys_mcd_phy_set_power; + + ret = isys_register_devices(isys); + if (ret) + goto free_fw_msg_bufs; + + ipu6_mmu_hw_cleanup(adev->mmu); + + return 0; + +free_fw_msg_bufs: + free_fw_msg_bufs(isys); +out_remove_pkg_dir_shared_buffer: + cpu_latency_qos_remove_request(&isys->pm_qos); + if (!isp->secure_mode) + ipu6_cpd_free_pkg_dir(adev); +remove_shared_buffer: + if (!isp->secure_mode) + ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); +release_firmware: + if (!isp->secure_mode) + release_firmware(adev->fw); + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) + mutex_destroy(&isys->streams[i].mutex); + + mutex_destroy(&isys->mutex); + mutex_destroy(&isys->stream_mutex); + + ipu6_mmu_hw_cleanup(adev->mmu); + + return ret; +} + +static void isys_remove(struct 
auxiliary_device *auxdev) +{ + struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); + struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); + struct ipu6_device *isp = adev->isp; + unsigned int i; + + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); + isys_notifier_cleanup(isys); + + cpu_latency_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu6_cpd_free_pkg_dir(adev); + ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); + release_firmware(adev->fw); + } + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) + mutex_destroy(&isys->streams[i].mutex); + + isys_iwake_watermark_cleanup(isys); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); +} + +struct fwmsg { + int type; + char *msg; + bool valid_ts; +}; + +static const struct fwmsg fw_msg[] = { + {IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + "STREAM_START_AND_CAPTURE_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + "STREAM_START_AND_CAPTURE_DONE", 1}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, + {IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, + {IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, + {IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, + {-1, "UNKNOWN MESSAGE", 0} +}; + +static u32 resp_type_to_index(int type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fw_msg); i++) + if (fw_msg[i].type == type) + return i; + + return ARRAY_SIZE(fw_msg) - 1; +} + +static int isys_isr_one(struct ipu6_bus_device *adev) +{ + 
struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + struct ipu6_fw_isys_resp_info_abi *resp; + struct ipu6_isys_stream *stream; + struct ipu6_isys_csi2 *csi2 = NULL; + u32 index; + u64 ts; + + if (!isys->fwcom) + return 1; + + resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); + if (!resp) + return 1; + + ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; + + index = resp_type_to_index(resp->type); + dev_dbg(&adev->auxdev.dev, + "FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n", + resp->type, fw_msg[index].msg, resp->stream_handle, + fw_msg[index].valid_ts ? ts : 0, resp->pin_id); + + if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) + /* Suspension is kind of special case: not enough buffers */ + dev_dbg(&adev->auxdev.dev, + "FW error resp SUSPENSION, details %d\n", + resp->error_info.error_details); + else if (resp->error_info.error) + dev_dbg(&adev->auxdev.dev, + "FW error resp error %d, details %d\n", + resp->error_info.error, resp->error_info.error_details); + + if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) { + dev_err(&adev->auxdev.dev, "bad stream handle %u\n", + resp->stream_handle); + goto leave; + } + + stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle); + if (!stream) { + dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n", + resp->stream_handle); + goto leave; + } + stream->error = resp->error_info.error; + + csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + + switch (resp->type) { + case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + complete(&stream->stream_open_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + complete(&stream->stream_close_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK: + complete(&stream->stream_start_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + complete(&stream->stream_start_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: + 
complete(&stream->stream_stop_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + complete(&stream->stream_stop_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY: + /* + * firmware only release the capture msg until software + * get pin_data_ready event + */ + ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id); + if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS && + stream->output_pins_queue[resp->pin_id]) + ipu6_isys_queue_buf_ready(stream, resp); + else + dev_warn(&adev->auxdev.dev, + "%d:No queue for pin id %d\n", + resp->stream_handle, resp->pin_id); + if (csi2) + ipu6_isys_csi2_error(csi2); + + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + break; + case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF: + + ipu6_isys_csi2_sof_event_by_stream(stream); + stream->seq[stream->seq_index].sequence = + atomic_read(&stream->sequence) - 1; + stream->seq[stream->seq_index].timestamp = ts; + dev_dbg(&adev->auxdev.dev, + "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + stream->seq[stream->seq_index].sequence, ts); + stream->seq_index = (stream->seq_index + 1) + % IPU6_ISYS_MAX_PARALLEL_SOF; + break; + case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF: + ipu6_isys_csi2_eof_event_by_stream(stream); + dev_dbg(&adev->auxdev.dev, + "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + stream->seq[stream->seq_index].sequence, ts); + break; + case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY: + break; + default: + dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n", + resp->stream_handle, resp->type); + break; + } + + ipu6_isys_put_stream(stream); +leave: + ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); + return 0; +} + +static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = { + .isr = isys_isr, + .isr_threaded = NULL, + .wake_isr_thread = false, +}; + 
+static const struct auxiliary_device_id ipu6_isys_id_table[] = { + { + .name = "intel_ipu6.isys", + .driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data, + }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table); + +static struct auxiliary_driver isys_driver = { + .name = IPU6_ISYS_NAME, + .probe = isys_probe, + .remove = isys_remove, + .id_table = ipu6_isys_id_table, + .driver = { + .pm = &isys_pm_ops, + }, +}; + +module_auxiliary_driver(isys_driver); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Hongju Wang "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel IPU6 input system driver"); +MODULE_IMPORT_NS("INTEL_IPU6"); +MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.h new file mode 100644 index 0000000..0e2c8b7 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_H +#define IPU6_ISYS_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-fw-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-isys-video.h" + +struct ipu6_bus_device; + +#define IPU6_ISYS_ENTITY_PREFIX "Intel IPU6" +/* FW support max 16 streams */ +#define IPU6_ISYS_MAX_STREAMS 16 +#define ISYS_UNISPART_IRQS (IPU6_ISYS_UNISPART_IRQ_SW | \ + IPU6_ISYS_UNISPART_IRQ_CSI0 | \ + IPU6_ISYS_UNISPART_IRQ_CSI1) + +/* + * Current message queue configuration. These must be big enough + * so that they never gets full. 
Queues are located in system memory + */ +#define IPU6_ISYS_SIZE_RECV_QUEUE 40 +#define IPU6_ISYS_SIZE_SEND_QUEUE 40 +#define IPU6_ISYS_SIZE_PROXY_RECV_QUEUE 5 +#define IPU6_ISYS_SIZE_PROXY_SEND_QUEUE 5 +#define IPU6_ISYS_NUM_RECV_QUEUE 1 + +#define IPU6_ISYS_MIN_WIDTH 2U +#define IPU6_ISYS_MIN_HEIGHT 1U +#define IPU6_ISYS_MAX_WIDTH 4672U +#define IPU6_ISYS_MAX_HEIGHT 3416U + +/* the threshold granularity is 2KB on IPU6 */ +#define IPU6_SRAM_GRANULARITY_SHIFT 11 +#define IPU6_SRAM_GRANULARITY_SIZE 2048 +/* the threshold granularity is 1KB on IPU6SE */ +#define IPU6SE_SRAM_GRANULARITY_SHIFT 10 +#define IPU6SE_SRAM_GRANULARITY_SIZE 1024 +/* IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6 */ +#define IPU6_MAX_SRAM_SIZE (200 << 10) +/* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE */ +#define IPU6SE_MAX_SRAM_SIZE (96 << 10) + +#define IPU6EP_LTR_VALUE 200 +#define IPU6EP_MIN_MEMOPEN_TH 0x4 +#define IPU6EP_MTL_LTR_VALUE 1023 +#define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc + +struct ltr_did { + union { + u32 value; + struct { + u8 val0; + u8 val1; + u8 val2; + u8 val3; + } bits; + } lut_ltr; + union { + u32 value; + struct { + u8 th0; + u8 th1; + u8 th2; + u8 th3; + } bits; + } lut_fill_time; +}; + +struct isys_iwake_watermark { + bool iwake_enabled; + bool force_iwake_disable; + u32 iwake_threshold; + u64 isys_pixelbuffer_datarate; + struct ltr_did ltrdid; + struct mutex mutex; /* protect whole struct */ + struct ipu6_isys *isys; + struct list_head video_list; +}; + +struct ipu6_isys_csi2_config { + u32 nlanes; + u32 port; +}; + +struct sensor_async_sd { + struct v4l2_async_connection asc; + struct ipu6_isys_csi2_config csi2; +}; + +/* + * struct ipu6_isys + * + * @media_dev: Media device + * @v4l2_dev: V4L2 device + * @adev: ISYS bus device + * @power: Is ISYS powered on or not? + * @isr_bits: Which bits does the ISR handle? 
+ * @power_lock: Serialise access to power (power state in general) + * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers + * @streams_lock: serialise access to streams + * @streams: streams per firmware stream ID + * @fwcom: fw communication layer private pointer + * or optional external library private pointer + * @phy_termcal_val: the termination calibration value, only used for DWC PHY + * @need_reset: Isys requires d0i0->i3 transition + * @ref_count: total number of callers fw open + * @mutex: serialise access isys video open/release related operations + * @stream_mutex: serialise stream start and stop, queueing requests + * @pdata: platform data pointer + * @csi2: CSI-2 receivers + */ +struct ipu6_isys { + struct media_device media_dev; + struct v4l2_device v4l2_dev; + struct ipu6_bus_device *adev; + + int power; + spinlock_t power_lock; + u32 isr_csi2_bits; + u32 csi2_rx_ctrl_cached; + spinlock_t streams_lock; + struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS]; + int streams_ref_count[IPU6_ISYS_MAX_STREAMS]; + void *fwcom; + u32 phy_termcal_val; + bool need_reset; + bool icache_prefetch; + bool csi2_cse_ipc_not_supported; + unsigned int ref_count; + unsigned int stream_opened; + unsigned int sensor_type; + + struct mutex mutex; + struct mutex stream_mutex; + + struct ipu6_isys_pdata *pdata; + + int (*phy_set_power)(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); + + struct ipu6_isys_csi2 *csi2; + + struct pm_qos_request pm_qos; + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; + struct v4l2_async_notifier notifier; + struct isys_iwake_watermark iwake_watermark; +}; + +struct isys_fw_msgs { + union { + u64 dummy; + struct ipu6_fw_isys_frame_buff_set_abi frame; + struct ipu6_fw_isys_stream_cfg_data_abi stream; + } fw_msg; + struct list_head head; + dma_addr_t dma_addr; +}; + +struct isys_fw_msgs 
*ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); +void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data); +void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); + +extern const struct v4l2_ioctl_ops ipu6_isys_ioctl_ops; + +void isys_setup_hw(struct ipu6_isys *isys); +irqreturn_t isys_isr(struct ipu6_bus_device *adev); +void update_watermark_setting(struct ipu6_isys *isys); + +int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); + +int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); + +int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); +#endif /* IPU6_ISYS_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c new file mode 100644 index 0000000..6d1c0b9 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c @@ -0,0 +1,806 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-dma.h" +#include "ipu6-mmu.h" +#include "ipu6-platform-regs.h" + +#define ISP_PAGE_SHIFT 12 +#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) +#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) + +#define ISP_L1PT_SHIFT 22 +#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) + +#define ISP_L2PT_SHIFT 12 +#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) + +#define ISP_L1PT_PTES 1024 +#define ISP_L2PT_PTES 1024 + +#define ISP_PADDR_SHIFT 12 + +#define REG_TLB_INVALIDATE 0x0000 + +#define REG_L1_PHYS 0x0004 /* 27-bit pfn */ +#define 
REG_INFO 0x0008 + +#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) + +static void tlb_invalidate(struct ipu6_mmu *mmu) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&mmu->ready_lock, flags); + if (!mmu->ready) { + spin_unlock_irqrestore(&mmu->ready_lock, flags); + return; + } + + for (i = 0; i < mmu->nr_mmus; i++) { + /* + * To avoid the HW bug induced dead lock in some of the IPU6 + * MMUs on successive invalidate calls, we need to first do a + * read to the page table base before writing the invalidate + * register. MMUs which need to implement this WA, will have + * the insert_read_before_invalidate flags set as true. + * Disregard the return value of the read. + */ + if (mmu->mmu_hw[i].insert_read_before_invalidate) + readl(mmu->mmu_hw[i].base + REG_L1_PHYS); + + writel(0xffffffff, mmu->mmu_hw[i].base + + REG_TLB_INVALIDATE); + /* + * The TLB invalidation is a "single cycle" (IOMMU clock cycles) + * When the actual MMIO write reaches the IPU6 TLB Invalidate + * register, wmb() will force the TLB invalidate out if the CPU + * attempts to update the IOMMU page table (or sooner). 
+ */ + wmb(); + } + spin_unlock_irqrestore(&mmu->ready_lock, flags); +} + +#ifdef DEBUG +static void page_table_dump(struct ipu6_mmu_info *mmu_info) +{ + u32 l1_idx; + + dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n"); + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { + u32 l2_idx; + u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT; + phys_addr_t l2_phys; + + if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) + continue; + + l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]); + dev_dbg(mmu_info->dev, + "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n", + l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys); + + for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { + u32 *l2_pt = mmu_info->l2_pts[l1_idx]; + u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); + + if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval) + continue; + + dev_dbg(mmu_info->dev, + "\tl2 entry %u; iova 0x%8.8x, phys %pa\n", + l2_idx, iova2, + TBL_PHYS_ADDR(l2_pt[l2_idx])); + } + } + + dev_dbg(mmu_info->dev, "end IOMMU page table dump\n"); +} +#endif /* DEBUG */ + +static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr) +{ + dma_addr_t dma; + + dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mmu_info->dev, dma)) + return 0; + + return dma; +} + +static int get_dummy_page(struct ipu6_mmu_info *mmu_info) +{ + void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + dma_addr_t dma; + + if (!pt) + return -ENOMEM; + + dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt); + + dma = map_single(mmu_info, pt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map dummy page\n"); + goto err_free_page; + } + + mmu_info->dummy_page = pt; + mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; + + return 0; + +err_free_page: + free_page((unsigned long)pt); + return -ENOMEM; +} + +static void free_dummy_page(struct ipu6_mmu_info *mmu_info) +{ + dma_unmap_single(mmu_info->dev, + TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), + 
PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->dummy_page); +} + +static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) +{ + u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + dma_addr_t dma; + unsigned int i; + + if (!pt) + return -ENOMEM; + + dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt); + + dma = map_single(mmu_info, pt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map l2pt page\n"); + goto err_free_page; + } + + for (i = 0; i < ISP_L2PT_PTES; i++) + pt[i] = mmu_info->dummy_page_pteval; + + mmu_info->dummy_l2_pt = pt; + mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; + + return 0; + +err_free_page: + free_page((unsigned long)pt); + return -ENOMEM; +} + +static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) +{ + dma_unmap_single(mmu_info->dev, + TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->dummy_l2_pt); +} + +static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info) +{ + u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + dma_addr_t dma; + unsigned int i; + + if (!pt) + return NULL; + + dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt); + + for (i = 0; i < ISP_L1PT_PTES; i++) + pt[i] = mmu_info->dummy_l2_pteval; + + dma = map_single(mmu_info, pt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map l1pt page\n"); + goto err_free_page; + } + + mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; + dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma); + + return pt; + +err_free_page: + free_page((unsigned long)pt); + return NULL; +} + +static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info) +{ + u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + unsigned int i; + + if (!pt) + return NULL; + + dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt); + + for (i = 0; i < ISP_L1PT_PTES; i++) + pt[i] = mmu_info->dummy_page_pteval; + + return pt; +} + +static void l2_unmap(struct 
ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t dummy, size_t size) +{ + unsigned int l2_entries; + unsigned int l2_idx; + unsigned long flags; + u32 l1_idx; + u32 *l2_pt; + + spin_lock_irqsave(&mmu_info->lock, flags); + for (l1_idx = iova >> ISP_L1PT_SHIFT; + size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { + dev_dbg(mmu_info->dev, + "unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n", + l1_idx, iova); + + if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { + dev_err(mmu_info->dev, + "unmap not mapped iova 0x%8.8lx l1 index %u\n", + iova, l1_idx); + continue; + } + l2_pt = mmu_info->l2_pts[l1_idx]; + + l2_entries = 0; + for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { + phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]); + + dev_dbg(mmu_info->dev, + "unmap l2 index %u with pteval 0x%p\n", + l2_idx, &pteval); + l2_pt[l2_idx] = mmu_info->dummy_page_pteval; + + iova += ISP_PAGE_SIZE; + size -= ISP_PAGE_SIZE; + + l2_entries++; + } + + WARN_ON_ONCE(!l2_entries); + clflush_cache_range(&l2_pt[l2_idx - l2_entries], + sizeof(l2_pt[0]) * l2_entries); + } + + WARN_ON_ONCE(size); + spin_unlock_irqrestore(&mmu_info->lock, flags); +} + +static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + struct device *dev = mmu_info->dev; + unsigned int l2_entries; + u32 *l2_pt, *l2_virt; + unsigned int l2_idx; + unsigned long flags; + size_t mapped = 0; + dma_addr_t dma; + u32 l1_entry; + u32 l1_idx; + int err = 0; + + spin_lock_irqsave(&mmu_info->lock, flags); + + paddr = ALIGN(paddr, ISP_PAGE_SIZE); + for (l1_idx = iova >> ISP_L1PT_SHIFT; + size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { + dev_dbg(dev, + "mapping l2 page table for l1 index %u (iova %8.8x)\n", + l1_idx, (u32)iova); + + l1_entry = mmu_info->l1_pt[l1_idx]; + if (l1_entry == mmu_info->dummy_l2_pteval) { + l2_virt = mmu_info->l2_pts[l1_idx]; + if (likely(!l2_virt)) { + l2_virt = 
alloc_l2_pt(mmu_info); + if (!l2_virt) { + err = -ENOMEM; + goto error; + } + } + + dma = map_single(mmu_info, l2_virt); + if (!dma) { + dev_err(dev, "Failed to map l2pt page\n"); + free_page((unsigned long)l2_virt); + err = -EINVAL; + goto error; + } + + l1_entry = dma >> ISP_PADDR_SHIFT; + + dev_dbg(dev, "page for l1_idx %u %p allocated\n", + l1_idx, l2_virt); + mmu_info->l1_pt[l1_idx] = l1_entry; + mmu_info->l2_pts[l1_idx] = l2_virt; + + clflush_cache_range(&mmu_info->l1_pt[l1_idx], + sizeof(mmu_info->l1_pt[l1_idx])); + } + + l2_pt = mmu_info->l2_pts[l1_idx]; + l2_entries = 0; + + for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { + l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; + + dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx, + l2_pt[l2_idx]); + + iova += ISP_PAGE_SIZE; + paddr += ISP_PAGE_SIZE; + mapped += ISP_PAGE_SIZE; + size -= ISP_PAGE_SIZE; + + l2_entries++; + } + + WARN_ON_ONCE(!l2_entries); + clflush_cache_range(&l2_pt[l2_idx - l2_entries], + sizeof(l2_pt[0]) * l2_entries); + } + + spin_unlock_irqrestore(&mmu_info->lock, flags); + + return 0; + +error: + spin_unlock_irqrestore(&mmu_info->lock, flags); + /* unroll mapping in case something went wrong */ + if (mapped) + l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped); + + return err; +} + +static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + u32 iova_start = round_down(iova, ISP_PAGE_SIZE); + u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); + + dev_dbg(mmu_info->dev, + "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n", + iova_start, iova_end, size, &paddr); + + return l2_map(mmu_info, iova_start, paddr, size); +} + +static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, + unsigned long iova, size_t size) +{ + l2_unmap(mmu_info, iova, 0, size); +} + +static int allocate_trash_buffer(struct ipu6_mmu *mmu) +{ + unsigned int n_pages = 
PFN_UP(IPU6_MMUV2_TRASH_RANGE); + struct iova *iova; + unsigned int i; + dma_addr_t dma; + unsigned long iova_addr; + int ret; + + /* Allocate 8MB in iova range */ + iova = alloc_iova(&mmu->dmap->iovad, n_pages, + PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); + if (!iova) { + dev_err(mmu->dev, "cannot allocate iova range for trash\n"); + return -ENOMEM; + } + + dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) { + dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n"); + ret = -ENOMEM; + goto out_free_iova; + } + + mmu->pci_trash_page = dma; + + /* + * Map the 8MB iova address range to the same physical trash page + * mmu->trash_page which is already reserved at the probe + */ + iova_addr = iova->pfn_lo; + for (i = 0; i < n_pages; i++) { + ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), + mmu->pci_trash_page, PAGE_SIZE); + if (ret) { + dev_err(mmu->dev, + "mapping trash buffer range failed\n"); + goto out_unmap; + } + + iova_addr++; + } + + mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo); + dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n", + mmu->mmid, (unsigned int)mmu->iova_trash_page); + return 0; + +out_unmap: + ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page, + PAGE_SIZE, DMA_BIDIRECTIONAL); +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); + return ret; +} + +int ipu6_mmu_hw_init(struct ipu6_mmu *mmu) +{ + struct ipu6_mmu_info *mmu_info; + unsigned long flags; + unsigned int i; + + mmu_info = mmu->dmap->mmu_info; + + /* Initialise the each MMU HW block */ + for (i = 0; i < mmu->nr_mmus; i++) { + struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; + unsigned int j; + u16 block_addr; + + /* Write page table address per MMU */ + writel((phys_addr_t)mmu_info->l1_pt_dma, + mmu->mmu_hw[i].base + REG_L1_PHYS); + + /* Set info 
bits per MMU */ + writel(mmu->mmu_hw[i].info_bits, + mmu->mmu_hw[i].base + REG_INFO); + + /* Configure MMU TLB stream configuration for L1 */ + for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams; + block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) { + if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) { + dev_err(mmu->dev, "invalid L1 configuration\n"); + return -EINVAL; + } + + /* Write block start address for each streams */ + writel(block_addr, mmu_hw->base + + mmu_hw->l1_stream_id_reg_offset + 4 * j); + } + + /* Configure MMU TLB stream configuration for L2 */ + for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams; + block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) { + if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) { + dev_err(mmu->dev, "invalid L2 configuration\n"); + return -EINVAL; + } + + writel(block_addr, mmu_hw->base + + mmu_hw->l2_stream_id_reg_offset + 4 * j); + } + } + + if (!mmu->trash_page) { + int ret; + + mmu->trash_page = alloc_page(GFP_KERNEL); + if (!mmu->trash_page) { + dev_err(mmu->dev, "insufficient memory for trash buffer\n"); + return -ENOMEM; + } + + ret = allocate_trash_buffer(mmu); + if (ret) { + __free_page(mmu->trash_page); + mmu->trash_page = NULL; + dev_err(mmu->dev, "trash buffer allocation failed\n"); + return ret; + } + } + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = true; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, "INTEL_IPU6"); + +static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp) +{ + struct ipu6_mmu_info *mmu_info; + int ret; + + mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL); + if (!mmu_info) + return NULL; + + mmu_info->aperture_start = 0; + mmu_info->aperture_end = + (dma_addr_t)DMA_BIT_MASK(isp->secure_mode ? 
+ IPU6_MMU_ADDR_BITS : + IPU6_MMU_ADDR_BITS_NON_SECURE); + mmu_info->pgsize_bitmap = SZ_4K; + mmu_info->dev = &isp->pdev->dev; + + ret = get_dummy_page(mmu_info); + if (ret) + goto err_free_info; + + ret = alloc_dummy_l2_pt(mmu_info); + if (ret) + goto err_free_dummy_page; + + mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts)); + if (!mmu_info->l2_pts) + goto err_free_dummy_l2_pt; + + /* + * We always map the L1 page table (a single page as well as + * the L2 page tables). + */ + mmu_info->l1_pt = alloc_l1_pt(mmu_info); + if (!mmu_info->l1_pt) + goto err_free_l2_pts; + + spin_lock_init(&mmu_info->lock); + + dev_dbg(mmu_info->dev, "domain initialised\n"); + + return mmu_info; + +err_free_l2_pts: + vfree(mmu_info->l2_pts); +err_free_dummy_l2_pt: + free_dummy_l2_pt(mmu_info); +err_free_dummy_page: + free_dummy_page(mmu_info); +err_free_info: + kfree(mmu_info); + + return NULL; +} + +void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu) +{ + unsigned long flags; + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = false; + spin_unlock_irqrestore(&mmu->ready_lock, flags); +} +EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, "INTEL_IPU6"); + +static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp) +{ + struct ipu6_dma_mapping *dmap; + + dmap = kzalloc(sizeof(*dmap), GFP_KERNEL); + if (!dmap) + return NULL; + + dmap->mmu_info = ipu6_mmu_alloc(isp); + if (!dmap->mmu_info) { + kfree(dmap); + return NULL; + } + + init_iova_domain(&dmap->iovad, SZ_4K, 1); + dmap->mmu_info->dmap = dmap; + + dev_dbg(&isp->pdev->dev, "alloc mapping\n"); + + iova_cache_get(); + + return dmap; +} + +phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, + dma_addr_t iova) +{ + phys_addr_t phy_addr; + unsigned long flags; + u32 *l2_pt; + + spin_lock_irqsave(&mmu_info->lock, flags); + l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT]; + phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]; + phy_addr <<= ISP_PAGE_SHIFT; + 
spin_unlock_irqrestore(&mmu_info->lock, flags); + + return phy_addr; +} + +void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, + size_t size) +{ + unsigned int min_pagesz; + + dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size); + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); + + /* + * The virtual address and the size of the mapping must be + * aligned (at least) to the size of the smallest page supported + * by the hardware + */ + if (!IS_ALIGNED(iova | size, min_pagesz)) { + dev_err(mmu_info->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", + iova, size, min_pagesz); + return; + } + + __ipu6_mmu_unmap(mmu_info, iova, size); +} + +int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + unsigned int min_pagesz; + + if (mmu_info->pgsize_bitmap == 0UL) + return -ENODEV; + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); + + /* + * both the virtual address and the physical one, as well as + * the size of the mapping, must be aligned (at least) to the + * size of the smallest page supported by the hardware + */ + if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { + dev_err(mmu_info->dev, + "unaligned: iova %lx pa %pa size %zx min_pagesz %x\n", + iova, &paddr, size, min_pagesz); + return -EINVAL; + } + + dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n", + iova, &paddr, size); + + return __ipu6_mmu_map(mmu_info, iova, paddr, size); +} + +static void ipu6_mmu_destroy(struct ipu6_mmu *mmu) +{ + struct ipu6_dma_mapping *dmap = mmu->dmap; + struct ipu6_mmu_info *mmu_info = dmap->mmu_info; + struct iova *iova; + u32 l1_idx; + + if (mmu->iova_trash_page) { + iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page)); + if (iova) { + /* unmap and free the trash buffer iova */ + ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + 
__free_iova(&dmap->iovad, iova); + } else { + dev_err(mmu->dev, "trash buffer iova not found.\n"); + } + + mmu->iova_trash_page = 0; + dma_unmap_page(mmu_info->dev, mmu->pci_trash_page, + PAGE_SIZE, DMA_BIDIRECTIONAL); + mmu->pci_trash_page = 0; + __free_page(mmu->trash_page); + } + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { + if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) { + dma_unmap_single(mmu_info->dev, + TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]), + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->l2_pts[l1_idx]); + } + } + + vfree(mmu_info->l2_pts); + free_dummy_page(mmu_info); + dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma), + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->dummy_l2_pt); + free_page((unsigned long)mmu_info->l1_pt); + kfree(mmu_info); +} + +struct ipu6_mmu *ipu6_mmu_init(struct device *dev, + void __iomem *base, int mmid, + const struct ipu6_hw_variants *hw) +{ + struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev)); + struct ipu6_mmu_pdata *pdata; + struct ipu6_mmu *mmu; + unsigned int i; + + if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES) + return ERR_PTR(-EINVAL); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < hw->nr_mmus; i++) { + struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; + const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i]; + + if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS || + src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS) + return ERR_PTR(-EINVAL); + + *pdata_mmu = *src_mmu; + pdata_mmu->base = base + src_mmu->offset; + } + + mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return ERR_PTR(-ENOMEM); + + mmu->mmid = mmid; + mmu->mmu_hw = pdata->mmu_hw; + mmu->nr_mmus = hw->nr_mmus; + mmu->tlb_invalidate = tlb_invalidate; + mmu->ready = false; + INIT_LIST_HEAD(&mmu->vma_list); + spin_lock_init(&mmu->ready_lock); + + mmu->dmap = alloc_dma_mapping(isp); 
+ if (!mmu->dmap) { + dev_err(dev, "can't alloc dma mapping\n"); + return ERR_PTR(-ENOMEM); + } + + return mmu; +} + +void ipu6_mmu_cleanup(struct ipu6_mmu *mmu) +{ + struct ipu6_dma_mapping *dmap = mmu->dmap; + + ipu6_mmu_destroy(mmu); + mmu->dmap = NULL; + iova_cache_put(); + put_iova_domain(&dmap->iovad); + kfree(dmap); +} diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h new file mode 100644 index 0000000..8e40b4a --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_MMU_H +#define IPU6_MMU_H + +#define ISYS_MMID 1 +#define PSYS_MMID 0 + +#include +#include +#include + +struct device; +struct page; +struct ipu6_hw_variants; + +struct ipu6_mmu_info { + struct device *dev; + + u32 *l1_pt; + u32 l1_pt_dma; + u32 **l2_pts; + + u32 *dummy_l2_pt; + u32 dummy_l2_pteval; + void *dummy_page; + u32 dummy_page_pteval; + + dma_addr_t aperture_start; + dma_addr_t aperture_end; + unsigned long pgsize_bitmap; + + spinlock_t lock; /* Serialize access to users */ + struct ipu6_dma_mapping *dmap; +}; + +struct ipu6_mmu { + struct list_head node; + + struct ipu6_mmu_hw *mmu_hw; + unsigned int nr_mmus; + unsigned int mmid; + + phys_addr_t pgtbl; + struct device *dev; + + struct ipu6_dma_mapping *dmap; + struct list_head vma_list; + + struct page *trash_page; + dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ + dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */ + + bool ready; + spinlock_t ready_lock; /* Serialize access to bool ready */ + + void (*tlb_invalidate)(struct ipu6_mmu *mmu); +}; + +struct ipu6_mmu *ipu6_mmu_init(struct device *dev, + void __iomem *base, int mmid, + const struct ipu6_hw_variants *hw); +void ipu6_mmu_cleanup(struct ipu6_mmu *mmu); +int ipu6_mmu_hw_init(struct ipu6_mmu *mmu); +void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu); 
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size); +void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, + size_t size); +phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, + dma_addr_t iova); +#endif diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h new file mode 100644 index 0000000..efd65e4 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023--2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H +#define IPU6_PLATFORM_BUTTRESS_REGS_H + +#include + +/* IS_WORKPOINT_REQ */ +#define IPU6_BUTTRESS_REG_IS_FREQ_CTL 0x34 +/* PS_WORKPOINT_REQ */ +#define IPU6_BUTTRESS_REG_PS_FREQ_CTL 0x38 + +/* should be tuned for real silicon */ +#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO 0x08 +#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a +#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO 0x0d + +#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10 +#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708 + +#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 +#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK GENMASK(4, 3) + +#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 +#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK GENMASK(7, 6) + +#define IPU6_BUTTRESS_PWR_STATE_DN_DONE 0x0 +#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 +#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 +#define IPU6_BUTTRESS_PWR_STATE_UP_DONE 0x3 + +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0 0x270 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1 0x274 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2 0x278 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_4 0x280 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5 0x284 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6 0x288 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c + 
+#define BUTTRESS_REG_WDT 0x8 +#define BUTTRESS_REG_BTRS_CTRL 0xc +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) +#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8) + +#define BUTTRESS_REG_FW_RESET_CTL 0x30 +#define BUTTRESS_FW_RESET_CTL_START BIT(0) +#define BUTTRESS_FW_RESET_CTL_DONE BIT(1) + +#define BUTTRESS_REG_IS_FREQ_CTL 0x34 +#define BUTTRESS_REG_PS_FREQ_CTL 0x38 + +#define BUTTRESS_FREQ_CTL_START BIT(31) +#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL GENMASK(19, 16) +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK GENMASK(15, 8) +#define BUTTRESS_FREQ_CTL_RATIO_MASK GENMASK(7, 0) + +#define BUTTRESS_REG_PWR_STATE 0x5c + +#define BUTTRESS_PWR_STATE_RESET 0x0 +#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 +#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 +#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 + +#define BUTTRESS_PWR_STATE_HH_STATUS_MASK GENMASK(12, 11) + +enum { + BUTTRESS_PWR_STATE_HH_STATE_IDLE, + BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, + BUTTRESS_PWR_STATE_HH_STATE_DONE, + BUTTRESS_PWR_STATE_HH_STATE_ERR, +}; + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK GENMASK(23, 19) + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe 
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK GENMASK(28, 24) + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 + +#define BUTTRESS_REG_SECURITY_CTL 0x300 +#define BUTTRESS_REG_SKU 0x314 +#define BUTTRESS_REG_SECURITY_TOUCH 0x318 +#define BUTTRESS_REG_CAMERA_MASK 0x84 + +#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) +#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) + +#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) +#define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) +#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) + +#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 +#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C +#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80 + +#define 
BUTTRESS_REG_ISR_STATUS 0x90 +#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 +#define BUTTRESS_REG_ISR_ENABLE 0x98 +#define BUTTRESS_REG_ISR_CLEAR 0x9C + +#define BUTTRESS_ISR_IS_IRQ BIT(0) +#define BUTTRESS_ISR_PS_IRQ BIT(1) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) +#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) +#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) +#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) +#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) +#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) +#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) +#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) +#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) +#define BUTTRESS_ISR_HW_ASSERTION BIT(12) +#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13) +#define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14) +#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15) +#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16) +#define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17) +#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18) +#define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19) +#define BUTTRESS_ISR_UFI_ERROR BIT(20) + +#define BUTTRESS_REG_IU2CSEDB0 0x100 + +#define BUTTRESS_IU2CSEDB0_BUSY BIT(31) +#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 + +#define BUTTRESS_REG_IU2CSEDATA0 0x104 + +#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 +#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 + +#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) +#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) +#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) +#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) + +#define BUTTRESS_REG_IU2CSECSR 0x108 + +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) +#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) +#define 
BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) +#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) + +#define BUTTRESS_REG_CSE2IUDB0 0x304 +#define BUTTRESS_REG_CSE2IUCSR 0x30C +#define BUTTRESS_REG_CSE2IUDATA0 0x308 + +/* 0x20 == NACK, 0xf == unknown command */ +#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 +#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0) + +#define BUTTRESS_REG_ISH2IUCSR 0x50 +#define BUTTRESS_REG_ISH2IUDB0 0x54 +#define BUTTRESS_REG_ISH2IUDATA0 0x58 + +#define BUTTRESS_REG_IU2ISHDB0 0x10C +#define BUTTRESS_REG_IU2ISHDATA0 0x110 +#define BUTTRESS_REG_IU2ISHDATA1 0x114 +#define BUTTRESS_REG_IU2ISHCSR 0x118 + +#define BUTTRESS_REG_FABRIC_CMD 0x88 + +#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) +#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) + +#define BUTTRESS_REG_TSW_CTL 0x120 +#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) + +#define BUTTRESS_REG_TSC_LO 0x164 +#define BUTTRESS_REG_TSC_HI 0x168 + +#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ + BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ) + +#define BUTTRESS_EVENT (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ + BUTTRESS_ISR_SAI_VIOLATION) +#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h new file mode 100644 index 0000000..cc58377 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023--2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_ISYS_CSI2_REG_H +#define IPU6_PLATFORM_ISYS_CSI2_REG_H + +#include + +#define CSI_REG_BASE 0x220000 +#define CSI_REG_PORT_BASE(id) (CSI_REG_BASE + (id) * 0x1000) + +/* CSI Port Genral Purpose Registers */ +#define 
CSI_REG_PORT_GPREG_SRST 0x0 +#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4 +#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8 + +/* + * Port IRQs mapping events: + * IRQ0 - CSI_FE event + * IRQ1 - CSI_SYNC + * IRQ2 - S2M_SIDS0TO7 + * IRQ3 - S2M_SIDS8TO15 + */ +#define CSI_PORT_REG_BASE_IRQ_CSI 0x80 +#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0 +#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0 +#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0 + +#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0 +#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4 +#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8 +#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc +#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10 +#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14 + +#define IPU6SE_CSI_RX_ERROR_IRQ_MASK GENMASK(18, 0) +#define IPU6_CSI_RX_ERROR_IRQ_MASK GENMASK(19, 0) + +#define CSI_RX_NUM_ERRORS_IN_IRQ 20 +#define CSI_RX_NUM_IRQ 32 + +#define IPU_CSI_RX_IRQ_FS_VC(chn) (1 << ((chn) * 2)) +#define IPU_CSI_RX_IRQ_FE_VC(chn) (2 << ((chn) * 2)) + +/* PPI2CSI */ +#define CSI_REG_PPI2CSI_ENABLE 0x200 +#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204 +#define PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK GENMASK(4, 3) +#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208 + +enum CSI_PPI2CSI_CTRL { + CSI_PPI2CSI_DISABLE = 0, + CSI_PPI2CSI_ENABLE = 1, +}; + +/* CSI_FE */ +#define CSI_REG_CSI_FE_ENABLE 0x280 +#define CSI_REG_CSI_FE_MODE 0x284 +#define CSI_REG_CSI_FE_MUX_CTRL 0x288 +#define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290 + +enum CSI_FE_ENABLE_TYPE { + CSI_FE_DISABLE = 0, + CSI_FE_ENABLE = 1, +}; + +enum CSI_FE_MODE_TYPE { + CSI_FE_DPHY_MODE = 0, + CSI_FE_CPHY_MODE = 1, +}; + +enum CSI_FE_INPUT_SELECTOR { + CSI_SENSOR_INPUT = 0, + CSI_MIPIGEN_INPUT = 1, +}; + +enum CSI_FE_SYNC_CNTR_SEL_TYPE { + CSI_CNTR_SENSOR_LINE_ID = BIT(0), + CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID, + CSI_CNTR_SENSOR_FRAME_ID = BIT(1), + CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID, +}; + +/* CSI HUB General Purpose 
Registers */ +#define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000) +#define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004) + +#define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4) +#define CSI_REG_HUB_FW_ACCESS_PORT_OFS 0x17000 +#define CSI_REG_HUB_FW_ACCESS_PORT_V6OFS 0x16000 +#define CSI_REG_HUB_FW_ACCESS_PORT(ofs, id) \ + (CSI_REG_BASE + (ofs) + (id) * 4) + +enum CSI_PORT_CLK_GATING_SWITCH { + CSI_PORT_CLK_GATING_OFF = 0, + CSI_PORT_CLK_GATING_ON = 1, +}; + +#define CSI_REG_BASE_HUB_IRQ 0x18200 + +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214 + +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234 + +/* MTL IPU6V6 irq ctrl0 & ctrl1 */ +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238700 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238704 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238708 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23870c +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238710 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238714 + +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238720 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238724 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238728 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23872c +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238730 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238734 + +/* + * 
3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits) + * [0] CSI_PORT.IRQ_CTRL0_csi + * [1] CSI_PORT.IRQ_CTRL1_csi_sync + * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7 + * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15 + */ +#define IPU6_ISYS_UNISPART_IRQ_CSI2(port) \ + (0x3 << ((port) * IPU6_CSI_IRQ_NUM_PER_PIPE)) + +/* + * ipu6se support 2 front ends, 2 port per front end, 4 ports 0..3 + * sip0 - 0, 1 + * sip1 - 2, 3 + * 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes + * all offset are base from isys base address + */ + +#define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4) +#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4) + +#define CSI2_HUB_GPREG_DPHY_TIMER_INCR 0x238040 +#define CSI2_HUB_GPREG_HPLL_FREQ 0x238044 +#define CSI2_HUB_GPREG_IS_CLK_RATIO 0x238048 +#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE 0x23804c +#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE 0x238058 +#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL 0x23805c +#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL 0x238088 +#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL 0x2380a4 +#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL 0x2380d0 + +#define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48) +#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48) +#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48) + +/* offset from port base */ +#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL 0x0 +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE 0x4 +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE 0x8 +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8) +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8) + +#endif /* IPU6_ISYS_CSI2_REG_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h new file mode 100644 index 0000000..b883385 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h @@ -0,0 
+1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2018 - 2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_REGS_H +#define IPU6_PLATFORM_REGS_H + +#include + +/* + * IPU6 uses uniform address within IPU6, therefore all subsystem registers + * locates in one single space starts from 0 but in different sctions with + * different addresses, the subsystem offsets are defined to 0 as the + * register definition will have the address offset to 0. + */ +#define IPU6_UNIFIED_OFFSET 0 + +#define IPU6_ISYS_IOMMU0_OFFSET 0x2e0000 +#define IPU6_ISYS_IOMMU1_OFFSET 0x2e0500 +#define IPU6_ISYS_IOMMUI_OFFSET 0x2e0a00 + +#define IPU6_PSYS_IOMMU0_OFFSET 0x1b0000 +#define IPU6_PSYS_IOMMU1_OFFSET 0x1b0700 +#define IPU6_PSYS_IOMMU1R_OFFSET 0x1b0e00 +#define IPU6_PSYS_IOMMUI_OFFSET 0x1b1500 + +/* the offset from IOMMU base register */ +#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET 0x4c +#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c + +#define IPU6_MMU_INFO_OFFSET 0x8 + +#define IPU6_ISYS_SPC_OFFSET 0x210000 + +#define IPU6SE_PSYS_SPC_OFFSET 0x110000 +#define IPU6_PSYS_SPC_OFFSET 0x118000 + +#define IPU6_ISYS_DMEM_OFFSET 0x200000 +#define IPU6_PSYS_DMEM_OFFSET 0x100000 + +#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000 +#define IPU6_REG_ISYS_UNISPART_IRQ_MASK 0x27c004 +#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008 +#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c +#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010 +#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014 +#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414 +#define IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418 +#define IPU6_ISYS_UNISPART_IRQ_CSI0 BIT(2) +#define IPU6_ISYS_UNISPART_IRQ_CSI1 BIT(3) +#define IPU6_ISYS_UNISPART_IRQ_SW BIT(22) + +#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c 
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214 + +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114 + +/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ +#define IPU6_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4)) + +#define IPU6_CSI_IRQ_NUM_PER_PIPE 4 +#define IPU6SE_ISYS_CSI_PORT_NUM 4 +#define IPU6_ISYS_CSI_PORT_NUM 8 + +#define IPU6_ISYS_CSI_PORT_IRQ(irq_num) BIT(irq_num) + +/* PKG DIR OFFSET in IMR in secure mode */ +#define IPU6_PKG_DIR_IMR_OFFSET 0x40 + +#define IPU6_ISYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU6_ISYS_SPC_STATUS_START BIT(1) +#define IPU6_ISYS_SPC_STATUS_RUN BIT(3) +#define IPU6_ISYS_SPC_STATUS_READY BIT(5) +#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU6_PSYS_REG_SPC_STATUS_CTRL 0x0 +#define IPU6_PSYS_REG_SPC_START_PC 0x4 +#define IPU6_PSYS_REG_SPC_ICACHE_BASE 0x10 +#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 + +#define IPU6_PSYS_SPC_STATUS_START BIT(1) +#define IPU6_PSYS_SPC_STATUS_RUN BIT(3) +#define IPU6_PSYS_SPC_STATUS_READY BIT(5) +#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU6_PSYS_REG_SPP0_STATUS_CTRL 0x20000 + +#define IPU6_INFO_ENABLE_SNOOP BIT(0) +#define IPU6_INFO_DEC_FORCE_FLUSH BIT(1) +#define IPU6_INFO_DEC_PASS_THROUGH BIT(2) +#define IPU6_INFO_ZLW BIT(3) +#define IPU6_INFO_REQUEST_DESTINATION_IOSF BIT(9) +#define IPU6_INFO_IMR_BASE BIT(10) +#define IPU6_INFO_IMR_DESTINED BIT(11) + +#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF + +/* + * s2m_pixel_soc_pixel_remapping is 
dedicated for the enabling of the + * pixel s2m remp ability.Remap here means that s2m rearange the order + * of the pixels in each 4 pixels group. + * For examle, mirroring remping means that if input's 4 first pixels + * are 1 2 3 4 then in output we should see 4 3 2 1 in this 4 first pixels. + * 0xE4 is from s2m MAS document. It means no remapping. + */ +#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 +/* + * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping. + * This remapping is exactly like the stream2mmio remapping. + */ +#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 + +#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 +#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 +#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) +#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) +#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) + +enum ipu6_device_ab_group1_target_id { + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, +}; + +enum nci_ab_access_mode { + NCI_AB_ACCESS_MODE_RW, /* read & write */ + NCI_AB_ACCESS_MODE_RO, /* read only 
*/ + NCI_AB_ACCESS_MODE_WO, /* write only */ + NCI_AB_ACCESS_MODE_NA, /* No access at all */ +}; + +/* IRQ-related registers in PSYS */ +#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 +#define IPU6_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 +#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 +#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c +#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 +#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 +/* There are 8 FW interrupts, n = 0..7 */ +#define IPU6_PSYS_GPDEV_FWIRQ0 5 +#define IPU6_PSYS_GPDEV_FWIRQ1 6 +#define IPU6_PSYS_GPDEV_FWIRQ2 7 +#define IPU6_PSYS_GPDEV_FWIRQ3 8 +#define IPU6_PSYS_GPDEV_FWIRQ4 9 +#define IPU6_PSYS_GPDEV_FWIRQ5 10 +#define IPU6_PSYS_GPDEV_FWIRQ6 11 +#define IPU6_PSYS_GPDEV_FWIRQ7 12 +#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) +#define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) + +#endif /* IPU6_PLATFORM_REGS_H */ diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c new file mode 100644 index 0000000..1f4f20b --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-buttress.h" +#include "ipu6-cpd.h" +#include "ipu6-isys.h" +#include "ipu6-mmu.h" +#include "ipu6-platform-buttress-regs.h" +#include "ipu6-platform-isys-csi2-reg.h" +#include "ipu6-platform-regs.h" + +#define IPU6_PCI_BAR 0 + +struct ipu6_cell_program { + u32 magic_number; + + u32 blob_offset; + u32 blob_size; + + u32 start[3]; + + u32 icache_source; + u32 icache_target; + u32 icache_size; + + u32 pmem_source; + u32 pmem_target; + u32 pmem_size; + + u32 data_source; + u32 data_target; + u32 data_size; + + u32 bss_target; + u32 
bss_size; + + u32 cell_id; + u32 regs_addr; + + u32 cell_pmem_data_bus_address; + u32 cell_dmem_data_bus_address; + u32 cell_pmem_control_bus_address; + u32 cell_dmem_control_bus_address; + + u32 next; + u32 dummy[2]; +}; + +static struct ipu6_isys_internal_pdata isys_ipdata = { + .hw_variant = { + .offset = IPU6_UNIFIED_OFFSET, + .nr_mmus = 3, + .mmu_hw = { + { + .offset = IPU6_ISYS_IOMMU0_OFFSET, + .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF, + .nr_l1streams = 16, + .l1_block_sz = { + 3, 8, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 1, 1 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_ISYS_IOMMU1_OFFSET, + .info_bits = 0, + .nr_l1streams = 16, + .l1_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 1, 1, 4 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_ISYS_IOMMUI_OFFSET, + .info_bits = 0, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = false, + }, + }, + .cdc_fifos = 3, + .cdc_fifo_threshold = {6, 8, 2}, + .dmem_offset = IPU6_ISYS_DMEM_OFFSET, + .spc_offset = IPU6_ISYS_SPC_OFFSET, + }, + .isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN, +}; + +static struct ipu6_psys_internal_pdata psys_ipdata = { + .hw_variant = { + .offset = IPU6_UNIFIED_OFFSET, + .nr_mmus = 4, + .mmu_hw = { + { + .offset = IPU6_PSYS_IOMMU0_OFFSET, + .info_bits = + IPU6_INFO_REQUEST_DESTINATION_IOSF, + .nr_l1streams = 16, + .l1_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+ 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_PSYS_IOMMU1_OFFSET, + .info_bits = 0, + .nr_l1streams = 32, + .l1_block_sz = { + 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 10, + 5, 4, 14, 6, 4, 14, 6, 4, 8, + 4, 2, 1, 1, 1, 1, 14 + }, + .nr_l2streams = 32, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_PSYS_IOMMU1R_OFFSET, + .info_bits = 0, + .nr_l1streams = 16, + .l1_block_sz = { + 1, 4, 4, 4, 4, 16, 8, 4, 32, + 16, 16, 2, 2, 2, 1, 12 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_PSYS_IOMMUI_OFFSET, + .info_bits = 0, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = false, + }, + }, + .dmem_offset = IPU6_PSYS_DMEM_OFFSET, + }, +}; + +static const struct ipu6_buttress_ctrl isys_buttress_ctrl = { + .ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, + .freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL, + .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, + .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK, + .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, + .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, +}; + +static const struct ipu6_buttress_ctrl psys_buttress_ctrl = { + .ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, + .freq_ctl = 
IPU6_BUTTRESS_REG_PS_FREQ_CTL, + .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, + .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK, + .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, + .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, +}; + +static void +ipu6_pkg_dir_configure_spc(struct ipu6_device *isp, + const struct ipu6_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, + u64 *pkg_dir, dma_addr_t pkg_dir_vied_address) +{ + struct ipu6_cell_program *prog; + void __iomem *spc_base; + u32 server_fw_addr; + dma_addr_t dma_addr; + u32 pg_offset; + + server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2)); + if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX) + dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl); + else + dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl); + + pg_offset = server_fw_addr - dma_addr; + prog = (struct ipu6_cell_program *)((uintptr_t)isp->cpd_fw->data + + pg_offset); + spc_base = base + prog->regs_addr; + if (spc_base != (base + hw_variant->spc_offset)) + dev_warn(&isp->pdev->dev, + "SPC reg addr %p not matching value from CPD %p\n", + base + hw_variant->spc_offset, spc_base); + writel(server_fw_addr + prog->blob_offset + + prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE); + writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, + spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC); + writel(pkg_dir_vied_address, base + hw_variant->dmem_offset); +} + +void ipu6_configure_spc(struct ipu6_device *isp, + const struct ipu6_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr) +{ + void __iomem *dmem_base = base + hw_variant->dmem_offset; + void __iomem *spc_regs_base = base + hw_variant->spc_offset; + u32 val; + + val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); + val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); + 
+ if (isp->secure_mode) + writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base); + else + ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base, + pkg_dir, pkg_dir_dma_addr); +} +EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, "INTEL_IPU6"); + +#define IPU6_ISYS_CSI2_NPORTS 4 +#define IPU6SE_ISYS_CSI2_NPORTS 4 +#define IPU6_TGL_ISYS_CSI2_NPORTS 8 +#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6 + +static void ipu6_internal_pdata_init(struct ipu6_device *isp) +{ + u8 hw_ver = isp->hw_ver; + + isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS; + isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT; + isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE; + isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE; + isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START; + isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END; + isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS; + isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES; + isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX; + isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE; + isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS; + isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK; + isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; + isys_ipdata.csi2.ctrl0_irq_clear = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; + isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; + isys_ipdata.csi2.ctrl0_irq_enable = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; + isys_ipdata.csi2.ctrl0_irq_status = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; + isys_ipdata.csi2.ctrl0_irq_lnp = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; + isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver); + psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET; + isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS; + + if (is_ipu6ep(hw_ver)) { + isys_ipdata.ltr = IPU6EP_LTR_VALUE; + isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH; + } + + if (is_ipu6_tgl(hw_ver)) 
+ isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS; + + if (is_ipu6ep_mtl(hw_ver)) { + isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS; + + isys_ipdata.csi2.ctrl0_irq_edge = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; + isys_ipdata.csi2.ctrl0_irq_clear = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; + isys_ipdata.csi2.ctrl0_irq_mask = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; + isys_ipdata.csi2.ctrl0_irq_enable = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; + isys_ipdata.csi2.ctrl0_irq_lnp = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; + isys_ipdata.csi2.ctrl0_irq_status = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; + isys_ipdata.csi2.fw_access_port_ofs = + CSI_REG_HUB_FW_ACCESS_PORT_V6OFS; + isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE; + isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH; + } + + if (is_ipu6se(hw_ver)) { + isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS; + isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK; + isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS; + isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT; + isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE; + isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE; + isys_ipdata.sensor_type_start = + IPU6SE_FW_ISYS_SENSOR_TYPE_START; + isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END; + isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS; + isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES; + isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX; + isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE; + psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET; + } +} + +static struct ipu6_bus_device * +ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_isys_internal_pdata *ipdata) +{ + struct device *dev = &pdev->dev; + struct ipu6_bus_device *isys_adev; + struct ipu6_isys_pdata *pdata; + int ret; + + ret = ipu_bridge_init(dev, 
ipu_bridge_parse_ssdb); + if (ret) { + dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); + return ERR_PTR(ret); + } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_ISYS_NAME); + if (IS_ERR(isys_adev)) { + kfree(pdata); + return dev_err_cast_probe(dev, isys_adev, + "ipu6_bus_initialize_device isys failed\n"); + } + + isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(isys_adev->mmu)) { + put_device(&isys_adev->auxdev.dev); + kfree(pdata); + return dev_err_cast_probe(dev, isys_adev->mmu, + "ipu6_mmu_init(isys_adev->mmu) failed\n"); + } + + isys_adev->mmu->dev = &isys_adev->auxdev.dev; + + ret = ipu6_bus_add_device(isys_adev); + if (ret) { + kfree(pdata); + return ERR_PTR(ret); + } + + return isys_adev; +} + +static struct ipu6_bus_device * +ipu6_psys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_psys_internal_pdata *ipdata) +{ + struct ipu6_bus_device *psys_adev; + struct ipu6_psys_pdata *pdata; + int ret; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + + psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_PSYS_NAME); + if (IS_ERR(psys_adev)) { + kfree(pdata); + return dev_err_cast_probe(&pdev->dev, psys_adev, + "ipu6_bus_initialize_device psys failed\n"); + } + + psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(psys_adev->mmu)) { + put_device(&psys_adev->auxdev.dev); + kfree(pdata); + return dev_err_cast_probe(&pdev->dev, psys_adev->mmu, + "ipu6_mmu_init(psys_adev->mmu) failed\n"); + } + + psys_adev->mmu->dev = &psys_adev->auxdev.dev; + + ret = ipu6_bus_add_device(psys_adev); + if (ret) { + kfree(pdata); + return 
ERR_PTR(ret); + } + + return psys_adev; +} + +static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) +{ + int ret; + + /* No PCI msi capability for IPU6EP */ + if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { + /* likely do nothing as msi not enabled by default */ + pci_disable_msi(dev); + return 0; + } + + ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) + return dev_err_probe(&dev->dev, ret, "Request msi failed"); + + return 0; +} + +static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) +{ + u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); + + if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + + if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); +} + +static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; + struct device *dev = &pdev->dev; + void __iomem *isys_base = NULL; + void __iomem *psys_base = NULL; + struct ipu6_device *isp; + phys_addr_t phys; + u32 val, version, sku_id; + int ret; + + isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL); + if (!isp) + return -ENOMEM; + + isp->pdev = pdev; + INIT_LIST_HEAD(&isp->devices); + + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "Enable PCI device failed\n"); + + phys = pci_resource_start(pdev, IPU6_PCI_BAR); + dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys); + + isp->base = pcim_iomap_region(pdev, IPU6_PCI_BAR, IPU6_NAME); + if (IS_ERR(isp->base)) + return dev_err_probe(dev, PTR_ERR(isp->base), + "Failed to I/O mem remapping\n"); + + pci_set_drvdata(pdev, isp); + pci_set_master(pdev); + + isp->cpd_metadata_cmpnt_size = sizeof(struct 
ipu6_cpd_metadata_cmpnt); + switch (id->device) { + case PCI_DEVICE_ID_INTEL_IPU6: + isp->hw_ver = IPU6_VER_6; + isp->cpd_fw_name = IPU6_FIRMWARE_NAME; + break; + case PCI_DEVICE_ID_INTEL_IPU6SE: + isp->hw_ver = IPU6_VER_6SE; + isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME; + isp->cpd_metadata_cmpnt_size = + sizeof(struct ipu6se_cpd_metadata_cmpnt); + break; + case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP: + case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP: + isp->hw_ver = IPU6_VER_6EP; + isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME; + break; + case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN: + isp->hw_ver = IPU6_VER_6EP; + isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME; + break; + case PCI_DEVICE_ID_INTEL_IPU6EP_MTL: + isp->hw_ver = IPU6_VER_6EP_MTL; + isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME; + break; + default: + return dev_err_probe(dev, -ENODEV, + "Unsupported IPU6 device %x\n", + id->device); + } + + ipu6_internal_pdata_init(isp); + + isys_base = isp->base + isys_ipdata.hw_variant.offset; + psys_base = isp->base + psys_ipdata.hw_variant.offset; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39)); + if (ret) + return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); + + dma_set_max_seg_size(dev, UINT_MAX); + + ret = ipu6_pci_config_setup(pdev, isp->hw_ver); + if (ret) + return ret; + + ret = ipu6_buttress_init(isp); + if (ret) + return ret; + + ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "Requesting signed firmware %s failed\n", + isp->cpd_fw_name); + goto buttress_exit; + } + + ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "Failed to validate cpd\n"); + goto out_ipu6_bus_del_devices; + } + + isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, + sizeof(isys_buttress_ctrl), GFP_KERNEL); + if (!isys_ctrl) { + ret = -ENOMEM; + goto out_ipu6_bus_del_devices; + } + + isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, + &isys_ipdata); + 
if (IS_ERR(isp->isys)) { + ret = PTR_ERR(isp->isys); + goto out_ipu6_bus_del_devices; + } + + psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl, + sizeof(psys_buttress_ctrl), GFP_KERNEL); + if (!psys_ctrl) { + ret = -ENOMEM; + goto out_ipu6_bus_del_devices; + } + + isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl, + psys_base, &psys_ipdata); + if (IS_ERR(isp->psys)) { + ret = PTR_ERR(isp->psys); + goto out_ipu6_bus_del_devices; + } + + ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); + if (ret < 0) + goto out_ipu6_bus_del_devices; + + ret = ipu6_mmu_hw_init(isp->psys->mmu); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "Failed to set MMU hardware\n"); + goto out_ipu6_bus_del_devices; + } + + ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw, + &isp->psys->fw_sgt); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n"); + goto out_ipu6_bus_del_devices; + } + + ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "failed to create pkg dir\n"); + goto out_ipu6_bus_del_devices; + } + + ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr, + ipu6_buttress_isr_threaded, + IRQF_SHARED, IPU6_NAME, isp); + if (ret) { + dev_err_probe(dev, ret, "Requesting irq failed\n"); + goto out_ipu6_bus_del_devices; + } + + ret = ipu6_buttress_authenticate(isp); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "FW authentication failed\n"); + goto out_free_irq; + } + + ipu6_mmu_hw_cleanup(isp->psys->mmu); + pm_runtime_put(&isp->psys->auxdev.dev); + + /* Configure the arbitration mechanisms for VC requests */ + ipu6_configure_vc_mechanism(isp); + + val = readl(isp->base + BUTTRESS_REG_SKU); + sku_id = FIELD_GET(GENMASK(6, 4), val); + version = FIELD_GET(GENMASK(3, 0), val); + dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id, + pdev->device, isp->hw_ver); + + pm_runtime_put_noidle(dev); + pm_runtime_allow(dev); + + 
isp->bus_ready_to_probe = true; + + return 0; + +out_free_irq: + devm_free_irq(dev, pdev->irq, isp); +out_ipu6_bus_del_devices: + if (isp->psys) { + ipu6_cpd_free_pkg_dir(isp->psys); + ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); + } + if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) + ipu6_mmu_cleanup(isp->psys->mmu); + if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) + ipu6_mmu_cleanup(isp->isys->mmu); + ipu6_bus_del_devices(pdev); + release_firmware(isp->cpd_fw); +buttress_exit: + ipu6_buttress_exit(isp); + + return ret; +} + +static void ipu6_pci_remove(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + struct ipu6_mmu *isys_mmu = isp->isys->mmu; + struct ipu6_mmu *psys_mmu = isp->psys->mmu; + + devm_free_irq(&pdev->dev, pdev->irq, isp); + ipu6_cpd_free_pkg_dir(isp->psys); + + ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); + ipu6_buttress_exit(isp); + + ipu6_bus_del_devices(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + release_firmware(isp->cpd_fw); + + ipu6_mmu_cleanup(psys_mmu); + ipu6_mmu_cleanup(isys_mmu); +} + +static void ipu6_pci_reset_prepare(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + + pm_runtime_forbid(&isp->pdev->dev); +} + +static void ipu6_pci_reset_done(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + + ipu6_buttress_restore(isp); + if (isp->secure_mode) + ipu6_buttress_reset_authentication(isp); + + isp->need_ipc_reset = true; + pm_runtime_allow(&isp->pdev->dev); +} + +/* + * PCI base driver code requires driver to provide these to enable + * PCI device level PM state transitions (D0<->D3) + */ +static int ipu6_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + synchronize_irq(pdev->irq); + return 0; +} + +static int ipu6_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu6_device *isp = 
pci_get_drvdata(pdev); + struct ipu6_buttress *b = &isp->buttress; + int ret; + + /* Configure the arbitration mechanisms for VC requests */ + ipu6_configure_vc_mechanism(isp); + + isp->secure_mode = ipu6_buttress_get_secure_mode(isp); + dev_info(dev, "IPU6 in %s mode\n", + isp->secure_mode ? "secure" : "non-secure"); + + ipu6_buttress_restore(isp); + + ret = ipu6_buttress_ipc_reset(isp, &b->cse); + if (ret) + dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); + + ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); + if (ret < 0) { + dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); + return 0; + } + + ret = ipu6_buttress_authenticate(isp); + if (ret) + dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret); + + pm_runtime_put(&isp->psys->auxdev.dev); + + return 0; +} + +static int ipu6_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu6_device *isp = pci_get_drvdata(pdev); + int ret; + + ipu6_configure_vc_mechanism(isp); + ipu6_buttress_restore(isp); + + if (isp->need_ipc_reset) { + struct ipu6_buttress *b = &isp->buttress; + + isp->need_ipc_reset = false; + ret = ipu6_buttress_ipc_reset(isp, &b->cse); + if (ret) + dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); + } + + return 0; +} + +static const struct dev_pm_ops ipu6_pm_ops = { + SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume) + RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL) +}; + +MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl); + +static const struct pci_error_handlers pci_err_handlers = { + .reset_prepare = ipu6_pci_reset_prepare, + .reset_done = ipu6_pci_reset_done, +}; + +static struct pci_driver ipu6_pci_driver = { + .name = IPU6_NAME, + .id_table = ipu6_pci_tbl, + .probe = ipu6_pci_probe, + .remove = ipu6_pci_remove, + .driver = { + .pm = pm_ptr(&ipu6_pm_ops), + }, + .err_handler = &pci_err_handlers, +}; + +module_pci_driver(ipu6_pci_driver); + +MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); +MODULE_AUTHOR("Sakari Ailus "); 
+MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Qingwu Zhang "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Hongju Wang "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel IPU6 PCI driver"); diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.h new file mode 100644 index 0000000..92e3c34 --- /dev/null +++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013 - 2024 Intel Corporation */ + +#ifndef IPU6_H +#define IPU6_H + +#include +#include +#include + +#include "ipu6-buttress.h" + +struct firmware; +struct pci_dev; +struct ipu6_bus_device; + +#define IPU6_NAME "intel-ipu6" +#define IPU6_MEDIA_DEV_MODEL_NAME "ipu6" + +#define IPU6SE_FIRMWARE_NAME "intel/ipu/ipu6se_fw.bin" +#define IPU6EP_FIRMWARE_NAME "intel/ipu/ipu6ep_fw.bin" +#define IPU6_FIRMWARE_NAME "intel/ipu/ipu6_fw.bin" +#define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" +#define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" + +enum ipu6_version { + IPU6_VER_INVALID = 0, + IPU6_VER_6 = 1, + IPU6_VER_6SE = 3, + IPU6_VER_6EP = 5, + IPU6_VER_6EP_MTL = 6, +}; + +/* + * IPU6 - TGL + * IPU6SE - JSL + * IPU6EP - ADL/RPL + * IPU6EP_MTL - MTL + */ +static inline bool is_ipu6se(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6SE; +} + +static inline bool is_ipu6ep(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6EP; +} + +static inline bool is_ipu6ep_mtl(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6EP_MTL; +} + +static inline bool is_ipu6_tgl(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6; +} + +/* + * ISYS DMA can overshoot. For higher resolutions over allocation is one line + * but it must be at minimum 1024 bytes. Value could be different in + * different versions / generations thus provide it via platform data. 
+ */ +#define IPU6_ISYS_OVERALLOC_MIN 1024 + +/* Physical pages in GDA is 128, page size is 2K for IPU6, 1K for others */ +#define IPU6_DEVICE_GDA_NR_PAGES 128 + +/* Virtualization factor to calculate the available virtual pages */ +#define IPU6_DEVICE_GDA_VIRT_FACTOR 32 + +struct ipu6_device { + struct pci_dev *pdev; + struct list_head devices; + struct ipu6_bus_device *isys; + struct ipu6_bus_device *psys; + struct ipu6_buttress buttress; + + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u32 cpd_metadata_cmpnt_size; + + void __iomem *base; + bool need_ipc_reset; + bool secure_mode; + u8 hw_ver; + bool bus_ready_to_probe; +}; + +#define IPU6_ISYS_NAME "isys" +#define IPU6_PSYS_NAME "psys" + +#define IPU6_MMU_MAX_DEVICES 4 +#define IPU6_MMU_ADDR_BITS 32 +/* The firmware is accessible within the first 2 GiB only in non-secure mode. */ +#define IPU6_MMU_ADDR_BITS_NON_SECURE 31 + +#define IPU6_MMU_MAX_TLB_L1_STREAMS 32 +#define IPU6_MMU_MAX_TLB_L2_STREAMS 32 +#define IPU6_MAX_LI_BLOCK_ADDR 128 +#define IPU6_MAX_L2_BLOCK_ADDR 64 + +#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX +#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX + +/* + * To maximize the IOSF utlization, IPU6 need to send requests in bursts. + * At the DMA interface with the buttress, there are CDC FIFOs with burst + * collection capability. CDC FIFO burst collectors have a configurable + * threshold and is configured based on the outcome of performance measurements. + * + * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 + * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 + * + * Threshold values are pre-defined and are arrived at after performance + * evaluations on a type of IPU6 + */ +#define IPU6_MAX_VC_IOSF_PORTS 4 + +/* + * IPU6 must configure correct arbitration mechanism related to the IOSF VC + * requests. There are two options per VC0 and VC1 - > 0 means rearbitrate on + * stall and 1 means stall until the request is completed. 
+ */ +#define IPU6_BTRS_ARB_MODE_TYPE_REARB 0 +#define IPU6_BTRS_ARB_MODE_TYPE_STALL 1 + +/* Currently chosen arbitration mechanism for VC0 */ +#define IPU6_BTRS_ARB_STALL_MODE_VC0 \ + IPU6_BTRS_ARB_MODE_TYPE_REARB + +/* Currently chosen arbitration mechanism for VC1 */ +#define IPU6_BTRS_ARB_STALL_MODE_VC1 \ + IPU6_BTRS_ARB_MODE_TYPE_REARB + +/* + * MMU Invalidation HW bug workaround by ZLW mechanism + * + * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in + * wrong translation or replication of the translation. This will cause data + * corruption. So we cannot directly use the MMU V2 invalidation registers + * to invalidate the MMU. Instead, whenever an invalidate is called, we need to + * clear the TLB by evicting all the valid translations by filling it with trash + * buffer (which is guaranteed not to be used by any other processes). ZLW is + * used to fill the L1 and L2 caches with the trash buffer translations. ZLW + * or Zero length write, is pre-fetch mechanism to pre-fetch the pages in + * advance to the L1 and L2 caches without triggering any memory operations. + * + * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream + * One L1 block has 16 entries, hence points to 16 * 4K pages + * L2 -> 16 streams and 32 blocks. 2 blocks per streams + * One L2 block maps to 1024 L1 entries, hence points to 4MB address range + * 2 blocks per L2 stream means, 1 stream points to 8MB range + * + * As we need to clear the caches and 8MB being the biggest cache size, we need + * to have trash buffer which points to 8MB address range. As these trash + * buffers are not used for any memory transactions, we need only the least + * amount of physical memory. So we reserve 8MB IOVA address range but only + * one page is reserved from physical memory. Each of this 8MB IOVA address + * range is then mapped to the same physical memory page. 
+ */ +/* One L2 entry maps 1024 L1 entries and one L1 entry per page */ +#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE) +/* Max L2 blocks per stream */ +#define IPU6_MMUV2_MAX_L2_BLOCKS 2 +/* Max L1 blocks per stream */ +#define IPU6_MMUV2_MAX_L1_BLOCKS 16 +#define IPU6_MMUV2_TRASH_RANGE (IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS) +/* Entries per L1 block */ +#define MMUV2_ENTRIES_PER_L1_BLOCK 16 +#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE) +#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU6_MMUV2_L2_RANGE + +/* + * In some of the IPU6 MMUs, there is provision to configure L1 and L2 page + * table caches. Both these L1 and L2 caches are divided into multiple sections + * called streams. There is maximum 16 streams for both caches. Each of these + * sections are subdivided into multiple blocks. When nr_l1streams = 0 and + * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support + * L1/L2 page table caches. + * + * L1 stream per block sizes are configurable and varies per usecase. + * L2 has constant block sizes - 2 blocks per stream. + * + * MMU1 support pre-fetching of the pages to have less cache lookup misses. To + * enable the pre-fetching, MMU1 AT (Address Translator) device registers + * need to be configured. + * + * There are four types of memory accesses which requires ZLW configuration. + * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU. + * + * 1. Sequential Access or 1D mode + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 1 + * Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where + * N is pre-defined and hardcoded in the platform data + * Set ZLW_2D -> 0 + * + * 2. ZLW 2D mode + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 1, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 1 + * + * 3. ZLW Enable (no 1D or 2D mode) + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 0, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 0 + * + * 4. 
ZLW disable + * Set ZLW_EN -> 0 + * set ZLW_PAGE_CROSS_1D -> 0, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 0 + * + * To configure the ZLW for the above memory access, four registers are + * available. Hence to track these four settings, we have the following entries + * in the struct ipu6_mmu_hw. Each of these entries are per stream and + * available only for the L1 streams. + * + * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN) + * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary + * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted + * Insert ZLW request N pages ahead address. + * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D) + * + * + * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined + * as per the usecase specific calculations. Any change to this pre-defined + * table has to happen in sync with IPU6 FW. + */ +struct ipu6_mmu_hw { + union { + unsigned long offset; + void __iomem *base; + }; + u32 info_bits; + u8 nr_l1streams; + /* + * L1 has variable blocks per stream - total of 64 blocks and maximum of + * 16 blocks per stream. Configurable by using the block start address + * per stream. Block start address is calculated from the block size + */ + u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS]; + /* Is ZLW is enabled in each stream */ + bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; + u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; + + u32 l1_stream_id_reg_offset; + u32 l2_stream_id_reg_offset; + + u8 nr_l2streams; + /* + * L2 has fixed 2 blocks per stream. 
Block address is calculated + * from the block size + */ + u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS]; + /* flag to track if WA is needed for successive invalidate HW bug */ + bool insert_read_before_invalidate; +}; + +struct ipu6_mmu_pdata { + u32 nr_mmus; + struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; + int mmid; +}; + +struct ipu6_isys_csi2_pdata { + void __iomem *base; +}; + +struct ipu6_isys_internal_csi2_pdata { + u32 nports; + u32 irq_mask; + u32 ctrl0_irq_edge; + u32 ctrl0_irq_clear; + u32 ctrl0_irq_mask; + u32 ctrl0_irq_enable; + u32 ctrl0_irq_lnp; + u32 ctrl0_irq_status; + u32 fw_access_port_ofs; +}; + +struct ipu6_isys_internal_tpg_pdata { + u32 ntpgs; + u32 *offsets; + u32 *sels; +}; + +struct ipu6_hw_variants { + unsigned long offset; + u32 nr_mmus; + struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; + u8 cdc_fifos; + u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS]; + u32 dmem_offset; + u32 spc_offset; +}; + +struct ipu6_isys_internal_pdata { + struct ipu6_isys_internal_csi2_pdata csi2; + struct ipu6_hw_variants hw_variant; + u32 num_parallel_streams; + u32 isys_dma_overshoot; + u32 sram_gran_shift; + u32 sram_gran_size; + u32 max_sram_size; + u32 max_streams; + u32 max_send_queues; + u32 max_sram_blocks; + u32 max_devq_size; + u32 sensor_type_start; + u32 sensor_type_end; + u32 ltr; + u32 memopen_threshold; + bool enhanced_iwake; +}; + +struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; +}; + +struct ipu6_psys_internal_pdata { + struct ipu6_hw_variants hw_variant; +}; + +struct ipu6_psys_pdata { + void __iomem *base; + const struct ipu6_psys_internal_pdata *ipdata; +}; + +int ipu6_fw_authenticate(void *data, u64 val); +void ipu6_configure_spc(struct ipu6_device *isp, + const struct ipu6_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr); +#endif /* IPU6_H */ diff --git a/dkms.conf b/dkms.conf index 87055af..67e67f8 100644 --- a/dkms.conf +++ 
b/dkms.conf @@ -16,23 +16,29 @@ SUBLEVEL=$(echo $kernelver | sed -ne 's/\([0-9]\+\)\.\([0-9]\+\)\.\([0-9]\+\)\-\ srctree=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build #We will invoke multiple make directives to build all the required kernel modules, then manually copy them to the staged directory for dkms install rule -MAKE="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir && \ - if [ -d ${KBASE}/drivers/media/v4l2-core ]; then rmdir ${KBASE}/drivers/media/v4l2-core ; fi &&\ +MAKE="make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' KERNEL_SRC=$kernel_source_dir && \ + if [ -d ${KBASE}/drivers/media/v4l2-core ]; then rmdir ${KBASE}/drivers/media/v4l2-core ; fi &&\ make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core videodev.ko DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' &&\ - mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/" + mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/ && \ + if [ -d ${KBASE}/drivers/media/pci/intel/ipu6 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu6 ; fi &&\ + make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 intel-ipu6-isys.ko intel-ipu6.ko DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ + mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/drivers/media/pci/intel/ipu6/" MAKE_MATCH[1]="^(6.1[7])" CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && \ - make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean" + make -C ${kernel_source_dir} 
M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean clean && \ + make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 clean" AUTOINSTALL="yes" #Patches for Kernel 6.17 PATCH[0]="0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch" PATCH[1]="0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH[2]="0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch" PATCH_MATCH[0]="^(6.1[7])" PATCH_MATCH[1]="^(6.1[7])" +PATCH_MATCH[2]="^(6.1[7])" i=0 @@ -42,6 +48,16 @@ DEST_MODULE_NAME[$i]="videodev" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no +BUILT_MODULE_NAME[$((++i))]="intel-ipu6" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu6" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="intel-ipu6-isys" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu6" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + BUILT_MODULE_NAME[$((++i))]="ipu-acpi" BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" DEST_MODULE_LOCATION[$i]="/updates" diff --git a/patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch b/patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch new file mode 100644 index 0000000..31f9366 --- /dev/null +++ b/patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch @@ -0,0 +1,48 @@ +From 11adf12607a6dab13805325def41b6c87184bb31 Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Thu, 4 Dec 2025 00:50:25 -0700 +Subject: [PATCH 3/3] ipu6-dkms: add isys makefile adaptation 6.17 + +Signed-off-by: florent pirou +--- + 6.17.0/drivers/media/pci/intel/ipu6/Makefile | 5 +++++ + 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + + 6.17.0/drivers/media/pci/intel/ipu6/ipu6.c | 1 + + 3 files changed, 7 insertions(+) + +diff --git a/6.17.0/drivers/media/pci/intel/ipu6/Makefile b/6.17.0/drivers/media/pci/intel/ipu6/Makefile +index a821b0a..f15fd92 100644 +--- a/6.17.0/drivers/media/pci/intel/ipu6/Makefile ++++ 
b/6.17.0/drivers/media/pci/intel/ipu6/Makefile +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++CC := ${CC} -I ${M}/../../../include-overrides ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ ++export CONFIG_VIDEO_INTEL_IPU6 = m ++ + intel-ipu6-y := ipu6.o \ + ipu6-bus.o \ + ipu6-dma.o \ +diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index fc0ec0a..f048f05 100644 +--- a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1378,3 +1378,4 @@ MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 input system driver"); + MODULE_IMPORT_NS("INTEL_IPU6"); + MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c +index 1f4f20b..abada2f 100644 +--- a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -844,3 +844,4 @@ MODULE_AUTHOR("Yunliang Ding "); + MODULE_AUTHOR("Hongju Wang "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 PCI driver"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + From 376be3a1958f8adadb89cdf752e920510dbb556a Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 12:06:55 -0700 Subject: [PATCH 05/14] v4l2-core, ipu6-isys : add 6.12 v4l2_subdev_enable_streams_api support * ipu6-isys : add intel-lts v6.12.41 quilt ontop of upstream v6.12 * ipu6-psys : add intel-lts v6.12.41 quilt ontop of upstream v6.12 * v4l2-core : add v4l2_subdev_enable_streams_api support ontop of upstream v6.12 Signed-off-by: Florent Pirou --- 6.12.0/drivers/media/pci/intel/ipu6/Kconfig | 18 + 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 23 + .../drivers/media/pci/intel/ipu6/ipu6-bus.c | 159 + .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 58 + .../media/pci/intel/ipu6/ipu6-buttress.c | 933 ++ 
.../media/pci/intel/ipu6/ipu6-buttress.h | 92 + .../drivers/media/pci/intel/ipu6/ipu6-cpd.c | 362 + .../drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 + .../drivers/media/pci/intel/ipu6/ipu6-dma.c | 492 + .../drivers/media/pci/intel/ipu6/ipu6-dma.h | 49 + .../media/pci/intel/ipu6/ipu6-fw-com.c | 413 + .../media/pci/intel/ipu6/ipu6-fw-com.h | 47 + .../media/pci/intel/ipu6/ipu6-fw-isys.c | 487 + .../media/pci/intel/ipu6/ipu6-fw-isys.h | 596 ++ .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 649 ++ .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 80 + .../media/pci/intel/ipu6/ipu6-isys-dwc-phy.c | 536 ++ .../media/pci/intel/ipu6/ipu6-isys-jsl-phy.c | 242 + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 720 ++ .../media/pci/intel/ipu6/ipu6-isys-queue.c | 847 ++ .../media/pci/intel/ipu6/ipu6-isys-queue.h | 79 + .../media/pci/intel/ipu6/ipu6-isys-subdev.c | 403 + .../media/pci/intel/ipu6/ipu6-isys-subdev.h | 59 + .../media/pci/intel/ipu6/ipu6-isys-video.c | 1392 +++ .../media/pci/intel/ipu6/ipu6-isys-video.h | 141 + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 1382 +++ .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 206 + .../drivers/media/pci/intel/ipu6/ipu6-mmu.c | 852 ++ .../drivers/media/pci/intel/ipu6/ipu6-mmu.h | 73 + .../intel/ipu6/ipu6-platform-buttress-regs.h | 226 + .../intel/ipu6/ipu6-platform-isys-csi2-reg.h | 172 + .../media/pci/intel/ipu6/ipu6-platform-regs.h | 179 + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 850 ++ 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 342 + 6.12.0/drivers/media/v4l2-core/Kconfig | 84 + 6.12.0/drivers/media/v4l2-core/Makefile | 37 + 6.12.0/drivers/media/v4l2-core/tuner-core.c | 1424 +++ 6.12.0/drivers/media/v4l2-core/v4l2-async.c | 976 ++ 6.12.0/drivers/media/v4l2-core/v4l2-cci.c | 203 + 6.12.0/drivers/media/v4l2-core/v4l2-common.c | 638 ++ .../media/v4l2-core/v4l2-compat-ioctl32.c | 1210 +++ .../drivers/media/v4l2-core/v4l2-ctrls-api.c | 1315 +++ .../drivers/media/v4l2-core/v4l2-ctrls-core.c | 2598 +++++ 
.../drivers/media/v4l2-core/v4l2-ctrls-defs.c | 1685 ++++ .../drivers/media/v4l2-core/v4l2-ctrls-priv.h | 95 + .../media/v4l2-core/v4l2-ctrls-request.c | 501 + 6.12.0/drivers/media/v4l2-core/v4l2-dev.c | 1233 +++ 6.12.0/drivers/media/v4l2-core/v4l2-device.c | 295 + .../drivers/media/v4l2-core/v4l2-dv-timings.c | 1168 +++ 6.12.0/drivers/media/v4l2-core/v4l2-event.c | 373 + 6.12.0/drivers/media/v4l2-core/v4l2-fh.c | 117 + .../media/v4l2-core/v4l2-flash-led-class.c | 746 ++ 6.12.0/drivers/media/v4l2-core/v4l2-fwnode.c | 1258 +++ 6.12.0/drivers/media/v4l2-core/v4l2-h264.c | 453 + 6.12.0/drivers/media/v4l2-core/v4l2-i2c.c | 184 + 6.12.0/drivers/media/v4l2-core/v4l2-ioctl.c | 3519 +++++++ 6.12.0/drivers/media/v4l2-core/v4l2-jpeg.c | 793 ++ 6.12.0/drivers/media/v4l2-core/v4l2-mc.c | 614 ++ 6.12.0/drivers/media/v4l2-core/v4l2-mem2mem.c | 1643 ++++ 6.12.0/drivers/media/v4l2-core/v4l2-spi.c | 78 + .../media/v4l2-core/v4l2-subdev-priv.h | 14 + 6.12.0/drivers/media/v4l2-core/v4l2-subdev.c | 2569 +++++ 6.12.0/drivers/media/v4l2-core/v4l2-trace.c | 12 + 6.12.0/drivers/media/v4l2-core/v4l2-vp9.c | 1850 ++++ 6.12.0/include-overrides/media/cec-notifier.h | 166 + 6.12.0/include-overrides/media/cec-pin.h | 79 + 6.12.0/include-overrides/media/cec.h | 598 ++ 6.12.0/include-overrides/media/demux.h | 600 ++ 6.12.0/include-overrides/media/dmxdev.h | 213 + 6.12.0/include-overrides/media/dvb-usb-ids.h | 471 + .../include-overrides/media/dvb_ca_en50221.h | 142 + 6.12.0/include-overrides/media/dvb_demux.h | 354 + 6.12.0/include-overrides/media/dvb_frontend.h | 834 ++ 6.12.0/include-overrides/media/dvb_net.h | 95 + .../include-overrides/media/dvb_ringbuffer.h | 280 + 6.12.0/include-overrides/media/dvb_vb2.h | 280 + 6.12.0/include-overrides/media/dvbdev.h | 493 + 6.12.0/include-overrides/media/frame_vector.h | 47 + 6.12.0/include-overrides/media/imx.h | 11 + 6.12.0/include-overrides/media/ipu-bridge.h | 182 + .../include-overrides/media/ipu6-pci-table.h | 28 + 
6.12.0/include-overrides/media/jpeg.h | 20 + .../media/media-dev-allocator.h | 63 + 6.12.0/include-overrides/media/media-device.h | 518 + .../include-overrides/media/media-devnode.h | 168 + 6.12.0/include-overrides/media/media-entity.h | 1450 +++ .../include-overrides/media/media-request.h | 442 + 6.12.0/include-overrides/media/mipi-csi2.h | 47 + 6.12.0/include-overrides/media/rc-core.h | 377 + 6.12.0/include-overrides/media/rc-map.h | 356 + 6.12.0/include-overrides/media/rcar-fcp.h | 38 + 6.12.0/include-overrides/media/tuner-types.h | 205 + 6.12.0/include-overrides/media/tuner.h | 229 + 6.12.0/include-overrides/media/tveeprom.h | 116 + 6.12.0/include-overrides/media/v4l2-async.h | 346 + 6.12.0/include-overrides/media/v4l2-cci.h | 141 + 6.12.0/include-overrides/media/v4l2-common.h | 625 ++ 6.12.0/include-overrides/media/v4l2-ctrls.h | 1591 ++++ 6.12.0/include-overrides/media/v4l2-dev.h | 644 ++ 6.12.0/include-overrides/media/v4l2-device.h | 569 ++ .../include-overrides/media/v4l2-dv-timings.h | 260 + 6.12.0/include-overrides/media/v4l2-event.h | 208 + 6.12.0/include-overrides/media/v4l2-fh.h | 161 + .../media/v4l2-flash-led-class.h | 186 + 6.12.0/include-overrides/media/v4l2-fwnode.h | 414 + 6.12.0/include-overrides/media/v4l2-h264.h | 89 + .../media/v4l2-image-sizes.h | 46 + 6.12.0/include-overrides/media/v4l2-ioctl.h | 796 ++ 6.12.0/include-overrides/media/v4l2-jpeg.h | 189 + 6.12.0/include-overrides/media/v4l2-mc.h | 232 + .../include-overrides/media/v4l2-mediabus.h | 255 + 6.12.0/include-overrides/media/v4l2-mem2mem.h | 902 ++ 6.12.0/include-overrides/media/v4l2-rect.h | 207 + 6.12.0/include-overrides/media/v4l2-subdev.h | 2005 ++++ 6.12.0/include-overrides/media/v4l2-vp9.h | 233 + .../include-overrides/media/videobuf2-core.h | 1348 +++ .../media/videobuf2-dma-contig.h | 32 + .../media/videobuf2-dma-sg.h | 26 + .../include-overrides/media/videobuf2-dvb.h | 69 + .../media/videobuf2-memops.h | 41 + .../include-overrides/media/videobuf2-v4l2.h | 392 + 
.../media/videobuf2-vmalloc.h | 20 + 6.12.0/include-overrides/media/vsp1.h | 120 + dkms.conf | 76 +- ...pu6-Fix-dma-mask-for-non-secure-mode.patch | 49 + ...emove-workaround-for-Meteor-Lake-ES2.patch | 45 + ...l-ipu6-remove-buttress-ish-structure.patch | 125 + ...se-module-parameter-to-set-isys-freq.patch | 45 + .../0005-media-pci-Enable-ISYS-reset.patch | 627 ++ ...l-ipu6-remove-buttress-ish-structure.patch | 38 + ...tel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch | 42 + ...nc-at-buffer_prepare-callback-as-DMA.patch | 42 + ...-support-IPU6-ISYS-FW-trace-dump-for.patch | 1432 +++ ...der-of-return-buffers-should-be-FIFO.patch | 100 + ...-Modify-enble-disable-stream-in-CSI2.patch | 338 + ...the-correct-SOF-for-different-stream.patch | 24 + ...l-ipu6-support-imx390-for-6.11.0-rc3.patch | 981 ++ ...-media-intel-ipu6-fix-coverity-issue.patch | 31 + ...-move-ipu-acpi-module-to-linux-drive.patch | 253 + ...ster-i2c-device-to-complete-ext_subd.patch | 58 + ...7-media-pci-add-missing-if-for-PDATA.patch | 116 + ...edia-pci-refine-PDATA-related-config.patch | 526 ++ ...-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch | 385 + ...se-module-parameter-to-set-psys-freq.patch | 43 + ...21-media-pci-update-IPU6-PSYS-driver.patch | 8335 +++++++++++++++++ ...-support-IPU6-PSYS-FW-trace-dump-for.patch | 134 + ...s-v4l2-core-makefile-adaptation-6.12.patch | 37 + ...t-v4l2_subdev_enable_streams_api-tru.patch | 28 + ...ms-add-isys-makefile-adaptation-6.12.patch | 65 + ...ipu7-acpi-pdata-device-parser-on-6.1.patch | 80 + 150 files changed, 76042 insertions(+), 11 deletions(-) create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/Kconfig create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/Makefile create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h create mode 100644 
6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h create mode 100644 
6.12.0/drivers/media/v4l2-core/Kconfig create mode 100644 6.12.0/drivers/media/v4l2-core/Makefile create mode 100644 6.12.0/drivers/media/v4l2-core/tuner-core.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-async.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-cci.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-common.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-ctrls-api.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-ctrls-core.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-ctrls-request.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-dev.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-device.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-dv-timings.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-event.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-fh.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-flash-led-class.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-fwnode.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-h264.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-i2c.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-ioctl.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-jpeg.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-mc.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-mem2mem.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-spi.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-subdev-priv.h create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-subdev.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-trace.c create mode 100644 6.12.0/drivers/media/v4l2-core/v4l2-vp9.c create mode 100644 6.12.0/include-overrides/media/cec-notifier.h create mode 100644 
6.12.0/include-overrides/media/cec-pin.h create mode 100644 6.12.0/include-overrides/media/cec.h create mode 100644 6.12.0/include-overrides/media/demux.h create mode 100644 6.12.0/include-overrides/media/dmxdev.h create mode 100644 6.12.0/include-overrides/media/dvb-usb-ids.h create mode 100644 6.12.0/include-overrides/media/dvb_ca_en50221.h create mode 100644 6.12.0/include-overrides/media/dvb_demux.h create mode 100644 6.12.0/include-overrides/media/dvb_frontend.h create mode 100644 6.12.0/include-overrides/media/dvb_net.h create mode 100644 6.12.0/include-overrides/media/dvb_ringbuffer.h create mode 100644 6.12.0/include-overrides/media/dvb_vb2.h create mode 100644 6.12.0/include-overrides/media/dvbdev.h create mode 100644 6.12.0/include-overrides/media/frame_vector.h create mode 100644 6.12.0/include-overrides/media/imx.h create mode 100644 6.12.0/include-overrides/media/ipu-bridge.h create mode 100644 6.12.0/include-overrides/media/ipu6-pci-table.h create mode 100644 6.12.0/include-overrides/media/jpeg.h create mode 100644 6.12.0/include-overrides/media/media-dev-allocator.h create mode 100644 6.12.0/include-overrides/media/media-device.h create mode 100644 6.12.0/include-overrides/media/media-devnode.h create mode 100644 6.12.0/include-overrides/media/media-entity.h create mode 100644 6.12.0/include-overrides/media/media-request.h create mode 100644 6.12.0/include-overrides/media/mipi-csi2.h create mode 100644 6.12.0/include-overrides/media/rc-core.h create mode 100644 6.12.0/include-overrides/media/rc-map.h create mode 100644 6.12.0/include-overrides/media/rcar-fcp.h create mode 100644 6.12.0/include-overrides/media/tuner-types.h create mode 100644 6.12.0/include-overrides/media/tuner.h create mode 100644 6.12.0/include-overrides/media/tveeprom.h create mode 100644 6.12.0/include-overrides/media/v4l2-async.h create mode 100644 6.12.0/include-overrides/media/v4l2-cci.h create mode 100644 6.12.0/include-overrides/media/v4l2-common.h create mode 100644 
6.12.0/include-overrides/media/v4l2-ctrls.h create mode 100644 6.12.0/include-overrides/media/v4l2-dev.h create mode 100644 6.12.0/include-overrides/media/v4l2-device.h create mode 100644 6.12.0/include-overrides/media/v4l2-dv-timings.h create mode 100644 6.12.0/include-overrides/media/v4l2-event.h create mode 100644 6.12.0/include-overrides/media/v4l2-fh.h create mode 100644 6.12.0/include-overrides/media/v4l2-flash-led-class.h create mode 100644 6.12.0/include-overrides/media/v4l2-fwnode.h create mode 100644 6.12.0/include-overrides/media/v4l2-h264.h create mode 100644 6.12.0/include-overrides/media/v4l2-image-sizes.h create mode 100644 6.12.0/include-overrides/media/v4l2-ioctl.h create mode 100644 6.12.0/include-overrides/media/v4l2-jpeg.h create mode 100644 6.12.0/include-overrides/media/v4l2-mc.h create mode 100644 6.12.0/include-overrides/media/v4l2-mediabus.h create mode 100644 6.12.0/include-overrides/media/v4l2-mem2mem.h create mode 100644 6.12.0/include-overrides/media/v4l2-rect.h create mode 100644 6.12.0/include-overrides/media/v4l2-subdev.h create mode 100644 6.12.0/include-overrides/media/v4l2-vp9.h create mode 100644 6.12.0/include-overrides/media/videobuf2-core.h create mode 100644 6.12.0/include-overrides/media/videobuf2-dma-contig.h create mode 100644 6.12.0/include-overrides/media/videobuf2-dma-sg.h create mode 100644 6.12.0/include-overrides/media/videobuf2-dvb.h create mode 100644 6.12.0/include-overrides/media/videobuf2-memops.h create mode 100644 6.12.0/include-overrides/media/videobuf2-v4l2.h create mode 100644 6.12.0/include-overrides/media/videobuf2-vmalloc.h create mode 100644 6.12.0/include-overrides/media/vsp1.h create mode 100644 patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch create mode 100644 patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch create mode 100644 patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch create mode 100644 
patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch create mode 100644 patches/0005-media-pci-Enable-ISYS-reset.patch create mode 100644 patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch create mode 100644 patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch create mode 100644 patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch create mode 100644 patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch create mode 100644 patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch create mode 100644 patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch create mode 100644 patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch create mode 100644 patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch create mode 100644 patches/0014-media-intel-ipu6-fix-coverity-issue.patch create mode 100644 patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch create mode 100644 patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch create mode 100644 patches/0017-media-pci-add-missing-if-for-PDATA.patch create mode 100644 patches/0018-media-pci-refine-PDATA-related-config.patch create mode 100644 patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch create mode 100644 patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch create mode 100644 patches/0021-media-pci-update-IPU6-PSYS-driver.patch create mode 100644 patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch create mode 100644 patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch create mode 100644 patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch create mode 100644 patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch create mode 100644 patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig 
b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig new file mode 100644 index 0000000..cd1c545 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig @@ -0,0 +1,18 @@ +config VIDEO_INTEL_IPU6 + tristate "Intel IPU6 driver" + depends on ACPI || COMPILE_TEST + depends on VIDEO_DEV + depends on X86 && X86_64 && HAS_DMA + depends on IPU_BRIDGE || !IPU_BRIDGE + select AUXILIARY_BUS + select IOMMU_IOVA + select VIDEO_V4L2_SUBDEV_API + select MEDIA_CONTROLLER + select VIDEOBUF2_DMA_SG + select V4L2_FWNODE + help + This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs + and used for capturing images and video from camera sensors. + + To compile this driver, say Y here! It contains 2 modules - + intel_ipu6 and intel_ipu6_isys. diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile new file mode 100644 index 0000000..a821b0a --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-only + +intel-ipu6-y := ipu6.o \ + ipu6-bus.o \ + ipu6-dma.o \ + ipu6-mmu.o \ + ipu6-buttress.o \ + ipu6-cpd.o \ + ipu6-fw-com.o + +obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o + +intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-csi2.o \ + ipu6-fw-isys.o \ + ipu6-isys-video.o \ + ipu6-isys-queue.o \ + ipu6-isys-subdev.o \ + ipu6-isys-mcd-phy.o \ + ipu6-isys-jsl-phy.o \ + ipu6-isys-dwc-phy.o + +obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c new file mode 100644 index 0000000..37d88dd --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 - 2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-buttress.h" +#include "ipu6-dma.h" + 
+static int bus_pm_runtime_suspend(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + int ret; + + ret = pm_generic_runtime_suspend(dev); + if (ret) + return ret; + + ret = ipu6_buttress_power(dev, adev->ctrl, false); + if (!ret) + return 0; + + dev_err(dev, "power down failed!\n"); + + /* Powering down failed, attempt to resume device now */ + ret = pm_generic_runtime_resume(dev); + if (!ret) + return -EBUSY; + + return -EIO; +} + +static int bus_pm_runtime_resume(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + int ret; + + ret = ipu6_buttress_power(dev, adev->ctrl, true); + if (ret) + return ret; + + ret = pm_generic_runtime_resume(dev); + if (ret) + goto out_err; + + return 0; + +out_err: + ipu6_buttress_power(dev, adev->ctrl, false); + + return -EBUSY; +} + +static struct dev_pm_domain ipu6_bus_pm_domain = { + .ops = { + .runtime_suspend = bus_pm_runtime_suspend, + .runtime_resume = bus_pm_runtime_resume, + }, +}; + +static DEFINE_MUTEX(ipu6_bus_mutex); + +static void ipu6_bus_release(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + + kfree(adev->pdata); + kfree(adev); +} + +struct ipu6_bus_device * +ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, + void *pdata, struct ipu6_buttress_ctrl *ctrl, + char *name) +{ + struct auxiliary_device *auxdev; + struct ipu6_bus_device *adev; + struct ipu6_device *isp = pci_get_drvdata(pdev); + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + adev->isp = isp; + adev->ctrl = ctrl; + adev->pdata = pdata; + auxdev = &adev->auxdev; + auxdev->name = name; + auxdev->id = (pci_domain_nr(pdev->bus) << 16) | + PCI_DEVID(pdev->bus->number, pdev->devfn); + + auxdev->dev.parent = parent; + auxdev->dev.release = ipu6_bus_release; + + ret = auxiliary_device_init(auxdev); + if (ret < 0) { + dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n", + ret); + 
kfree(adev); + return ERR_PTR(ret); + } + + dev_pm_domain_set(&auxdev->dev, &ipu6_bus_pm_domain); + + pm_runtime_forbid(&adev->auxdev.dev); + pm_runtime_enable(&adev->auxdev.dev); + + return adev; +} + +int ipu6_bus_add_device(struct ipu6_bus_device *adev) +{ + struct auxiliary_device *auxdev = &adev->auxdev; + int ret; + + ret = auxiliary_device_add(auxdev); + if (ret) { + auxiliary_device_uninit(auxdev); + return ret; + } + + mutex_lock(&ipu6_bus_mutex); + list_add(&adev->list, &adev->isp->devices); + mutex_unlock(&ipu6_bus_mutex); + + pm_runtime_allow(&auxdev->dev); + + return 0; +} + +void ipu6_bus_del_devices(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + struct ipu6_bus_device *adev, *save; + + mutex_lock(&ipu6_bus_mutex); + + list_for_each_entry_safe(adev, save, &isp->devices, list) { + pm_runtime_disable(&adev->auxdev.dev); + list_del(&adev->list); + auxiliary_device_delete(&adev->auxdev); + auxiliary_device_uninit(&adev->auxdev); + } + + mutex_unlock(&ipu6_bus_mutex); +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h new file mode 100644 index 0000000..bb4926d --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013 - 2024 Intel Corporation */ + +#ifndef IPU6_BUS_H +#define IPU6_BUS_H + +#include +#include +#include +#include +#include +#include +#include + +struct firmware; +struct pci_dev; + +#define IPU6_BUS_NAME IPU6_NAME "-bus" + +struct ipu6_buttress_ctrl; + +struct ipu6_bus_device { + struct auxiliary_device auxdev; + const struct auxiliary_driver *auxdrv; + const struct ipu6_auxdrv_data *auxdrv_data; + struct list_head list; + void *pdata; + struct ipu6_mmu *mmu; + struct ipu6_device *isp; + struct ipu6_buttress_ctrl *ctrl; + u64 dma_mask; + const struct firmware *fw; + struct sg_table fw_sgt; + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int 
pkg_dir_size; +}; + +struct ipu6_auxdrv_data { + irqreturn_t (*isr)(struct ipu6_bus_device *adev); + irqreturn_t (*isr_threaded)(struct ipu6_bus_device *adev); + bool wake_isr_thread; +}; + +#define to_ipu6_bus_device(_dev) \ + container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev) +#define auxdev_to_adev(_auxdev) \ + container_of(_auxdev, struct ipu6_bus_device, auxdev) +#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) + +struct ipu6_bus_device * +ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, + void *pdata, struct ipu6_buttress_ctrl *ctrl, + char *name); +int ipu6_bus_add_device(struct ipu6_bus_device *adev); +void ipu6_bus_del_devices(struct pci_dev *pdev); + +#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c new file mode 100644 index 0000000..1ee63ef --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c @@ -0,0 +1,933 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-buttress.h" +#include "ipu6-platform-buttress-regs.h" + +#define BOOTLOADER_STATUS_OFFSET 0x15c + +#define BOOTLOADER_MAGIC_KEY 0xb00710ad + +#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 +#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 +#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE + +#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 + +#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC) + +#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC) +#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC) +#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * 
USEC_PER_MSEC) + +#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC +#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC +#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC) +#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC) + +#define BUTTRESS_IPC_RESET_RETRY 2000 +#define BUTTRESS_CSE_IPC_RESET_RETRY 4 +#define BUTTRESS_IPC_CMD_SEND_RETRY 1 + +#define BUTTRESS_MAX_CONSECUTIVE_IRQS 100 + +static const u32 ipu6_adev_irq_mask[2] = { + BUTTRESS_ISR_IS_IRQ, + BUTTRESS_ISR_PS_IRQ +}; + +int ipu6_buttress_ipc_reset(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc) +{ + unsigned int retries = BUTTRESS_IPC_RESET_RETRY; + struct ipu6_buttress *b = &isp->buttress; + u32 val = 0, csr_in_clr; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode"); + return 0; + } + + mutex_lock(&b->ipc_mutex); + + /* Clear-by-1 CSR (all bits), corresponding internal states. */ + val = readl(isp->base + ipc->csr_in); + writel(val, isp->base + ipc->csr_in); + + /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ + writel(ENTRY, isp->base + ipc->csr_out); + /* + * Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + */ + csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | + BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | + BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; + + do { + usleep_range(400, 500); + val = readl(isp->base + ipc->csr_in); + switch (val) { + case ENTRY | EXIT: + case ENTRY | EXIT | QUERY: + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_COMP_ACTIONS_RST_PHASE2). + * 2) Set peer CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. 
+ */ + writel(ENTRY | EXIT, isp->base + ipc->csr_in); + writel(QUERY, isp->base + ipc->csr_out); + break; + case ENTRY: + case ENTRY | QUERY: + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). + * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1. + */ + writel(ENTRY | QUERY, isp->base + ipc->csr_in); + writel(ENTRY, isp->base + ipc->csr_out); + break; + case EXIT: + case EXIT | QUERY: + /* + * Clear-by-1 CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * 1) Clear incoming doorbell. + * 2) Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + * 3) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + */ + writel(EXIT, isp->base + ipc->csr_in); + writel(0, isp->base + ipc->db0_in); + writel(csr_in_clr, isp->base + ipc->csr_in); + writel(EXIT, isp->base + ipc->csr_out); + + /* + * Read csr_in again to make sure if RST_PHASE2 is done. + * If csr_in is QUERY, it should be handled again. + */ + usleep_range(200, 300); + val = readl(isp->base + ipc->csr_in); + if (val & QUERY) { + dev_dbg(&isp->pdev->dev, + "RST_PHASE2 retry csr_in = %x\n", val); + break; + } + mutex_unlock(&b->ipc_mutex); + return 0; + case QUERY: + /* + * 1) Clear-by-1 CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. 
+ * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1 + */ + writel(QUERY, isp->base + ipc->csr_in); + writel(ENTRY, isp->base + ipc->csr_out); + break; + default: + dev_dbg_ratelimited(&isp->pdev->dev, + "Unexpected CSR 0x%x\n", val); + break; + } + } while (retries--); + + mutex_unlock(&b->ipc_mutex); + dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n"); + + return -ETIMEDOUT; +} + +static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc) +{ + writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); +} + +static int +ipu6_buttress_ipc_validity_open(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc) +{ + unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; + void __iomem *addr; + int ret; + u32 val; + + writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); + + addr = isp->base + ipc->csr_in; + ret = readl_poll_timeout(addr, val, val & mask, 200, + BUTTRESS_IPC_VALIDITY_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val); + ipu6_buttress_ipc_validity_close(isp, ipc); + } + + return ret; +} + +static void ipu6_buttress_ipc_recv(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc, u32 *ipc_msg) +{ + if (ipc_msg) + *ipc_msg = readl(isp->base + ipc->data0_in); + writel(0, isp->base + ipc->db0_in); +} + +static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp, + enum ipu6_buttress_ipc_domain ipc_domain, + struct ipu6_ipc_buttress_bulk_msg *msgs, + u32 size) +{ + unsigned long tx_timeout_jiffies, rx_timeout_jiffies; + unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; + struct ipu6_buttress *b = &isp->buttress; + struct ipu6_buttress_ipc *ipc; + u32 val; + int ret; + int tout; + + ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? 
&b->cse : &b->ish; + + mutex_lock(&b->ipc_mutex); + + ret = ipu6_buttress_ipc_validity_open(isp, ipc); + if (ret) { + dev_err(&isp->pdev->dev, "IPC validity open failed\n"); + goto out; + } + + tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS); + rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS); + + for (i = 0; i < size; i++) { + reinit_completion(&ipc->send_complete); + if (msgs[i].require_resp) + reinit_completion(&ipc->recv_complete); + + dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", + msgs[i].cmd); + writel(msgs[i].cmd, isp->base + ipc->data0_out); + val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size; + writel(val, isp->base + ipc->db0_out); + + tout = wait_for_completion_timeout(&ipc->send_complete, + tx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "send IPC response timeout\n"); + if (!retry--) { + ret = -ETIMEDOUT; + goto out; + } + + /* Try again if CSE is not responding on first try */ + writel(0, isp->base + ipc->db0_out); + i--; + continue; + } + + retry = BUTTRESS_IPC_CMD_SEND_RETRY; + + if (!msgs[i].require_resp) + continue; + + tout = wait_for_completion_timeout(&ipc->recv_complete, + rx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + if (ipc->nack_mask && + (ipc->recv_data & ipc->nack_mask) == ipc->nack) { + dev_err(&isp->pdev->dev, + "IPC NACK for cmd 0x%x\n", msgs[i].cmd); + ret = -EIO; + goto out; + } + + if (ipc->recv_data != msgs[i].expected_resp) { + dev_err(&isp->pdev->dev, + "expected resp: 0x%x, IPC response: 0x%x ", + msgs[i].expected_resp, ipc->recv_data); + ret = -EIO; + goto out; + } + } + + dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n"); + +out: + ipu6_buttress_ipc_validity_close(isp, ipc); + mutex_unlock(&b->ipc_mutex); + return ret; +} + +static int +ipu6_buttress_ipc_send(struct ipu6_device *isp, + enum ipu6_buttress_ipc_domain ipc_domain, + u32 ipc_msg, u32 size, bool require_resp, + u32 
expected_resp) +{ + struct ipu6_ipc_buttress_bulk_msg msg = { + .cmd = ipc_msg, + .cmd_size = size, + .require_resp = require_resp, + .expected_resp = expected_resp, + }; + + return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); +} + +static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev) +{ + irqreturn_t ret = IRQ_WAKE_THREAD; + + if (!adev || !adev->auxdrv || !adev->auxdrv_data) + return IRQ_NONE; + + if (adev->auxdrv_data->isr) + ret = adev->auxdrv_data->isr(adev); + + if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded) + ret = IRQ_NONE; + + return ret; +} + +irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr) +{ + struct ipu6_device *isp = isp_ptr; + struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; + struct ipu6_buttress *b = &isp->buttress; + u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; + irqreturn_t ret = IRQ_NONE; + u32 disable_irqs = 0; + u32 irq_status; + u32 i, count = 0; + int active; + + active = pm_runtime_get_if_active(&isp->pdev->dev); + if (!active) + return IRQ_NONE; + + irq_status = readl(isp->base + reg_irq_sts); + if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) { + if (active > 0) + pm_runtime_put_noidle(&isp->pdev->dev); + return IRQ_NONE; + } + + do { + writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); + + for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) { + irqreturn_t r = ipu6_buttress_call_isr(adev[i]); + + if (!(irq_status & ipu6_adev_irq_mask[i])) + continue; + + if (r == IRQ_WAKE_THREAD) { + ret = IRQ_WAKE_THREAD; + disable_irqs |= ipu6_adev_irq_mask[i]; + } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { + ret = IRQ_HANDLED; + } + } + + if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE) + ret = IRQ_HANDLED; + + if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n"); + ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); + complete(&b->cse.recv_complete); + } + + if (irq_status & 
BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n"); + ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data); + complete(&b->ish.recv_complete); + } + + if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); + complete(&b->cse.send_complete); + } + + if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); + complete(&b->ish.send_complete); + } + + if (irq_status & BUTTRESS_ISR_SAI_VIOLATION && + ipu6_buttress_get_secure_mode(isp)) + dev_err(&isp->pdev->dev, + "BUTTRESS_ISR_SAI_VIOLATION\n"); + + if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR | + BUTTRESS_ISR_PS_FATAL_MEM_ERR)) + dev_err(&isp->pdev->dev, + "BUTTRESS_ISR_FATAL_MEM_ERR\n"); + + if (irq_status & BUTTRESS_ISR_UFI_ERROR) + dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n"); + + if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) { + dev_err(&isp->pdev->dev, "too many consecutive IRQs\n"); + ret = IRQ_NONE; + break; + } + + irq_status = readl(isp->base + reg_irq_sts); + } while (irq_status); + + if (disable_irqs) + writel(BUTTRESS_IRQS & ~disable_irqs, + isp->base + BUTTRESS_REG_ISR_ENABLE); + + if (active > 0) + pm_runtime_put(&isp->pdev->dev); + + return ret; +} + +irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) +{ + struct ipu6_device *isp = isp_ptr; + struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; + const struct ipu6_auxdrv_data *drv_data = NULL; + irqreturn_t ret = IRQ_NONE; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) { + drv_data = adev[i]->auxdrv_data; + if (!drv_data) + continue; + + if (drv_data->wake_isr_thread && + drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + + return ret; +} + +int ipu6_buttress_power(struct device *dev, struct 
ipu6_buttress_ctrl *ctrl, + bool on) +{ + struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; + u32 pwr_sts, val; + int ret; + + if (!ctrl) + return 0; + + mutex_lock(&isp->buttress.power_mutex); + + if (!on) { + val = 0; + pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; + } else { + val = BUTTRESS_FREQ_CTL_START | + FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK, + ctrl->ratio) | + FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK, + ctrl->qos_floor) | + BUTTRESS_FREQ_CTL_ICCMAX_LEVEL; + + pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; + } + + writel(val, isp->base + ctrl->freq_ctl); + + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, + val, (val & ctrl->pwr_sts_mask) == pwr_sts, + 100, BUTTRESS_POWER_TIMEOUT_US); + if (ret) + dev_err(&isp->pdev->dev, + "Change power status timeout with 0x%x\n", val); + + ctrl->started = !ret && on; + + mutex_unlock(&isp->buttress.power_mutex); + + return ret; +} + +bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp) +{ + u32 val; + + val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + + return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; +} + +bool ipu6_buttress_auth_done(struct ipu6_device *isp) +{ + u32 val; + + if (!isp->secure_mode) + return true; + + val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); + + return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6); + +int ipu6_buttress_reset_authentication(struct ipu6_device *isp) +{ + int ret; + u32 val; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); + return 0; + } + + writel(BUTTRESS_FW_RESET_CTL_START, isp->base + + BUTTRESS_REG_FW_RESET_CTL); + + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, + val & BUTTRESS_FW_RESET_CTL_DONE, 500, + BUTTRESS_CSE_FWRESET_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, + "Time out while resetting authentication state\n"); + return ret; + 
} + + dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n"); + writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); + /* leave some time for HW restore */ + usleep_range(800, 1000); + + return 0; +} + +int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, + const struct firmware *fw, struct sg_table *sgt) +{ + bool is_vmalloc = is_vmalloc_addr(fw->data); + struct pci_dev *pdev = sys->isp->pdev; + struct page **pages; + const void *addr; + unsigned long n_pages; + unsigned int i; + int ret; + + if (!is_vmalloc && !virt_addr_valid(fw->data)) + return -EDOM; + + n_pages = PHYS_PFN(PAGE_ALIGN(fw->size)); + + pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + addr = fw->data; + for (i = 0; i < n_pages; i++) { + struct page *p = is_vmalloc ? + vmalloc_to_page(addr) : virt_to_page(addr); + + if (!p) { + ret = -ENOMEM; + goto out; + } + pages[i] = p; + addr += PAGE_SIZE; + } + + ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, + GFP_KERNEL); + if (ret) { + ret = -ENOMEM; + goto out; + } + + ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + if (ret) { + sg_free_table(sgt); + goto out; + } + + ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0); + if (ret) { + dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + sg_free_table(sgt); + goto out; + } + + ipu6_dma_sync_sgtable(sys, sgt); + +out: + kfree(pages); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6); + +void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt) +{ + struct pci_dev *pdev = sys->isp->pdev; + + ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0); + dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + sg_free_table(sgt); +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6); + +int ipu6_buttress_authenticate(struct ipu6_device *isp) +{ + struct ipu6_buttress *b = &isp->buttress; + struct ipu6_psys_pdata *psys_pdata; + u32 data, mask, done, 
fail; + int ret; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); + return 0; + } + + psys_pdata = isp->psys->pdata; + + mutex_lock(&b->auth_mutex); + + if (ipu6_buttress_auth_done(isp)) { + ret = 0; + goto out_unlock; + } + + /* + * Write address of FIT table to FW_SOURCE register + * Let's use fw address. I.e. not using FIT table yet + */ + data = lower_32_bits(isp->psys->pkg_dir_dma_addr); + writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); + + data = upper_32_bits(isp->psys->pkg_dir_dma_addr); + writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); + + /* + * Write boot_load into IU2CSEDATA0 + * Write sizeof(boot_load) | 0x2 << CLIENT_ID to + * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); + + ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE, + BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, + 1, true, + BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); + if (ret) { + dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); + goto out_unlock; + } + + mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; + done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; + fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, + ((data & mask) == done || + (data & mask) == fail), 500, + BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); + goto out_unlock; + } + + if ((data & mask) == fail) { + dev_err(&isp->pdev->dev, "CSE auth failed\n"); + ret = -EINVAL; + goto out_unlock; + } + + ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET, + data, data == BOOTLOADER_MAGIC_KEY, 500, + BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n", + data); + goto out_unlock; + } + + /* + * Write authenticate_run into IU2CSEDATA0 + * Write sizeof(boot_load) | 0x2 << CLIENT_ID to + * IU2CSEDB.IU2CSECMD and set 
IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); + ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE, + BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, + 1, true, + BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); + if (ret) { + dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); + goto out_unlock; + } + + done = BUTTRESS_SECURITY_CTL_AUTH_DONE; + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, + ((data & mask) == done || + (data & mask) == fail), 500, + BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); + if (ret) { + dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); + goto out_unlock; + } + + if ((data & mask) == fail) { + dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); + ret = -EINVAL; + goto out_unlock; + } + + dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); + +out_unlock: + mutex_unlock(&b->auth_mutex); + + return ret; +} + +static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp) +{ + u32 val, mask, done; + int ret; + + mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK; + + writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, + isp->base + BUTTRESS_REG_FABRIC_CMD); + + val = readl(isp->base + BUTTRESS_REG_PWR_STATE); + val = FIELD_GET(mask, val); + if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) { + dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); + return -EINVAL; + } + + done = BUTTRESS_PWR_STATE_HH_STATE_DONE; + ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, + FIELD_GET(mask, val) == done, 500, + BUTTRESS_TSC_SYNC_TIMEOUT_US); + if (ret) + dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); + + return ret; +} + +int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) +{ + unsigned int i; + + for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { + u32 val; + int ret; + + ret = ipu6_buttress_send_tsc_request(isp); + if (ret != -ETIMEDOUT) + return ret; + + val = readl(isp->base + BUTTRESS_REG_TSW_CTL); + val = val | BUTTRESS_TSW_CTL_SOFT_RESET; + writel(val, isp->base + 
BUTTRESS_REG_TSW_CTL); + val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET; + writel(val, isp->base + BUTTRESS_REG_TSW_CTL); + } + + dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); + + return -ETIMEDOUT; +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); + +void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) +{ + u32 tsc_hi_1, tsc_hi_2, tsc_lo; + unsigned long flags; + + local_irq_save(flags); + tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI); + tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI); + if (tsc_hi_1 == tsc_hi_2) { + *val = (u64)tsc_hi_1 << 32 | tsc_lo; + } else { + /* Check if TSC has rolled over */ + if (tsc_lo & BIT(31)) + *val = (u64)tsc_hi_1 << 32 | tsc_lo; + else + *val = (u64)tsc_hi_2 << 32 | tsc_lo; + } + local_irq_restore(flags); +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6); + +u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) +{ + u64 ns = ticks * 10000; + + /* + * converting TSC tick count to ns is calculated by: + * Example (TSC clock frequency is 19.2MHz): + * ns = ticks * 1000 000 000 / 19.2Mhz + * = ticks * 1000 000 000 / 19200000Hz + * = ticks * 10000 / 192 ns + */ + return div_u64(ns, isp->buttress.ref_clk); +} +EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6); + +void ipu6_buttress_restore(struct ipu6_device *isp) +{ + struct ipu6_buttress *b = &isp->buttress; + + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); +} + +int ipu6_buttress_init(struct ipu6_device *isp) +{ + int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; + struct ipu6_buttress *b = &isp->buttress; + u32 val; + + mutex_init(&b->power_mutex); + mutex_init(&b->auth_mutex); + mutex_init(&b->cons_mutex); + mutex_init(&b->ipc_mutex); + init_completion(&b->ish.send_complete); + 
init_completion(&b->cse.send_complete); + init_completion(&b->ish.recv_complete); + init_completion(&b->cse.recv_complete); + + b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; + b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; + b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; + b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; + b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; + b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; + b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; + b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; + + /* no ISH on IPU6 */ + memset(&b->ish, 0, sizeof(b->ish)); + INIT_LIST_HEAD(&b->constraints); + + isp->secure_mode = ipu6_buttress_get_secure_mode(isp); + dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n", + isp->secure_mode ? "secure" : "non-secure", + readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH), + readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); + + b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + + /* get ref_clk frequency by reading the indication in btrs control */ + val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); + val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val); + + switch (val) { + case 0x0: + b->ref_clk = 240; + break; + case 0x1: + b->ref_clk = 192; + break; + case 0x2: + b->ref_clk = 384; + break; + default: + dev_warn(&isp->pdev->dev, + "Unsupported ref clock, use 19.2Mhz by default.\n"); + b->ref_clk = 192; + break; + } + + /* Retry couple of times in case of CSE initialization is delayed */ + do { + ret = ipu6_buttress_ipc_reset(isp, &b->cse); + if (ret) { + dev_warn(&isp->pdev->dev, + "IPC reset protocol failed, retrying\n"); + } else { + dev_dbg(&isp->pdev->dev, "IPC reset done\n"); + return 0; + } + } while (ipc_reset_retry--); + + dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); + + mutex_destroy(&b->power_mutex); + mutex_destroy(&b->auth_mutex); + mutex_destroy(&b->cons_mutex); + 
mutex_destroy(&b->ipc_mutex); + + return ret; +} + +void ipu6_buttress_exit(struct ipu6_device *isp) +{ + struct ipu6_buttress *b = &isp->buttress; + + writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); + + mutex_destroy(&b->power_mutex); + mutex_destroy(&b->auth_mutex); + mutex_destroy(&b->cons_mutex); + mutex_destroy(&b->ipc_mutex); +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h new file mode 100644 index 0000000..9b6f569 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_BUTTRESS_H +#define IPU6_BUTTRESS_H + +#include +#include +#include +#include + +struct device; +struct firmware; +struct ipu6_device; +struct ipu6_bus_device; + +#define BUTTRESS_PS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32) + +#define BUTTRESS_IS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22) + +struct ipu6_buttress_ctrl { + u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; + unsigned int ratio; + unsigned int qos_floor; + bool started; +}; + +struct ipu6_buttress_ipc { + struct completion send_complete; + struct completion recv_complete; + u32 nack; + u32 nack_mask; + u32 recv_data; + u32 csr_out; + u32 csr_in; + u32 db0_in; + u32 db0_out; + u32 data0_out; + u32 data0_in; +}; + +struct ipu6_buttress { + struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; + struct ipu6_buttress_ipc cse; + struct ipu6_buttress_ipc ish; + struct list_head constraints; + u32 wdt_cached_value; + bool force_suspend; + u32 ref_clk; +}; + +enum ipu6_buttress_ipc_domain { + IPU6_BUTTRESS_IPC_CSE, + IPU6_BUTTRESS_IPC_ISH, +}; + +struct ipu6_ipc_buttress_bulk_msg { + u32 cmd; + u32 
expected_resp; + bool require_resp; + u8 cmd_size; +}; + +int ipu6_buttress_ipc_reset(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc); +int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, + const struct firmware *fw, + struct sg_table *sgt); +void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt); +int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, + bool on); +bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp); +int ipu6_buttress_authenticate(struct ipu6_device *isp); +int ipu6_buttress_reset_authentication(struct ipu6_device *isp); +bool ipu6_buttress_auth_done(struct ipu6_device *isp); +int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp); +void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val); +u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp); + +irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr); +irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr); +int ipu6_buttress_init(struct ipu6_device *isp); +void ipu6_buttress_exit(struct ipu6_device *isp); +void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); +void ipu6_buttress_restore(struct ipu6_device *isp); +#endif /* IPU6_BUTTRESS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c new file mode 100644 index 0000000..21c1c12 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-cpd.h" +#include "ipu6-dma.h" + +/* 15 entries + header*/ +#define MAX_PKG_DIR_ENT_CNT 16 +/* 2 qword per entry/header */ +#define PKG_DIR_ENT_LEN 2 +/* PKG_DIR size in bytes */ +#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ + 
(PKG_DIR_ENT_LEN) * sizeof(u64)) +/* _IUPKDR_ */ +#define PKG_DIR_HDR_MARK 0x5f4955504b44525fULL + +/* $CPD */ +#define CPD_HDR_MARK 0x44504324 + +#define MAX_MANIFEST_SIZE (SZ_2K * sizeof(u32)) +#define MAX_METADATA_SIZE SZ_64K + +#define MAX_COMPONENT_ID 127 +#define MAX_COMPONENT_VERSION 0xffff + +#define MANIFEST_IDX 0 +#define METADATA_IDX 1 +#define MODULEDATA_IDX 2 +/* + * PKG_DIR Entry (type == id) + * 63:56 55 54:48 47:32 31:24 23:0 + * Rsvd Rsvd Type Version Rsvd Size + */ +#define PKG_DIR_SIZE_MASK GENMASK(23, 0) +#define PKG_DIR_VERSION_MASK GENMASK(47, 32) +#define PKG_DIR_TYPE_MASK GENMASK(54, 48) + +static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd, + u8 idx) +{ + const struct ipu6_cpd_hdr *cpd_hdr = cpd; + const struct ipu6_cpd_ent *ent; + + ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len); + return ent + idx; +} + +#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX) +#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX) +#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX) + +static const struct ipu6_cpd_metadata_cmpnt_hdr * +ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata, + unsigned int metadata_size, u8 idx) +{ + size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn); + size_t cmpnt_count = metadata_size - extn_size; + + cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size); + + if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { + dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", + idx); + return ERR_PTR(-EINVAL); + } + + return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size; +} + +static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; + + cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); + if (IS_ERR(cmpnt)) + return 
PTR_ERR(cmpnt); + + return cmpnt->ver; +} + +static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; + + cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->id; +} + +static int ipu6_cpd_parse_module_data(struct ipu6_device *isp, + const void *module_data, + unsigned int module_data_size, + dma_addr_t dma_addr_module_data, + u64 *pkg_dir, const void *metadata, + unsigned int metadata_size) +{ + const struct ipu6_cpd_module_data_hdr *module_data_hdr; + const struct ipu6_cpd_hdr *dir_hdr; + const struct ipu6_cpd_ent *dir_ent; + unsigned int i; + u8 len; + + if (!module_data) + return -EINVAL; + + module_data_hdr = module_data; + dir_hdr = module_data + module_data_hdr->hdr_len; + len = dir_hdr->hdr_len; + dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len); + + pkg_dir[0] = PKG_DIR_HDR_MARK; + /* pkg_dir entry count = component count + pkg_dir header */ + pkg_dir[1] = dir_hdr->ent_cnt + 1; + + for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { + u64 *p = &pkg_dir[PKG_DIR_ENT_LEN * (1 + i)]; + int ver, id; + + *p++ = dma_addr_module_data + dir_ent->offset; + id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata, + metadata_size, i); + if (id < 0 || id > MAX_COMPONENT_ID) { + dev_err(&isp->pdev->dev, "Invalid CPD component id\n"); + return -EINVAL; + } + + ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata, + metadata_size, i); + if (ver < 0 || ver > MAX_COMPONENT_VERSION) { + dev_err(&isp->pdev->dev, + "Invalid CPD component version\n"); + return -EINVAL; + } + + *p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) | + FIELD_PREP(PKG_DIR_TYPE_MASK, id) | + FIELD_PREP(PKG_DIR_VERSION_MASK, ver); + } + + return 0; +} + +int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src) +{ + dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl); 
+ const struct ipu6_cpd_ent *ent, *man_ent, *met_ent; + struct ipu6_device *isp = adev->isp; + unsigned int man_sz, met_sz; + void *pkg_dir_pos; + int ret; + + man_ent = ipu6_cpd_get_manifest(src); + man_sz = man_ent->len; + + met_ent = ipu6_cpd_get_metadata(src); + met_sz = met_ent->len; + + adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; + adev->pkg_dir = ipu6_dma_alloc(adev, adev->pkg_dir_size, + &adev->pkg_dir_dma_addr, GFP_KERNEL, 0); + if (!adev->pkg_dir) + return -ENOMEM; + + /* + * pkg_dir entry/header: + * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 + * N Address/Offset/"_IUPKDR_" + * N + 1 | rsvd | rsvd | type | ver | rsvd | size + * + * We can ignore other fields that size in N + 1 qword as they + * are 0 anyway. Just setting size for now. + */ + + ent = ipu6_cpd_get_moduledata(src); + + ret = ipu6_cpd_parse_module_data(isp, src + ent->offset, + ent->len, dma_addr_src + ent->offset, + adev->pkg_dir, src + met_ent->offset, + met_ent->len); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to parse module data\n"); + ipu6_dma_free(adev, adev->pkg_dir_size, + adev->pkg_dir, adev->pkg_dir_dma_addr, 0); + return ret; + } + + /* Copy manifest after pkg_dir */ + pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; + memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); + + /* Copy metadata after manifest */ + pkg_dir_pos += man_sz; + memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); + + ipu6_dma_sync_single(adev, adev->pkg_dir_dma_addr, + adev->pkg_dir_size); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, INTEL_IPU6); + +void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev) +{ + ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir, + adev->pkg_dir_dma_addr, 0); +} +EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, INTEL_IPU6); + +static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd, + unsigned long cpd_size, + unsigned long data_size) +{ + const struct ipu6_cpd_hdr *cpd_hdr = cpd; + const struct 
ipu6_cpd_ent *ent; + unsigned int i; + u8 len; + + len = cpd_hdr->hdr_len; + + /* Ensure cpd hdr is within moduledata */ + if (cpd_size < len) { + dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); + return -EINVAL; + } + + /* Sanity check for CPD header */ + if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Ensure that all entries are within moduledata */ + ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len); + for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { + if (data_size < ent->offset || + data_size - ent->offset < ent->len) { + dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); + return -EINVAL; + } + } + + return 0; +} + +static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp, + const void *moduledata, + u32 moduledata_size) +{ + const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata; + int ret; + + /* Ensure moduledata hdr is within moduledata */ + if (moduledata_size < sizeof(*mod_hdr) || + moduledata_size < mod_hdr->hdr_len) { + dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); + return -EINVAL; + } + + dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date); + ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len, + moduledata_size - mod_hdr->hdr_len, + moduledata_size); + if (ret) { + dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n"); + return ret; + } + + return 0; +} + +static int ipu6_cpd_validate_metadata(struct ipu6_device *isp, + const void *metadata, u32 meta_size) +{ + const struct ipu6_cpd_metadata_extn *extn = metadata; + + /* Sanity check for metadata size */ + if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { + dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); + return -EINVAL; + } + + /* Validate extension and image types */ + if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT || + extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { + 
dev_err(&isp->pdev->dev, + "Invalid CPD metadata descriptor img_type (%d)\n", + extn->img_type); + return -EINVAL; + } + + /* Validate metadata size multiple of metadata components */ + if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) { + dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n"); + return -EINVAL; + } + + return 0; +} + +int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, + unsigned long cpd_file_size) +{ + const struct ipu6_cpd_hdr *hdr = cpd_file; + const struct ipu6_cpd_ent *ent; + int ret; + + ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size, + cpd_file_size); + if (ret) { + dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); + return ret; + } + + /* Check for CPD file marker */ + if (hdr->hdr_mark != CPD_HDR_MARK) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Sanity check for manifest size */ + ent = ipu6_cpd_get_manifest(cpd_file); + if (ent->len > MAX_MANIFEST_SIZE) { + dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n"); + return -EINVAL; + } + + /* Validate metadata */ + ent = ipu6_cpd_get_metadata(cpd_file); + ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); + if (ret) { + dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); + return ret; + } + + /* Validate moduledata */ + ent = ipu6_cpd_get_moduledata(cpd_file); + ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset, + ent->len); + if (ret) + dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n"); + + return ret; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h new file mode 100644 index 0000000..e0e4fde --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2015--2024 Intel Corporation */ + +#ifndef IPU6_CPD_H +#define IPU6_CPD_H + +struct ipu6_device; +struct ipu6_bus_device; + +#define 
IPU6_CPD_SIZE_OF_FW_ARCH_VERSION 7 +#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION 11 +#define IPU6_CPD_SIZE_OF_COMPONENT_NAME 12 + +#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 + +#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED 0 +#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 +#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 + +#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX 0 +#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX 1 + +#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE 3 + +#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48 +#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE 32 + +struct ipu6_cpd_module_data_hdr { + u32 hdr_len; + u32 endian; + u32 fw_pkg_date; + u32 hive_sdk_date; + u32 compiler_date; + u32 target_platform_type; + u8 sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION]; + u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION]; + u8 rsvd[2]; +} __packed; + +/* + * ipu6_cpd_hdr structure updated as the chksum and + * sub_partition_name is unused on host side + * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10) + * CSE layout version 1.7 for IPU6 (hdr_len = 0x14) + */ +struct ipu6_cpd_hdr { + u32 hdr_mark; + u32 ent_cnt; + u8 hdr_ver; + u8 ent_ver; + u8 hdr_len; +} __packed; + +struct ipu6_cpd_ent { + u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME]; + u32 offset; + u32 len; + u8 rsvd[4]; +} __packed; + +struct ipu6_cpd_metadata_cmpnt_hdr { + u32 id; + u32 size; + u32 ver; +} __packed; + +struct ipu6_cpd_metadata_cmpnt { + struct ipu6_cpd_metadata_cmpnt_hdr hdr; + u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +} __packed; + +struct ipu6se_cpd_metadata_cmpnt { + struct ipu6_cpd_metadata_cmpnt_hdr hdr; + u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +} __packed; + +struct ipu6_cpd_metadata_extn { + u32 extn_type; + u32 len; + u32 img_type; + u8 rsvd[16]; +} __packed; + +struct ipu6_cpd_client_pkg_hdr { + u32 prog_list_offs; + u32 prog_list_size; + u32 prog_desc_offs; + u32 
prog_desc_size; + u32 pg_manifest_offs; + u32 pg_manifest_size; + u32 prog_bin_offs; + u32 prog_bin_size; +} __packed; + +int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src); +void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev); +int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, + unsigned long cpd_file_size); +#endif /* IPU6_CPD_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c new file mode 100644 index 0000000..b71f66b --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-mmu.h" + +struct vm_info { + struct list_head list; + struct page **pages; + dma_addr_t ipu6_iova; + void *vaddr; + unsigned long size; +}; + +static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova) +{ + struct vm_info *info, *save; + + list_for_each_entry_safe(info, save, &mmu->vma_list, list) { + if (iova >= info->ipu6_iova && + iova < (info->ipu6_iova + info->size)) + return info; + } + + return NULL; +} + +static void __clear_buffer(struct page *page, size_t size, unsigned long attrs) +{ + void *ptr; + + if (!page) + return; + /* + * Ensure that the allocated pages are zeroed, and that any data + * lurking in the kernel direct-mapped region is invalidated. 
+ */ + ptr = page_address(page); + memset(ptr, 0, size); + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) + clflush_cache_range(ptr, size); +} + +static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs) +{ + int count = PHYS_PFN(size); + int array_size = count * sizeof(struct page *); + struct page **pages; + int i = 0; + + pages = kvzalloc(array_size, GFP_KERNEL); + if (!pages) + return NULL; + + gfp |= __GFP_NOWARN; + + while (count) { + int j, order = __fls(count); + + pages[i] = alloc_pages(gfp, order); + while (!pages[i] && order) + pages[i] = alloc_pages(gfp, --order); + if (!pages[i]) + goto error; + + if (order) { + split_page(pages[i], order); + j = 1 << order; + while (j--) + pages[i + j] = pages[i] + j; + } + + __clear_buffer(pages[i], PAGE_SIZE << order, attrs); + i += 1 << order; + count -= 1 << order; + } + + return pages; +error: + while (i--) + if (pages[i]) + __free_pages(pages[i], 0); + kvfree(pages); + return NULL; +} + +static void __free_buffer(struct page **pages, size_t size, unsigned long attrs) +{ + int count = PHYS_PFN(size); + unsigned int i; + + for (i = 0; i < count && pages[i]; i++) { + __clear_buffer(pages[i], PAGE_SIZE, attrs); + __free_pages(pages[i], 0); + } + + kvfree(pages); +} + +void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, + size_t size) +{ + void *vaddr; + u32 offset; + struct vm_info *info; + struct ipu6_mmu *mmu = sys->mmu; + + info = get_vm_info(mmu, dma_handle); + if (WARN_ON(!info)) + return; + + offset = dma_handle - info->ipu6_iova; + if (WARN_ON(size > (info->size - offset))) + return; + + vaddr = info->vaddr + offset; + clflush_cache_range(vaddr, size); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, INTEL_IPU6); + +void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nents, i) + clflush_cache_range(page_to_virt(sg_page(sg)), sg->length); +} 
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, INTEL_IPU6); + +void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt) +{ + ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, INTEL_IPU6); + +void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct pci_dev *pdev = sys->isp->pdev; + dma_addr_t pci_dma_addr, ipu6_iova; + struct ipu6_mmu *mmu = sys->mmu; + struct vm_info *info; + unsigned long count; + struct page **pages; + struct iova *iova; + unsigned int i; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return NULL; + + size = PAGE_ALIGN(size); + count = PHYS_PFN(size); + + iova = alloc_iova(&mmu->dmap->iovad, count, + PHYS_PFN(dma_get_mask(dev)), 0); + if (!iova) + goto out_kfree; + + pages = __alloc_buffer(size, gfp, attrs); + if (!pages) + goto out_free_iova; + + dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n", + size, iova->pfn_lo, iova->pfn_hi); + for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { + pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, + PAGE_SIZE, DMA_BIDIRECTIONAL, + attrs); + dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n", + &pci_dma_addr); + if (dma_mapping_error(&pdev->dev, pci_dma_addr)) { + dev_err(dev, "pci_dma_mapping for page[%d] failed", i); + goto out_unmap; + } + + ret = ipu6_mmu_map(mmu->dmap->mmu_info, + PFN_PHYS(iova->pfn_lo + i), pci_dma_addr, + PAGE_SIZE); + if (ret) { + dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed", + i, &pci_dma_addr); + dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, + PAGE_SIZE, DMA_BIDIRECTIONAL, + attrs); + goto out_unmap; + } + } + + info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL); + if (!info->vaddr) + goto out_unmap; + + *dma_handle = PFN_PHYS(iova->pfn_lo); + + info->pages = pages; + info->ipu6_iova = *dma_handle; + info->size = size; + 
list_add(&info->list, &mmu->vma_list); + + return info->vaddr; + +out_unmap: + while (i--) { + ipu6_iova = PFN_PHYS(iova->pfn_lo + i); + pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, + ipu6_iova); + dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, attrs); + + ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE); + } + + __free_buffer(pages, size, attrs); + +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); +out_kfree: + kfree(info); + + return NULL; +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, INTEL_IPU6); + +void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct ipu6_mmu *mmu = sys->mmu; + struct pci_dev *pdev = sys->isp->pdev; + struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle)); + dma_addr_t pci_dma_addr, ipu6_iova; + struct vm_info *info; + struct page **pages; + unsigned int i; + + if (WARN_ON(!iova)) + return; + + info = get_vm_info(mmu, dma_handle); + if (WARN_ON(!info)) + return; + + if (WARN_ON(!info->vaddr)) + return; + + if (WARN_ON(!info->pages)) + return; + + list_del(&info->list); + + size = PAGE_ALIGN(size); + + pages = info->pages; + + vunmap(vaddr); + + for (i = 0; i < PHYS_PFN(size); i++) { + ipu6_iova = PFN_PHYS(iova->pfn_lo + i); + pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, + ipu6_iova); + dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, attrs); + } + + ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + + __free_buffer(pages, size, attrs); + + mmu->tlb_invalidate(mmu); + + __free_iova(&mmu->dmap->iovad, iova); + + kfree(info); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, INTEL_IPU6); + +int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, + unsigned long attrs) +{ + struct ipu6_mmu *mmu = sys->mmu; + size_t count = PFN_UP(size); + struct vm_info *info; + 
size_t i; + int ret; + + info = get_vm_info(mmu, iova); + if (!info) + return -EFAULT; + + if (!info->vaddr) + return -EFAULT; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + if (size > info->size) + return -EFAULT; + + for (i = 0; i < count; i++) { + ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i), + info->pages[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct ipu6_mmu *mmu = sys->mmu; + struct iova *iova = find_iova(&mmu->dmap->iovad, + PHYS_PFN(sg_dma_address(sglist))); + struct scatterlist *sg; + dma_addr_t pci_dma_addr; + unsigned int i; + + if (!nents) + return; + + if (WARN_ON(!iova)) + return; + + /* + * Before IPU6 mmu unmap, return the pci dma address back to sg + * assume the nents is less than orig_nents as the least granule + * is 1 SZ_4K page + */ + dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents); + for_each_sg(sglist, sg, nents, i) { + dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i, + &sg_dma_address(sg), sg_dma_len(sg)); + pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, + sg_dma_address(sg)); + dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n", + &pci_dma_addr, i); + sg_dma_address(sg) = pci_dma_addr; + } + + dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n", + iova->pfn_lo, iova->pfn_hi); + ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + + mmu->tlb_invalidate(mmu); + __free_iova(&mmu->dmap->iovad, iova); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, INTEL_IPU6); + +int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct ipu6_mmu *mmu = sys->mmu; + struct scatterlist *sg; + struct iova *iova; + size_t npages = 0; + 
unsigned long iova_addr; + int i; + + for_each_sg(sglist, sg, nents, i) { + if (sg->offset) { + dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n", + i, sg->offset); + return -EFAULT; + } + } + + for_each_sg(sglist, sg, nents, i) + npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg))); + + dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n", + nents, npages); + + iova = alloc_iova(&mmu->dmap->iovad, npages, + PHYS_PFN(dma_get_mask(dev)), 0); + if (!iova) + return 0; + + dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, + iova->pfn_hi); + + iova_addr = iova->pfn_lo; + for_each_sg(sglist, sg, nents, i) { + phys_addr_t iova_pa; + int ret; + + iova_pa = PFN_PHYS(iova_addr); + dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n", + i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg)); + + ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), + sg_dma_address(sg), + PAGE_ALIGN(sg_dma_len(sg))); + if (ret) + goto out_fail; + + sg_dma_address(sg) = PFN_PHYS(iova_addr); + + iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg))); + } + + dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages); + + return nents; + +out_fail: + ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, INTEL_IPU6); + +int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + int nents; + + nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs); + if (nents < 0) + return nents; + + sgt->nents = nents; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, INTEL_IPU6); + +void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs); +} +EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, INTEL_IPU6); + +/* + * Create scatter-list for the already allocated DMA buffer + */ +int ipu6_dma_get_sgtable(struct 
ipu6_bus_device *sys, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, + unsigned long attrs) +{ + struct device *dev = &sys->auxdev.dev; + struct ipu6_mmu *mmu = sys->mmu; + struct vm_info *info; + int n_pages; + int ret = 0; + + info = get_vm_info(mmu, handle); + if (!info) + return -EFAULT; + + if (!info->vaddr) + return -EFAULT; + + if (WARN_ON(!info->pages)) + return -ENOMEM; + + n_pages = PHYS_PFN(PAGE_ALIGN(size)); + + ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size, + GFP_KERNEL); + if (ret) + dev_warn(dev, "get sgt table failed\n"); + + return ret; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h new file mode 100644 index 0000000..b51244a --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_DMA_H +#define IPU6_DMA_H + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" + +struct ipu6_mmu_info; + +struct ipu6_dma_mapping { + struct ipu6_mmu_info *mmu_info; + struct iova_domain iovad; +}; + +void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, + size_t size); +void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents); +void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt); +void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); +void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs); +int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, + unsigned long attrs); +int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs); 
+void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + unsigned long attrs); +int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); +void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); +int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, + unsigned long attrs); +#endif /* IPU6_DMA_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c new file mode 100644 index 0000000..7d3d931 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-fw-com.h" + +/* + * FWCOM layer is a shared resource between FW and driver. It consist + * of token queues to both send and receive directions. Queue is simply + * an array of structures with read and write indexes to the queue. + * There are 1...n queues to both directions. Queues locates in + * system RAM and are mapped to ISP MMU so that both CPU and ISP can + * see the same buffer. Indexes are located in ISP DMEM so that FW code + * can poll those with very low latency and cost. CPU access to indexes is + * more costly but that happens only at message sending time and + * interrupt triggered message handling. CPU doesn't need to poll indexes. + * wr_reg / rd_reg are offsets to those dmem location. They are not + * the indexes itself. 
+ */ + +/* Shared structure between driver and FW - do not modify */ +struct ipu6_fw_sys_queue { + u64 host_address; + u32 vied_address; + u32 size; + u32 token_size; + u32 wr_reg; /* reg number in subsystem's regmem */ + u32 rd_reg; + u32 _align; +} __packed; + +struct ipu6_fw_sys_queue_res { + u64 host_address; + u32 vied_address; + u32 reg; +} __packed; + +enum syscom_state { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57a7e000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57a7e001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57a7e002, +}; + +enum syscom_cmd { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57a7f000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57a7f001, +}; + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ipu6_fw_syscom_config { + u32 firmware_address; + + u32 num_input_queues; + u32 num_output_queues; + + /* ISP pointers to an array of ipu6_fw_sys_queue structures */ + u32 input_queue; + u32 output_queue; + + /* ISYS / PSYS private data */ + u32 specific_addr; + u32 specific_size; +}; + +struct ipu6_fw_com_context { + struct ipu6_bus_device *adev; + void __iomem *dmem_addr; + int (*cell_ready)(struct ipu6_bus_device *adev); + void (*cell_start)(struct ipu6_bus_device *adev); + + void *dma_buffer; + dma_addr_t dma_addr; + unsigned int dma_size; + + struct ipu6_fw_sys_queue *input_queue; /* array of host to SP queues */ + struct ipu6_fw_sys_queue *output_queue; /* array of SP to host */ + + u32 config_vied_addr; + + unsigned int buttress_boot_offset; + void __iomem *base_addr; +}; + +#define FW_COM_WR_REG 0 +#define FW_COM_RD_REG 4 + +#define REGMEM_OFFSET 0 +#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a + +enum regmem_id { + /* pass pkg_dir address to SPC in 
non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* Tunit CFG blob for secure - provided by host.*/ + TUNIT_CFG_DWR_REG = 1, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 2, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 3, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 4 +}; + +#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000 +#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) \ + ((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4) + +enum buttress_syscom_id { + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_ID = 0, + /* syscom state - modified by SP */ + SYSCOM_STATE_ID = 1, + /* syscom vtl0 addr mask */ + SYSCOM_VTL0_ADDR_MASK_ID = 2, + SYSCOM_ID_MAX +}; + +static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size, + unsigned int token_size, + struct ipu6_fw_sys_queue_res *res) +{ + unsigned int buf_size = (size + 1) * token_size; + + q->size = size + 1; + q->token_size = token_size; + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; +} + +void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, + struct ipu6_bus_device *adev, void __iomem *base) +{ + size_t conf_size, inq_size, outq_size, specific_size; + struct ipu6_fw_syscom_config *config_host_addr; + unsigned int sizeinput = 0, sizeoutput = 0; + struct ipu6_fw_sys_queue_res res; + struct ipu6_fw_com_context *ctx; + struct device *dev = &adev->auxdev.dev; + size_t sizeall, offset; + void *specific_host_addr; + unsigned int i; + + if (!cfg || !cfg->cell_start || !cfg->cell_ready) + return NULL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; + ctx->adev = adev; + ctx->cell_start = 
cfg->cell_start; + ctx->cell_ready = cfg->cell_ready; + ctx->buttress_boot_offset = cfg->buttress_boot_offset; + ctx->base_addr = base; + + /* + * Allocate DMA mapped memory. Allocate one big chunk. + */ + /* Base cfg for FW */ + conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8); + /* Descriptions of the queues */ + inq_size = size_mul(cfg->num_input_queues, + sizeof(struct ipu6_fw_sys_queue)); + outq_size = size_mul(cfg->num_output_queues, + sizeof(struct ipu6_fw_sys_queue)); + /* FW specific information structure */ + specific_size = roundup(cfg->specific_size, 8); + + sizeall = conf_size + inq_size + outq_size + specific_size; + + for (i = 0; i < cfg->num_input_queues; i++) + sizeinput += size_mul(cfg->input[i].queue_size + 1, + cfg->input[i].token_size); + + for (i = 0; i < cfg->num_output_queues; i++) + sizeoutput += size_mul(cfg->output[i].queue_size + 1, + cfg->output[i].token_size); + + sizeall += sizeinput + sizeoutput; + + ctx->dma_buffer = ipu6_dma_alloc(adev, sizeall, &ctx->dma_addr, + GFP_KERNEL, 0); + if (!ctx->dma_buffer) { + dev_err(dev, "failed to allocate dma memory\n"); + kfree(ctx); + return NULL; + } + + ctx->dma_size = sizeall; + + config_host_addr = ctx->dma_buffer; + ctx->config_vied_addr = ctx->dma_addr; + + offset = conf_size; + ctx->input_queue = ctx->dma_buffer + offset; + config_host_addr->input_queue = ctx->dma_addr + offset; + config_host_addr->num_input_queues = cfg->num_input_queues; + + offset += inq_size; + ctx->output_queue = ctx->dma_buffer + offset; + config_host_addr->output_queue = ctx->dma_addr + offset; + config_host_addr->num_output_queues = cfg->num_output_queues; + + /* copy firmware specific data */ + offset += outq_size; + specific_host_addr = ctx->dma_buffer + offset; + config_host_addr->specific_addr = ctx->dma_addr + offset; + config_host_addr->specific_size = cfg->specific_size; + if (cfg->specific_addr && cfg->specific_size) + memcpy(specific_host_addr, cfg->specific_addr, + cfg->specific_size); + + 
ipu6_dma_sync_single(adev, ctx->config_vied_addr, sizeall); + + /* initialize input queues */ + offset += specific_size; + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = (u64)(ctx->dma_buffer + offset); + res.vied_address = ctx->dma_addr + offset; + for (i = 0; i < cfg->num_input_queues; i++) + ipu6_sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + + /* initialize output queues */ + offset += sizeinput; + res.host_address = (u64)(ctx->dma_buffer + offset); + res.vied_address = ctx->dma_addr + offset; + for (i = 0; i < cfg->num_output_queues; i++) { + ipu6_sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + return ctx; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6); + +int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) +{ + /* write magic pattern to disable the tunit trace */ + writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); + /* Check if SP is in valid state */ + if (!ctx->cell_ready(ctx->adev)) + return -EIO; + + /* store syscom uninitialized command */ + writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); + + /* store syscom uninitialized state */ + writel(SYSCOM_STATE_UNINIT, + BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_STATE_ID)); + + /* store firmware configuration address */ + writel(ctx->config_vied_addr, + BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_CONFIG_ID)); + ctx->cell_start(ctx->adev); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6); + +int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx) +{ + int state; + + state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_STATE_ID)); + if (state != SYSCOM_STATE_READY) + return -EBUSY; + + /* set close request flag */ + writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + + SYSCOM_COMMAND_REG * 4); + + return 0; 
+} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6); + +int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force) +{ + /* check if release is forced, an verify cell state if it is not */ + if (!force && !ctx->cell_ready(ctx->adev)) + return -EBUSY; + + ipu6_dma_free(ctx->adev, ctx->dma_size, + ctx->dma_buffer, ctx->dma_addr, 0); + kfree(ctx); + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6); + +bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx) +{ + int state; + + state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, + ctx->buttress_boot_offset, + SYSCOM_STATE_ID)); + + return state == SYSCOM_STATE_READY; +} +EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6); + +void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + unsigned int index; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) + return NULL; + + if (wr < rd) + packets = rd - wr - 1; + else + packets = q->size - (wr - rd + 1); + + if (!packets) + return NULL; + + index = readl(q_dmem + FW_COM_WR_REG); + + return (void *)(q->host_address + index * q->token_size); +} +EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6); + +void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1; + + if (wr >= q->size) + wr = 0; + + writel(wr, q_dmem + FW_COM_WR_REG); +} +EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6); + +void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, 
rd; + unsigned int packets; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) + return NULL; + + if (wr < rd) + wr += q->size; + + packets = wr - rd; + if (!packets) + return NULL; + + return (void *)(q->host_address + rd * q->token_size); +} +EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6); + +void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) +{ + struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1; + + if (rd >= q->size) + rd = 0; + + writel(rd, q_dmem + FW_COM_RD_REG); +} +EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6); diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h new file mode 100644 index 0000000..b02285a --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_FW_COM_H +#define IPU6_FW_COM_H + +struct ipu6_fw_com_context; +struct ipu6_bus_device; + +struct ipu6_fw_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0 + +struct ipu6_fw_com_cfg { + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ipu6_fw_syscom_queue_config *input; + struct ipu6_fw_syscom_queue_config *output; + + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + int (*cell_ready)(struct ipu6_bus_device *adev); + void (*cell_start)(struct ipu6_bus_device *adev); + + unsigned int buttress_boot_offset; +}; + +void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, + struct ipu6_bus_device *adev, void __iomem *base); + +int ipu6_fw_com_open(struct 
ipu6_fw_com_context *ctx); +bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx); +int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx); +int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force); + +void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); + +#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c new file mode 100644 index 0000000..62ed92f --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-fw-com.h" +#include "ipu6-isys.h" +#include "ipu6-platform-isys-csi2-reg.h" +#include "ipu6-platform-regs.h" + +static const char send_msg_types[N_IPU6_FW_ISYS_SEND_TYPE][32] = { + "STREAM_OPEN", + "STREAM_START", + "STREAM_START_AND_CAPTURE", + "STREAM_CAPTURE", + "STREAM_STOP", + "STREAM_FLUSH", + "STREAM_CLOSE" +}; + +static int handle_proxy_response(struct ipu6_isys *isys, unsigned int req_id) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_fw_isys_proxy_resp_info_abi *resp; + int ret; + + resp = ipu6_recv_get_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); + if (!resp) + return 1; + + dev_dbg(dev, "Proxy response: id %u, error %u, details %u\n", + resp->request_id, resp->error_info.error, + resp->error_info.error_details); + + ret = req_id == resp->request_id ? 
0 : -EIO; + + ipu6_recv_put_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); + + return ret; +} + +int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned int offset, u32 value) +{ + struct ipu6_fw_com_context *ctx = isys->fwcom; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_fw_proxy_send_queue_token *token; + unsigned int timeout = 1000; + int ret; + + dev_dbg(dev, + "proxy send: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n", + req_id, index, offset, value); + + token = ipu6_send_get_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); + if (!token) + return -EBUSY; + + token->request_id = req_id; + token->region_index = index; + token->offset = offset; + token->value = value; + ipu6_send_put_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); + + do { + usleep_range(100, 110); + ret = handle_proxy_response(isys, req_id); + if (!ret) + break; + if (ret == -EIO) { + dev_err(dev, "Proxy respond with unexpected id\n"); + break; + } + timeout--; + } while (ret && timeout); + + if (!timeout) + dev_err(dev, "Proxy response timed out\n"); + + return ret; +} + +int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, u16 send_type) +{ + struct ipu6_fw_com_context *ctx = isys->fwcom; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_fw_send_queue_token *token; + + if (send_type >= N_IPU6_FW_ISYS_SEND_TYPE) + return -EINVAL; + + dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]); + + /* + * Time to flush cache in case we have some payload. 
Not all messages + * have that + */ + if (cpu_mapped_buf) + clflush_cache_range(cpu_mapped_buf, size); + + token = ipu6_send_get_token(ctx, + stream_handle + IPU6_BASE_MSG_SEND_QUEUES); + if (!token) + return -EBUSY; + + token->payload = dma_mapped_buf; + token->buf_handle = (unsigned long)cpu_mapped_buf; + token->send_type = send_type; + + ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES); + + return 0; +} + +int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, u16 send_type) +{ + return ipu6_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, + send_type); +} + +int ipu6_fw_isys_close(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + int retry = IPU6_ISYS_CLOSE_RETRY; + unsigned long flags; + void *fwcom; + int ret; + + /* + * Stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + * spinlock to wait the interrupt handler to be finished + */ + spin_lock_irqsave(&isys->power_lock, flags); + ret = ipu6_fw_com_close(isys->fwcom); + fwcom = isys->fwcom; + isys->fwcom = NULL; + spin_unlock_irqrestore(&isys->power_lock, flags); + if (ret) + dev_err(dev, "Device close failure: %d\n", ret); + + /* release probably fails if the close failed. 
Let's try still */ + do { + usleep_range(400, 500); + ret = ipu6_fw_com_release(fwcom, 0); + retry--; + } while (ret && retry); + + if (ret) { + dev_err(dev, "Device release time out %d\n", ret); + spin_lock_irqsave(&isys->power_lock, flags); + isys->fwcom = fwcom; + spin_unlock_irqrestore(&isys->power_lock, flags); + } + + return ret; +} + +void ipu6_fw_isys_cleanup(struct ipu6_isys *isys) +{ + int ret; + + ret = ipu6_fw_com_release(isys->fwcom, 1); + if (ret < 0) + dev_warn(&isys->adev->auxdev.dev, + "Device busy, fw_com release failed."); + isys->fwcom = NULL; +} + +static void start_sp(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + void __iomem *spc_regs_base = isys->pdata->base + + isys->pdata->ipdata->hw_variant.spc_offset; + u32 val = IPU6_ISYS_SPC_STATUS_START | + IPU6_ISYS_SPC_STATUS_RUN | + IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + + val |= isys->icache_prefetch ? IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; + + writel(val, spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); +} + +static int query_sp(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + void __iomem *spc_regs_base = isys->pdata->base + + isys->pdata->ipdata->hw_variant.spc_offset; + u32 val; + + val = readl(spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); + /* return true when READY == 1, START == 0 */ + val &= IPU6_ISYS_SPC_STATUS_READY | IPU6_ISYS_SPC_STATUS_START; + + return val == IPU6_ISYS_SPC_STATUS_READY; +} + +static int ipu6_isys_fwcom_cfg_init(struct ipu6_isys *isys, + struct ipu6_fw_com_cfg *fwcom, + unsigned int num_streams) +{ + unsigned int max_send_queues, max_sram_blocks, max_devq_size; + struct ipu6_fw_syscom_queue_config *input_queue_cfg; + struct ipu6_fw_syscom_queue_config *output_queue_cfg; + struct device *dev = &isys->adev->auxdev.dev; + int type_proxy = IPU6_FW_ISYS_QUEUE_TYPE_PROXY; + int type_dev = IPU6_FW_ISYS_QUEUE_TYPE_DEV; + int type_msg = IPU6_FW_ISYS_QUEUE_TYPE_MSG; + int 
base_dev_send = IPU6_BASE_DEV_SEND_QUEUES; + int base_msg_send = IPU6_BASE_MSG_SEND_QUEUES; + int base_msg_recv = IPU6_BASE_MSG_RECV_QUEUES; + struct ipu6_fw_isys_fw_config *isys_fw_cfg; + u32 num_in_message_queues; + unsigned int max_streams; + unsigned int size; + unsigned int i; + + max_streams = isys->pdata->ipdata->max_streams; + max_send_queues = isys->pdata->ipdata->max_send_queues; + max_sram_blocks = isys->pdata->ipdata->max_sram_blocks; + max_devq_size = isys->pdata->ipdata->max_devq_size; + num_in_message_queues = clamp(num_streams, 1U, max_streams); + isys_fw_cfg = devm_kzalloc(dev, sizeof(*isys_fw_cfg), GFP_KERNEL); + if (!isys_fw_cfg) + return -ENOMEM; + + isys_fw_cfg->num_send_queues[type_proxy] = IPU6_N_MAX_PROXY_SEND_QUEUES; + isys_fw_cfg->num_send_queues[type_dev] = IPU6_N_MAX_DEV_SEND_QUEUES; + isys_fw_cfg->num_send_queues[type_msg] = num_in_message_queues; + isys_fw_cfg->num_recv_queues[type_proxy] = IPU6_N_MAX_PROXY_RECV_QUEUES; + /* Common msg/dev return queue */ + isys_fw_cfg->num_recv_queues[type_dev] = 0; + isys_fw_cfg->num_recv_queues[type_msg] = 1; + + size = sizeof(*input_queue_cfg) * max_send_queues; + input_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); + if (!input_queue_cfg) + return -ENOMEM; + + size = sizeof(*output_queue_cfg) * IPU6_N_MAX_RECV_QUEUES; + output_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); + if (!output_queue_cfg) + return -ENOMEM; + + fwcom->input = input_queue_cfg; + fwcom->output = output_queue_cfg; + + fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] + + isys_fw_cfg->num_send_queues[type_dev] + + isys_fw_cfg->num_send_queues[type_msg]; + + fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] + + isys_fw_cfg->num_recv_queues[type_dev] + + isys_fw_cfg->num_recv_queues[type_msg]; + + /* SRAM partitioning. Equal partitioning is set. 
*/ + for (i = 0; i < max_sram_blocks; i++) { + if (i < num_in_message_queues) + isys_fw_cfg->buffer_partition.num_gda_pages[i] = + (IPU6_DEVICE_GDA_NR_PAGES * + IPU6_DEVICE_GDA_VIRT_FACTOR) / + num_in_message_queues; + else + isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0; + } + + /* FW assumes proxy interface at fwcom queue 0 */ + for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) { + input_queue_cfg[i].token_size = + sizeof(struct ipu6_fw_proxy_send_queue_token); + input_queue_cfg[i].queue_size = IPU6_ISYS_SIZE_PROXY_SEND_QUEUE; + } + + for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) { + input_queue_cfg[base_dev_send + i].token_size = + sizeof(struct ipu6_fw_send_queue_token); + input_queue_cfg[base_dev_send + i].queue_size = max_devq_size; + } + + for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) { + input_queue_cfg[base_msg_send + i].token_size = + sizeof(struct ipu6_fw_send_queue_token); + input_queue_cfg[base_msg_send + i].queue_size = + IPU6_ISYS_SIZE_SEND_QUEUE; + } + + for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) { + output_queue_cfg[i].token_size = + sizeof(struct ipu6_fw_proxy_resp_queue_token); + output_queue_cfg[i].queue_size = + IPU6_ISYS_SIZE_PROXY_RECV_QUEUE; + } + /* There is no recv DEV queue */ + for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) { + output_queue_cfg[base_msg_recv + i].token_size = + sizeof(struct ipu6_fw_resp_queue_token); + output_queue_cfg[base_msg_recv + i].queue_size = + IPU6_ISYS_SIZE_RECV_QUEUE; + } + + fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset; + fwcom->specific_addr = isys_fw_cfg; + fwcom->specific_size = sizeof(*isys_fw_cfg); + + return 0; +} + +int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams) +{ + struct device *dev = &isys->adev->auxdev.dev; + int retry = IPU6_ISYS_OPEN_RETRY; + struct ipu6_fw_com_cfg fwcom = { + .cell_start = start_sp, + .cell_ready = query_sp, + .buttress_boot_offset = 
SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET, + }; + int ret; + + ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams); + + isys->fwcom = ipu6_fw_com_prepare(&fwcom, isys->adev, + isys->pdata->base); + if (!isys->fwcom) { + dev_err(dev, "isys fw com prepare failed\n"); + return -EIO; + } + + ret = ipu6_fw_com_open(isys->fwcom); + if (ret) { + dev_err(dev, "isys fw com open failed %d\n", ret); + return ret; + } + + do { + usleep_range(400, 500); + if (ipu6_fw_com_ready(isys->fwcom)) + break; + retry--; + } while (retry > 0); + + if (!retry) { + dev_err(dev, "isys port open ready failed %d\n", ret); + ipu6_fw_isys_close(isys); + ret = -EIO; + } + + return ret; +} + +struct ipu6_fw_isys_resp_info_abi * +ipu6_fw_isys_get_resp(void *context, unsigned int queue) +{ + return ipu6_recv_get_token(context, queue); +} + +void ipu6_fw_isys_put_resp(void *context, unsigned int queue) +{ + ipu6_recv_put_token(context, queue); +} + +void ipu6_fw_isys_dump_stream_cfg(struct device *dev, + struct ipu6_fw_isys_stream_cfg_data_abi *cfg) +{ + unsigned int i; + + dev_dbg(dev, "-----------------------------------------------------\n"); + dev_dbg(dev, "IPU6_FW_ISYS_STREAM_CFG_DATA\n"); + + dev_dbg(dev, "compfmt = %d\n", cfg->vc); + dev_dbg(dev, "src = %d\n", cfg->src); + dev_dbg(dev, "vc = %d\n", cfg->vc); + dev_dbg(dev, "isl_use = %d\n", cfg->isl_use); + dev_dbg(dev, "sensor_type = %d\n", cfg->sensor_type); + + dev_dbg(dev, "send_irq_sof_discarded = %d\n", + cfg->send_irq_sof_discarded); + dev_dbg(dev, "send_irq_eof_discarded = %d\n", + cfg->send_irq_eof_discarded); + dev_dbg(dev, "send_resp_sof_discarded = %d\n", + cfg->send_resp_sof_discarded); + dev_dbg(dev, "send_resp_eof_discarded = %d\n", + cfg->send_resp_eof_discarded); + + dev_dbg(dev, "crop:\n"); + dev_dbg(dev, "\t.left_top = [%d, %d]\n", cfg->crop.left_offset, + cfg->crop.top_offset); + dev_dbg(dev, "\t.right_bottom = [%d, %d]\n", cfg->crop.right_offset, + cfg->crop.bottom_offset); + + dev_dbg(dev, "nof_input_pins = %d\n", 
cfg->nof_input_pins); + for (i = 0; i < cfg->nof_input_pins; i++) { + dev_dbg(dev, "input pin[%d]:\n", i); + dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt); + dev_dbg(dev, "\t.mipi_store_mode = %d\n", + cfg->input_pins[i].mipi_store_mode); + dev_dbg(dev, "\t.bits_per_pix = %d\n", + cfg->input_pins[i].bits_per_pix); + dev_dbg(dev, "\t.mapped_dt = 0x%0x\n", + cfg->input_pins[i].mapped_dt); + dev_dbg(dev, "\t.input_res = %dx%d\n", + cfg->input_pins[i].input_res.width, + cfg->input_pins[i].input_res.height); + dev_dbg(dev, "\t.mipi_decompression = %d\n", + cfg->input_pins[i].mipi_decompression); + dev_dbg(dev, "\t.capture_mode = %d\n", + cfg->input_pins[i].capture_mode); + } + + dev_dbg(dev, "nof_output_pins = %d\n", cfg->nof_output_pins); + for (i = 0; i < cfg->nof_output_pins; i++) { + dev_dbg(dev, "output_pin[%d]:\n", i); + dev_dbg(dev, "\t.input_pin_id = %d\n", + cfg->output_pins[i].input_pin_id); + dev_dbg(dev, "\t.output_res = %dx%d\n", + cfg->output_pins[i].output_res.width, + cfg->output_pins[i].output_res.height); + dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride); + dev_dbg(dev, "\t.pt = %d\n", cfg->output_pins[i].pt); + dev_dbg(dev, "\t.payload_buf_size = %d\n", + cfg->output_pins[i].payload_buf_size); + dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft); + dev_dbg(dev, "\t.watermark_in_lines = %d\n", + cfg->output_pins[i].watermark_in_lines); + dev_dbg(dev, "\t.send_irq = %d\n", + cfg->output_pins[i].send_irq); + dev_dbg(dev, "\t.reserve_compression = %d\n", + cfg->output_pins[i].reserve_compression); + dev_dbg(dev, "\t.snoopable = %d\n", + cfg->output_pins[i].snoopable); + dev_dbg(dev, "\t.error_handling_enable = %d\n", + cfg->output_pins[i].error_handling_enable); + dev_dbg(dev, "\t.sensor_type = %d\n", + cfg->output_pins[i].sensor_type); + } + dev_dbg(dev, "-----------------------------------------------------\n"); +} + +void +ipu6_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu6_fw_isys_frame_buff_set_abi *buf, + 
unsigned int outputs) +{ + unsigned int i; + + dev_dbg(dev, "-----------------------------------------------------\n"); + dev_dbg(dev, "IPU6_FW_ISYS_FRAME_BUFF_SET\n"); + + for (i = 0; i < outputs; i++) { + dev_dbg(dev, "output_pin[%d]:\n", i); + dev_dbg(dev, "\t.out_buf_id = %llu\n", + buf->output_pins[i].out_buf_id); + dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr); + dev_dbg(dev, "\t.compress = %d\n", + buf->output_pins[i].compress); + } + + dev_dbg(dev, "send_irq_sof = 0x%x\n", buf->send_irq_sof); + dev_dbg(dev, "send_irq_eof = 0x%x\n", buf->send_irq_eof); + dev_dbg(dev, "send_resp_sof = 0x%x\n", buf->send_resp_sof); + dev_dbg(dev, "send_resp_eof = 0x%x\n", buf->send_resp_eof); + dev_dbg(dev, "send_irq_capture_ack = 0x%x\n", + buf->send_irq_capture_ack); + dev_dbg(dev, "send_irq_capture_done = 0x%x\n", + buf->send_irq_capture_done); + dev_dbg(dev, "send_resp_capture_ack = 0x%x\n", + buf->send_resp_capture_ack); + dev_dbg(dev, "send_resp_capture_done = 0x%x\n", + buf->send_resp_capture_done); + + dev_dbg(dev, "-----------------------------------------------------\n"); +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h new file mode 100644 index 0000000..b60f020 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h @@ -0,0 +1,596 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_FW_ISYS_H +#define IPU6_FW_ISYS_H + +#include + +struct device; +struct ipu6_isys; + +/* Max number of Input/Output Pins */ +#define IPU6_MAX_IPINS 4 + +#define IPU6_MAX_OPINS ((IPU6_MAX_IPINS) + 1) + +#define IPU6_STREAM_ID_MAX 16 +#define IPU6_NONSECURE_STREAM_ID_MAX 12 +#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX) +#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX) +#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX) +#define IPU6SE_STREAM_ID_MAX 8 +#define IPU6SE_NONSECURE_STREAM_ID_MAX 4 +#define 
IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX) +#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX) +#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX) + +/* Single return queue for all streams/commands type */ +#define IPU6_N_MAX_MSG_RECV_QUEUES 1 +/* Single device queue for high priority commands (bypass in-order queue) */ +#define IPU6_N_MAX_DEV_SEND_QUEUES 1 +/* Single dedicated send queue for proxy interface */ +#define IPU6_N_MAX_PROXY_SEND_QUEUES 1 +/* Single dedicated recv queue for proxy interface */ +#define IPU6_N_MAX_PROXY_RECV_QUEUES 1 +/* Send queues layout */ +#define IPU6_BASE_PROXY_SEND_QUEUES 0 +#define IPU6_BASE_DEV_SEND_QUEUES \ + (IPU6_BASE_PROXY_SEND_QUEUES + IPU6_N_MAX_PROXY_SEND_QUEUES) +#define IPU6_BASE_MSG_SEND_QUEUES \ + (IPU6_BASE_DEV_SEND_QUEUES + IPU6_N_MAX_DEV_SEND_QUEUES) +/* Recv queues layout */ +#define IPU6_BASE_PROXY_RECV_QUEUES 0 +#define IPU6_BASE_MSG_RECV_QUEUES \ + (IPU6_BASE_PROXY_RECV_QUEUES + IPU6_N_MAX_PROXY_RECV_QUEUES) +#define IPU6_N_MAX_RECV_QUEUES \ + (IPU6_BASE_MSG_RECV_QUEUES + IPU6_N_MAX_MSG_RECV_QUEUES) + +#define IPU6_N_MAX_SEND_QUEUES \ + (IPU6_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES) +#define IPU6SE_N_MAX_SEND_QUEUES \ + (IPU6_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES) + +/* Max number of planes for frame formats supported by the FW */ +#define IPU6_PIN_PLANES_MAX 4 + +#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 +#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 +#define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 +#define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 +/* + * Device close takes some time from last ack message to actual stopping + * of the SP processor. As long as the SP processor runs we can't proceed with + * clean up of resources. 
+ */ +#define IPU6_ISYS_OPEN_RETRY 2000 +#define IPU6_ISYS_CLOSE_RETRY 2000 +#define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) + +enum ipu6_fw_isys_resp_type { + IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, + IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, + IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, + IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, + IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, + IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, + IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, + IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, + IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, + IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, + IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, + IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, + N_IPU6_FW_ISYS_RESP_TYPE +}; + +enum ipu6_fw_isys_send_type { + IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, + IPU6_FW_ISYS_SEND_TYPE_STREAM_START, + IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, + IPU6_FW_ISYS_SEND_TYPE_STREAM_STOP, + IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE, + N_IPU6_FW_ISYS_SEND_TYPE +}; + +enum ipu6_fw_isys_queue_type { + IPU6_FW_ISYS_QUEUE_TYPE_PROXY = 0, + IPU6_FW_ISYS_QUEUE_TYPE_DEV, + IPU6_FW_ISYS_QUEUE_TYPE_MSG, + N_IPU6_FW_ISYS_QUEUE_TYPE +}; + +enum ipu6_fw_isys_stream_source { + IPU6_FW_ISYS_STREAM_SRC_PORT_0 = 0, + IPU6_FW_ISYS_STREAM_SRC_PORT_1, + IPU6_FW_ISYS_STREAM_SRC_PORT_2, + IPU6_FW_ISYS_STREAM_SRC_PORT_3, + IPU6_FW_ISYS_STREAM_SRC_PORT_4, + IPU6_FW_ISYS_STREAM_SRC_PORT_5, + IPU6_FW_ISYS_STREAM_SRC_PORT_6, + IPU6_FW_ISYS_STREAM_SRC_PORT_7, + IPU6_FW_ISYS_STREAM_SRC_PORT_8, + IPU6_FW_ISYS_STREAM_SRC_PORT_9, + IPU6_FW_ISYS_STREAM_SRC_PORT_10, + IPU6_FW_ISYS_STREAM_SRC_PORT_11, + 
IPU6_FW_ISYS_STREAM_SRC_PORT_12, + IPU6_FW_ISYS_STREAM_SRC_PORT_13, + IPU6_FW_ISYS_STREAM_SRC_PORT_14, + IPU6_FW_ISYS_STREAM_SRC_PORT_15, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_2, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_3, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_4, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_5, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_6, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_7, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_8, + IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_9, + N_IPU6_FW_ISYS_STREAM_SRC +}; + +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU6_FW_ISYS_STREAM_SRC_PORT_0 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU6_FW_ISYS_STREAM_SRC_PORT_1 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU6_FW_ISYS_STREAM_SRC_PORT_2 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU6_FW_ISYS_STREAM_SRC_PORT_3 + +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU6_FW_ISYS_STREAM_SRC_PORT_4 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU6_FW_ISYS_STREAM_SRC_PORT_5 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_6 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_7 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_8 +#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 \ + IPU6_FW_ISYS_STREAM_SRC_PORT_9 + +#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0 +#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1 + +/* + * enum ipu6_fw_isys_mipi_vc: MIPI csi2 spec + * supports up to 4 virtual per physical channel + */ +enum ipu6_fw_isys_mipi_vc { + IPU6_FW_ISYS_MIPI_VC_0 = 0, + IPU6_FW_ISYS_MIPI_VC_1, + IPU6_FW_ISYS_MIPI_VC_2, + IPU6_FW_ISYS_MIPI_VC_3, + N_IPU6_FW_ISYS_MIPI_VC +}; + +enum ipu6_fw_isys_frame_format_type { + IPU6_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ + IPU6_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ + 
IPU6_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ + /* 12 bit YUV 420, Intel proprietary tiled format */ + IPU6_FW_ISYS_FRAME_FORMAT_NV12_TILEY, + + IPU6_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ + IPU6_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ + IPU6_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ + IPU6_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ + IPU6_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ + /* Internal format, 2 y lines followed by a uvinterleaved line */ + IPU6_FW_ISYS_FRAME_FORMAT_YUV_LINE, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IPU6_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + /** + * 16 bit RGB, 1 plane. Each 3 sub pixels are packed into one 16 bit + * value, 5 bits for R, 6 bits for G and 5 bits for B. 
+ */ + IPU6_FW_ISYS_FRAME_FORMAT_RGB565, + IPU6_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IPU6_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha */ + IPU6_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ + IPU6_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ + N_IPU6_FW_ISYS_FRAME_FORMAT +}; + +enum ipu6_fw_isys_pin_type { + /* captured as MIPI packets */ + IPU6_FW_ISYS_PIN_TYPE_MIPI = 0, + /* captured through the SoC path */ + IPU6_FW_ISYS_PIN_TYPE_RAW_SOC = 3, +}; + +/* + * enum ipu6_fw_isys_mipi_store_mode. Describes if long MIPI packets reach + * MIPI SRAM with the long packet header or + * if not, then only option is to capture it with pin type MIPI. + */ +enum ipu6_fw_isys_mipi_store_mode { + IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, + IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, + N_IPU6_FW_ISYS_MIPI_STORE_MODE +}; + +enum ipu6_fw_isys_capture_mode { + IPU6_FW_ISYS_CAPTURE_MODE_REGULAR = 0, + IPU6_FW_ISYS_CAPTURE_MODE_BURST, + N_IPU6_FW_ISYS_CAPTURE_MODE, +}; + +enum ipu6_fw_isys_sensor_mode { + IPU6_FW_ISYS_SENSOR_MODE_NORMAL = 0, + IPU6_FW_ISYS_SENSOR_MODE_TOBII, + N_IPU6_FW_ISYS_SENSOR_MODE, +}; + +enum ipu6_fw_isys_error { + IPU6_FW_ISYS_ERROR_NONE = 0, + IPU6_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, + IPU6_FW_ISYS_ERROR_HW_CONSISTENCY, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, + IPU6_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, + IPU6_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, + IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, + IPU6_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, + IPU6_FW_ISYS_ERROR_SENSOR_FW_SYNC, + IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, + IPU6_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, + N_IPU6_FW_ISYS_ERROR +}; + +enum ipu6_fw_proxy_error { + IPU6_FW_PROXY_ERROR_NONE = 0, + IPU6_FW_PROXY_ERROR_INVALID_WRITE_REGION, + IPU6_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, + 
N_IPU6_FW_PROXY_ERROR +}; + +/* firmware ABI structure below are aligned in firmware, no need pack */ +struct ipu6_fw_isys_buffer_partition_abi { + u32 num_gda_pages[IPU6_STREAM_ID_MAX]; +}; + +struct ipu6_fw_isys_fw_config { + struct ipu6_fw_isys_buffer_partition_abi buffer_partition; + u32 num_send_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; + u32 num_recv_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; +}; + +/* + * struct ipu6_fw_isys_resolution_abi: Generic resolution structure. + */ +struct ipu6_fw_isys_resolution_abi { + u32 width; + u32 height; +}; + +/** + * struct ipu6_fw_isys_output_pin_payload_abi - ISYS output pin buffer + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + */ +struct ipu6_fw_isys_output_pin_payload_abi { + u64 out_buf_id; + u32 addr; + u32 compress; +}; + +/** + * struct ipu6_fw_isys_output_pin_info_abi - ISYS output pin info + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: minimum size in Bytes of all buffers that will be + * supplied for capture on this pin + * @ts_offsets: ts_offsets + * @s2m_pixel_soc_pixel_remapping: pixel soc remapping (see the definition of + * S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING) + * @csi_be_soc_pixel_remapping: see s2m_pixel_soc_pixel_remapping + * @send_irq: assert if pin event should trigger irq + * @input_pin_id: related input pin id + * @pt: pin type -real format "enum ipu6_fw_isys_pin_type" + * @ft: frame format type -real format "enum ipu6_fw_isys_frame_format_type" + * @reserved: a reserved field + * @reserve_compression: reserve compression resources for pin + * @snoopable: snoopable + * @error_handling_enable: enable error handling + * @sensor_type: sensor_type + */ +struct ipu6_fw_isys_output_pin_info_abi { + struct ipu6_fw_isys_resolution_abi 
output_res; + u32 stride; + u32 watermark_in_lines; + u32 payload_buf_size; + u32 ts_offsets[IPU6_PIN_PLANES_MAX]; + u32 s2m_pixel_soc_pixel_remapping; + u32 csi_be_soc_pixel_remapping; + u8 send_irq; + u8 input_pin_id; + u8 pt; + u8 ft; + u8 reserved; + u8 reserve_compression; + u8 snoopable; + u8 error_handling_enable; + u32 sensor_type; +}; + +/** + * struct ipu6_fw_isys_input_pin_info_abi - ISYS input pin info + * @input_res: input resolution + * @dt: mipi data type ((enum ipu6_fw_isys_mipi_data_type) + * @mipi_store_mode: defines if legacy long packet header will be stored or + * discarded if discarded, output pin type for this + * input pin can only be MIPI + * (enum ipu6_fw_isys_mipi_store_mode) + * @bits_per_pix: native bits per pixel + * @mapped_dt: actual data type from sensor + * @mipi_decompression: defines which compression will be in mipi backend + * @crop_first_and_last_lines: Control whether to crop the first and last line + * of the input image. Crop done by HW device. + * @capture_mode: mode of capture, regular or burst, default value is regular + * @reserved: a reserved field + */ +struct ipu6_fw_isys_input_pin_info_abi { + struct ipu6_fw_isys_resolution_abi input_res; + u8 dt; + u8 mipi_store_mode; + u8 bits_per_pix; + u8 mapped_dt; + u8 mipi_decompression; + u8 crop_first_and_last_lines; + u8 capture_mode; + u8 reserved; +}; + +/** + * struct ipu6_fw_isys_cropping_abi - ISYS cropping coordinates + * @top_offset: Top offset + * @left_offset: Left offset + * @bottom_offset: Bottom offset + * @right_offset: Right offset + */ +struct ipu6_fw_isys_cropping_abi { + s32 top_offset; + s32 left_offset; + s32 bottom_offset; + s32 right_offset; +}; + +/** + * struct ipu6_fw_isys_stream_cfg_data_abi - ISYS stream configuration data + * ISYS stream configuration data structure + * @crop: for extended use and is not used in FW currently + * @input_pins: input pin descriptors + * @output_pins: output pin descriptors + * @compfmt: de-compression setting for 
User Defined Data + * @nof_input_pins: number of input pins + * @nof_output_pins: number of output pins + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded + * and send the response + * - if '0' the send_resp_sof_discarded will determine + * whether to send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded + * and send the response + * - if '0' the send_resp_eof_discarded will determine + * whether to send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + * @sensor_type: type of connected sensor, tobii or others, default is 0 + * @reserved: a reserved field + * @reserved2: a reserved field + */ +struct ipu6_fw_isys_stream_cfg_data_abi { + struct ipu6_fw_isys_cropping_abi crop; + struct ipu6_fw_isys_input_pin_info_abi input_pins[IPU6_MAX_IPINS]; + struct ipu6_fw_isys_output_pin_info_abi output_pins[IPU6_MAX_OPINS]; + u32 compfmt; + u8 nof_input_pins; + u8 nof_output_pins; + u8 send_irq_sof_discarded; + u8 send_irq_eof_discarded; + u8 send_resp_sof_discarded; + u8 send_resp_eof_discarded; + u8 src; + u8 vc; + u8 isl_use; + u8 sensor_type; + u8 reserved; + u8 reserved2; +}; + +/** + * struct ipu6_fw_isys_frame_buff_set_abi - ISYS frame buffer set (request) + * @output_pins: output pin addresses + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and + * send the response + * - if '0' the send_resp_sof will determine whether to + * send the response + * 
@send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and + * send the response + * - if '0' the send_resp_eof will determine whether to + * send the response + * @send_irq_capture_ack: send irq on capture ack + * @send_irq_capture_done: send irq on capture done + * @send_resp_sof: send response for frame sof detected, + * used only when send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, + * used only when send_irq_eof is '0' + * @send_resp_capture_ack: send response for capture ack event + * @send_resp_capture_done: send response for capture done event + * @reserved: a reserved field + */ +struct ipu6_fw_isys_frame_buff_set_abi { + struct ipu6_fw_isys_output_pin_payload_abi output_pins[IPU6_MAX_OPINS]; + u8 send_irq_sof; + u8 send_irq_eof; + u8 send_irq_capture_ack; + u8 send_irq_capture_done; + u8 send_resp_sof; + u8 send_resp_eof; + u8 send_resp_capture_ack; + u8 send_resp_capture_done; + u8 reserved[8]; +}; + +/** + * struct ipu6_fw_isys_error_info_abi - ISYS error information + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional error info + */ +struct ipu6_fw_isys_error_info_abi { + u32 error; + u32 error_details; +}; + +/** + * struct ipu6_fw_isys_resp_info_abi - ISYS firmware response + * @buf_id: buffer ID + * @pin: this var is only valid for pin event related responses, + * contains pin addresses + * @error_info: error information from the FW + * @timestamp: Time information for event if available + * @stream_handle: stream id the response corresponds to + * @type: response type (enum ipu6_fw_isys_resp_type) + * @pin_id: pin id that the pin payload corresponds to + * @reserved: a reserved field + * @reserved2: a reserved field + */ +struct ipu6_fw_isys_resp_info_abi { + u64 buf_id; + struct ipu6_fw_isys_output_pin_payload_abi pin; + struct ipu6_fw_isys_error_info_abi error_info; + u32 timestamp[2]; + u8 stream_handle; + u8 
type; + u8 pin_id; + u8 reserved; + u32 reserved2; +}; + +/** + * struct ipu6_fw_isys_proxy_error_info_abi - ISYS proxy error + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional error info + */ +struct ipu6_fw_isys_proxy_error_info_abi { + u32 error; + u32 error_details; +}; + +struct ipu6_fw_isys_proxy_resp_info_abi { + u32 request_id; + struct ipu6_fw_isys_proxy_error_info_abi error_info; +}; + +/** + * struct ipu6_fw_proxy_write_queue_token - ISYS proxy write queue token + * @request_id: update id for the specific proxy write request + * @region_index: Region id for the proxy write request + * @offset: Offset of the write request according to the base address + * of the region + * @value: Value that is requested to be written with the proxy write request + */ +struct ipu6_fw_proxy_write_queue_token { + u32 request_id; + u32 region_index; + u32 offset; + u32 value; +}; + +/** + * struct ipu6_fw_resp_queue_token - ISYS response queue token + * @resp_info: response info + */ +struct ipu6_fw_resp_queue_token { + struct ipu6_fw_isys_resp_info_abi resp_info; +}; + +/** + * struct ipu6_fw_send_queue_token - ISYS send queue token + * @buf_handle: buffer handle + * @payload: payload + * @send_type: send_type + * @stream_id: stream_id + */ +struct ipu6_fw_send_queue_token { + u64 buf_handle; + u32 payload; + u16 send_type; + u16 stream_id; +}; + +/** + * struct ipu6_fw_proxy_resp_queue_token - ISYS proxy response queue token + * @proxy_resp_info: proxy response info + */ +struct ipu6_fw_proxy_resp_queue_token { + struct ipu6_fw_isys_proxy_resp_info_abi proxy_resp_info; +}; + +/** + * struct ipu6_fw_proxy_send_queue_token - SYS proxy send queue token + * @request_id: request_id + * @region_index: region_index + * @offset: offset + * @value: value + */ +struct ipu6_fw_proxy_send_queue_token { + u32 request_id; + u32 region_index; + u32 offset; + u32 value; +}; + +void +ipu6_fw_isys_dump_stream_cfg(struct 
device *dev, + struct ipu6_fw_isys_stream_cfg_data_abi *cfg); +void +ipu6_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu6_fw_isys_frame_buff_set_abi *buf, + unsigned int outputs); +int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams); +int ipu6_fw_isys_close(struct ipu6_isys *isys); +int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, u16 send_type); +int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, + size_t size, u16 send_type); +int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned int offset, u32 value); +void ipu6_fw_isys_cleanup(struct ipu6_isys *isys); +struct ipu6_fw_isys_resp_info_abi * +ipu6_fw_isys_get_resp(void *context, unsigned int queue); +void ipu6_fw_isys_put_resp(void *context, unsigned int queue); +#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c new file mode 100644 index 0000000..051898c --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c @@ -0,0 +1,649 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-isys-subdev.h" +#include "ipu6-platform-isys-csi2-reg.h" + +static const u32 csi2_supported_codes[] = { + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + 
MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_META_8, + MEDIA_BUS_FMT_META_10, + MEDIA_BUS_FMT_META_12, + MEDIA_BUS_FMT_META_16, + MEDIA_BUS_FMT_META_24, + 0 +}; + +/* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. + */ +static const struct ipu6_csi2_error dphy_rx_errors[] = { + { "Single packet header error corrected", true }, + { "Multiple packet header errors detected", true }, + { "Payload checksum (CRC) error", true }, + { "Transfer FIFO overflow", false }, + { "Reserved short packet data type detected", true }, + { "Reserved long packet data type detected", true }, + { "Incomplete long packet detected", false }, + { "Frame sync error", false }, + { "Line sync error", false }, + { "DPHY recoverable synchronization error", true }, + { "DPHY fatal error", false }, + { "DPHY elastic FIFO overflow", false }, + { "Inter-frame short packet discarded", true }, + { "Inter-frame long packet discarded", true }, + { "MIPI pktgen overflow", false }, + { "MIPI pktgen data loss", false }, + { "FIFO overflow", false }, + { "Lane deskew", false }, + { "SOT sync error", false }, + { "HSIDLE detected", false } +}; + +s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2) +{ + struct media_pad *src_pad; + struct v4l2_subdev *ext_sd; + struct device *dev; + + if (!csi2) + return -EINVAL; + + dev = &csi2->isys->adev->auxdev.dev; + src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); + if (IS_ERR(src_pad)) { + dev_err(dev, "can't get source pad of %s (%ld)\n", + csi2->asd.sd.name, PTR_ERR(src_pad)); + return PTR_ERR(src_pad); + } + + ext_sd = media_entity_to_v4l2_subdev(src_pad->entity); + if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name)) + return -ENODEV; + + return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0); +} + +static int csi2_subscribe_event(struct 
v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct device *dev = &csi2->isys->adev->auxdev.dev; + + dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n", + sub->type, sub->id); + + switch (sub->type) { + case V4L2_EVENT_FRAME_SYNC: + return v4l2_event_subscribe(fh, sub, 10, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { + .subscribe_event = csi2_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +/* + * The input system CSI2+ receiver has several + * parameters affecting the receiver timings. These depend + * on the MIPI bus frequency F in Hz (sensor transmitter rate) + * as follows: + * register value = (A/1e9 + B * UI) / COUNT_ACC + * where + * UI = 1 / (2 * F) in seconds + * COUNT_ACC = counter accuracy in seconds + * COUNT_ACC = 0.125 ns = 1 / 8 ns, ACCINV = 8. + * + * A and B are coefficients from the table below, + * depending whether the register minimum or maximum value is + * calculated. + * Minimum Maximum + * Clock lane A B A B + * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 + * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 + * Data lanes + * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 + * + * We use the minimum values of both A and B. 
+ */ + +#define DIV_SHIFT 8 +#define CSI2_ACCINV 8 + +static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv) +{ + return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) + / (s32)(link_freq >> DIV_SHIFT)); +} + +static int +ipu6_isys_csi2_calc_timing(struct ipu6_isys_csi2 *csi2, + struct ipu6_isys_csi2_timing *timing, s32 accinv) +{ + struct device *dev = &csi2->isys->adev->auxdev.dev; + s64 link_freq; + + link_freq = ipu6_isys_csi2_get_link_freq(csi2); + if (link_freq < 0) + return link_freq; + + timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, + link_freq, accinv); + timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, + link_freq, accinv); + timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, + link_freq, accinv); + timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, + link_freq, accinv); + + dev_dbg(dev, "ctermen %u csettle %u dtermen %u dsettle %u\n", + timing->ctermen, timing->csettle, + timing->dtermen, timing->dsettle); + + return 0; +} + +void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2) +{ + u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + struct ipu6_isys *isys = csi2->isys; + u32 mask; + + mask = isys->pdata->ipdata->csi2.irq_mask; + writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + csi2->receiver_errors |= irq & mask; +} + +void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2) +{ + struct device *dev = &csi2->isys->adev->auxdev.dev; + const struct ipu6_csi2_error *errors; + u32 status; + u32 i; + + /* register errors once more in case of interrupts are disabled */ + ipu6_isys_register_errors(csi2); + status = csi2->receiver_errors; + csi2->receiver_errors = 0; + errors = dphy_rx_errors; + + for (i = 0; i < 
CSI_RX_NUM_ERRORS_IN_IRQ; i++) { + if (status & BIT(i)) + dev_err_ratelimited(dev, "csi2-%i error: %s\n", + csi2->port, errors[i].error_string); + } +} + +static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, + const struct ipu6_isys_csi2_timing *timing, + unsigned int nlanes, int enable) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct ipu6_isys *isys = csi2->isys; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_isys_csi2_config cfg; + unsigned int nports; + int ret = 0; + u32 mask = 0; + u32 i; + + dev_dbg(dev, "stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off", + csi2->port, nlanes); + + cfg.port = csi2->port; + cfg.nlanes = nlanes; + + mask = isys->pdata->ipdata->csi2.irq_mask; + nports = isys->pdata->ipdata->csi2.nports; + + if (!enable) { + writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE); + writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE); + + writel(0, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + writel(0, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + writel(0xffffffff, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + + isys->phy_set_power(isys, &cfg, timing, false); + + writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT + (isys->pdata->ipdata->csi2.fw_access_port_ofs, + csi2->port)); + writel(0, isys->pdata->base + + CSI_REG_HUB_DRV_ACCESS_PORT(csi2->port)); + + return ret; + } + + /* reset port reset */ + writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST); + usleep_range(100, 200); + writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST); + + /* enable port clock */ + for (i = 0; i < nports; i++) { + writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i)); + writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT + 
(isys->pdata->ipdata->csi2.fw_access_port_ofs, i)); + } + + /* enable all error related irq */ + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); + writel(mask, + csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + + /* + * Using event from firmware instead of irq to handle CSI2 sync event + * which can reduce system wakeups. If CSI2 sync irq enabled, we need + * disable the firmware CSI2 sync event to avoid duplicate handling. + */ + writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); + writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); + writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); + + /* configure to enable FE and PPI2CSI */ + writel(0, csi2->base + CSI_REG_CSI_FE_MODE); + writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL); + writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID, + csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL); + writel(FIELD_PREP(PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK, nlanes - 1), + csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF); + + writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE); + writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE); + + ret = isys->phy_set_power(isys, &cfg, timing, true); + if (ret) + dev_err(dev, "csi-%d phy power up failed %d\n", csi2->port, + ret); + + return ret; +} + +static int 
ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct ipu6_isys_csi2_timing timing = { }; + struct v4l2_subdev *remote_sd; + struct media_pad *remote_pad; + u64 sink_streams; + int ret; + + remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + + sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, + CSI2_PAD_SINK, + &streams_mask); + + ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); + if (ret) + return ret; + + ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); + if (ret) + return ret; + + ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, + sink_streams); + if (ret) { + ipu6_isys_csi2_set_stream(sd, NULL, 0, false); + return ret; + } + + return 0; +} + +static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + struct v4l2_subdev *remote_sd; + struct media_pad *remote_pad; + u64 sink_streams; + + sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, + CSI2_PAD_SINK, + &streams_mask); + + remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + + ipu6_isys_csi2_set_stream(sd, NULL, 0, false); + + v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); + + return 0; +} + +static int ipu6_isys_csi2_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct device *dev = &asd->isys->adev->auxdev.dev; + struct v4l2_mbus_framefmt *sink_ffmt; + struct v4l2_mbus_framefmt *src_ffmt; + struct v4l2_rect *crop; + + if (sel->pad == CSI2_PAD_SINK || 
sel->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, + sel->pad, + sel->stream); + if (!sink_ffmt) + return -EINVAL; + + src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream); + if (!src_ffmt) + return -EINVAL; + + crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); + if (!crop) + return -EINVAL; + + /* Only vertical cropping is supported */ + sel->r.left = 0; + sel->r.width = sink_ffmt->width; + /* Non-bayer formats can't be single line cropped */ + if (!ipu6_isys_is_bayer_format(sink_ffmt->code)) + sel->r.top &= ~1; + sel->r.height = clamp(sel->r.height & ~1, IPU6_ISYS_MIN_HEIGHT, + sink_ffmt->height - sel->r.top); + *crop = sel->r; + + /* update source pad format */ + src_ffmt->width = sel->r.width; + src_ffmt->height = sel->r.height; + if (ipu6_isys_is_bayer_format(sink_ffmt->code)) + src_ffmt->code = ipu6_isys_convert_bayer_order(sink_ffmt->code, + sel->r.left, + sel->r.top); + dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n", + sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height, + src_ffmt->code); + + return 0; +} + +static int ipu6_isys_csi2_get_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + struct v4l2_mbus_framefmt *sink_ffmt; + struct v4l2_rect *crop; + int ret = 0; + + if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK) + return -EINVAL; + + sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, + sel->pad, + sel->stream); + if (!sink_ffmt) + return -EINVAL; + + crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); + if (!crop) + return -EINVAL; + + switch (sel->target) { + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + sel->r.left = 0; + sel->r.top = 0; + sel->r.width = sink_ffmt->width; + sel->r.height = sink_ffmt->height; + break; + case V4L2_SEL_TGT_CROP: + sel->r = *crop; + break; + default: + ret = -EINVAL; + } + + return ret; +} 
+ +static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { + .get_fmt = v4l2_subdev_get_fmt, + .set_fmt = ipu6_isys_subdev_set_fmt, + .get_selection = ipu6_isys_csi2_get_sel, + .set_selection = ipu6_isys_csi2_set_sel, + .enum_mbus_code = ipu6_isys_subdev_enum_mbus_code, + .set_routing = ipu6_isys_subdev_set_routing, + .enable_streams = ipu6_isys_csi2_enable_streams, + .disable_streams = ipu6_isys_csi2_disable_streams, +}; + +static const struct v4l2_subdev_ops csi2_sd_ops = { + .core = &csi2_sd_core_ops, + .pad = &csi2_sd_pad_ops, +}; + +static const struct media_entity_operations csi2_entity_ops = { + .link_validate = v4l2_subdev_link_validate, + .has_pad_interdep = v4l2_subdev_has_pad_interdep, +}; + +void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2) +{ + if (!csi2->isys) + return; + + v4l2_device_unregister_subdev(&csi2->asd.sd); + v4l2_subdev_cleanup(&csi2->asd.sd); + ipu6_isys_subdev_cleanup(&csi2->asd); + csi2->isys = NULL; +} + +int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, + struct ipu6_isys *isys, + void __iomem *base, unsigned int index) +{ + struct device *dev = &isys->adev->auxdev.dev; + int ret; + + csi2->isys = isys; + csi2->base = base; + csi2->port = index; + + csi2->asd.sd.entity.ops = &csi2_entity_ops; + csi2->asd.isys = isys; + ret = ipu6_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, + NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS); + if (ret) + goto fail; + + csi2->asd.source = IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index; + csi2->asd.supported_codes = csi2_supported_codes; + snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), + IPU6_ISYS_ENTITY_PREFIX " CSI2 %u", index); + v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); + ret = v4l2_subdev_init_finalize(&csi2->asd.sd); + if (ret) { + dev_err(dev, "failed to init v4l2 subdev\n"); + goto fail; + } + + ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); + if (ret) { + dev_err(dev, "failed to register v4l2 subdev\n"); + goto fail; + } + + return 0; + +fail: + 
ipu6_isys_csi2_cleanup(csi2); + + return ret; +} + +void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) +{ + struct video_device *vdev = stream->asd->sd.devnode; + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + struct v4l2_event ev = { + .type = V4L2_EVENT_FRAME_SYNC, + }; + + ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); + v4l2_event_queue(vdev, &ev); + + dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n", + csi2->port, ev.u.frame_sync.frame_sequence, stream->vc); +} + +void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) +{ + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + u32 frame_sequence = atomic_read(&stream->sequence); + + dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n", + csi2->port, frame_sequence); +} + +int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, + struct v4l2_mbus_frame_desc_entry *entry) +{ + struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; + struct device *dev = &csi2->isys->adev->auxdev.dev; + struct v4l2_mbus_frame_desc desc; + struct v4l2_subdev *source; + struct media_pad *pad; + unsigned int i; + int ret; + + source = media_entity_to_v4l2_subdev(source_entity); + if (!source) + return -EPIPE; + + pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]); + if (!pad) + return -EPIPE; + + ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc); + if (ret) + return ret; + + if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { + dev_err(dev, "Unsupported frame descriptor type\n"); + return -EINVAL; + } + + for (i = 0; i < desc.num_entries; i++) { + if (source_stream == desc.entry[i].stream) { + desc_entry = &desc.entry[i]; + break; + } + } + + if (!desc_entry) { + dev_err(dev, "Failed to find stream %u from remote 
subdev\n", + source_stream); + return -EINVAL; + } + + if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) { + dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc); + return -EINVAL; + } + + *entry = *desc_entry; + + return 0; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h new file mode 100644 index 0000000..bc8594c --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_CSI2_H +#define IPU6_ISYS_CSI2_H + +#include + +#include "ipu6-isys-subdev.h" +#include "ipu6-isys-video.h" + +struct media_entity; +struct v4l2_mbus_frame_desc_entry; + +struct ipu6_isys_video; +struct ipu6_isys; +struct ipu6_isys_csi2_pdata; +struct ipu6_isys_stream; + +#define NR_OF_CSI2_VC 16 +#define INVALID_VC_ID -1 +#define NR_OF_CSI2_SINK_PADS 1 +#define CSI2_PAD_SINK 0 +#define NR_OF_CSI2_SRC_PADS 8 +#define CSI2_PAD_SRC 1 +#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS) + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 + +struct ipu6_isys_csi2 { + struct ipu6_isys_subdev asd; + struct ipu6_isys_csi2_pdata *pdata; + struct ipu6_isys *isys; + struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS]; + + void __iomem *base; + u32 receiver_errors; + unsigned int nlanes; + unsigned int port; +}; + +struct ipu6_isys_csi2_timing { + u32 ctermen; + u32 csettle; + u32 dtermen; + u32 dsettle; +}; + +struct ipu6_csi2_error { + const char *error_string; + bool is_info_only; +}; + +#define ipu6_isys_subdev_to_csi2(__sd) \ + container_of(__sd, struct 
ipu6_isys_csi2, asd) + +#define to_ipu6_isys_csi2(__asd) container_of(__asd, struct ipu6_isys_csi2, asd) + +s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2); +int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, struct ipu6_isys *isys, + void __iomem *base, unsigned int index); +void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2); +void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream); +void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream); +void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2); +void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); +int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, + struct v4l2_mbus_frame_desc_entry *entry); + +#endif /* IPU6_ISYS_CSI2_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c new file mode 100644 index 0000000..1715275 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-platform-isys-csi2-reg.h" + +#define IPU6_DWC_DPHY_BASE(i) (0x238038 + 0x34 * (i)) +#define IPU6_DWC_DPHY_RSTZ 0x00 +#define IPU6_DWC_DPHY_SHUTDOWNZ 0x04 +#define IPU6_DWC_DPHY_HSFREQRANGE 0x08 +#define IPU6_DWC_DPHY_CFGCLKFREQRANGE 0x0c +#define IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE 0x10 +#define IPU6_DWC_DPHY_TEST_IFC_REQ 0x14 +#define IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION 0x18 +#define IPU6_DWC_DPHY_DFT_CTRL0 0x28 +#define IPU6_DWC_DPHY_DFT_CTRL1 0x2c +#define IPU6_DWC_DPHY_DFT_CTRL2 0x30 + +/* + * test IFC request definition: + * - req: 0 for read, 1 for write + * - 12 bits address + * - 8bits data (will ignore for read) + * --24----16------4-----0 + * --|-data-|-addr-|-req-| + 
*/ +#define IFC_REQ(req, addr, data) (FIELD_PREP(GENMASK(23, 16), data) | \ + FIELD_PREP(GENMASK(15, 4), addr) | \ + FIELD_PREP(GENMASK(1, 0), req)) + +#define TEST_IFC_REQ_READ 0 +#define TEST_IFC_REQ_WRITE 1 +#define TEST_IFC_REQ_RESET 2 + +#define TEST_IFC_ACCESS_MODE_FSM 0 +#define TEST_IFC_ACCESS_MODE_IFC_CTL 1 + +enum phy_fsm_state { + PHY_FSM_STATE_POWERON = 0, + PHY_FSM_STATE_BGPON = 1, + PHY_FSM_STATE_CAL_TYPE = 2, + PHY_FSM_STATE_BURNIN_CAL = 3, + PHY_FSM_STATE_TERMCAL = 4, + PHY_FSM_STATE_OFFSETCAL = 5, + PHY_FSM_STATE_OFFSET_LANE = 6, + PHY_FSM_STATE_IDLE = 7, + PHY_FSM_STATE_ULP = 8, + PHY_FSM_STATE_DDLTUNNING = 9, + PHY_FSM_STATE_SKEW_BACKWARD = 10, + PHY_FSM_STATE_INVALID, +}; + +static void dwc_dphy_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 data) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + + dev_dbg(dev, "write: reg 0x%lx = data 0x%x", base + addr - isys_base, + data); + writel(data, base + addr); +} + +static u32 dwc_dphy_read(struct ipu6_isys *isys, u32 phy_id, u32 addr) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + u32 data; + + data = readl(base + addr); + dev_dbg(dev, "read: reg 0x%lx = data 0x%x", base + addr - isys_base, + data); + + return data; +} + +static void dwc_dphy_write_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 data, u8 shift, u8 width) +{ + u32 temp; + u32 mask; + + mask = (1 << width) - 1; + temp = dwc_dphy_read(isys, phy_id, addr); + temp &= ~(mask << shift); + temp |= (data & mask) << shift; + dwc_dphy_write(isys, phy_id, addr, temp); +} + +static u32 __maybe_unused dwc_dphy_read_mask(struct ipu6_isys *isys, u32 phy_id, + u32 addr, u8 shift, u8 width) +{ + u32 val; + + val = dwc_dphy_read(isys, phy_id, addr) >> shift; + return val & ((1 << width) - 1); +} + +#define 
DWC_DPHY_TIMEOUT (5 * USEC_PER_SEC) +static int dwc_dphy_ifc_read(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 *val) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + void __iomem *reg; + u32 completion; + int ret; + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, + IFC_REQ(TEST_IFC_REQ_READ, addr, 0)); + reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; + ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), + 10, DWC_DPHY_TIMEOUT); + if (ret) { + dev_err(dev, "DWC ifc request read timeout\n"); + return ret; + } + + *val = completion >> 8 & 0xff; + *val = FIELD_GET(GENMASK(15, 8), completion); + dev_dbg(dev, "DWC ifc read 0x%x = 0x%x", addr, *val); + + return 0; +} + +static int dwc_dphy_ifc_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u32 data) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); + void __iomem *reg; + u32 completion; + int ret; + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, + IFC_REQ(TEST_IFC_REQ_WRITE, addr, data)); + completion = readl(base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION); + reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; + ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), + 10, DWC_DPHY_TIMEOUT); + if (ret) + dev_err(dev, "DWC ifc request write timeout\n"); + + return ret; +} + +static void dwc_dphy_ifc_write_mask(struct ipu6_isys *isys, u32 phy_id, + u32 addr, u32 data, u8 shift, u8 width) +{ + u32 temp, mask; + int ret; + + ret = dwc_dphy_ifc_read(isys, phy_id, addr, &temp); + if (ret) + return; + + mask = (1 << width) - 1; + temp &= ~(mask << shift); + temp |= (data & mask) << shift; + dwc_dphy_ifc_write(isys, phy_id, addr, temp); +} + +static u32 dwc_dphy_ifc_read_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, + u8 shift, u8 width) +{ 
+ int ret; + u32 val; + + ret = dwc_dphy_ifc_read(isys, phy_id, addr, &val); + if (ret) + return 0; + + return ((val >> shift) & ((1 << width) - 1)); +} + +static int dwc_dphy_pwr_up(struct ipu6_isys *isys, u32 phy_id) +{ + struct device *dev = &isys->adev->auxdev.dev; + u32 fsm_state; + int ret; + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 1); + usleep_range(10, 20); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 1); + + ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state, + (fsm_state == PHY_FSM_STATE_IDLE || + fsm_state == PHY_FSM_STATE_ULP), + 100, DWC_DPHY_TIMEOUT, false, isys, + phy_id, 0x1e, 0, 4); + + if (ret) + dev_err(dev, "Dphy %d power up failed, state 0x%x", phy_id, + fsm_state); + + return ret; +} + +struct dwc_dphy_freq_range { + u8 hsfreq; + u16 min; + u16 max; + u16 default_mbps; + u16 osc_freq_target; +}; + +#define DPHY_FREQ_RANGE_NUM (63) +#define DPHY_FREQ_RANGE_INVALID_INDEX (0xff) +static const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = { + {0x00, 80, 97, 80, 335}, + {0x10, 80, 107, 90, 335}, + {0x20, 84, 118, 100, 335}, + {0x30, 93, 128, 110, 335}, + {0x01, 103, 139, 120, 335}, + {0x11, 112, 149, 130, 335}, + {0x21, 122, 160, 140, 335}, + {0x31, 131, 170, 150, 335}, + {0x02, 141, 181, 160, 335}, + {0x12, 150, 191, 170, 335}, + {0x22, 160, 202, 180, 335}, + {0x32, 169, 212, 190, 335}, + {0x03, 183, 228, 205, 335}, + {0x13, 198, 244, 220, 335}, + {0x23, 212, 259, 235, 335}, + {0x33, 226, 275, 250, 335}, + {0x04, 250, 301, 275, 335}, + {0x14, 274, 328, 300, 335}, + {0x25, 297, 354, 325, 335}, + {0x35, 321, 380, 350, 335}, + {0x05, 369, 433, 400, 335}, + {0x16, 416, 485, 450, 335}, + {0x26, 464, 538, 500, 335}, + {0x37, 511, 590, 550, 335}, + {0x07, 559, 643, 600, 335}, + {0x18, 606, 695, 650, 335}, + {0x28, 654, 748, 700, 335}, + {0x39, 701, 800, 750, 335}, + {0x09, 749, 853, 800, 335}, + {0x19, 796, 905, 850, 335}, + {0x29, 844, 958, 900, 335}, + {0x3a, 891, 1010, 950, 335}, + {0x0a, 939, 1063, 1000, 
335}, + {0x1a, 986, 1115, 1050, 335}, + {0x2a, 1034, 1168, 1100, 335}, + {0x3b, 1081, 1220, 1150, 335}, + {0x0b, 1129, 1273, 1200, 335}, + {0x1b, 1176, 1325, 1250, 335}, + {0x2b, 1224, 1378, 1300, 335}, + {0x3c, 1271, 1430, 1350, 335}, + {0x0c, 1319, 1483, 1400, 335}, + {0x1c, 1366, 1535, 1450, 335}, + {0x2c, 1414, 1588, 1500, 335}, + {0x3d, 1461, 1640, 1550, 208}, + {0x0d, 1509, 1693, 1600, 214}, + {0x1d, 1556, 1745, 1650, 221}, + {0x2e, 1604, 1798, 1700, 228}, + {0x3e, 1651, 1850, 1750, 234}, + {0x0e, 1699, 1903, 1800, 241}, + {0x1e, 1746, 1955, 1850, 248}, + {0x2f, 1794, 2008, 1900, 255}, + {0x3f, 1841, 2060, 1950, 261}, + {0x0f, 1889, 2113, 2000, 268}, + {0x40, 1936, 2165, 2050, 275}, + {0x41, 1984, 2218, 2100, 281}, + {0x42, 2031, 2270, 2150, 288}, + {0x43, 2079, 2323, 2200, 294}, + {0x44, 2126, 2375, 2250, 302}, + {0x45, 2174, 2428, 2300, 308}, + {0x46, 2221, 2480, 2350, 315}, + {0x47, 2269, 2500, 2400, 321}, + {0x48, 2316, 2500, 2450, 328}, + {0x49, 2364, 2500, 2500, 335} +}; + +static u16 get_hsfreq_by_mbps(u32 mbps) +{ + unsigned int i = DPHY_FREQ_RANGE_NUM; + + while (i--) { + if (freqranges[i].default_mbps == mbps || + (mbps >= freqranges[i].min && mbps <= freqranges[i].max)) + return i; + } + + return DPHY_FREQ_RANGE_INVALID_INDEX; +} + +static int ipu6_isys_dwc_phy_config(struct ipu6_isys *isys, + u32 phy_id, u32 mbps) +{ + struct ipu6_bus_device *adev = isys->adev; + struct device *dev = &adev->auxdev.dev; + struct ipu6_device *isp = adev->isp; + u32 cfg_clk_freqrange; + u32 osc_freq_target; + u32 index; + + dev_dbg(dev, "config Dphy %u with %u mbps", phy_id, mbps); + + index = get_hsfreq_by_mbps(mbps); + if (index == DPHY_FREQ_RANGE_INVALID_INDEX) { + dev_err(dev, "link freq not found for mbps %u", mbps); + return -EINVAL; + } + + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_HSFREQRANGE, + freqranges[index].hsfreq, 0, 7); + + /* Force termination Calibration */ + if (isys->phy_termcal_val) { + dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 
1); + dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2); + dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, + isys->phy_termcal_val, 4, 4); + } + + /* + * Enable override to configure the DDL target oscillation + * frequency on bit 0 of register 0xe4 + */ + dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1); + /* + * configure registers 0xe2, 0xe3 with the + * appropriate DDL target oscillation frequency + * 0x1cc(460) + */ + osc_freq_target = freqranges[index].osc_freq_target; + dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2, + osc_freq_target & 0xff, 0, 8); + dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3, + (osc_freq_target >> 8) & 0xf, 0, 4); + + if (mbps < 1500) { + /* deskew_polarity_rw, for < 1.5Gbps */ + dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1); + } + + /* + * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4] + * (38.4 - 17) * 4 = ~85 (0x55) + */ + cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10; + dev_dbg(dev, "ref_clk = %u clk_freqrange = %u", + isp->buttress.ref_clk, cfg_clk_freqrange); + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_CFGCLKFREQRANGE, + cfg_clk_freqrange, 0, 8); + + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1); + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1); + + return 0; +} + +static void ipu6_isys_dwc_phy_aggr_setup(struct ipu6_isys *isys, u32 master, + u32 slave, u32 mbps) +{ + /* Config mastermacro */ + dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1); + + /* Config master PHY clk lane to drive long channel clk */ + dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1); + + /* Config both PHYs data lanes to get clk from long channel */ + dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1); + dwc_dphy_ifc_write_mask(isys, master, 0x708, 0x1, 5, 1); + 
dwc_dphy_ifc_write_mask(isys, slave, 0x708, 0x1, 5, 1); + + /* Config slave PHY clk lane to bypass long channel clk to DDR clk */ + dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1); + + /* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */ + dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2); + + /* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */ + dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1); + + /* Turn off slave PHY LP-RX clk lane */ + dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1); + dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5); +} + +#define PHY_E 4 +static int ipu6_isys_dwc_phy_powerup_ack(struct ipu6_isys *isys, u32 phy_id) +{ + struct device *dev = &isys->adev->auxdev.dev; + u32 rescal_done; + int ret; + + ret = dwc_dphy_pwr_up(isys, phy_id); + if (ret != 0) { + dev_err(dev, "Dphy %u power up failed(%d)", phy_id, ret); + return ret; + } + + /* reset forcerxmode */ + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 4, 1); + dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 8, 1); + + dev_dbg(dev, "Dphy %u is ready!", phy_id); + + if (phy_id != PHY_E || isys->phy_termcal_val) + return 0; + + usleep_range(100, 200); + rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1); + if (rescal_done) { + isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id, + 0x220, 2, 4); + dev_dbg(dev, "termcal done with value = %u", + isys->phy_termcal_val); + } + + return 0; +} + +static void ipu6_isys_dwc_phy_reset(struct ipu6_isys *isys, u32 phy_id) +{ + dev_dbg(&isys->adev->auxdev.dev, "Reset phy %u", phy_id); + + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 0); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 0); + dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE, + TEST_IFC_ACCESS_MODE_FSM); + dwc_dphy_write(isys, phy_id, 
IPU6_DWC_DPHY_TEST_IFC_REQ, + TEST_IFC_REQ_RESET); +} + +int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 phy_id, primary, secondary; + u32 nlanes, port, mbps; + s64 link_freq; + int ret; + + port = cfg->port; + + if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { + dev_warn(dev, "invalid port ID %d\n", port); + return -EINVAL; + } + + nlanes = cfg->nlanes; + /* only port 0, 2 and 4 support 4 lanes */ + if (nlanes == 4 && port % 2) { + dev_err(dev, "invalid csi-port %u with %u lanes\n", port, + nlanes); + return -EINVAL; + } + + link_freq = ipu6_isys_csi2_get_link_freq(&isys->csi2[port]); + if (link_freq < 0) { + dev_err(dev, "get link freq failed(%lld).\n", link_freq); + return link_freq; + } + + mbps = div_u64(link_freq, 500000); + + phy_id = port; + primary = port & ~1; + secondary = primary + 1; + if (on) { + if (nlanes == 4) { + dev_dbg(dev, "config phy %u and %u in aggr mode\n", + primary, secondary); + + ipu6_isys_dwc_phy_reset(isys, primary); + ipu6_isys_dwc_phy_reset(isys, secondary); + ipu6_isys_dwc_phy_aggr_setup(isys, primary, + secondary, mbps); + + ret = ipu6_isys_dwc_phy_config(isys, primary, mbps); + if (ret) + return ret; + ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps); + if (ret) + return ret; + + ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary); + if (ret) + return ret; + + ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary); + return ret; + } + + dev_dbg(dev, "config phy %u with %u lanes in non-aggr mode\n", + phy_id, nlanes); + + ipu6_isys_dwc_phy_reset(isys, phy_id); + ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps); + if (ret) + return ret; + + ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id); + return ret; + } + + if (nlanes == 4) { + dev_dbg(dev, "Power down phy %u and phy %u for port %u\n", + primary, secondary, 
port); + ipu6_isys_dwc_phy_reset(isys, secondary); + ipu6_isys_dwc_phy_reset(isys, primary); + + return 0; + } + + dev_dbg(dev, "Powerdown phy %u with %u lanes\n", phy_id, nlanes); + + ipu6_isys_dwc_phy_reset(isys, phy_id); + + return 0; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c new file mode 100644 index 0000000..c804291 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-platform-isys-csi2-reg.h" + +/* only use BB0, BB2, BB4, and BB6 on PHY0 */ +#define IPU6SE_ISYS_PHY_BB_NUM 4 +#define IPU6SE_ISYS_PHY_0_BASE 0x10000 + +#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) +#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) +#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) +#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) + +/* + * use port_cfg to configure that which data lanes used + * +---------+ +------+ +-----+ + * | port0 x4<-----| | | | + * | | | port | | | + * | port1 x2<-----| | | | + * | | | <-| PHY | + * | port2 x4<-----| | | | + * | | |config| | | + * | port3 x2<-----| | | | + * +---------+ +------+ +-----+ + */ +static const unsigned int csi2_port_cfg[][3] = { + {0, 0, 0x1f}, /* no link */ + {4, 0, 0x10}, /* x4 + x4 config */ + {2, 0, 0x12}, /* x2 + x2 config */ + {1, 0, 0x13}, /* x1 + x1 config */ + {2, 1, 0x15}, /* x2x1 + x2x1 config */ + {1, 1, 0x16}, /* x1x1 + x1x1 config */ + {2, 2, 0x18}, /* x2x2 + x2x2 config */ + {1, 2, 0x19} /* x1x2 + x1x2 config */ +}; + +/* port, nlanes, bbindex, portcfg */ +static const unsigned int phy_port_cfg[][4] = { + /* sip0 */ + {0, 1, 0, 0x15}, + {0, 2, 0, 0x15}, + {0, 4, 0, 0x15}, + {0, 4, 2, 0x22}, + /* sip1 */ + {2, 1, 4, 0x15}, + {2, 2, 4, 0x15}, + {2, 
4, 4, 0x15}, + {2, 4, 6, 0x22} +}; + +static void ipu6_isys_csi2_phy_config_by_port(struct ipu6_isys *isys, + unsigned int port, + unsigned int nlanes) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *base = isys->adev->isp->base; + unsigned int bbnum; + u32 val, reg, i; + + dev_dbg(dev, "port %u with %u lanes", port, nlanes); + + /* only support <1.5Gbps */ + for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) { + /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); + val = readl(base + reg); + val |= FIELD_PREP(GENMASK(6, 1), 13); + writel(val, base + reg); + + /* cphy_rx_control1.en_crc1 = 1 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i); + val = readl(base + reg); + val |= BIT(31); + writel(val, base + reg); + + /* dphy_cfg.reserved = 1, .lden_from_dll_ovrd_0 = 1 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i); + val = readl(base + reg); + val |= BIT(25) | BIT(26); + writel(val, base + reg); + + /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */ + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); + val = readl(base + reg); + val |= BIT(0); + writel(val, base + reg); + } + + /* Front end config, use minimal channel loss */ + for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) { + if (phy_port_cfg[i][0] == port && + phy_port_cfg[i][1] == nlanes) { + bbnum = phy_port_cfg[i][2] / 2; + reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum); + val = readl(base + reg); + val |= phy_port_cfg[i][3]; + writel(val, base + reg); + } + } +} + +static void ipu6_isys_csi2_rx_control(struct ipu6_isys *isys) +{ + void __iomem *base = isys->adev->isp->base; + u32 val, reg; + + reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL); + + reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL); + + reg = 
CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL); + + reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL; + val = readl(base + reg); + val |= BIT(0); + writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL); +} + +static int ipu6_isys_csi2_set_port_cfg(struct ipu6_isys *isys, + unsigned int port, unsigned int nlanes) +{ + struct device *dev = &isys->adev->auxdev.dev; + unsigned int sip = port / 2; + unsigned int index; + + switch (nlanes) { + case 1: + index = 5; + break; + case 2: + index = 6; + break; + case 4: + index = 1; + break; + default: + dev_err(dev, "lanes nr %u is unsupported\n", nlanes); + return -EINVAL; + } + + dev_dbg(dev, "port config for port %u with %u lanes\n", port, nlanes); + + writel(csi2_port_cfg[index][2], + isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)); + + return 0; +} + +static void +ipu6_isys_csi2_set_timing(struct ipu6_isys *isys, + const struct ipu6_isys_csi2_timing *timing, + unsigned int port, unsigned int nlanes) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *reg; + u32 port_base; + u32 i; + + port_base = (port % 2) ? 
CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) : + CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port); + + dev_dbg(dev, "set timing for port %u with %u lanes\n", port, nlanes); + + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE; + + writel(timing->ctermen, reg); + + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE; + writel(timing->csettle, reg); + + for (i = 0; i < nlanes; i++) { + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i); + writel(timing->dtermen, reg); + + reg = isys->pdata->base + port_base; + reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i); + writel(timing->dsettle, reg); + } +} + +#define DPHY_TIMER_INCR 0x28 +int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + int ret = 0; + u32 nlanes; + u32 port; + + if (!on) + return 0; + + port = cfg->port; + nlanes = cfg->nlanes; + + if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { + dev_warn(dev, "invalid port ID %d\n", port); + return -EINVAL; + } + + ipu6_isys_csi2_phy_config_by_port(isys, port, nlanes); + + writel(DPHY_TIMER_INCR, + isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR); + + /* set port cfg and rx timing */ + ipu6_isys_csi2_set_timing(isys, timing, port, nlanes); + + ret = ipu6_isys_csi2_set_port_cfg(isys, port, nlanes); + if (ret) + return ret; + + ipu6_isys_csi2_rx_control(isys); + + return 0; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c new file mode 100644 index 0000000..71aa500 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c @@ -0,0 +1,720 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include 
+#include +#include +#include +#include + +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-platform-isys-csi2-reg.h" + +#define CSI_REG_HUB_GPREG_PHY_CTL(id) (CSI_REG_BASE + 0x18008 + (id) * 0x8) +#define CSI_REG_HUB_GPREG_PHY_CTL_RESET BIT(4) +#define CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN BIT(0) +#define CSI_REG_HUB_GPREG_PHY_STATUS(id) (CSI_REG_BASE + 0x1800c + (id) * 0x8) +#define CSI_REG_HUB_GPREG_PHY_POWER_ACK BIT(0) +#define CSI_REG_HUB_GPREG_PHY_READY BIT(4) + +#define MCD_PHY_POWER_STATUS_TIMEOUT (200 * USEC_PER_MSEC) + +/* + * bridge to phy in buttress reg map, each phy has 16 kbytes + * only 2 phys for TGL U and Y + */ +#define IPU6_ISYS_MCD_PHY_BASE(i) (0x10000 + (i) * 0x4000) + +/* + * There are 2 MCD DPHY instances on TGL and 1 MCD DPHY instance on ADL. + * Each MCD PHY has 12-lanes which has 8 data lanes and 4 clock lanes. + * CSI port 1, 3 (5, 7) can support max 2 data lanes. + * CSI port 0, 2 (4, 6) can support max 4 data lanes. + * PHY configurations are PPI based instead of port. 
+ * Left: + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | PPI | PPI5 | PPI4 | PPI3 | PPI2 | PPI1 | PPI0 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x4 | unused | D3 | D2 | C0 | D0 | D1 | + * |---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x2 | C1 | D0 | D1 | C0 | D0 | D1 | + * ----------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x1 | C1 | D0 | unused | C0 | D0 | D1 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x1 | C1 | D0 | unused | C0 | D0 | unused | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x2 | C1 | D0 | D1 | C0 | D0 | unused | + * +---------+---------+---------+---------+--------+---------+----------+ + * + * Right: + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | PPI | PPI6 | PPI7 | PPI8 | PPI9 | PPI10 | PPI11 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x4 | D1 | D0 | C2 | D2 | D3 | unused | + * |---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x2 | D1 | D0 | C2 | D1 | D0 | C3 | + * ----------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x2x1 | D1 | D0 | C2 | unused | D0 | C3 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x1 | unused | D0 | C2 | unused | D0 | C3 | + * +---------+---------+---------+---------+--------+---------+----------+ + * | | | | | | | | + * | x1x2 | unused | D0 | C2 | D1 | D0 | C3 | + * +---------+---------+---------+---------+--------+---------+----------+ + * + * ppi mapping per phy : + * + * x4 + x4: + * Left : port0 - PPI range {0, 1, 2, 3, 4} + 
* Right: port2 - PPI range {6, 7, 8, 9, 10} + * + * x4 + x2x2: + * Left: port0 - PPI range {0, 1, 2, 3, 4} + * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} + * + * x2x2 + x4: + * Left: port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} + * Right: port2 - PPI range {6, 7, 8, 9, 10} + * + * x2x2 + x2x2: + * Left : port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} + * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} + */ + +struct phy_reg { + u32 reg; + u32 val; +}; + +static const struct phy_reg common_init_regs[] = { + /* for TGL-U, use 0x80000000 */ + {0x00000040, 0x80000000}, + {0x00000044, 0x00a80880}, + {0x00000044, 0x00b80880}, + {0x00000010, 0x0000078c}, + {0x00000344, 0x2f4401e2}, + {0x00000544, 0x924401e2}, + {0x00000744, 0x594401e2}, + {0x00000944, 0x624401e2}, + {0x00000b44, 0xfc4401e2}, + {0x00000d44, 0xc54401e2}, + {0x00000f44, 0x034401e2}, + {0x00001144, 0x8f4401e2}, + {0x00001344, 0x754401e2}, + {0x00001544, 0xe94401e2}, + {0x00001744, 0xcb4401e2}, + {0x00001944, 0xfa4401e2} +}; + +static const struct phy_reg x1_port0_config_regs[] = { + {0x00000694, 0xc80060fa}, + {0x00000680, 0x3d4f78ea}, + {0x00000690, 0x10a0140b}, + {0x000006a8, 0xdf04010a}, + {0x00000700, 0x57050060}, + {0x00000710, 0x0030001c}, + {0x00000738, 0x5f004444}, + {0x0000073c, 0x78464204}, + {0x00000748, 0x7821f940}, + {0x0000074c, 0xb2000433}, + {0x00000494, 0xfe6030fa}, + {0x00000480, 0x29ef5ed0}, + {0x00000490, 0x10a0540b}, + {0x000004a8, 0x7a01010a}, + {0x00000500, 0xef053460}, + {0x00000510, 0xe030101c}, + {0x00000538, 0xdf808444}, + {0x0000053c, 0xc8422204}, + {0x00000540, 0x0180088c}, + {0x00000574, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x1_port1_config_regs[] = { + {0x00000c94, 0xc80060fa}, + {0x00000c80, 0xcf47abea}, + {0x00000c90, 0x10a0840b}, + {0x00000ca8, 0xdf04010a}, + {0x00000d00, 0x57050060}, + {0x00000d10, 0x0030001c}, + {0x00000d38, 0x5f004444}, + {0x00000d3c, 0x78464204}, + 
{0x00000d48, 0x7821f940}, + {0x00000d4c, 0xb2000433}, + {0x00000a94, 0xc91030fa}, + {0x00000a80, 0x5a166ed0}, + {0x00000a90, 0x10a0540b}, + {0x00000aa8, 0x5d060100}, + {0x00000b00, 0xef053460}, + {0x00000b10, 0xa030101c}, + {0x00000b38, 0xdf808444}, + {0x00000b3c, 0xc8422204}, + {0x00000b40, 0x0180088c}, + {0x00000b74, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x1_port2_config_regs[] = { + {0x00001294, 0x28f000fa}, + {0x00001280, 0x08130cea}, + {0x00001290, 0x10a0140b}, + {0x000012a8, 0xd704010a}, + {0x00001300, 0x8d050060}, + {0x00001310, 0x0030001c}, + {0x00001338, 0xdf008444}, + {0x0000133c, 0x78422204}, + {0x00001348, 0x7821f940}, + {0x0000134c, 0x5a000433}, + {0x00001094, 0x2d20b0fa}, + {0x00001080, 0xade75dd0}, + {0x00001090, 0x10a0540b}, + {0x000010a8, 0xb101010a}, + {0x00001100, 0x33053460}, + {0x00001110, 0x0030101c}, + {0x00001138, 0xdf808444}, + {0x0000113c, 0xc8422204}, + {0x00001140, 0x8180088c}, + {0x00001174, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x1_port3_config_regs[] = { + {0x00001894, 0xc80060fa}, + {0x00001880, 0x0f90fd6a}, + {0x00001890, 0x10a0840b}, + {0x000018a8, 0xdf04010a}, + {0x00001900, 0x57050060}, + {0x00001910, 0x0030001c}, + {0x00001938, 0x5f004444}, + {0x0000193c, 0x78464204}, + {0x00001948, 0x7821f940}, + {0x0000194c, 0xb2000433}, + {0x00001694, 0x3050d0fa}, + {0x00001680, 0x0ef6d050}, + {0x00001690, 0x10a0540b}, + {0x000016a8, 0xe301010a}, + {0x00001700, 0x69053460}, + {0x00001710, 0xa030101c}, + {0x00001738, 0xdf808444}, + {0x0000173c, 0xc8422204}, + {0x00001740, 0x0180088c}, + {0x00001774, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port0_config_regs[] = { + {0x00000694, 0xc80060fa}, + {0x00000680, 0x3d4f78ea}, + {0x00000690, 0x10a0140b}, + {0x000006a8, 0xdf04010a}, + {0x00000700, 0x57050060}, + {0x00000710, 0x0030001c}, + {0x00000738, 0x5f004444}, + {0x0000073c, 0x78464204}, + {0x00000748, 0x7821f940}, + {0x0000074c, 
0xb2000433}, + {0x00000494, 0xc80060fa}, + {0x00000480, 0x29ef5ed8}, + {0x00000490, 0x10a0540b}, + {0x000004a8, 0x7a01010a}, + {0x00000500, 0xef053460}, + {0x00000510, 0xe030101c}, + {0x00000538, 0xdf808444}, + {0x0000053c, 0xc8422204}, + {0x00000540, 0x0180088c}, + {0x00000574, 0x00000000}, + {0x00000294, 0xc80060fa}, + {0x00000280, 0xcb45b950}, + {0x00000290, 0x10a0540b}, + {0x000002a8, 0x8c01010a}, + {0x00000300, 0xef053460}, + {0x00000310, 0x8030101c}, + {0x00000338, 0x41808444}, + {0x0000033c, 0x32422204}, + {0x00000340, 0x0180088c}, + {0x00000374, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port1_config_regs[] = { + {0x00000c94, 0xc80060fa}, + {0x00000c80, 0xcf47abea}, + {0x00000c90, 0x10a0840b}, + {0x00000ca8, 0xdf04010a}, + {0x00000d00, 0x57050060}, + {0x00000d10, 0x0030001c}, + {0x00000d38, 0x5f004444}, + {0x00000d3c, 0x78464204}, + {0x00000d48, 0x7821f940}, + {0x00000d4c, 0xb2000433}, + {0x00000a94, 0xc80060fa}, + {0x00000a80, 0x5a166ed8}, + {0x00000a90, 0x10a0540b}, + {0x00000aa8, 0x7a01010a}, + {0x00000b00, 0xef053460}, + {0x00000b10, 0xa030101c}, + {0x00000b38, 0xdf808444}, + {0x00000b3c, 0xc8422204}, + {0x00000b40, 0x0180088c}, + {0x00000b74, 0x00000000}, + {0x00000894, 0xc80060fa}, + {0x00000880, 0x4d4f21d0}, + {0x00000890, 0x10a0540b}, + {0x000008a8, 0x5601010a}, + {0x00000900, 0xef053460}, + {0x00000910, 0x8030101c}, + {0x00000938, 0xdf808444}, + {0x0000093c, 0xc8422204}, + {0x00000940, 0x0180088c}, + {0x00000974, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port2_config_regs[] = { + {0x00001294, 0xc80060fa}, + {0x00001280, 0x08130cea}, + {0x00001290, 0x10a0140b}, + {0x000012a8, 0xd704010a}, + {0x00001300, 0x8d050060}, + {0x00001310, 0x0030001c}, + {0x00001338, 0xdf008444}, + {0x0000133c, 0x78422204}, + {0x00001348, 0x7821f940}, + {0x0000134c, 0x5a000433}, + {0x00001094, 0xc80060fa}, + {0x00001080, 0xade75dd8}, + {0x00001090, 0x10a0540b}, + {0x000010a8, 0xb101010a}, + {0x00001100, 
0x33053460}, + {0x00001110, 0x0030101c}, + {0x00001138, 0xdf808444}, + {0x0000113c, 0xc8422204}, + {0x00001140, 0x8180088c}, + {0x00001174, 0x00000000}, + {0x00000e94, 0xc80060fa}, + {0x00000e80, 0x0fbf16d0}, + {0x00000e90, 0x10a0540b}, + {0x00000ea8, 0x7a01010a}, + {0x00000f00, 0xf5053460}, + {0x00000f10, 0xc030101c}, + {0x00000f38, 0xdf808444}, + {0x00000f3c, 0xc8422204}, + {0x00000f40, 0x8180088c}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x2_port3_config_regs[] = { + {0x00001894, 0xc80060fa}, + {0x00001880, 0x0f90fd6a}, + {0x00001890, 0x10a0840b}, + {0x000018a8, 0xdf04010a}, + {0x00001900, 0x57050060}, + {0x00001910, 0x0030001c}, + {0x00001938, 0x5f004444}, + {0x0000193c, 0x78464204}, + {0x00001948, 0x7821f940}, + {0x0000194c, 0xb2000433}, + {0x00001694, 0xc80060fa}, + {0x00001680, 0x0ef6d058}, + {0x00001690, 0x10a0540b}, + {0x000016a8, 0x7a01010a}, + {0x00001700, 0x69053460}, + {0x00001710, 0xa030101c}, + {0x00001738, 0xdf808444}, + {0x0000173c, 0xc8422204}, + {0x00001740, 0x0180088c}, + {0x00001774, 0x00000000}, + {0x00001494, 0xc80060fa}, + {0x00001480, 0xf9d34bd0}, + {0x00001490, 0x10a0540b}, + {0x000014a8, 0x7a01010a}, + {0x00001500, 0x1b053460}, + {0x00001510, 0x0030101c}, + {0x00001538, 0xdf808444}, + {0x0000153c, 0xc8422204}, + {0x00001540, 0x8180088c}, + {0x00001574, 0x00000000}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port0_config_regs[] = { + {0x00000694, 0xc80060fa}, + {0x00000680, 0x3d4f78fa}, + {0x00000690, 0x10a0140b}, + {0x000006a8, 0xdf04010a}, + {0x00000700, 0x57050060}, + {0x00000710, 0x0030001c}, + {0x00000738, 0x5f004444}, + {0x0000073c, 0x78464204}, + {0x00000748, 0x7821f940}, + {0x0000074c, 0xb2000433}, + {0x00000494, 0xfe6030fa}, + {0x00000480, 0x29ef5ed8}, + {0x00000490, 0x10a0540b}, + {0x000004a8, 0x7a01010a}, + {0x00000500, 0xef053460}, + {0x00000510, 0xe030101c}, + {0x00000538, 0xdf808444}, + {0x0000053c, 0xc8422204}, + {0x00000540, 0x0180088c}, + {0x00000574, 0x00000004}, + {0x00000294, 
0x23e030fa}, + {0x00000280, 0xcb45b950}, + {0x00000290, 0x10a0540b}, + {0x000002a8, 0x8c01010a}, + {0x00000300, 0xef053460}, + {0x00000310, 0x8030101c}, + {0x00000338, 0x41808444}, + {0x0000033c, 0x32422204}, + {0x00000340, 0x0180088c}, + {0x00000374, 0x00000004}, + {0x00000894, 0x5620b0fa}, + {0x00000880, 0x4d4f21dc}, + {0x00000890, 0x10a0540b}, + {0x000008a8, 0x5601010a}, + {0x00000900, 0xef053460}, + {0x00000910, 0x8030101c}, + {0x00000938, 0xdf808444}, + {0x0000093c, 0xc8422204}, + {0x00000940, 0x0180088c}, + {0x00000974, 0x00000004}, + {0x00000a94, 0xc91030fa}, + {0x00000a80, 0x5a166ecc}, + {0x00000a90, 0x10a0540b}, + {0x00000aa8, 0x5d01010a}, + {0x00000b00, 0xef053460}, + {0x00000b10, 0xa030101c}, + {0x00000b38, 0xdf808444}, + {0x00000b3c, 0xc8422204}, + {0x00000b40, 0x0180088c}, + {0x00000b74, 0x00000004}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port1_config_regs[] = { + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port2_config_regs[] = { + {0x00001294, 0x28f000fa}, + {0x00001280, 0x08130cfa}, + {0x00001290, 0x10c0140b}, + {0x000012a8, 0xd704010a}, + {0x00001300, 0x8d050060}, + {0x00001310, 0x0030001c}, + {0x00001338, 0xdf008444}, + {0x0000133c, 0x78422204}, + {0x00001348, 0x7821f940}, + {0x0000134c, 0x5a000433}, + {0x00001094, 0x2d20b0fa}, + {0x00001080, 0xade75dd8}, + {0x00001090, 0x10a0540b}, + {0x000010a8, 0xb101010a}, + {0x00001100, 0x33053460}, + {0x00001110, 0x0030101c}, + {0x00001138, 0xdf808444}, + {0x0000113c, 0xc8422204}, + {0x00001140, 0x8180088c}, + {0x00001174, 0x00000004}, + {0x00000e94, 0xd308d0fa}, + {0x00000e80, 0x0fbf16d0}, + {0x00000e90, 0x10a0540b}, + {0x00000ea8, 0x2c01010a}, + {0x00000f00, 0xf5053460}, + {0x00000f10, 0xc030101c}, + {0x00000f38, 0xdf808444}, + {0x00000f3c, 0xc8422204}, + {0x00000f40, 0x8180088c}, + {0x00000f74, 0x00000004}, + {0x00001494, 0x136850fa}, + {0x00001480, 0xf9d34bdc}, + {0x00001490, 0x10a0540b}, + {0x000014a8, 0x5a01010a}, + {0x00001500, 0x1b053460}, + {0x00001510, 
0x0030101c}, + {0x00001538, 0xdf808444}, + {0x0000153c, 0xc8422204}, + {0x00001540, 0x8180088c}, + {0x00001574, 0x00000004}, + {0x00001694, 0x3050d0fa}, + {0x00001680, 0x0ef6d04c}, + {0x00001690, 0x10a0540b}, + {0x000016a8, 0xe301010a}, + {0x00001700, 0x69053460}, + {0x00001710, 0xa030101c}, + {0x00001738, 0xdf808444}, + {0x0000173c, 0xc8422204}, + {0x00001740, 0x0180088c}, + {0x00001774, 0x00000004}, + {0x00000000, 0x00000000} +}; + +static const struct phy_reg x4_port3_config_regs[] = { + {0x00000000, 0x00000000} +}; + +static const struct phy_reg *x1_config_regs[4] = { + x1_port0_config_regs, + x1_port1_config_regs, + x1_port2_config_regs, + x1_port3_config_regs +}; + +static const struct phy_reg *x2_config_regs[4] = { + x2_port0_config_regs, + x2_port1_config_regs, + x2_port2_config_regs, + x2_port3_config_regs +}; + +static const struct phy_reg *x4_config_regs[4] = { + x4_port0_config_regs, + x4_port1_config_regs, + x4_port2_config_regs, + x4_port3_config_regs +}; + +static const struct phy_reg **config_regs[3] = { + x1_config_regs, + x2_config_regs, + x4_config_regs +}; + +static int ipu6_isys_mcd_phy_powerup_ack(struct ipu6_isys *isys, u8 id) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 val; + int ret; + + val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + val |= CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN; + writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + + ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), + val, val & CSI_REG_HUB_GPREG_PHY_POWER_ACK, + 200, MCD_PHY_POWER_STATUS_TIMEOUT); + if (ret) + dev_err(dev, "PHY%d powerup ack timeout", id); + + return ret; +} + +static int ipu6_isys_mcd_phy_powerdown_ack(struct ipu6_isys *isys, u8 id) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 val; + int ret; + + writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + ret = readl_poll_timeout(isys_base + 
CSI_REG_HUB_GPREG_PHY_STATUS(id), + val, !(val & CSI_REG_HUB_GPREG_PHY_POWER_ACK), + 200, MCD_PHY_POWER_STATUS_TIMEOUT); + if (ret) + dev_err(dev, "PHY%d powerdown ack timeout", id); + + return ret; +} + +static void ipu6_isys_mcd_phy_reset(struct ipu6_isys *isys, u8 id, bool assert) +{ + void __iomem *isys_base = isys->pdata->base; + u32 val; + + val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); + if (assert) + val |= CSI_REG_HUB_GPREG_PHY_CTL_RESET; + else + val &= ~(CSI_REG_HUB_GPREG_PHY_CTL_RESET); + + writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); +} + +static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u32 val; + int ret; + + ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), + val, val & CSI_REG_HUB_GPREG_PHY_READY, + 200, MCD_PHY_POWER_STATUS_TIMEOUT); + if (ret) + dev_err(dev, "PHY%d ready ack timeout", id); + + return ret; +} + +static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) +{ + struct ipu6_bus_device *adev = isys->adev; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct v4l2_async_connection *asc; + void __iomem *phy_base; + unsigned int i; + u8 phy_id; + + list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { + s_asd = container_of(asc, struct sensor_async_sd, asc); + phy_id = s_asd->csi2.port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); + + for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) + writel(common_init_regs[i].val, + phy_base + common_init_regs[i].reg); + } +} + +static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) +{ + int phy_port; + int ret; + + if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1)) + return -EINVAL; + + /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C4 */ + /* normalize driver port number */ + phy_port = cfg->port % 4; + + /* swap 
port number only for A and B */ + if (phy_port == 0) + phy_port = 1; + else if (phy_port == 1) + phy_port = 0; + + ret = phy_port; + + /* check validity per lane configuration */ + if (cfg->nlanes == 4 && !(phy_port == 0 || phy_port == 2)) + ret = -EINVAL; + else if ((cfg->nlanes == 2 || cfg->nlanes == 1) && + !(phy_port >= 0 && phy_port <= 3)) + ret = -EINVAL; + + return ret; +} + +static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_bus_device *adev = isys->adev; + const struct phy_reg **phy_config_regs; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct ipu6_isys_csi2_config cfg; + struct v4l2_async_connection *asc; + int phy_port, phy_id; + unsigned int i; + void __iomem *phy_base; + + list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { + s_asd = container_of(asc, struct sensor_async_sd, asc); + cfg.port = s_asd->csi2.port; + cfg.nlanes = s_asd->csi2.nlanes; + phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); + if (phy_port < 0) { + dev_err(dev, "invalid port %d for lane %d", cfg.port, + cfg.nlanes); + return -ENXIO; + } + + phy_id = cfg.port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); + dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, + cfg.nlanes); + + phy_config_regs = config_regs[cfg.nlanes / 2]; + cfg.port = phy_port; + for (i = 0; phy_config_regs[cfg.port][i].reg; i++) + writel(phy_config_regs[cfg.port][i].val, + phy_base + phy_config_regs[cfg.port][i].reg); + } + + return 0; +} + +#define CSI_MCD_PHY_NUM 2 +static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; + +int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on) +{ + struct device *dev = &isys->adev->auxdev.dev; + void __iomem *isys_base = isys->pdata->base; + u8 port, phy_id; + refcount_t *ref; + int ret; + + port = 
cfg->port; + phy_id = port / 4; + + ref = &phy_power_ref_count[phy_id]; + + dev_dbg(dev, "for phy %d port %d, lanes: %d\n", phy_id, port, + cfg->nlanes); + + if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { + dev_warn(dev, "invalid port ID %d\n", port); + return -EINVAL; + } + + if (on) { + if (refcount_read(ref)) { + dev_dbg(dev, "for phy %d is already UP", phy_id); + refcount_inc(ref); + return 0; + } + + ret = ipu6_isys_mcd_phy_powerup_ack(isys, phy_id); + if (ret) + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 0); + ipu6_isys_mcd_phy_common_init(isys); + + ret = ipu6_isys_mcd_phy_config(isys); + if (ret) + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 1); + ret = ipu6_isys_mcd_phy_ready(isys, phy_id); + if (ret) + return ret; + + refcount_set(ref, 1); + return 0; + } + + if (!refcount_dec_and_test(ref)) + return 0; + + return ipu6_isys_mcd_phy_powerdown_ack(isys, phy_id); +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c new file mode 100644 index 0000000..bbb66b5 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-dma.h" +#include "ipu6-fw-isys.h" +#include "ipu6-isys.h" +#include "ipu6-isys-video.h" + +static int ipu6_isys_buf_init(struct vb2_buffer *vb) +{ + struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); + struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + int ret; + + ret = ipu6_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); + if (ret) + return ret; + + ivb->dma_addr = sg_dma_address(sg->sgl); + + 
return 0; +} + +static void ipu6_isys_buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); + struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + + ivb->dma_addr = 0; + ipu6_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); +} + +static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, + unsigned int *num_planes, unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + u32 size = ipu6_isys_get_data_size(av); + + /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ + if (!*num_planes) { + sizes[0] = size; + } else if (sizes[0] < size) { + dev_dbg(dev, "%s: queue setup: size %u < %u\n", + av->vdev.name, sizes[0], size); + return -EINVAL; + } + + *num_planes = 1; + + return 0; +} + +static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + u32 bytesperline = ipu6_isys_get_bytes_per_line(av); + u32 height = ipu6_isys_get_frame_height(av); + u32 size = ipu6_isys_get_data_size(av); + + dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n", + av->vdev.name, size, vb2_plane_size(vb, 0)); + + if (size > vb2_plane_size(vb, 0)) + return -EINVAL; + + vb2_set_plane_payload(vb, 0, bytesperline * height); + + return 0; +} + +/* + * Queue a buffer list back to incoming or active queues. The buffers + * are removed from the buffer list. 
+ */ +void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state) +{ + struct ipu6_isys_buffer *ib, *ib_safe; + unsigned long flags; + bool first = true; + + if (!bl) + return; + + WARN_ON_ONCE(!bl->nbufs); + WARN_ON_ONCE(op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE && + op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING); + + list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { + struct ipu6_isys_video *av; + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + struct ipu6_isys_queue *aq = + vb2_queue_to_isys_queue(vb->vb2_queue); + struct device *dev; + + av = ipu6_isys_queue_to_video(aq); + dev = &av->isys->adev->auxdev.dev; + spin_lock_irqsave(&aq->lock, flags); + list_del(&ib->head); + if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE) + list_add(&ib->head, &aq->active); + else if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING) + list_add_tail(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_SET_STATE) + vb2_buffer_done(vb, state); + + if (first) { + dev_dbg(dev, + "queue buf list %p flags %lx, s %d, %d bufs\n", + bl, op_flags, state, bl->nbufs); + first = false; + } + + bl->nbufs--; + } + + WARN_ON(bl->nbufs); +} + +/* + * flush_firmware_streamon_fail() - Flush in cases where requests may + * have been queued to firmware and the *firmware streamon fails for a + * reason or another. 
+ */ +static void flush_firmware_streamon_fail(struct ipu6_isys_stream *stream) +{ + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_queue *aq; + unsigned long flags; + + lockdep_assert_held(&stream->mutex); + + list_for_each_entry(aq, &stream->queues, node) { + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_buffer *ib, *ib_safe; + + spin_lock_irqsave(&aq->lock, flags); + list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { + struct vb2_buffer *vb = + ipu6_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + if (av->streaming) { + dev_dbg(dev, + "%s: queue buffer %u back to incoming\n", + av->vdev.name, vb->index); + /* Queue already streaming, return to driver. */ + list_add(&ib->head, &aq->incoming); + continue; + } + /* Queue not yet streaming, return to user. */ + dev_dbg(dev, "%s: return %u back to videobuf2\n", + av->vdev.name, vb->index); + vb2_buffer_done(ipu6_isys_buffer_to_vb2_buffer(ib), + VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&aq->lock, flags); + } +} + +/* + * Attempt obtaining a buffer list from the incoming queues, a list of buffers + * that contains one entry from each video buffer queue. If a buffer can't be + * obtained from every queue, the buffers are returned back to the queue. 
+ */ +static int buffer_list_get(struct ipu6_isys_stream *stream, + struct ipu6_isys_buffer_list *bl) +{ + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_queue *aq; + unsigned long flags; + unsigned long buf_flag = IPU6_ISYS_BUFFER_LIST_FL_INCOMING; + + bl->nbufs = 0; + INIT_LIST_HEAD(&bl->head); + + list_for_each_entry(aq, &stream->queues, node) { + struct ipu6_isys_buffer *ib; + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->incoming)) { + spin_unlock_irqrestore(&aq->lock, flags); + if (!list_empty(&bl->head)) + ipu6_isys_buffer_list_queue(bl, buf_flag, 0); + return -ENODATA; + } + + ib = list_last_entry(&aq->incoming, + struct ipu6_isys_buffer, head); + + dev_dbg(dev, "buffer: %s: buffer %u\n", + ipu6_isys_queue_to_video(aq)->vdev.name, + ipu6_isys_buffer_to_vb2_buffer(ib)->index); + list_del(&ib->head); + list_add(&ib->head, &bl->head); + spin_unlock_irqrestore(&aq->lock, flags); + + bl->nbufs++; + } + + dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); + + return 0; +} + +static void +ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb, + struct ipu6_fw_isys_frame_buff_set_abi *set) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + + set->output_pins[aq->fw_output].addr = ivb->dma_addr; + set->output_pins[aq->fw_output].out_buf_id = vb->index + 1; +} + +/* + * Convert a buffer list to a isys fw ABI framebuffer set. The + * buffer list is not modified. 
+ */ +#define IPU6_ISYS_FRAME_NUM_THRESHOLD (30) +void +ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, + struct ipu6_isys_stream *stream, + struct ipu6_isys_buffer_list *bl) +{ + struct ipu6_isys_buffer *ib; + + WARN_ON(!bl->nbufs); + + set->send_irq_sof = 1; + set->send_resp_sof = 1; + set->send_irq_eof = 0; + set->send_resp_eof = 0; + + if (stream->streaming) + set->send_irq_capture_ack = 0; + else + set->send_irq_capture_ack = 1; + set->send_irq_capture_done = 0; + + set->send_resp_capture_ack = 1; + set->send_resp_capture_done = 1; + if (atomic_read(&stream->sequence) >= IPU6_ISYS_FRAME_NUM_THRESHOLD) { + set->send_resp_capture_ack = 0; + set->send_resp_capture_done = 0; + } + + list_for_each_entry(ib, &bl->head, head) { + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + + ipu6_isys_buf_to_fw_frame_buf_pin(vb, set); + } +} + +/* Start streaming for real. The buffer list must be available. */ +static int ipu6_isys_stream_start(struct ipu6_isys_video *av, + struct ipu6_isys_buffer_list *bl, bool error) +{ + struct ipu6_isys_stream *stream = av->stream; + struct device *dev = &stream->isys->adev->auxdev.dev; + struct ipu6_isys_buffer_list __bl; + int ret; + + mutex_lock(&stream->isys->stream_mutex); + ret = ipu6_isys_video_set_streaming(av, 1, bl); + mutex_unlock(&stream->isys->stream_mutex); + if (ret) + goto out_requeue; + + stream->streaming = 1; + + bl = &__bl; + + do { + struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; + struct isys_fw_msgs *msg; + u16 send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; + + ret = buffer_list_get(stream, bl); + if (ret < 0) + break; + + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) + return -ENOMEM; + + buf = &msg->fw_msg.frame; + ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); + ipu6_fw_isys_dump_frame_buff_set(dev, buf, + stream->nr_output_pins); + ipu6_isys_buffer_list_queue(bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, + 0); + ret = ipu6_fw_isys_complex_cmd(stream->isys, + 
stream->stream_handle, buf, + msg->dma_addr, sizeof(*buf), + send_type); + } while (!WARN_ON(ret)); + + return 0; + +out_requeue: + if (bl && bl->nbufs) + ipu6_isys_buffer_list_queue(bl, + IPU6_ISYS_BUFFER_LIST_FL_INCOMING | + (error ? + IPU6_ISYS_BUFFER_LIST_FL_SET_STATE : + 0), error ? VB2_BUF_STATE_ERROR : + VB2_BUF_STATE_QUEUED); + flush_firmware_streamon_fail(stream); + + return ret; +} + +static void buf_queue(struct vb2_buffer *vb) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_video_buffer *ivb = + vb2_buffer_to_ipu6_isys_video_buffer(vvb); + struct ipu6_isys_buffer *ib = &ivb->ib; + struct device *dev = &av->isys->adev->auxdev.dev; + struct media_pipeline *media_pipe = + media_entity_pipeline(&av->vdev.entity); + struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; + struct ipu6_isys_stream *stream = av->stream; + struct ipu6_isys_buffer_list bl; + struct isys_fw_msgs *msg; + unsigned long flags; + dma_addr_t dma; + int ret; + + dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name); + + dma = ivb->dma_addr; + dev_dbg(dev, "iova: iova %pad\n", &dma); + + spin_lock_irqsave(&aq->lock, flags); + list_add(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (!media_pipe || !vb->vb2_queue->start_streaming_called) { + dev_dbg(dev, "media pipeline is not ready for %s\n", + av->vdev.name); + return; + } + + mutex_lock(&stream->mutex); + + if (stream->nr_streaming != stream->nr_queues) { + dev_dbg(dev, "not streaming yet, adding to incoming\n"); + goto out; + } + + /* + * We just put one buffer to the incoming list of this queue + * (above). Let's see whether all queues in the pipeline would + * have a buffer. 
+ */ + ret = buffer_list_get(stream, &bl); + if (ret < 0) { + dev_dbg(dev, "No buffers available\n"); + goto out; + } + + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) { + ret = -ENOMEM; + goto out; + } + + buf = &msg->fw_msg.frame; + ipu6_isys_buf_to_fw_frame_buf(buf, stream, &bl); + ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); + + if (!stream->streaming) { + ret = ipu6_isys_stream_start(av, &bl, true); + if (ret) + dev_err(dev, "stream start failed.\n"); + goto out; + } + + /* + * We must queue the buffers in the buffer list to the + * appropriate video buffer queues BEFORE passing them to the + * firmware since we could get a buffer event back before we + * have queued them ourselves to the active queue. + */ + ipu6_isys_buffer_list_queue(&bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + + ret = ipu6_fw_isys_complex_cmd(stream->isys, stream->stream_handle, + buf, msg->dma_addr, sizeof(*buf), + IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + if (ret < 0) + dev_err(dev, "send stream capture failed\n"); + +out: + mutex_unlock(&stream->mutex); +} + +static int ipu6_isys_link_fmt_validate(struct ipu6_isys_queue *aq) +{ + struct v4l2_mbus_framefmt format; + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + struct media_pad *remote_pad = + media_pad_remote_pad_first(av->vdev.entity.pads); + struct v4l2_subdev *sd; + u32 r_stream, code; + int ret; + + if (!remote_pad) + return -ENOTCONN; + + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, remote_pad->index); + + ret = ipu6_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream, + &format); + + if (ret) { + dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n", + sd->entity.name, remote_pad->index, r_stream); + return ret; + } + + if (format.width != ipu6_isys_get_frame_width(av) || + format.height != ipu6_isys_get_frame_height(av)) { + dev_dbg(dev, "wrong width or height %ux%u 
(%ux%u expected)\n", + ipu6_isys_get_frame_width(av), + ipu6_isys_get_frame_height(av), format.width, + format.height); + return -EINVAL; + } + + code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; + if (format.code != code) { + dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n", + code, format.code); + return -EINVAL; + } + + return 0; +} + +static void return_buffers(struct ipu6_isys_queue *aq, + enum vb2_buffer_state state) +{ + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_buffer *ib; + bool need_reset = false; + unsigned long flags; + + spin_lock_irqsave(&aq->lock, flags); + while (!list_empty(&aq->incoming)) { + struct vb2_buffer *vb; + + ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, + head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); + } + + /* + * Something went wrong (FW crash / HW hang / not all buffers + * returned from isys) if there are still buffers queued in active + * queue. We have to clean up places a bit. 
+ */ + while (!list_empty(&aq->active)) { + struct vb2_buffer *vb; + + ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, + head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); + need_reset = true; + } + + spin_unlock_irqrestore(&aq->lock, flags); + + if (need_reset) { + mutex_lock(&av->isys->mutex); + av->isys->need_reset = true; + mutex_unlock(&av->isys->mutex); + } +} + +static void ipu6_isys_stream_cleanup(struct ipu6_isys_video *av) +{ + video_device_pipeline_stop(&av->vdev); + ipu6_isys_put_stream(av->stream); + av->stream = NULL; +} + +static int start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + struct ipu6_isys_buffer_list __bl, *bl = NULL; + struct ipu6_isys_stream *stream; + struct media_entity *source_entity = NULL; + int nr_queues, ret; + + dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n", + av->vdev.name, ipu6_isys_get_frame_width(av), + ipu6_isys_get_frame_height(av), pfmt->css_pixelformat); + + ret = ipu6_isys_setup_video(av, &source_entity, &nr_queues); + if (ret < 0) { + dev_dbg(dev, "failed to setup video\n"); + goto out_return_buffers; + } + + ret = ipu6_isys_link_fmt_validate(aq); + if (ret) { + dev_dbg(dev, + "%s: link format validation failed (%d)\n", + av->vdev.name, ret); + goto out_pipeline_stop; + } + + ret = ipu6_isys_fw_open(av->isys); + if (ret) + goto out_pipeline_stop; + + stream = av->stream; + mutex_lock(&stream->mutex); + if (!stream->nr_streaming) { + ret = ipu6_isys_video_prepare_stream(av, source_entity, + nr_queues); + if (ret) + goto out_fw_close; + } + + stream->nr_streaming++; + 
dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming, + stream->nr_queues); + + list_add(&aq->node, &stream->queues); + ipu6_isys_configure_stream_watermark(av, true); + ipu6_isys_update_stream_watermark(av, true); + + if (stream->nr_streaming != stream->nr_queues) + goto out; + + bl = &__bl; + ret = buffer_list_get(stream, bl); + if (ret < 0) { + dev_warn(dev, "no buffer available, DRIVER BUG?\n"); + goto out; + } + + ret = ipu6_isys_stream_start(av, bl, false); + if (ret) + goto out_stream_start; + +out: + mutex_unlock(&stream->mutex); + + return 0; + +out_stream_start: + ipu6_isys_update_stream_watermark(av, false); + list_del(&aq->node); + stream->nr_streaming--; + +out_fw_close: + mutex_unlock(&stream->mutex); + ipu6_isys_fw_close(av->isys); + +out_pipeline_stop: + ipu6_isys_stream_cleanup(av); + +out_return_buffers: + return_buffers(aq, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void stop_streaming(struct vb2_queue *q) +{ + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_stream *stream = av->stream; + + mutex_lock(&stream->mutex); + + ipu6_isys_update_stream_watermark(av, false); + + mutex_lock(&av->isys->stream_mutex); + if (stream->nr_streaming == stream->nr_queues && stream->streaming) + ipu6_isys_video_set_streaming(av, 0, NULL); + mutex_unlock(&av->isys->stream_mutex); + + stream->nr_streaming--; + list_del(&aq->node); + stream->streaming = 0; + mutex_unlock(&stream->mutex); + + ipu6_isys_stream_cleanup(av); + + return_buffers(aq, VB2_BUF_STATE_ERROR); + + ipu6_isys_fw_close(av->isys); +} + +static unsigned int +get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream, + struct ipu6_fw_isys_resp_info_abi *info) +{ + u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0]; + struct ipu6_isys *isys = stream->isys; + struct device *dev = &isys->adev->auxdev.dev; + unsigned int i; + + /* + * The timestamp is invalid as no TSC in some FPGA platform, + 
* so get the sequence from pipeline directly in this case. + */ + if (time == 0) + return atomic_read(&stream->sequence) - 1; + + for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) + if (time == stream->seq[i].timestamp) { + dev_dbg(dev, "sof: using seq nr %u for ts %llu\n", + stream->seq[i].sequence, time); + return stream->seq[i].sequence; + } + + for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) + dev_dbg(dev, "sof: sequence %u, timestamp value %llu\n", + stream->seq[i].sequence, stream->seq[i].timestamp); + + return 0; +} + +static u64 get_sof_ns_delta(struct ipu6_isys_video *av, + struct ipu6_fw_isys_resp_info_abi *info) +{ + struct ipu6_bus_device *adev = av->isys->adev; + struct ipu6_device *isp = adev->isp; + u64 delta, tsc_now; + + ipu6_buttress_tsc_read(isp, &tsc_now); + if (!tsc_now) + return 0; + + delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]); + + return ipu6_buttress_tsc_ticks_to_ns(delta, isp); +} + +void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, + struct ipu6_fw_isys_resp_info_abi *info) +{ + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct device *dev = &av->isys->adev->auxdev.dev; + struct ipu6_isys_stream *stream = av->stream; + u64 ns; + u32 sequence; + + ns = ktime_get_ns() - get_sof_ns_delta(av, info); + sequence = get_sof_sequence_by_timestamp(stream, info); + + vbuf->vb2_buf.timestamp = ns; + vbuf->sequence = sequence; + + dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", + av->vdev.name, ktime_get_ns(), sequence); + dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index, + vbuf->vb2_buf.timestamp); +} + +void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) +{ + struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); + + if (atomic_read(&ib->str2mmio_flag)) { + 
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + /* + * Operation on buffer is ended with error and will be reported + * to the userspace when it is de-queued + */ + atomic_set(&ib->str2mmio_flag, 0); + } else { + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } +} + +void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, + struct ipu6_fw_isys_resp_info_abi *info) +{ + struct ipu6_isys_queue *aq = stream->output_pins[info->pin_id].aq; + struct ipu6_isys *isys = stream->isys; + struct device *dev = &isys->adev->auxdev.dev; + struct ipu6_isys_buffer *ib; + struct vb2_buffer *vb; + unsigned long flags; + bool first = true; + struct vb2_v4l2_buffer *buf; + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->active)) { + spin_unlock_irqrestore(&aq->lock, flags); + dev_err(dev, "active queue empty\n"); + return; + } + + list_for_each_entry_reverse(ib, &aq->active, head) { + struct ipu6_isys_video_buffer *ivb; + struct vb2_v4l2_buffer *vvb; + dma_addr_t addr; + + vb = ipu6_isys_buffer_to_vb2_buffer(ib); + vvb = to_vb2_v4l2_buffer(vb); + ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb); + addr = ivb->dma_addr; + + if (info->pin.addr != addr) { + if (first) + dev_err(dev, "Unexpected buffer address %pad\n", + &addr); + first = false; + continue; + } + + if (info->error_info.error == + IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) { + /* + * Check for error message: + * 'IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' + */ + atomic_set(&ib->str2mmio_flag, 1); + } + dev_dbg(dev, "buffer: found buffer %pad\n", &addr); + + buf = to_vb2_v4l2_buffer(vb); + buf->field = V4L2_FIELD_NONE; + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu6_isys_buf_calc_sequence_time(ib, info); + + ipu6_isys_queue_buf_done(ib); + + return; + } + + dev_err(dev, "Failed to find a matching video buffer"); + + spin_unlock_irqrestore(&aq->lock, flags); +} + +static const struct vb2_ops ipu6_isys_queue_ops = { + .queue_setup = ipu6_isys_queue_setup, + .wait_prepare = 
vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_init = ipu6_isys_buf_init, + .buf_prepare = ipu6_isys_buf_prepare, + .buf_cleanup = ipu6_isys_buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .buf_queue = buf_queue, +}; + +int ipu6_isys_queue_init(struct ipu6_isys_queue *aq) +{ + struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys; + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_bus_device *adev = isys->adev; + int ret; + + /* no support for userptr */ + if (!aq->vbq.io_modes) + aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF; + + aq->vbq.drv_priv = isys; + aq->vbq.ops = &ipu6_isys_queue_ops; + aq->vbq.lock = &av->mutex; + aq->vbq.mem_ops = &vb2_dma_sg_memops; + aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + aq->vbq.min_queued_buffers = 1; + aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + + ret = vb2_queue_init(&aq->vbq); + if (ret) + return ret; + + aq->dev = &adev->auxdev.dev; + aq->vbq.dev = &adev->isp->pdev->dev; + spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->active); + INIT_LIST_HEAD(&aq->incoming); + + return 0; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h new file mode 100644 index 0000000..fe8fc79 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_QUEUE_H +#define IPU6_ISYS_QUEUE_H + +#include +#include +#include +#include +#include + +#include + +#include "ipu6-fw-isys.h" +#include "ipu6-isys-video.h" + +struct ipu6_isys_stream; + +struct ipu6_isys_queue { + struct vb2_queue vbq; + struct list_head node; + struct device *dev; + /* + * @lock: serialise access to queued and pre_streamon_queued + */ + spinlock_t lock; + struct list_head active; + struct list_head incoming; + unsigned int fw_output; +}; + +struct 
ipu6_isys_buffer { + struct list_head head; + atomic_t str2mmio_flag; +}; + +struct ipu6_isys_video_buffer { + struct vb2_v4l2_buffer vb_v4l2; + struct ipu6_isys_buffer ib; + dma_addr_t dma_addr; +}; + +#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) +#define IPU6_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) +#define IPU6_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) + +struct ipu6_isys_buffer_list { + struct list_head head; + unsigned int nbufs; +}; + +#define vb2_queue_to_isys_queue(__vb2) \ + container_of(__vb2, struct ipu6_isys_queue, vbq) + +#define ipu6_isys_to_isys_video_buffer(__ib) \ + container_of(__ib, struct ipu6_isys_video_buffer, ib) + +#define vb2_buffer_to_ipu6_isys_video_buffer(__vvb) \ + container_of(__vvb, struct ipu6_isys_video_buffer, vb_v4l2) + +#define ipu6_isys_buffer_to_vb2_buffer(__ib) \ + (&ipu6_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) + +void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state); +void +ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, + struct ipu6_isys_stream *stream, + struct ipu6_isys_buffer_list *bl); +void +ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, + struct ipu6_fw_isys_resp_info_abi *info); +void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib); +void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, + struct ipu6_fw_isys_resp_info_abi *info); +int ipu6_isys_queue_init(struct ipu6_isys_queue *aq); +#endif /* IPU6_ISYS_QUEUE_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c new file mode 100644 index 0000000..0a06de5 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-isys.h" 
+#include "ipu6-isys-subdev.h" + +unsigned int ipu6_isys_mbus_code_to_bpp(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_META_24: + return 24; + case MEDIA_BUS_FMT_RGB565_1X16: + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + case MEDIA_BUS_FMT_META_16: + return 16; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_META_12: + return 12; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_META_10: + return 10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_META_8: + return 8; + default: + WARN_ON(1); + return 8; + } +} + +unsigned int ipu6_isys_mbus_code_to_mipi(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB565_1X16: + return MIPI_CSI2_DT_RGB565; + case MEDIA_BUS_FMT_RGB888_1X24: + return MIPI_CSI2_DT_RGB888; + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + return MIPI_CSI2_DT_YUV422_8B; + case MEDIA_BUS_FMT_SBGGR16_1X16: + case MEDIA_BUS_FMT_SGBRG16_1X16: + case MEDIA_BUS_FMT_SGRBG16_1X16: + case MEDIA_BUS_FMT_SRGGB16_1X16: + return MIPI_CSI2_DT_RAW16; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return MIPI_CSI2_DT_RAW12; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return MIPI_CSI2_DT_RAW10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + return MIPI_CSI2_DT_RAW8; + default: + /* return unavailable MIPI data type - 0x3f */ + WARN_ON(1); + return 0x3f; + } +} + +bool 
ipu6_isys_is_bayer_format(u32 code) +{ + switch (ipu6_isys_mbus_code_to_mipi(code)) { + case MIPI_CSI2_DT_RAW8: + case MIPI_CSI2_DT_RAW10: + case MIPI_CSI2_DT_RAW12: + case MIPI_CSI2_DT_RAW14: + case MIPI_CSI2_DT_RAW16: + case MIPI_CSI2_DT_RAW20: + case MIPI_CSI2_DT_RAW24: + case MIPI_CSI2_DT_RAW28: + return true; + default: + return false; + } +} + +u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y) +{ + static const u32 code_map[] = { + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SRGGB16_1X16, + MEDIA_BUS_FMT_SGRBG16_1X16, + MEDIA_BUS_FMT_SGBRG16_1X16, + MEDIA_BUS_FMT_SBGGR16_1X16, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(code_map); i++) + if (code_map[i] == code) + break; + + if (WARN_ON(i == ARRAY_SIZE(code_map))) + return code; + + return code_map[i ^ (((y & 1) << 1) | (x & 1))]; +} + +int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct v4l2_mbus_framefmt *fmt; + struct v4l2_rect *crop; + u32 code = asd->supported_codes[0]; + u32 other_pad, other_stream; + unsigned int i; + int ret; + + /* No transcoding, source and sink formats must match. 
*/ + if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) && + sd->entity.num_pads > 1) + return v4l2_subdev_get_fmt(sd, state, format); + + format->format.width = clamp(format->format.width, IPU6_ISYS_MIN_WIDTH, + IPU6_ISYS_MAX_WIDTH); + format->format.height = clamp(format->format.height, + IPU6_ISYS_MIN_HEIGHT, + IPU6_ISYS_MAX_HEIGHT); + + for (i = 0; asd->supported_codes[i]; i++) { + if (asd->supported_codes[i] == format->format.code) { + code = asd->supported_codes[i]; + break; + } + } + format->format.code = code; + format->format.field = V4L2_FIELD_NONE; + + /* Store the format and propagate it to the source pad. */ + fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); + if (!fmt) + return -EINVAL; + + *fmt = format->format; + + if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK)) + return 0; + + /* propagate format to following source pad */ + fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad, + format->stream); + if (!fmt) + return -EINVAL; + + *fmt = format->format; + + ret = v4l2_subdev_routing_find_opposite_end(&state->routing, + format->pad, + format->stream, + &other_pad, + &other_stream); + if (ret) + return -EINVAL; + + crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream); + /* reset crop */ + crop->left = 0; + crop->top = 0; + crop->width = fmt->width; + crop->height = fmt->height; + + return 0; +} + +int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + const u32 *supported_codes = asd->supported_codes; + u32 index; + + for (index = 0; supported_codes[index]; index++) { + if (index == code->index) { + code->code = supported_codes[index]; + return 0; + } + } + + return -EINVAL; +} + +static int subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_krouting *routing) +{ + static const 
struct v4l2_mbus_framefmt format = { + .width = 4096, + .height = 3072, + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .field = V4L2_FIELD_NONE, + }; + int ret; + + ret = v4l2_subdev_routing_validate(sd, routing, + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1); + if (ret) + return ret; + + return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format); +} + +int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_mbus_framefmt *format) +{ + struct v4l2_mbus_framefmt *fmt; + struct v4l2_subdev_state *state; + + if (!sd || !format) + return -EINVAL; + + state = v4l2_subdev_lock_and_get_active_state(sd); + fmt = v4l2_subdev_state_get_format(state, pad, stream); + if (fmt) + *format = *fmt; + v4l2_subdev_unlock_state(state); + + return fmt ? 0 : -EINVAL; +} + +int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_rect *crop) +{ + struct v4l2_subdev_state *state; + struct v4l2_rect *rect; + + if (!sd || !crop) + return -EINVAL; + + state = v4l2_subdev_lock_and_get_active_state(sd); + rect = v4l2_subdev_state_get_crop(state, pad, stream); + if (rect) + *crop = *rect; + v4l2_subdev_unlock_state(state); + + return rect ? 
0 : -EINVAL; +} + +u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev_route *routes; + unsigned int i; + u32 source_stream = 0; + + state = v4l2_subdev_lock_and_get_active_state(sd); + if (!state) + return 0; + + routes = state->routing.routes; + for (i = 0; i < state->routing.num_routes; i++) { + if (routes[i].source_pad == pad) { + source_stream = routes[i].source_stream; + break; + } + } + + v4l2_subdev_unlock_state(state); + + return source_stream; +} + +static int ipu6_isys_subdev_init_state(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state) +{ + struct v4l2_subdev_route route = { + .sink_pad = 0, + .sink_stream = 0, + .source_pad = 1, + .source_stream = 0, + .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, + }; + struct v4l2_subdev_krouting routing = { + .num_routes = 1, + .routes = &route, + }; + + return subdev_set_routing(sd, state, &routing); +} + +int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *routing) +{ + return subdev_set_routing(sd, state, routing); +} + +static const struct v4l2_subdev_internal_ops ipu6_isys_subdev_internal_ops = { + .init_state = ipu6_isys_subdev_init_state, +}; + +int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, + const struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_sink_pads, + unsigned int num_source_pads) +{ + unsigned int num_pads = num_sink_pads + num_source_pads; + unsigned int i; + int ret; + + v4l2_subdev_init(&asd->sd, ops); + + asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS | + V4L2_SUBDEV_FL_STREAMS; + asd->sd.owner = THIS_MODULE; + asd->sd.dev = &asd->isys->adev->auxdev.dev; + asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + asd->sd.internal_ops = &ipu6_isys_subdev_internal_ops; + + asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, + 
sizeof(*asd->pad), GFP_KERNEL); + if (!asd->pad) + return -ENOMEM; + + for (i = 0; i < num_sink_pads; i++) + asd->pad[i].flags = MEDIA_PAD_FL_SINK | + MEDIA_PAD_FL_MUST_CONNECT; + + for (i = num_sink_pads; i < num_pads; i++) + asd->pad[i].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); + if (ret) + return ret; + + if (asd->ctrl_init) { + ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); + if (ret) + goto out_media_entity_cleanup; + + asd->ctrl_init(&asd->sd); + if (asd->ctrl_handler.error) { + ret = asd->ctrl_handler.error; + goto out_v4l2_ctrl_handler_free; + } + + asd->sd.ctrl_handler = &asd->ctrl_handler; + } + + asd->source = -1; + + return 0; + +out_v4l2_ctrl_handler_free: + v4l2_ctrl_handler_free(&asd->ctrl_handler); + +out_media_entity_cleanup: + media_entity_cleanup(&asd->sd.entity); + + return ret; +} + +void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd) +{ + media_entity_cleanup(&asd->sd.entity); + v4l2_ctrl_handler_free(&asd->ctrl_handler); +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h new file mode 100644 index 0000000..9ef8d95 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_SUBDEV_H +#define IPU6_ISYS_SUBDEV_H + +#include + +#include +#include +#include + +struct ipu6_isys; + +struct ipu6_isys_subdev { + struct v4l2_subdev sd; + struct ipu6_isys *isys; + u32 const *supported_codes; + struct media_pad *pad; + struct v4l2_ctrl_handler ctrl_handler; + void (*ctrl_init)(struct v4l2_subdev *sd); + int source; /* SSI stream source; -1 if unset */ +}; + +#define to_ipu6_isys_subdev(__sd) \ + container_of(__sd, struct ipu6_isys_subdev, sd) + +unsigned int ipu6_isys_mbus_code_to_bpp(u32 code); +unsigned int ipu6_isys_mbus_code_to_mipi(u32 code); +bool 
ipu6_isys_is_bayer_format(u32 code); +u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y); + +int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *fmt); +int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum + *code); +int ipu6_isys_subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); +u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad); +int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_mbus_framefmt *format); +int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, + struct v4l2_rect *crop); +int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *routing); +int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, + const struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_sink_pads, + unsigned int num_source_pads); +void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd); +#endif /* IPU6_ISYS_SUBDEV_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c new file mode 100644 index 0000000..48388c0 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c @@ -0,0 +1,1392 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-cpd.h" +#include "ipu6-fw-isys.h" +#include "ipu6-isys.h" +#include 
"ipu6-isys-csi2.h" +#include "ipu6-isys-queue.h" +#include "ipu6-isys-video.h" +#include "ipu6-platform-regs.h" + +const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = { + { V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, + { V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, + { V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, + { V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10, 
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, + { V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, + IPU6_FW_ISYS_FRAME_FORMAT_UYVY}, + { V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, + IPU6_FW_ISYS_FRAME_FORMAT_YUYV}, + { V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, + IPU6_FW_ISYS_FRAME_FORMAT_RGB565 }, + { V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, + IPU6_FW_ISYS_FRAME_FORMAT_RGBA888 }, + { V4L2_META_FMT_GENERIC_8, 8, 8, MEDIA_BUS_FMT_META_8, + IPU6_FW_ISYS_FRAME_FORMAT_RAW8, true }, + { V4L2_META_FMT_GENERIC_CSI2_10, 10, 10, MEDIA_BUS_FMT_META_10, + IPU6_FW_ISYS_FRAME_FORMAT_RAW10, true }, + { V4L2_META_FMT_GENERIC_CSI2_12, 12, 12, MEDIA_BUS_FMT_META_12, + IPU6_FW_ISYS_FRAME_FORMAT_RAW12, true }, + { V4L2_META_FMT_GENERIC_CSI2_16, 16, 16, MEDIA_BUS_FMT_META_16, + IPU6_FW_ISYS_FRAME_FORMAT_RAW16, true }, +}; + +static int video_open(struct file *file) +{ + struct ipu6_isys_video *av = video_drvdata(file); + struct ipu6_isys *isys = av->isys; + struct ipu6_bus_device *adev = isys->adev; + + mutex_lock(&isys->mutex); + if (isys->need_reset) { + mutex_unlock(&isys->mutex); + dev_warn(&adev->auxdev.dev, "isys power cycle required\n"); + return -EIO; + } + mutex_unlock(&isys->mutex); + + return v4l2_fh_open(file); +} + +const struct ipu6_isys_pixelformat * +ipu6_isys_get_isys_format(u32 pixelformat, u32 type) +{ + const struct ipu6_isys_pixelformat *default_pfmt = NULL; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { + const struct ipu6_isys_pixelformat *pfmt = &ipu6_isys_pfmts[i]; + + if (type && ((!pfmt->is_meta && + type != V4L2_BUF_TYPE_VIDEO_CAPTURE) || + (pfmt->is_meta && + type != V4L2_BUF_TYPE_META_CAPTURE))) + continue; + + if (!default_pfmt) + default_pfmt = pfmt; + + if (pfmt->pixelformat != pixelformat) + continue; + + return pfmt; + } + + return default_pfmt; +} + +static int 
ipu6_isys_vidioc_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + strscpy(cap->driver, IPU6_ISYS_NAME, sizeof(cap->driver)); + strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); + + return 0; +} + +static int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f) +{ + unsigned int i, num_found; + + for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { + if ((ipu6_isys_pfmts[i].is_meta && + f->type != V4L2_BUF_TYPE_META_CAPTURE) || + (!ipu6_isys_pfmts[i].is_meta && + f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) + continue; + + if (f->mbus_code && f->mbus_code != ipu6_isys_pfmts[i].code) + continue; + + if (num_found < f->index) { + num_found++; + continue; + } + + f->flags = 0; + f->pixelformat = ipu6_isys_pfmts[i].pixelformat; + + return 0; + } + + return -EINVAL; +} + +static int ipu6_isys_vidioc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + unsigned int i; + + if (fsize->index > 0) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { + if (fsize->pixel_format != ipu6_isys_pfmts[i].pixelformat) + continue; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = IPU6_ISYS_MIN_WIDTH; + fsize->stepwise.max_width = IPU6_ISYS_MAX_WIDTH; + fsize->stepwise.min_height = IPU6_ISYS_MIN_HEIGHT; + fsize->stepwise.max_height = IPU6_ISYS_MAX_HEIGHT; + fsize->stepwise.step_width = 2; + fsize->stepwise.step_height = 2; + + return 0; + } + + return -EINVAL; +} + +static int ipu6_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + f->fmt.pix = av->pix_fmt; + + return 0; +} + +static int ipu6_isys_vidioc_g_fmt_meta_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + f->fmt.meta = av->meta_fmt; + + return 0; +} + +static void 
ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type, + u32 *format, u32 *width, u32 *height, + u32 *bytesperline, u32 *sizeimage) +{ + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(*format, type); + + *format = pfmt->pixelformat; + *width = clamp(*width, IPU6_ISYS_MIN_WIDTH, IPU6_ISYS_MAX_WIDTH); + *height = clamp(*height, IPU6_ISYS_MIN_HEIGHT, IPU6_ISYS_MAX_HEIGHT); + + if (pfmt->bpp != pfmt->bpp_packed) + *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE); + else + *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE); + + *bytesperline = ALIGN(*bytesperline, av->isys->line_align); + + /* + * (height + 1) * bytesperline due to a hardware issue: the DMA unit + * is a power of two, and a line should be transferred as few units + * as possible. The result is that up to line length more data than + * the image size may be transferred to memory after the image. + * Another limitation is the GDA allocation unit size. For low + * resolution it gives a bigger number. Use larger one to avoid + * memory corruption. 
+ */ + *sizeimage = *bytesperline * *height + + max(*bytesperline, + av->isys->pdata->ipdata->isys_dma_overshoot); +} + +static void __ipu6_isys_vidioc_try_fmt_vid_cap(struct ipu6_isys_video *av, + struct v4l2_format *f) +{ + ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat, + &f->fmt.pix.width, &f->fmt.pix.height, + &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage); + + f->fmt.pix.field = V4L2_FIELD_NONE; + f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; + f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT; + f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT; +} + +static int ipu6_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + if (vb2_is_busy(&av->aq.vbq)) + return -EBUSY; + + __ipu6_isys_vidioc_try_fmt_vid_cap(av, f); + + return 0; +} + +static int __ipu6_isys_vidioc_try_fmt_meta_cap(struct ipu6_isys_video *av, + struct v4l2_format *f) +{ + ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.meta.dataformat, + &f->fmt.meta.width, &f->fmt.meta.height, + &f->fmt.meta.bytesperline, + &f->fmt.meta.buffersize); + + return 0; +} + +static int ipu6_isys_vidioc_try_fmt_meta_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + __ipu6_isys_vidioc_try_fmt_meta_cap(av, f); + + return 0; +} + +static int ipu6_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + ipu6_isys_vidioc_try_fmt_vid_cap(file, fh, f); + av->pix_fmt = f->fmt.pix; + + return 0; +} + +static int ipu6_isys_vidioc_s_fmt_meta_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu6_isys_video *av = video_drvdata(file); + + if (vb2_is_busy(&av->aq.vbq)) + return -EBUSY; + + ipu6_isys_vidioc_try_fmt_meta_cap(file, fh, f); + av->meta_fmt = f->fmt.meta; + + return 0; +} + +static int 
ipu6_isys_vidioc_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p) +{ + struct ipu6_isys_video *av = video_drvdata(file); + int ret; + + av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type); + av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type); + + ret = vb2_queue_change_type(&av->aq.vbq, p->type); + if (ret) + return ret; + + return vb2_ioctl_reqbufs(file, priv, p); +} + +static int ipu6_isys_vidioc_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p) +{ + struct ipu6_isys_video *av = video_drvdata(file); + int ret; + + av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type); + av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type); + + ret = vb2_queue_change_type(&av->aq.vbq, p->format.type); + if (ret) + return ret; + + return vb2_ioctl_create_bufs(file, priv, p); +} + +static int link_validate(struct media_link *link) +{ + struct ipu6_isys_video *av = + container_of(link->sink, struct ipu6_isys_video, pad); + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_subdev_state *s_state; + struct v4l2_subdev *s_sd; + struct v4l2_mbus_framefmt *s_fmt; + struct media_pad *s_pad; + u32 s_stream, code; + int ret = -EPIPE; + + if (!link->source->entity) + return ret; + + s_sd = media_entity_to_v4l2_subdev(link->source->entity); + s_state = v4l2_subdev_get_unlocked_active_state(s_sd); + if (!s_state) + return ret; + + dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n", + link->source->entity->name, link->source->index, + link->sink->entity->name); + + s_pad = media_pad_remote_pad_first(&av->pad); + s_stream = ipu6_isys_get_src_stream_by_src_pad(s_sd, s_pad->index); + + v4l2_subdev_lock_state(s_state); + + s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream); + if (!s_fmt) { + dev_err(dev, "failed to get source pad format\n"); + goto unlock; + } + + code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; + + if (s_fmt->width != ipu6_isys_get_frame_width(av) 
|| + s_fmt->height != ipu6_isys_get_frame_height(av) || + s_fmt->code != code) { + dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n", + s_fmt->width, s_fmt->height, s_fmt->code, + ipu6_isys_get_frame_width(av), + ipu6_isys_get_frame_height(av), code); + goto unlock; + } + + v4l2_subdev_unlock_state(s_state); + + return 0; +unlock: + v4l2_subdev_unlock_state(s_state); + + return ret; +} + +static void get_stream_opened(struct ipu6_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->streams_lock, flags); + av->isys->stream_opened++; + spin_unlock_irqrestore(&av->isys->streams_lock, flags); +} + +static void put_stream_opened(struct ipu6_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->streams_lock, flags); + av->isys->stream_opened--; + spin_unlock_irqrestore(&av->isys->streams_lock, flags); +} + +static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, + struct ipu6_fw_isys_stream_cfg_data_abi *cfg) +{ + struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad); + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity); + struct ipu6_fw_isys_input_pin_info_abi *input_pin; + struct ipu6_fw_isys_output_pin_info_abi *output_pin; + struct ipu6_isys_stream *stream = av->stream; + struct ipu6_isys_queue *aq = &av->aq; + struct v4l2_mbus_framefmt fmt; + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + struct v4l2_rect v4l2_crop; + struct ipu6_isys *isys = av->isys; + struct device *dev = &isys->adev->auxdev.dev; + int input_pins = cfg->nof_input_pins++; + int output_pins; + u32 src_stream; + int ret; + + src_stream = ipu6_isys_get_src_stream_by_src_pad(sd, src_pad->index); + ret = ipu6_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream, + &fmt); + if (ret < 0) { + dev_err(dev, "can't get stream format (%d)\n", ret); + return ret; + } + + ret = ipu6_isys_get_stream_pad_crop(sd, src_pad->index, src_stream, + &v4l2_crop); + if (ret < 0) { + 
dev_err(dev, "can't get stream crop (%d)\n", ret); + return ret; + } + + input_pin = &cfg->input_pins[input_pins]; + input_pin->input_res.width = fmt.width; + input_pin->input_res.height = fmt.height; + input_pin->dt = av->dt; + input_pin->bits_per_pix = pfmt->bpp_packed; + input_pin->mapped_dt = 0x40; /* invalid mipi data type */ + input_pin->mipi_decompression = 0; + input_pin->capture_mode = IPU6_FW_ISYS_CAPTURE_MODE_REGULAR; + input_pin->mipi_store_mode = pfmt->bpp == pfmt->bpp_packed ? + IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER : + IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL; + input_pin->crop_first_and_last_lines = v4l2_crop.top & 1; + + output_pins = cfg->nof_output_pins++; + aq->fw_output = output_pins; + stream->output_pins[output_pins].pin_ready = ipu6_isys_queue_buf_ready; + stream->output_pins[output_pins].aq = aq; + + output_pin = &cfg->output_pins[output_pins]; + output_pin->input_pin_id = input_pins; + output_pin->output_res.width = ipu6_isys_get_frame_width(av); + output_pin->output_res.height = ipu6_isys_get_frame_height(av); + + output_pin->stride = ipu6_isys_get_bytes_per_line(av); + if (pfmt->bpp != pfmt->bpp_packed) + output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_RAW_SOC; + else + output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_MIPI; + output_pin->ft = pfmt->css_pixelformat; + output_pin->send_irq = 1; + memset(output_pin->ts_offsets, 0, sizeof(output_pin->ts_offsets)); + output_pin->s2m_pixel_soc_pixel_remapping = + S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; + output_pin->csi_be_soc_pixel_remapping = + CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; + + output_pin->snoopable = true; + output_pin->error_handling_enable = false; + output_pin->sensor_type = isys->sensor_type++; + if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) + isys->sensor_type = isys->pdata->ipdata->sensor_type_start; + + return 0; +} + +static int start_stream_firmware(struct ipu6_isys_video *av, + struct ipu6_isys_buffer_list *bl) +{ + struct 
ipu6_fw_isys_stream_cfg_data_abi *stream_cfg; + struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; + struct ipu6_isys_stream *stream = av->stream; + struct device *dev = &av->isys->adev->auxdev.dev; + struct isys_fw_msgs *msg = NULL; + struct ipu6_isys_queue *aq; + int ret, retout, tout; + u16 send_type; + + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) + return -ENOMEM; + + stream_cfg = &msg->fw_msg.stream; + stream_cfg->src = stream->stream_source; + stream_cfg->vc = stream->vc; + stream_cfg->isl_use = 0; + stream_cfg->sensor_type = IPU6_FW_ISYS_SENSOR_MODE_NORMAL; + + list_for_each_entry(aq, &stream->queues, node) { + struct ipu6_isys_video *__av = ipu6_isys_queue_to_video(aq); + + ret = ipu6_isys_fw_pin_cfg(__av, stream_cfg); + if (ret < 0) { + ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg); + return ret; + } + } + + ipu6_fw_isys_dump_stream_cfg(dev, stream_cfg); + + stream->nr_output_pins = stream_cfg->nof_output_pins; + + reinit_completion(&stream->stream_open_completion); + + ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, + stream_cfg, msg->dma_addr, + sizeof(*stream_cfg), + IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN); + if (ret < 0) { + dev_err(dev, "can't open stream (%d)\n", ret); + ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg); + return ret; + } + + get_stream_opened(av); + + tout = wait_for_completion_timeout(&stream->stream_open_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + + ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg); + + if (!tout) { + dev_err(dev, "stream open time out\n"); + ret = -ETIMEDOUT; + goto out_put_stream_opened; + } + if (stream->error) { + dev_err(dev, "stream open error: %d\n", stream->error); + ret = -EIO; + goto out_put_stream_opened; + } + dev_dbg(dev, "start stream: open complete\n"); + + if (bl) { + msg = ipu6_get_fw_msg_buf(stream); + if (!msg) { + ret = -ENOMEM; + goto out_put_stream_opened; + } + buf = &msg->fw_msg.frame; + ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); + ipu6_isys_buffer_list_queue(bl, + 
IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + } + + reinit_completion(&stream->stream_start_completion); + + if (bl) { + send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + ipu6_fw_isys_dump_frame_buff_set(dev, buf, + stream_cfg->nof_output_pins); + ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, + buf, msg->dma_addr, + sizeof(*buf), send_type); + } else { + send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + send_type); + } + + if (ret < 0) { + dev_err(dev, "can't start streaming (%d)\n", ret); + goto out_stream_close; + } + + tout = wait_for_completion_timeout(&stream->stream_start_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream start time out\n"); + ret = -ETIMEDOUT; + goto out_stream_close; + } + if (stream->error) { + dev_err(dev, "stream start error: %d\n", stream->error); + ret = -EIO; + goto out_stream_close; + } + dev_dbg(dev, "start stream: complete\n"); + + return 0; + +out_stream_close: + reinit_completion(&stream->stream_close_completion); + + retout = ipu6_fw_isys_simple_cmd(av->isys, + stream->stream_handle, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (retout < 0) { + dev_dbg(dev, "can't close stream (%d)\n", retout); + goto out_put_stream_opened; + } + + tout = wait_for_completion_timeout(&stream->stream_close_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (stream->error) + dev_err(dev, "stream close error: %d\n", stream->error); + else + dev_dbg(dev, "stream close complete\n"); + +out_put_stream_opened: + put_stream_opened(av); + + return ret; +} + +static void stop_streaming_firmware(struct ipu6_isys_video *av) +{ + struct device *dev = &av->isys->adev->auxdev.dev; + struct ipu6_isys_stream *stream = av->stream; + int ret, tout; + + reinit_completion(&stream->stream_stop_completion); + + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + 
IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH); + + if (ret < 0) { + dev_err(dev, "can't stop stream (%d)\n", ret); + return; + } + + tout = wait_for_completion_timeout(&stream->stream_stop_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_warn(dev, "stream stop time out\n"); + else if (stream->error) + dev_warn(dev, "stream stop error: %d\n", stream->error); + else + dev_dbg(dev, "stop stream: complete\n"); +} + +static void close_streaming_firmware(struct ipu6_isys_video *av) +{ + struct ipu6_isys_stream *stream = av->stream; + struct device *dev = &av->isys->adev->auxdev.dev; + int ret, tout; + + reinit_completion(&stream->stream_close_completion); + + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (ret < 0) { + dev_err(dev, "can't close stream (%d)\n", ret); + return; + } + + tout = wait_for_completion_timeout(&stream->stream_close_completion, + IPU6_FW_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_warn(dev, "stream close time out\n"); + else if (stream->error) + dev_warn(dev, "stream close error: %d\n", stream->error); + else + dev_dbg(dev, "close stream: complete\n"); + + put_stream_opened(av); +} + +int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, + struct media_entity *source_entity, + int nr_queues) +{ + struct ipu6_isys_stream *stream = av->stream; + struct ipu6_isys_csi2 *csi2; + + if (WARN_ON(stream->nr_streaming)) + return -EINVAL; + + stream->nr_queues = nr_queues; + atomic_set(&stream->sequence, 0); + + stream->seq_index = 0; + memset(stream->seq, 0, sizeof(stream->seq)); + + if (WARN_ON(!list_empty(&stream->queues))) + return -EINVAL; + + stream->stream_source = stream->asd->source; + csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + csi2->receiver_errors = 0; + stream->source_entity = source_entity; + + dev_dbg(&av->isys->adev->auxdev.dev, + "prepare stream: external entity %s\n", + stream->source_entity->name); + + return 0; +} + +void 
ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, + bool state) +{ + struct ipu6_isys *isys = av->isys; + struct ipu6_isys_csi2 *csi2 = NULL; + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + struct device *dev = &isys->adev->auxdev.dev; + struct v4l2_mbus_framefmt format; + struct v4l2_subdev *esd; + struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 }; + unsigned int bpp, lanes; + s64 link_freq = 0; + u64 pixel_rate = 0; + int ret; + + if (!state) + return; + + esd = media_entity_to_v4l2_subdev(av->stream->source_entity); + + av->watermark.width = ipu6_isys_get_frame_width(av); + av->watermark.height = ipu6_isys_get_frame_height(av); + av->watermark.sram_gran_shift = isys->pdata->ipdata->sram_gran_shift; + av->watermark.sram_gran_size = isys->pdata->ipdata->sram_gran_size; + + ret = v4l2_g_ctrl(esd->ctrl_handler, &hb); + if (!ret && hb.value >= 0) + av->watermark.hblank = hb.value; + else + av->watermark.hblank = 0; + + csi2 = ipu6_isys_subdev_to_csi2(av->stream->asd); + link_freq = ipu6_isys_csi2_get_link_freq(csi2); + if (link_freq > 0) { + lanes = csi2->nlanes; + ret = ipu6_isys_get_stream_pad_fmt(&csi2->asd.sd, 0, + av->source_stream, &format); + if (!ret) { + bpp = ipu6_isys_mbus_code_to_bpp(format.code); + pixel_rate = mul_u64_u32_div(link_freq, lanes * 2, bpp); + } + } + + av->watermark.pixel_rate = pixel_rate; + + if (!pixel_rate) { + mutex_lock(&iwake_watermark->mutex); + iwake_watermark->force_iwake_disable = true; + mutex_unlock(&iwake_watermark->mutex); + dev_warn(dev, "unexpected pixel_rate from %s, disable iwake.\n", + av->stream->source_entity->name); + } +} + +static void calculate_stream_datarate(struct ipu6_isys_video *av) +{ + struct video_stream_watermark *watermark = &av->watermark; + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + u32 pages_per_line, pb_bytes_per_line, pixels_per_line, bytes_per_line; + u64 line_time_ns, stream_data_rate; 
+ u16 shift, size; + + shift = watermark->sram_gran_shift; + size = watermark->sram_gran_size; + + pixels_per_line = watermark->width + watermark->hblank; + line_time_ns = div_u64(pixels_per_line * NSEC_PER_SEC, + watermark->pixel_rate); + bytes_per_line = watermark->width * pfmt->bpp / 8; + pages_per_line = DIV_ROUND_UP(bytes_per_line, size); + pb_bytes_per_line = pages_per_line << shift; + stream_data_rate = div64_u64(pb_bytes_per_line * 1000, line_time_ns); + + watermark->stream_data_rate = stream_data_rate; +} + +void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state) +{ + struct isys_iwake_watermark *iwake_watermark = + &av->isys->iwake_watermark; + + if (!av->watermark.pixel_rate) + return; + + if (state) { + calculate_stream_datarate(av); + mutex_lock(&iwake_watermark->mutex); + list_add(&av->watermark.stream_node, + &iwake_watermark->video_list); + mutex_unlock(&iwake_watermark->mutex); + } else { + av->watermark.stream_data_rate = 0; + mutex_lock(&iwake_watermark->mutex); + list_del(&av->watermark.stream_node); + mutex_unlock(&iwake_watermark->mutex); + } + + update_watermark_setting(av->isys); +} + +void ipu6_isys_put_stream(struct ipu6_isys_stream *stream) +{ + struct device *dev; + unsigned int i; + unsigned long flags; + + if (!stream) { + pr_err("ipu6-isys: no available stream\n"); + return; + } + + dev = &stream->isys->adev->auxdev.dev; + + spin_lock_irqsave(&stream->isys->streams_lock, flags); + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (&stream->isys->streams[i] == stream) { + if (stream->isys->streams_ref_count[i] > 0) + stream->isys->streams_ref_count[i]--; + else + dev_warn(dev, "invalid stream %d\n", i); + + break; + } + } + spin_unlock_irqrestore(&stream->isys->streams_lock, flags); +} + +static struct ipu6_isys_stream * +ipu6_isys_get_stream(struct ipu6_isys_video *av, struct ipu6_isys_subdev *asd) +{ + struct ipu6_isys_stream *stream = NULL; + struct ipu6_isys *isys = av->isys; + unsigned long flags; + 
unsigned int i; + u8 vc = av->vc; + + if (!isys) + return NULL; + + spin_lock_irqsave(&isys->streams_lock, flags); + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (isys->streams_ref_count[i] && isys->streams[i].vc == vc && + isys->streams[i].asd == asd) { + isys->streams_ref_count[i]++; + stream = &isys->streams[i]; + break; + } + } + + if (!stream) { + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (!isys->streams_ref_count[i]) { + isys->streams_ref_count[i]++; + stream = &isys->streams[i]; + stream->vc = vc; + stream->asd = asd; + break; + } + } + } + spin_unlock_irqrestore(&isys->streams_lock, flags); + + return stream; +} + +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle) +{ + unsigned long flags; + struct ipu6_isys_stream *stream = NULL; + + if (!isys) + return NULL; + + if (stream_handle >= IPU6_ISYS_MAX_STREAMS) { + dev_err(&isys->adev->auxdev.dev, + "stream_handle %d is invalid\n", stream_handle); + return NULL; + } + + spin_lock_irqsave(&isys->streams_lock, flags); + if (isys->streams_ref_count[stream_handle] > 0) { + isys->streams_ref_count[stream_handle]++; + stream = &isys->streams[stream_handle]; + } + spin_unlock_irqrestore(&isys->streams_lock, flags); + + return stream; +} + +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) +{ + struct ipu6_isys_stream *stream = NULL; + unsigned long flags; + unsigned int i; + + if (!isys) + return NULL; + + if (source < 0) { + dev_err(&isys->adev->auxdev.dev, + "query stream with invalid port number\n"); + return NULL; + } + + spin_lock_irqsave(&isys->streams_lock, flags); + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + if (!isys->streams_ref_count[i]) + continue; + + if (isys->streams[i].stream_source == source && + isys->streams[i].vc == vc) { + stream = &isys->streams[i]; + isys->streams_ref_count[i]++; + break; + } + } + spin_unlock_irqrestore(&isys->streams_lock, flags); + + return stream; +} + 
+static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av) +{ + struct media_pipeline *pipeline = + media_entity_pipeline(&__av->vdev.entity); + unsigned int i; + u64 stream_mask = 0; + + for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { + struct ipu6_isys_video *av = &__av->csi2->av[i]; + + if (pipeline == media_entity_pipeline(&av->vdev.entity)) + stream_mask |= BIT_ULL(av->source_stream); + } + + return stream_mask; +} + +int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + struct ipu6_isys_buffer_list *bl) +{ + struct v4l2_subdev_krouting *routing; + struct ipu6_isys_stream *stream = av->stream; + struct v4l2_subdev_state *subdev_state; + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_subdev *sd; + struct media_pad *r_pad; + u32 sink_pad, sink_stream; + u64 r_stream; + u64 stream_mask = 0; + int ret = 0; + + dev_dbg(dev, "set stream: %d\n", state); + + if (WARN(!stream->source_entity, "No source entity for stream\n")) + return -ENODEV; + + sd = &stream->asd->sd; + r_pad = media_pad_remote_pad_first(&av->pad); + r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); + + subdev_state = v4l2_subdev_lock_and_get_active_state(sd); + routing = &subdev_state->routing; + ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, + r_stream, &sink_pad, + &sink_stream); + v4l2_subdev_unlock_state(subdev_state); + if (ret) + return ret; + + stream_mask = get_stream_mask_by_pipeline(av); + if (!state) { + stop_streaming_firmware(av); + + /* stop sub-device which connects with video */ + dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", + sd->name, r_pad->index, stream_mask); + ret = v4l2_subdev_disable_streams(sd, r_pad->index, + stream_mask); + if (ret) { + dev_err(dev, "stream off %s failed with %d\n", sd->name, + ret); + return ret; + } + close_streaming_firmware(av); + } else { + ret = start_stream_firmware(av, bl); + if (ret) { + dev_err(dev, "start stream of firmware failed\n"); + return ret; + } 
+ + /* start sub-device which connects with video */ + dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, + r_pad->index, stream_mask); + ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); + if (ret) { + dev_err(dev, "stream on %s failed with %d\n", sd->name, + ret); + goto out_media_entity_stop_streaming_firmware; + } + } + + av->streaming = state; + + return 0; + +out_media_entity_stop_streaming_firmware: + stop_streaming_firmware(av); + + return ret; +} + +static const struct v4l2_ioctl_ops ipu6_v4l2_ioctl_ops = { + .vidioc_querycap = ipu6_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = ipu6_isys_vidioc_enum_fmt, + .vidioc_enum_fmt_meta_cap = ipu6_isys_vidioc_enum_fmt, + .vidioc_enum_framesizes = ipu6_isys_vidioc_enum_framesizes, + .vidioc_g_fmt_vid_cap = ipu6_isys_vidioc_g_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = ipu6_isys_vidioc_s_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = ipu6_isys_vidioc_try_fmt_vid_cap, + .vidioc_g_fmt_meta_cap = ipu6_isys_vidioc_g_fmt_meta_cap, + .vidioc_s_fmt_meta_cap = ipu6_isys_vidioc_s_fmt_meta_cap, + .vidioc_try_fmt_meta_cap = ipu6_isys_vidioc_try_fmt_meta_cap, + .vidioc_reqbufs = ipu6_isys_vidioc_reqbufs, + .vidioc_create_bufs = ipu6_isys_vidioc_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, +}; + +static const struct media_entity_operations entity_ops = { + .link_validate = link_validate, +}; + +static const struct v4l2_file_operations isys_fops = { + .owner = THIS_MODULE, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .open = video_open, + .release = vb2_fop_release, +}; + +int ipu6_isys_fw_open(struct ipu6_isys *isys) +{ + struct ipu6_bus_device *adev = isys->adev; + const struct ipu6_isys_internal_pdata *ipdata = isys->pdata->ipdata; 
+ int ret; + + ret = pm_runtime_resume_and_get(&adev->auxdev.dev); + if (ret < 0) + return ret; + + mutex_lock(&isys->mutex); + + if (isys->ref_count++) + goto unlock; + + ipu6_configure_spc(adev->isp, &ipdata->hw_variant, + IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base, + adev->pkg_dir, adev->pkg_dir_dma_addr); + + /* + * Buffers could have been left to wrong queue at last closure. + * Move them now back to empty buffer queue. + */ + ipu6_cleanup_fw_msg_bufs(isys); + + if (isys->fwcom) { + /* + * Something went wrong in previous shutdown. As we are now + * restarting isys we can safely delete old context. + */ + dev_warn(&adev->auxdev.dev, "clearing old context\n"); + ipu6_fw_isys_cleanup(isys); + } + + ret = ipu6_fw_isys_init(isys, ipdata->num_parallel_streams); + if (ret < 0) + goto out; + +unlock: + mutex_unlock(&isys->mutex); + + return 0; + +out: + isys->ref_count--; + mutex_unlock(&isys->mutex); + pm_runtime_put(&adev->auxdev.dev); + + return ret; +} + +void ipu6_isys_fw_close(struct ipu6_isys *isys) +{ + mutex_lock(&isys->mutex); + + isys->ref_count--; + if (!isys->ref_count) { + ipu6_fw_isys_close(isys); + if (isys->fwcom) { + isys->need_reset = true; + dev_warn(&isys->adev->auxdev.dev, + "failed to close fw isys\n"); + } + } + + mutex_unlock(&isys->mutex); + + if (isys->need_reset) + pm_runtime_put_sync(&isys->adev->auxdev.dev); + else + pm_runtime_put(&isys->adev->auxdev.dev); +} + +int ipu6_isys_setup_video(struct ipu6_isys_video *av, + struct media_entity **source_entity, int *nr_queues) +{ + const struct ipu6_isys_pixelformat *pfmt = + ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_mbus_frame_desc_entry entry; + struct v4l2_subdev_route *route = NULL; + struct v4l2_subdev_route *r; + struct v4l2_subdev_state *state; + struct ipu6_isys_subdev *asd; + struct v4l2_subdev *remote_sd; + struct media_pipeline *pipeline; + struct media_pad *source_pad, *remote_pad; + int ret = 
-EINVAL; + + *nr_queues = 0; + + remote_pad = media_pad_remote_pad_unique(&av->pad); + if (IS_ERR(remote_pad)) { + dev_dbg(dev, "failed to get remote pad\n"); + return PTR_ERR(remote_pad); + } + + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + asd = to_ipu6_isys_subdev(remote_sd); + source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]); + if (!source_pad) { + dev_dbg(dev, "No external source entity\n"); + return -ENODEV; + } + + *source_entity = source_pad->entity; + + /* Find the root */ + state = v4l2_subdev_lock_and_get_active_state(remote_sd); + for_each_active_route(&state->routing, r) { + (*nr_queues)++; + + if (r->source_pad == remote_pad->index) + route = r; + } + + if (!route) { + v4l2_subdev_unlock_state(state); + dev_dbg(dev, "Failed to find route\n"); + return -ENODEV; + } + av->source_stream = route->sink_stream; + v4l2_subdev_unlock_state(state); + + ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, + to_ipu6_isys_csi2(asd), + *source_entity, &entry); + if (ret == -ENOIOCTLCMD) { + av->vc = 0; + av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); + } else if (!ret) { + dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", + entry.stream, entry.length, entry.bus.csi2.vc, + entry.bus.csi2.dt); + + av->vc = entry.bus.csi2.vc; + av->dt = entry.bus.csi2.dt; + } else { + dev_err(dev, "failed to get remote frame desc\n"); + return ret; + } + + pipeline = media_entity_pipeline(&av->vdev.entity); + if (!pipeline) + ret = video_device_pipeline_alloc_start(&av->vdev); + else + ret = video_device_pipeline_start(&av->vdev, pipeline); + if (ret < 0) { + dev_dbg(dev, "media pipeline start failed\n"); + return ret; + } + + av->stream = ipu6_isys_get_stream(av, asd); + if (!av->stream) { + video_device_pipeline_stop(&av->vdev); + dev_err(dev, "no available stream for firmware\n"); + return -EINVAL; + } + + return 0; +} + +/* + * Do everything that's needed to initialise things related to video + * buffer queue, video node, 
and the related media entity. The caller + * is expected to assign isys field and set the name of the video + * device. + */ +int ipu6_isys_video_init(struct ipu6_isys_video *av) +{ + struct v4l2_format format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .fmt.pix = { + .width = 1920, + .height = 1080, + }, + }; + struct v4l2_format format_meta = { + .type = V4L2_BUF_TYPE_META_CAPTURE, + .fmt.meta = { + .width = 1920, + .height = 4, + }, + }; + int ret; + + mutex_init(&av->mutex); + av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | + V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE; + av->vdev.vfl_dir = VFL_DIR_RX; + + ret = ipu6_isys_queue_init(&av->aq); + if (ret) + goto out_free_watermark; + + av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; + ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); + if (ret) + goto out_vb2_queue_release; + + av->vdev.entity.ops = &entity_ops; + av->vdev.release = video_device_release_empty; + av->vdev.fops = &isys_fops; + av->vdev.v4l2_dev = &av->isys->v4l2_dev; + av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev; + if (!av->vdev.ioctl_ops) + av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops; + av->vdev.queue = &av->aq.vbq; + av->vdev.lock = &av->mutex; + + __ipu6_isys_vidioc_try_fmt_vid_cap(av, &format); + av->pix_fmt = format.fmt.pix; + __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); + av->meta_fmt = format_meta.fmt.meta; + + set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); + video_set_drvdata(&av->vdev, av); + + ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); + if (ret) + goto out_media_entity_cleanup; + + return ret; + +out_media_entity_cleanup: + vb2_video_unregister_device(&av->vdev); + media_entity_cleanup(&av->vdev.entity); + +out_vb2_queue_release: + vb2_queue_release(&av->aq.vbq); + +out_free_watermark: + mutex_destroy(&av->mutex); + + return ret; +} + +void ipu6_isys_video_cleanup(struct ipu6_isys_video *av) +{ + vb2_video_unregister_device(&av->vdev); + 
media_entity_cleanup(&av->vdev.entity); + mutex_destroy(&av->mutex); +} + +u32 ipu6_isys_get_format(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.pixelformat; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.dataformat; + + return 0; +} + +u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.sizeimage; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.buffersize; + + return 0; +} + +u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.bytesperline; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.bytesperline; + + return 0; +} + +u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.width; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.width; + + return 0; +} + +u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av) +{ + if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return av->pix_fmt.height; + + if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) + return av->meta_fmt.height; + + return 0; +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h new file mode 100644 index 0000000..1d945be --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_VIDEO_H +#define IPU6_ISYS_VIDEO_H + +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu6-isys-queue.h" + +#define IPU6_ISYS_OUTPUT_PINS 11 +#define IPU6_ISYS_MAX_PARALLEL_SOF 2 + +struct file; +struct ipu6_isys; +struct ipu6_isys_csi2; +struct 
ipu6_isys_subdev; + +struct ipu6_isys_pixelformat { + u32 pixelformat; + u32 bpp; + u32 bpp_packed; + u32 code; + u32 css_pixelformat; + bool is_meta; +}; + +struct sequence_info { + unsigned int sequence; + u64 timestamp; +}; + +struct output_pin_data { + void (*pin_ready)(struct ipu6_isys_stream *stream, + struct ipu6_fw_isys_resp_info_abi *info); + struct ipu6_isys_queue *aq; +}; + +/* + * Align with firmware stream. Each stream represents a CSI virtual channel. + * May map to multiple video devices + */ +struct ipu6_isys_stream { + struct mutex mutex; + struct media_entity *source_entity; + atomic_t sequence; + unsigned int seq_index; + struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; + int stream_source; + int stream_handle; + unsigned int nr_output_pins; + struct ipu6_isys_subdev *asd; + + int nr_queues; /* Number of capture queues */ + int nr_streaming; + int streaming; /* Has streaming been really started? */ + struct list_head queues; + struct completion stream_open_completion; + struct completion stream_close_completion; + struct completion stream_start_completion; + struct completion stream_stop_completion; + struct ipu6_isys *isys; + + struct output_pin_data output_pins[IPU6_ISYS_OUTPUT_PINS]; + int error; + u8 vc; +}; + +struct video_stream_watermark { + u32 width; + u32 height; + u32 hblank; + u32 frame_rate; + u64 pixel_rate; + u64 stream_data_rate; + u16 sram_gran_shift; + u16 sram_gran_size; + struct list_head stream_node; +}; + +struct ipu6_isys_video { + struct ipu6_isys_queue aq; + /* Serialise access to other fields in the struct. 
*/ + struct mutex mutex; + struct media_pad pad; + struct video_device vdev; + struct v4l2_pix_format pix_fmt; + struct v4l2_meta_format meta_fmt; + struct ipu6_isys *isys; + struct ipu6_isys_csi2 *csi2; + struct ipu6_isys_stream *stream; + unsigned int streaming; + struct video_stream_watermark watermark; + u32 source_stream; + u8 vc; + u8 dt; +}; + +#define ipu6_isys_queue_to_video(__aq) \ + container_of(__aq, struct ipu6_isys_video, aq) + +extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts[]; +extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[]; + +const struct ipu6_isys_pixelformat * +ipu6_isys_get_isys_format(u32 pixelformat, u32 code); +int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, + struct media_entity *source_entity, + int nr_queues); +int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + struct ipu6_isys_buffer_list *bl); +int ipu6_isys_fw_open(struct ipu6_isys *isys); +void ipu6_isys_fw_close(struct ipu6_isys *isys); +int ipu6_isys_setup_video(struct ipu6_isys_video *av, + struct media_entity **source_entity, int *nr_queues); +int ipu6_isys_video_init(struct ipu6_isys_video *av); +void ipu6_isys_video_cleanup(struct ipu6_isys_video *av); +void ipu6_isys_put_stream(struct ipu6_isys_stream *stream); +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle); +struct ipu6_isys_stream * +ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc); + +void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, + bool state); +void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state); + +u32 ipu6_isys_get_format(struct ipu6_isys_video *av); +u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av); +u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av); +u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av); +u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av); + +#endif /* IPU6_ISYS_VIDEO_H */ 
diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c new file mode 100644 index 0000000..17bc8ca --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -0,0 +1,1382 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ipu6-bus.h" +#include "ipu6-cpd.h" +#include "ipu6-dma.h" +#include "ipu6-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-mmu.h" +#include "ipu6-platform-buttress-regs.h" +#include "ipu6-platform-isys-csi2-reg.h" +#include "ipu6-platform-regs.h" + +#define IPU6_BUTTRESS_FABIC_CONTROL 0x68 +#define GDA_ENABLE_IWAKE_INDEX 2 +#define GDA_IWAKE_THRESHOLD_INDEX 1 +#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0 +#define GDA_MEMOPEN_THRESHOLD_INDEX 3 +#define DEFAULT_DID_RATIO 90 +#define DEFAULT_IWAKE_THRESHOLD 0x42 +#define DEFAULT_MEM_OPEN_TIME 10 +#define ONE_THOUSAND_MICROSECOND 1000 +/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */ +#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE 0x800 + +/* LTR & DID value are 10 bit at most */ +#define LTR_DID_VAL_MAX 1023 +#define LTR_DEFAULT_VALUE 0x70503c19 +#define FILL_TIME_DEFAULT_VALUE 0xfff0783c +#define LTR_DID_PKGC_2R 20 +#define LTR_SCALE_DEFAULT 5 +#define LTR_SCALE_1024NS 2 +#define DID_SCALE_1US 2 +#define DID_SCALE_32US 3 +#define REG_PKGC_PMON_CFG 0xb00 + +#define VAL_PKGC_PMON_CFG_RESET 0x38 +#define VAL_PKGC_PMON_CFG_START 0x7 + +#define IS_PIXEL_BUFFER_PAGES 0x80 +/* + * when iwake mode is disabled, the critical threshold is statically set + * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4 + */ +#define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4) + +union 
fabric_ctrl { + struct { + u16 ltr_val : 10; + u16 ltr_scale : 3; + u16 reserved : 3; + u16 did_val : 10; + u16 did_scale : 3; + u16 reserved2 : 1; + u16 keep_power_in_D0 : 1; + u16 keep_power_override : 1; + } bits; + u32 value; +}; + +enum ltr_did_type { + LTR_IWAKE_ON, + LTR_IWAKE_OFF, + LTR_ISYS_ON, + LTR_ISYS_OFF, + LTR_ENHANNCE_IWAKE, + LTR_TYPE_MAX +}; + +#define ISYS_PM_QOS_VALUE 300 + +static int isys_isr_one(struct ipu6_bus_device *adev); + +static int +isys_complete_ext_device_registration(struct ipu6_isys *isys, + struct v4l2_subdev *sd, + struct ipu6_isys_csi2_config *csi2) +{ + struct device *dev = &isys->adev->auxdev.dev; + unsigned int i; + int ret; + + for (i = 0; i < sd->entity.num_pads; i++) { + if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + + if (i == sd->entity.num_pads) { + dev_warn(dev, "no src pad in external entity\n"); + ret = -ENOENT; + goto unregister_subdev; + } + + ret = media_create_pad_link(&sd->entity, i, + &isys->csi2[csi2->port].asd.sd.entity, + 0, MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (ret) { + dev_warn(dev, "can't create link\n"); + goto unregister_subdev; + } + + isys->csi2[csi2->port].nlanes = csi2->nlanes; + + return 0; + +unregister_subdev: + v4l2_device_unregister_subdev(sd); + + return ret; +} + +static void isys_stream_init(struct ipu6_isys *isys) +{ + u32 i; + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { + mutex_init(&isys->streams[i].mutex); + init_completion(&isys->streams[i].stream_open_completion); + init_completion(&isys->streams[i].stream_close_completion); + init_completion(&isys->streams[i].stream_start_completion); + init_completion(&isys->streams[i].stream_stop_completion); + INIT_LIST_HEAD(&isys->streams[i].queues); + isys->streams[i].isys = isys; + isys->streams[i].stream_handle = i; + isys->streams[i].vc = INVALID_VC_ID; + } +} + +static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2 = + 
&isys->pdata->ipdata->csi2; + unsigned int i; + + for (i = 0; i < csi2->nports; i++) + ipu6_isys_csi2_cleanup(&isys->csi2[i]); +} + +static int isys_csi2_register_subdevices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + unsigned int i; + int ret; + + for (i = 0; i < csi2_pdata->nports; i++) { + ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, + isys->pdata->base + + CSI_REG_PORT_BASE(i), i); + if (ret) + goto fail; + + isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); + } + + return 0; + +fail: + while (i--) + ipu6_isys_csi2_cleanup(&isys->csi2[i]); + + return ret; +} + +static int isys_csi2_create_media_links(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; + unsigned int i, j; + int ret; + + for (i = 0; i < csi2_pdata->nports; i++) { + struct media_entity *sd = &isys->csi2[i].asd.sd.entity; + + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { + struct ipu6_isys_video *av = &isys->csi2[i].av[j]; + + ret = media_create_pad_link(sd, CSI2_PAD_SRC + j, + &av->vdev.entity, 0, 0); + if (ret) { + dev_err(dev, "CSI2 can't create link\n"); + return ret; + } + + av->csi2 = &isys->csi2[i]; + } + } + + return 0; +} + +static void isys_unregister_video_devices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + unsigned int i, j; + + for (i = 0; i < csi2_pdata->nports; i++) + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) + ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); +} + +static int isys_register_video_devices(struct ipu6_isys *isys) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + unsigned int i, j; + int ret; + + for (i = 0; i < csi2_pdata->nports; i++) { + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { + struct ipu6_isys_video *av = &isys->csi2[i].av[j]; + + snprintf(av->vdev.name, 
sizeof(av->vdev.name), + IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u", + i * NR_OF_CSI2_SRC_PADS + j); + av->isys = isys; + av->aq.vbq.buf_struct_size = + sizeof(struct ipu6_isys_video_buffer); + + ret = ipu6_isys_video_init(av); + if (ret) + goto fail; + } + } + + return 0; + +fail: + while (i--) { + while (j--) + ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); + j = NR_OF_CSI2_SRC_PADS; + } + + return ret; +} + +void isys_setup_hw(struct ipu6_isys *isys) +{ + void __iomem *base = isys->pdata->base; + const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; + u32 irqs = 0; + unsigned int i, nports; + + nports = isys->pdata->ipdata->csi2.nports; + + /* Enable irqs for all MIPI ports */ + for (i = 0; i < nports; i++) + irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); + + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge); + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp); + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask); + writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable); + writel(GENMASK(19, 0), + base + isys->pdata->ipdata->csi2.ctrl0_irq_clear); + + irqs = ISYS_UNISPART_IRQS; + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE); + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); + writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); + writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE); + + writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); + + /* Write CDC FIFO threshold values for isys */ + for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i)); +} + +static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2) +{ + struct ipu6_isys_stream *stream; + unsigned int i; + u32 status; + int source; + + ipu6_isys_register_errors(csi2); + + status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + 
CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); + + writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); + + source = csi2->asd.source; + for (i = 0; i < NR_OF_CSI2_VC; i++) { + if (status & IPU_CSI_RX_IRQ_FS_VC(i)) { + stream = ipu6_isys_query_stream_by_source(csi2->isys, + source, i); + if (stream) { + ipu6_isys_csi2_sof_event_by_stream(stream); + ipu6_isys_put_stream(stream); + } + } + + if (status & IPU_CSI_RX_IRQ_FE_VC(i)) { + stream = ipu6_isys_query_stream_by_source(csi2->isys, + source, i); + if (stream) { + ipu6_isys_csi2_eof_event_by_stream(stream); + ipu6_isys_put_stream(stream); + } + } + } +} + +irqreturn_t isys_isr(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status_sw, status_csi; + u32 ctrl0_status, ctrl0_clear; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status; + ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear; + + status_csi = readl(isys->pdata->base + ctrl0_status); + status_sw = readl(isys->pdata->base + + IPU6_REG_ISYS_UNISPART_IRQ_STATUS); + + writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW, + base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); + + do { + writel(status_csi, isys->pdata->base + ctrl0_clear); + + writel(status_sw, isys->pdata->base + + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); + + if (isys->isr_csi2_bits & status_csi) { + unsigned int i; + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + /* irq from not enabled port */ + if (!isys->csi2[i].base) + continue; + if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i)) + ipu6_isys_csi2_isr(&isys->csi2[i]); + } + } + + writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); + + if (!isys_isr_one(adev)) + status_sw = IPU6_ISYS_UNISPART_IRQ_SW; + else + status_sw = 0; + + status_csi = readl(isys->pdata->base + ctrl0_status); + status_sw |= 
readl(isys->pdata->base + + IPU6_REG_ISYS_UNISPART_IRQ_STATUS); + } while ((status_csi & isys->isr_csi2_bits) || + (status_sw & IPU6_ISYS_UNISPART_IRQ_SW)); + + writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); + + spin_unlock(&isys->power_lock); + + return IRQ_HANDLED; +} + +static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + struct ltr_did ltrdid_default; + + ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE; + ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE; + + if (iwake_watermark->ltrdid.lut_ltr.value) + *pltr_did = iwake_watermark->ltrdid; + else + *pltr_did = ltrdid_default; +} + +static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value) +{ + struct device *dev = &isys->adev->auxdev.dev; + u32 req_id = index; + u32 offset = 0; + int ret; + + ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value); + if (ret) + dev_err(dev, "write %d failed %d", index, ret); + + return ret; +} + +/* + * When input system is powered up and before enabling any new sensor capture, + * or after disabling any sensor capture the following values need to be set: + * LTR_value = LTR(usec) from calculation; + * LTR_scale = 2; + * DID_value = DID(usec) from calculation; + * DID_scale = 2; + * + * When input system is powered down, the LTR and DID values + * must be returned to the default values: + * LTR_value = 1023; + * LTR_scale = 5; + * DID_value = 1023; + * DID_scale = 2; + */ +static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did, + enum ltr_did_type use) +{ + struct device *dev = &isys->adev->auxdev.dev; + u16 ltr_val, ltr_scale = LTR_SCALE_1024NS; + u16 did_val, did_scale = DID_SCALE_1US; + struct ipu6_device *isp = isys->adev->isp; + union fabric_ctrl fc; + + switch (use) { + case LTR_IWAKE_ON: + ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX); + did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX); + 
ltr_scale = (ltr == LTR_DID_VAL_MAX && + did == LTR_DID_VAL_MAX) ? + LTR_SCALE_DEFAULT : LTR_SCALE_1024NS; + break; + case LTR_ISYS_ON: + case LTR_IWAKE_OFF: + ltr_val = LTR_DID_PKGC_2R; + did_val = LTR_DID_PKGC_2R; + break; + case LTR_ISYS_OFF: + ltr_val = LTR_DID_VAL_MAX; + did_val = LTR_DID_VAL_MAX; + ltr_scale = LTR_SCALE_DEFAULT; + break; + case LTR_ENHANNCE_IWAKE: + if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) { + ltr_val = LTR_DID_VAL_MAX; + did_val = LTR_DID_VAL_MAX; + ltr_scale = LTR_SCALE_DEFAULT; + } else if (did < ONE_THOUSAND_MICROSECOND) { + ltr_val = ltr; + did_val = did; + } else { + ltr_val = ltr; + /* div 90% value by 32 to account for scale change */ + did_val = did / 32; + did_scale = DID_SCALE_32US; + } + break; + default: + ltr_val = LTR_DID_VAL_MAX; + did_val = LTR_DID_VAL_MAX; + ltr_scale = LTR_SCALE_DEFAULT; + break; + } + + fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL); + fc.bits.ltr_val = ltr_val; + fc.bits.ltr_scale = ltr_scale; + fc.bits.did_val = did_val; + fc.bits.did_scale = did_scale; + + dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n", + ltr_val, ltr_scale, did_val, did_scale); + writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL); +} + +/* + * Driver may clear register GDA_ENABLE_IWAKE before FW configures the + * stream for debug purpose. Otherwise driver should not access this register. 
+ */ +static void enable_iwake(struct ipu6_isys *isys, bool enable) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + int ret; + + mutex_lock(&iwake_watermark->mutex); + + if (iwake_watermark->iwake_enabled == enable) { + mutex_unlock(&iwake_watermark->mutex); + return; + } + + ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable); + if (!ret) + iwake_watermark->iwake_enabled = enable; + + mutex_unlock(&iwake_watermark->mutex); +} + +void update_watermark_setting(struct ipu6_isys *isys) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + u32 iwake_threshold, iwake_critical_threshold, page_num; + struct device *dev = &isys->adev->auxdev.dev; + u32 calc_fill_time_us = 0, ltr = 0, did = 0; + struct video_stream_watermark *p_watermark; + enum ltr_did_type ltr_did_type; + struct list_head *stream_node; + u64 isys_pb_datarate_mbs = 0; + u32 mem_open_threshold = 0; + struct ltr_did ltrdid; + u64 threshold_bytes; + u32 max_sram_size; + u32 shift; + + shift = isys->pdata->ipdata->sram_gran_shift; + max_sram_size = isys->pdata->ipdata->max_sram_size; + + mutex_lock(&iwake_watermark->mutex); + if (iwake_watermark->force_iwake_disable) { + set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); + set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, + CRITICAL_THRESHOLD_IWAKE_DISABLE); + goto unlock_exit; + } + + if (list_empty(&iwake_watermark->video_list)) { + isys_pb_datarate_mbs = 0; + } else { + list_for_each(stream_node, &iwake_watermark->video_list) { + p_watermark = list_entry(stream_node, + struct video_stream_watermark, + stream_node); + isys_pb_datarate_mbs += p_watermark->stream_data_rate; + } + } + mutex_unlock(&iwake_watermark->mutex); + + if (!isys_pb_datarate_mbs) { + enable_iwake(isys, false); + set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); + mutex_lock(&iwake_watermark->mutex); + set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, + CRITICAL_THRESHOLD_IWAKE_DISABLE); + goto unlock_exit; + } + + 
enable_iwake(isys, true); + calc_fill_time_us = max_sram_size / isys_pb_datarate_mbs; + + if (isys->pdata->ipdata->enhanced_iwake) { + ltr = isys->pdata->ipdata->ltr; + did = calc_fill_time_us * DEFAULT_DID_RATIO / 100; + ltr_did_type = LTR_ENHANNCE_IWAKE; + } else { + get_lut_ltrdid(isys, <rdid); + + if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0) + ltr = 0; + else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1) + ltr = ltrdid.lut_ltr.bits.val0; + else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2) + ltr = ltrdid.lut_ltr.bits.val1; + else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3) + ltr = ltrdid.lut_ltr.bits.val2; + else + ltr = ltrdid.lut_ltr.bits.val3; + + did = calc_fill_time_us - ltr; + ltr_did_type = LTR_IWAKE_ON; + } + + set_iwake_ltrdid(isys, ltr, did, ltr_did_type); + + /* calculate iwake threshold with 2KB granularity pages */ + threshold_bytes = did * isys_pb_datarate_mbs; + iwake_threshold = max_t(u32, 1, threshold_bytes >> shift); + iwake_threshold = min_t(u32, iwake_threshold, max_sram_size); + + mutex_lock(&iwake_watermark->mutex); + if (isys->pdata->ipdata->enhanced_iwake) { + set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, + DEFAULT_IWAKE_THRESHOLD); + /* calculate number of pages that will be filled in 10 usec */ + page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) / + ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE; + page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) % + ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 
1 : 0; + mem_open_threshold = isys->pdata->ipdata->memopen_threshold; + mem_open_threshold = max_t(u32, mem_open_threshold, page_num); + dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold); + set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX, + mem_open_threshold); + } else { + set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, + iwake_threshold); + } + + iwake_critical_threshold = iwake_threshold + + (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2; + + dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold, + iwake_critical_threshold); + + set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, + iwake_critical_threshold); + + writel(VAL_PKGC_PMON_CFG_RESET, + isys->adev->isp->base + REG_PKGC_PMON_CFG); + writel(VAL_PKGC_PMON_CFG_START, + isys->adev->isp->base + REG_PKGC_PMON_CFG); +unlock_exit: + mutex_unlock(&iwake_watermark->mutex); +} + +static void isys_iwake_watermark_init(struct ipu6_isys *isys) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + + INIT_LIST_HEAD(&iwake_watermark->video_list); + mutex_init(&iwake_watermark->mutex); + + iwake_watermark->ltrdid.lut_ltr.value = 0; + iwake_watermark->isys = isys; + iwake_watermark->iwake_enabled = false; + iwake_watermark->force_iwake_disable = false; +} + +static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) +{ + struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; + + mutex_lock(&iwake_watermark->mutex); + list_del(&iwake_watermark->video_list); + mutex_unlock(&iwake_watermark->mutex); + + mutex_destroy(&iwake_watermark->mutex); +} + +/* The .bound() notifier callback when a match is found */ +static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_connection *asc) +{ + struct ipu6_isys *isys = + container_of(notifier, struct ipu6_isys, notifier); + struct sensor_async_sd *s_asd = + container_of(asc, struct sensor_async_sd, asc); + int ret; + + if (s_asd->csi2.port >= 
isys->pdata->ipdata->csi2.nports) { + dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", + s_asd->csi2.port); + return -EINVAL; + } + + ret = ipu_bridge_instantiate_vcm(sd->dev); + if (ret) { + dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n"); + return ret; + } + + dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n", + sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); + ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); + if (ret) + return ret; + + return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); +} + +static int isys_notifier_complete(struct v4l2_async_notifier *notifier) +{ + struct ipu6_isys *isys = + container_of(notifier, struct ipu6_isys, notifier); + + return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); +} + +static const struct v4l2_async_notifier_operations isys_async_ops = { + .bound = isys_notifier_bound, + .complete = isys_notifier_complete, +}; + +#define ISYS_MAX_PORTS 8 +static int isys_notifier_init(struct ipu6_isys *isys) +{ + struct ipu6_device *isp = isys->adev->isp; + struct device *dev = &isp->pdev->dev; + unsigned int i; + int ret; + + v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); + + for (i = 0; i < ISYS_MAX_PORTS; i++) { + struct v4l2_fwnode_endpoint vep = { + .bus_type = V4L2_MBUS_CSI2_DPHY + }; + struct sensor_async_sd *s_asd; + struct fwnode_handle *ep; + + ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, + FWNODE_GRAPH_ENDPOINT_NEXT); + if (!ep) + continue; + + ret = v4l2_fwnode_endpoint_parse(ep, &vep); + if (ret) { + dev_err(dev, "fwnode endpoint parse failed: %d\n", ret); + goto err_parse; + } + + s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, + struct sensor_async_sd); + if (IS_ERR(s_asd)) { + ret = PTR_ERR(s_asd); + dev_err(dev, "add remove fwnode failed: %d\n", ret); + goto err_parse; + } + + s_asd->csi2.port = vep.base.port; + s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; + + dev_dbg(dev, "remote endpoint port %d with 
%d lanes added\n", + s_asd->csi2.port, s_asd->csi2.nlanes); + + fwnode_handle_put(ep); + + continue; + +err_parse: + fwnode_handle_put(ep); + return ret; + } + + isys->notifier.ops = &isys_async_ops; + ret = v4l2_async_nf_register(&isys->notifier); + if (ret) { + dev_err(dev, "failed to register async notifier : %d\n", ret); + v4l2_async_nf_cleanup(&isys->notifier); + } + + return ret; +} + +static void isys_notifier_cleanup(struct ipu6_isys *isys) +{ + v4l2_async_nf_unregister(&isys->notifier); + v4l2_async_nf_cleanup(&isys->notifier); +} + +static int isys_register_devices(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct pci_dev *pdev = isys->adev->isp->pdev; + int ret; + + isys->media_dev.dev = dev; + media_device_pci_init(&isys->media_dev, + pdev, IPU6_MEDIA_DEV_MODEL_NAME); + + strscpy(isys->v4l2_dev.name, isys->media_dev.model, + sizeof(isys->v4l2_dev.name)); + + ret = media_device_register(&isys->media_dev); + if (ret < 0) + goto out_media_device_unregister; + + isys->v4l2_dev.mdev = &isys->media_dev; + isys->v4l2_dev.ctrl_handler = NULL; + + ret = v4l2_device_register(dev, &isys->v4l2_dev); + if (ret < 0) + goto out_media_device_unregister; + + ret = isys_register_video_devices(isys); + if (ret) + goto out_v4l2_device_unregister; + + ret = isys_csi2_register_subdevices(isys); + if (ret) + goto out_isys_unregister_video_device; + + ret = isys_csi2_create_media_links(isys); + if (ret) + goto out_isys_unregister_subdevices; + + ret = isys_notifier_init(isys); + if (ret) + goto out_isys_unregister_subdevices; + + return 0; + +out_isys_unregister_subdevices: + isys_csi2_unregister_subdevices(isys); + +out_isys_unregister_video_device: + isys_unregister_video_devices(isys); + +out_v4l2_device_unregister: + v4l2_device_unregister(&isys->v4l2_dev); + +out_media_device_unregister: + media_device_unregister(&isys->media_dev); + media_device_cleanup(&isys->media_dev); + + dev_err(dev, "failed to register isys devices\n"); + + return 
ret; +} + +static void isys_unregister_devices(struct ipu6_isys *isys) +{ + isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); + media_device_cleanup(&isys->media_dev); +} + +static int isys_runtime_pm_resume(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + struct ipu6_device *isp = adev->isp; + unsigned long flags; + int ret; + + if (!isys) + return 0; + + ret = ipu6_mmu_hw_init(adev->mmu); + if (ret) + return ret; + + cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); + + ret = ipu6_buttress_start_tsc_sync(isp); + if (ret) + return ret; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 1; + spin_unlock_irqrestore(&isys->power_lock, flags); + + isys_setup_hw(isys); + + set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON); + + return 0; +} + +static int isys_runtime_pm_suspend(struct device *dev) +{ + struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); + struct ipu6_isys *isys; + unsigned long flags; + + isys = dev_get_drvdata(dev); + if (!isys) + return 0; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 0; + spin_unlock_irqrestore(&isys->power_lock, flags); + + mutex_lock(&isys->mutex); + isys->need_reset = false; + mutex_unlock(&isys->mutex); + + isys->phy_termcal_val = 0; + cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF); + + ipu6_mmu_hw_cleanup(adev->mmu); + + return 0; +} + +static int isys_suspend(struct device *dev) +{ + struct ipu6_isys *isys = dev_get_drvdata(dev); + + /* If stream is open, refuse to suspend */ + if (isys->stream_opened) + return -EBUSY; + + return 0; +} + +static int isys_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops isys_pm_ops = { + .runtime_suspend = isys_runtime_pm_suspend, + .runtime_resume = 
isys_runtime_pm_resume, + .suspend = isys_suspend, + .resume = isys_resume, +}; + +static void free_fw_msg_bufs(struct ipu6_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *safe; + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) + ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) + ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); +} + +static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) +{ + struct isys_fw_msgs *addr; + dma_addr_t dma_addr; + unsigned long flags; + unsigned int i; + + for (i = 0; i < amount; i++) { + addr = ipu6_dma_alloc(isys->adev, sizeof(*addr), + &dma_addr, GFP_KERNEL, 0); + if (!addr) + break; + addr->dma_addr = dma_addr; + + spin_lock_irqsave(&isys->listlock, flags); + list_add(&addr->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); + } + + if (i == amount) + return 0; + + spin_lock_irqsave(&isys->listlock, flags); + while (!list_empty(&isys->framebuflist)) { + addr = list_first_entry(&isys->framebuflist, + struct isys_fw_msgs, head); + list_del(&addr->head); + spin_unlock_irqrestore(&isys->listlock, flags); + ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), addr, + addr->dma_addr, 0); + spin_lock_irqsave(&isys->listlock, flags); + } + spin_unlock_irqrestore(&isys->listlock, flags); + + return -ENOMEM; +} + +struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream) +{ + struct ipu6_isys *isys = stream->isys; + struct device *dev = &isys->adev->auxdev.dev; + struct isys_fw_msgs *msg; + unsigned long flags; + int ret; + + spin_lock_irqsave(&isys->listlock, flags); + if (list_empty(&isys->framebuflist)) { + spin_unlock_irqrestore(&isys->listlock, flags); + dev_dbg(dev, "Frame list empty\n"); + + ret = alloc_fw_msg_bufs(isys, 5); + if (ret < 0) + return NULL; + + spin_lock_irqsave(&isys->listlock, flags); + if 
(list_empty(&isys->framebuflist)) { + spin_unlock_irqrestore(&isys->listlock, flags); + dev_err(dev, "Frame list empty\n"); + return NULL; + } + } + msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); + list_move(&msg->head, &isys->framebuflist_fw); + spin_unlock_irqrestore(&isys->listlock, flags); + memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); + + return msg; +} + +void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *fwmsg0; + unsigned long flags; + + spin_lock_irqsave(&isys->listlock, flags); + list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) + list_move(&fwmsg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data) +{ + struct isys_fw_msgs *msg; + unsigned long flags; + u64 *ptr = (u64 *)data; + + if (!ptr) + return; + + spin_lock_irqsave(&isys->listlock, flags); + msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); + list_move(&msg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +static int isys_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) +{ + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata; + struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); + struct ipu6_device *isp = adev->isp; + const struct firmware *fw; + struct ipu6_isys *isys; + unsigned int i; + int ret; + + if (!isp->bus_ready_to_probe) + return -EPROBE_DEFER; + + isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL); + if (!isys) + return -ENOMEM; + + adev->auxdrv_data = + (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; + adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver); + isys->adev = adev; + isys->pdata = adev->pdata; + csi2_pdata = &isys->pdata->ipdata->csi2; + + isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports, + sizeof(*isys->csi2), GFP_KERNEL); + if (!isys->csi2) + return -ENOMEM; + + ret = 
ipu6_mmu_hw_init(adev->mmu); + if (ret) + return ret; + + /* initial sensor type */ + isys->sensor_type = isys->pdata->ipdata->sensor_type_start; + + spin_lock_init(&isys->streams_lock); + spin_lock_init(&isys->power_lock); + isys->power = 0; + isys->phy_termcal_val = 0; + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); + INIT_LIST_HEAD(&isys->framebuflist_fw); + + isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN; + isys->icache_prefetch = 0; + + dev_set_drvdata(&auxdev->dev, isys); + + isys_stream_init(isys); + + if (!isp->secure_mode) { + fw = isp->cpd_fw; + ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt); + if (ret) + goto release_firmware; + + ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data); + if (ret) + goto remove_shared_buffer; + } + + cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + ret = alloc_fw_msg_bufs(isys, 20); + if (ret < 0) + goto out_remove_pkg_dir_shared_buffer; + + isys_iwake_watermark_init(isys); + + if (is_ipu6se(adev->isp->hw_ver)) + isys->phy_set_power = ipu6_isys_jsl_phy_set_power; + else if (is_ipu6ep_mtl(adev->isp->hw_ver)) + isys->phy_set_power = ipu6_isys_dwc_phy_set_power; + else + isys->phy_set_power = ipu6_isys_mcd_phy_set_power; + + ret = isys_register_devices(isys); + if (ret) + goto free_fw_msg_bufs; + + ipu6_mmu_hw_cleanup(adev->mmu); + + return 0; + +free_fw_msg_bufs: + free_fw_msg_bufs(isys); +out_remove_pkg_dir_shared_buffer: + cpu_latency_qos_remove_request(&isys->pm_qos); + if (!isp->secure_mode) + ipu6_cpd_free_pkg_dir(adev); +remove_shared_buffer: + if (!isp->secure_mode) + ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); +release_firmware: + if (!isp->secure_mode) + release_firmware(adev->fw); + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) + mutex_destroy(&isys->streams[i].mutex); + + mutex_destroy(&isys->mutex); + mutex_destroy(&isys->stream_mutex); + + ipu6_mmu_hw_cleanup(adev->mmu); + + 
return ret; +} + +static void isys_remove(struct auxiliary_device *auxdev) +{ + struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); + struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); + struct ipu6_device *isp = adev->isp; + unsigned int i; + + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); + isys_notifier_cleanup(isys); + + cpu_latency_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu6_cpd_free_pkg_dir(adev); + ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); + release_firmware(adev->fw); + } + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) + mutex_destroy(&isys->streams[i].mutex); + + isys_iwake_watermark_cleanup(isys); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); +} + +struct fwmsg { + int type; + char *msg; + bool valid_ts; +}; + +static const struct fwmsg fw_msg[] = { + {IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + "STREAM_START_AND_CAPTURE_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + "STREAM_START_AND_CAPTURE_DONE", 1}, + {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, + {IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, + {IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, + {IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, + {-1, "UNKNOWN MESSAGE", 0} +}; + +static u32 resp_type_to_index(int type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fw_msg); i++) + if (fw_msg[i].type == type) + return i; + + return ARRAY_SIZE(fw_msg) - 1; +} + +static int 
isys_isr_one(struct ipu6_bus_device *adev) +{ + struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); + struct ipu6_fw_isys_resp_info_abi *resp; + struct ipu6_isys_stream *stream; + struct ipu6_isys_csi2 *csi2 = NULL; + u32 index; + u64 ts; + + if (!isys->fwcom) + return 1; + + resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); + if (!resp) + return 1; + + ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; + + index = resp_type_to_index(resp->type); + dev_dbg(&adev->auxdev.dev, + "FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n", + resp->type, fw_msg[index].msg, resp->stream_handle, + fw_msg[index].valid_ts ? ts : 0, resp->pin_id); + + if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) + /* Suspension is kind of special case: not enough buffers */ + dev_dbg(&adev->auxdev.dev, + "FW error resp SUSPENSION, details %d\n", + resp->error_info.error_details); + else if (resp->error_info.error) + dev_dbg(&adev->auxdev.dev, + "FW error resp error %d, details %d\n", + resp->error_info.error, resp->error_info.error_details); + + if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) { + dev_err(&adev->auxdev.dev, "bad stream handle %u\n", + resp->stream_handle); + goto leave; + } + + stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle); + if (!stream) { + dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n", + resp->stream_handle); + goto leave; + } + stream->error = resp->error_info.error; + + csi2 = ipu6_isys_subdev_to_csi2(stream->asd); + + switch (resp->type) { + case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + complete(&stream->stream_open_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + complete(&stream->stream_close_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK: + complete(&stream->stream_start_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + complete(&stream->stream_start_completion); + break; + case 
IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: + complete(&stream->stream_stop_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + complete(&stream->stream_stop_completion); + break; + case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY: + /* + * firmware only release the capture msg until software + * get pin_data_ready event + */ + ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id); + if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS && + stream->output_pins[resp->pin_id].pin_ready) + stream->output_pins[resp->pin_id].pin_ready(stream, + resp); + else + dev_warn(&adev->auxdev.dev, + "%d:No data pin ready handler for pin id %d\n", + resp->stream_handle, resp->pin_id); + if (csi2) + ipu6_isys_csi2_error(csi2); + + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + break; + case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + break; + case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF: + + ipu6_isys_csi2_sof_event_by_stream(stream); + stream->seq[stream->seq_index].sequence = + atomic_read(&stream->sequence) - 1; + stream->seq[stream->seq_index].timestamp = ts; + dev_dbg(&adev->auxdev.dev, + "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + stream->seq[stream->seq_index].sequence, ts); + stream->seq_index = (stream->seq_index + 1) + % IPU6_ISYS_MAX_PARALLEL_SOF; + break; + case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF: + ipu6_isys_csi2_eof_event_by_stream(stream); + dev_dbg(&adev->auxdev.dev, + "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + stream->seq[stream->seq_index].sequence, ts); + break; + case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY: + break; + default: + dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n", + resp->stream_handle, resp->type); + break; + } + + ipu6_isys_put_stream(stream); +leave: + ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); + return 0; +} + +static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = 
{ + .isr = isys_isr, + .isr_threaded = NULL, + .wake_isr_thread = false, +}; + +static const struct auxiliary_device_id ipu6_isys_id_table[] = { + { + .name = "intel_ipu6.isys", + .driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data, + }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table); + +static struct auxiliary_driver isys_driver = { + .name = IPU6_ISYS_NAME, + .probe = isys_probe, + .remove = isys_remove, + .id_table = ipu6_isys_id_table, + .driver = { + .pm = &isys_pm_ops, + }, +}; + +module_auxiliary_driver(isys_driver); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Hongju Wang "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel IPU6 input system driver"); +MODULE_IMPORT_NS(INTEL_IPU6); +MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h new file mode 100644 index 0000000..86dfc2e --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_ISYS_H +#define IPU6_ISYS_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-fw-isys.h" +#include "ipu6-isys-csi2.h" +#include "ipu6-isys-video.h" + +struct ipu6_bus_device; + +#define IPU6_ISYS_ENTITY_PREFIX "Intel IPU6" +/* FW support max 16 streams */ +#define IPU6_ISYS_MAX_STREAMS 16 +#define ISYS_UNISPART_IRQS (IPU6_ISYS_UNISPART_IRQ_SW | \ + IPU6_ISYS_UNISPART_IRQ_CSI0 | \ + IPU6_ISYS_UNISPART_IRQ_CSI1) + +#define IPU6_ISYS_2600_MEM_LINE_ALIGN 64 + +/* + * Current message queue configuration. These must be big enough + * so that they never gets full. 
Queues are located in system memory + */ +#define IPU6_ISYS_SIZE_RECV_QUEUE 40 +#define IPU6_ISYS_SIZE_SEND_QUEUE 40 +#define IPU6_ISYS_SIZE_PROXY_RECV_QUEUE 5 +#define IPU6_ISYS_SIZE_PROXY_SEND_QUEUE 5 +#define IPU6_ISYS_NUM_RECV_QUEUE 1 + +#define IPU6_ISYS_MIN_WIDTH 2U +#define IPU6_ISYS_MIN_HEIGHT 2U +#define IPU6_ISYS_MAX_WIDTH 4672U +#define IPU6_ISYS_MAX_HEIGHT 3416U + +/* the threshold granularity is 2KB on IPU6 */ +#define IPU6_SRAM_GRANULARITY_SHIFT 11 +#define IPU6_SRAM_GRANULARITY_SIZE 2048 +/* the threshold granularity is 1KB on IPU6SE */ +#define IPU6SE_SRAM_GRANULARITY_SHIFT 10 +#define IPU6SE_SRAM_GRANULARITY_SIZE 1024 +/* IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6 */ +#define IPU6_MAX_SRAM_SIZE (200 << 10) +/* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE */ +#define IPU6SE_MAX_SRAM_SIZE (96 << 10) + +#define IPU6EP_LTR_VALUE 200 +#define IPU6EP_MIN_MEMOPEN_TH 0x4 +#define IPU6EP_MTL_LTR_VALUE 1023 +#define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc + +struct ltr_did { + union { + u32 value; + struct { + u8 val0; + u8 val1; + u8 val2; + u8 val3; + } bits; + } lut_ltr; + union { + u32 value; + struct { + u8 th0; + u8 th1; + u8 th2; + u8 th3; + } bits; + } lut_fill_time; +}; + +struct isys_iwake_watermark { + bool iwake_enabled; + bool force_iwake_disable; + u32 iwake_threshold; + u64 isys_pixelbuffer_datarate; + struct ltr_did ltrdid; + struct mutex mutex; /* protect whole struct */ + struct ipu6_isys *isys; + struct list_head video_list; +}; + +struct ipu6_isys_csi2_config { + u32 nlanes; + u32 port; +}; + +struct sensor_async_sd { + struct v4l2_async_connection asc; + struct ipu6_isys_csi2_config csi2; +}; + +/* + * struct ipu6_isys + * + * @media_dev: Media device + * @v4l2_dev: V4L2 device + * @adev: ISYS bus device + * @power: Is ISYS powered on or not? + * @isr_bits: Which bits does the ISR handle? 
+ * @power_lock: Serialise access to power (power state in general) + * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers + * @streams_lock: serialise access to streams + * @streams: streams per firmware stream ID + * @fwcom: fw communication layer private pointer + * or optional external library private pointer + * @line_align: line alignment in memory + * @phy_termcal_val: the termination calibration value, only used for DWC PHY + * @need_reset: Isys requires d0i0->i3 transition + * @ref_count: total number of callers fw open + * @mutex: serialise access isys video open/release related operations + * @stream_mutex: serialise stream start and stop, queueing requests + * @pdata: platform data pointer + * @csi2: CSI-2 receivers + */ +struct ipu6_isys { + struct media_device media_dev; + struct v4l2_device v4l2_dev; + struct ipu6_bus_device *adev; + + int power; + spinlock_t power_lock; + u32 isr_csi2_bits; + u32 csi2_rx_ctrl_cached; + spinlock_t streams_lock; + struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS]; + int streams_ref_count[IPU6_ISYS_MAX_STREAMS]; + void *fwcom; + unsigned int line_align; + u32 phy_termcal_val; + bool need_reset; + bool icache_prefetch; + bool csi2_cse_ipc_not_supported; + unsigned int ref_count; + unsigned int stream_opened; + unsigned int sensor_type; + + struct mutex mutex; + struct mutex stream_mutex; + + struct ipu6_isys_pdata *pdata; + + int (*phy_set_power)(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); + + struct ipu6_isys_csi2 *csi2; + + struct pm_qos_request pm_qos; + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; + struct v4l2_async_notifier notifier; + struct isys_iwake_watermark iwake_watermark; +}; + +struct isys_fw_msgs { + union { + u64 dummy; + struct ipu6_fw_isys_frame_buff_set_abi frame; + struct ipu6_fw_isys_stream_cfg_data_abi stream; + } fw_msg; + struct 
list_head head; + dma_addr_t dma_addr; +}; + +struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); +void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data); +void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); + +extern const struct v4l2_ioctl_ops ipu6_isys_ioctl_ops; + +void isys_setup_hw(struct ipu6_isys *isys); +irqreturn_t isys_isr(struct ipu6_bus_device *adev); +void update_watermark_setting(struct ipu6_isys *isys); + +int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); + +int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); + +int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, + struct ipu6_isys_csi2_config *cfg, + const struct ipu6_isys_csi2_timing *timing, + bool on); +#endif /* IPU6_ISYS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c new file mode 100644 index 0000000..57298ac --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c @@ -0,0 +1,852 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu6.h" +#include "ipu6-dma.h" +#include "ipu6-mmu.h" +#include "ipu6-platform-regs.h" + +#define ISP_PAGE_SHIFT 12 +#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) +#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) + +#define ISP_L1PT_SHIFT 22 +#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) + +#define ISP_L2PT_SHIFT 12 +#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) + +#define ISP_L1PT_PTES 1024 +#define ISP_L2PT_PTES 1024 + +#define ISP_PADDR_SHIFT 12 + +#define REG_TLB_INVALIDATE 0x0000 + 
+#define REG_L1_PHYS 0x0004 /* 27-bit pfn */ +#define REG_INFO 0x0008 + +#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) + +static void tlb_invalidate(struct ipu6_mmu *mmu) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&mmu->ready_lock, flags); + if (!mmu->ready) { + spin_unlock_irqrestore(&mmu->ready_lock, flags); + return; + } + + for (i = 0; i < mmu->nr_mmus; i++) { + /* + * To avoid the HW bug induced dead lock in some of the IPU6 + * MMUs on successive invalidate calls, we need to first do a + * read to the page table base before writing the invalidate + * register. MMUs which need to implement this WA, will have + * the insert_read_before_invalidate flags set as true. + * Disregard the return value of the read. + */ + if (mmu->mmu_hw[i].insert_read_before_invalidate) + readl(mmu->mmu_hw[i].base + REG_L1_PHYS); + + writel(0xffffffff, mmu->mmu_hw[i].base + + REG_TLB_INVALIDATE); + /* + * The TLB invalidation is a "single cycle" (IOMMU clock cycles) + * When the actual MMIO write reaches the IPU6 TLB Invalidate + * register, wmb() will force the TLB invalidate out if the CPU + * attempts to update the IOMMU page table (or sooner). 
+ */ + wmb(); + } + spin_unlock_irqrestore(&mmu->ready_lock, flags); +} + +#ifdef DEBUG +static void page_table_dump(struct ipu6_mmu_info *mmu_info) +{ + u32 l1_idx; + + dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n"); + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { + u32 l2_idx; + u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT; + phys_addr_t l2_phys; + + if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) + continue; + + l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx];) + dev_dbg(mmu_info->dev, + "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n", + l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys); + + for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { + u32 *l2_pt = mmu_info->l2_pts[l1_idx]; + u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); + + if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval) + continue; + + dev_dbg(mmu_info->dev, + "\tl2 entry %u; iova 0x%8.8x, phys %pa\n", + l2_idx, iova2, + TBL_PHYS_ADDR(l2_pt[l2_idx])); + } + } + + dev_dbg(mmu_info->dev, "end IOMMU page table dump\n"); +} +#endif /* DEBUG */ + +static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr) +{ + dma_addr_t dma; + + dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mmu_info->dev, dma)) + return 0; + + return dma; +} + +static int get_dummy_page(struct ipu6_mmu_info *mmu_info) +{ + void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + dma_addr_t dma; + + if (!pt) + return -ENOMEM; + + dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt); + + dma = map_single(mmu_info, pt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map dummy page\n"); + goto err_free_page; + } + + mmu_info->dummy_page = pt; + mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; + + return 0; + +err_free_page: + free_page((unsigned long)pt); + return -ENOMEM; +} + +static void free_dummy_page(struct ipu6_mmu_info *mmu_info) +{ + dma_unmap_single(mmu_info->dev, + TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), + 
PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->dummy_page); +} + +static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) +{ + u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + dma_addr_t dma; + unsigned int i; + + if (!pt) + return -ENOMEM; + + dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt); + + dma = map_single(mmu_info, pt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map l2pt page\n"); + goto err_free_page; + } + + for (i = 0; i < ISP_L2PT_PTES; i++) + pt[i] = mmu_info->dummy_page_pteval; + + mmu_info->dummy_l2_pt = pt; + mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; + + return 0; + +err_free_page: + free_page((unsigned long)pt); + return -ENOMEM; +} + +static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) +{ + dma_unmap_single(mmu_info->dev, + TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->dummy_l2_pt); +} + +static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info) +{ + u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + dma_addr_t dma; + unsigned int i; + + if (!pt) + return NULL; + + dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt); + + for (i = 0; i < ISP_L1PT_PTES; i++) + pt[i] = mmu_info->dummy_l2_pteval; + + dma = map_single(mmu_info, pt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map l1pt page\n"); + goto err_free_page; + } + + mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; + dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma); + + return pt; + +err_free_page: + free_page((unsigned long)pt); + return NULL; +} + +static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info) +{ + u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + unsigned int i; + + if (!pt) + return NULL; + + dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt); + + for (i = 0; i < ISP_L1PT_PTES; i++) + pt[i] = mmu_info->dummy_page_pteval; + + return pt; +} + +static int l2_map(struct ipu6_mmu_info 
*mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + u32 l1_idx = iova >> ISP_L1PT_SHIFT; + u32 iova_start = iova; + u32 *l2_pt, *l2_virt; + unsigned int l2_idx; + unsigned long flags; + dma_addr_t dma; + u32 l1_entry; + + dev_dbg(mmu_info->dev, + "mapping l2 page table for l1 index %u (iova %8.8x)\n", + l1_idx, (u32)iova); + + spin_lock_irqsave(&mmu_info->lock, flags); + l1_entry = mmu_info->l1_pt[l1_idx]; + if (l1_entry == mmu_info->dummy_l2_pteval) { + l2_virt = mmu_info->l2_pts[l1_idx]; + if (likely(!l2_virt)) { + l2_virt = alloc_l2_pt(mmu_info); + if (!l2_virt) { + spin_unlock_irqrestore(&mmu_info->lock, flags); + return -ENOMEM; + } + } + + dma = map_single(mmu_info, l2_virt); + if (!dma) { + dev_err(mmu_info->dev, "Failed to map l2pt page\n"); + free_page((unsigned long)l2_virt); + spin_unlock_irqrestore(&mmu_info->lock, flags); + return -EINVAL; + } + + l1_entry = dma >> ISP_PADDR_SHIFT; + + dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n", + l1_idx, l2_virt); + mmu_info->l1_pt[l1_idx] = l1_entry; + mmu_info->l2_pts[l1_idx] = l2_virt; + clflush_cache_range((void *)&mmu_info->l1_pt[l1_idx], + sizeof(mmu_info->l1_pt[l1_idx])); + } + + l2_pt = mmu_info->l2_pts[l1_idx]; + + dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry); + + paddr = ALIGN(paddr, ISP_PAGE_SIZE); + + l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + + dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx, + l2_pt[l2_idx]); + if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) { + spin_unlock_irqrestore(&mmu_info->lock, flags); + return -EINVAL; + } + + l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; + + clflush_cache_range((void *)&l2_pt[l2_idx], sizeof(l2_pt[l2_idx])); + spin_unlock_irqrestore(&mmu_info->lock, flags); + + dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx, + l2_pt[l2_idx]); + + return 0; +} + +static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + 
u32 iova_start = round_down(iova, ISP_PAGE_SIZE); + u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); + + dev_dbg(mmu_info->dev, + "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n", + iova_start, iova_end, size, &paddr); + + return l2_map(mmu_info, iova_start, paddr, size); +} + +static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t dummy, size_t size) +{ + u32 l1_idx = iova >> ISP_L1PT_SHIFT; + u32 iova_start = iova; + unsigned int l2_idx; + size_t unmapped = 0; + unsigned long flags; + u32 *l2_pt; + + dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n", + l1_idx, iova); + + spin_lock_irqsave(&mmu_info->lock, flags); + if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { + spin_unlock_irqrestore(&mmu_info->lock, flags); + dev_err(mmu_info->dev, + "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n", + iova, l1_idx); + return 0; + } + + for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT) + < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) { + phys_addr_t pteval; + + l2_pt = mmu_info->l2_pts[l1_idx]; + pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]); + dev_dbg(mmu_info->dev, + "unmap l2 index %u with pteval 0x%p\n", + l2_idx, &pteval); + l2_pt[l2_idx] = mmu_info->dummy_page_pteval; + + clflush_cache_range((void *)&l2_pt[l2_idx], + sizeof(l2_pt[l2_idx])); + unmapped++; + } + spin_unlock_irqrestore(&mmu_info->lock, flags); + + return unmapped << ISP_PAGE_SHIFT; +} + +static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, + unsigned long iova, size_t size) +{ + return l2_unmap(mmu_info, iova, 0, size); +} + +static int allocate_trash_buffer(struct ipu6_mmu *mmu) +{ + unsigned int n_pages = PHYS_PFN(PAGE_ALIGN(IPU6_MMUV2_TRASH_RANGE)); + struct iova *iova; + unsigned int i; + dma_addr_t dma; + unsigned long iova_addr; + int ret; + + /* Allocate 8MB in iova range */ + iova = alloc_iova(&mmu->dmap->iovad, n_pages, + 
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); + if (!iova) { + dev_err(mmu->dev, "cannot allocate iova range for trash\n"); + return -ENOMEM; + } + + dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) { + dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n"); + ret = -ENOMEM; + goto out_free_iova; + } + + mmu->pci_trash_page = dma; + + /* + * Map the 8MB iova address range to the same physical trash page + * mmu->trash_page which is already reserved at the probe + */ + iova_addr = iova->pfn_lo; + for (i = 0; i < n_pages; i++) { + ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), + mmu->pci_trash_page, PAGE_SIZE); + if (ret) { + dev_err(mmu->dev, + "mapping trash buffer range failed\n"); + goto out_unmap; + } + + iova_addr++; + } + + mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo); + dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n", + mmu->mmid, (unsigned int)mmu->iova_trash_page); + return 0; + +out_unmap: + ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page, + PAGE_SIZE, DMA_BIDIRECTIONAL); +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); + return ret; +} + +int ipu6_mmu_hw_init(struct ipu6_mmu *mmu) +{ + struct ipu6_mmu_info *mmu_info; + unsigned long flags; + unsigned int i; + + mmu_info = mmu->dmap->mmu_info; + + /* Initialise the each MMU HW block */ + for (i = 0; i < mmu->nr_mmus; i++) { + struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; + unsigned int j; + u16 block_addr; + + /* Write page table address per MMU */ + writel((phys_addr_t)mmu_info->l1_pt_dma, + mmu->mmu_hw[i].base + REG_L1_PHYS); + + /* Set info bits per MMU */ + writel(mmu->mmu_hw[i].info_bits, + mmu->mmu_hw[i].base + REG_INFO); + + /* Configure MMU TLB stream configuration for L1 */ + for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams; + block_addr += 
mmu->mmu_hw[i].l1_block_sz[j], j++) { + if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) { + dev_err(mmu->dev, "invalid L1 configuration\n"); + return -EINVAL; + } + + /* Write block start address for each streams */ + writel(block_addr, mmu_hw->base + + mmu_hw->l1_stream_id_reg_offset + 4 * j); + } + + /* Configure MMU TLB stream configuration for L2 */ + for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams; + block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) { + if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) { + dev_err(mmu->dev, "invalid L2 configuration\n"); + return -EINVAL; + } + + writel(block_addr, mmu_hw->base + + mmu_hw->l2_stream_id_reg_offset + 4 * j); + } + } + + if (!mmu->trash_page) { + int ret; + + mmu->trash_page = alloc_page(GFP_KERNEL); + if (!mmu->trash_page) { + dev_err(mmu->dev, "insufficient memory for trash buffer\n"); + return -ENOMEM; + } + + ret = allocate_trash_buffer(mmu); + if (ret) { + __free_page(mmu->trash_page); + mmu->trash_page = NULL; + dev_err(mmu->dev, "trash buffer allocation failed\n"); + return ret; + } + } + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = true; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, INTEL_IPU6); + +static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp) +{ + struct ipu6_mmu_info *mmu_info; + int ret; + + mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL); + if (!mmu_info) + return NULL; + + mmu_info->aperture_start = 0; + mmu_info->aperture_end = + (dma_addr_t)DMA_BIT_MASK(isp->secure_mode ? 
+ IPU6_MMU_ADDR_BITS : + IPU6_MMU_ADDR_BITS_NON_SECURE); + mmu_info->pgsize_bitmap = SZ_4K; + mmu_info->dev = &isp->pdev->dev; + + ret = get_dummy_page(mmu_info); + if (ret) + goto err_free_info; + + ret = alloc_dummy_l2_pt(mmu_info); + if (ret) + goto err_free_dummy_page; + + mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts)); + if (!mmu_info->l2_pts) + goto err_free_dummy_l2_pt; + + /* + * We always map the L1 page table (a single page as well as + * the L2 page tables). + */ + mmu_info->l1_pt = alloc_l1_pt(mmu_info); + if (!mmu_info->l1_pt) + goto err_free_l2_pts; + + spin_lock_init(&mmu_info->lock); + + dev_dbg(mmu_info->dev, "domain initialised\n"); + + return mmu_info; + +err_free_l2_pts: + vfree(mmu_info->l2_pts); +err_free_dummy_l2_pt: + free_dummy_l2_pt(mmu_info); +err_free_dummy_page: + free_dummy_page(mmu_info); +err_free_info: + kfree(mmu_info); + + return NULL; +} + +void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu) +{ + unsigned long flags; + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = false; + spin_unlock_irqrestore(&mmu->ready_lock, flags); +} +EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, INTEL_IPU6); + +static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp) +{ + struct ipu6_dma_mapping *dmap; + + dmap = kzalloc(sizeof(*dmap), GFP_KERNEL); + if (!dmap) + return NULL; + + dmap->mmu_info = ipu6_mmu_alloc(isp); + if (!dmap->mmu_info) { + kfree(dmap); + return NULL; + } + + init_iova_domain(&dmap->iovad, SZ_4K, 1); + dmap->mmu_info->dmap = dmap; + + dev_dbg(&isp->pdev->dev, "alloc mapping\n"); + + iova_cache_get(); + + return dmap; +} + +phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, + dma_addr_t iova) +{ + phys_addr_t phy_addr; + unsigned long flags; + u32 *l2_pt; + + spin_lock_irqsave(&mmu_info->lock, flags); + l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT]; + phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]; + phy_addr <<= ISP_PAGE_SHIFT; + 
spin_unlock_irqrestore(&mmu_info->lock, flags); + + return phy_addr; +} + +static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap, + unsigned long addr_merge, size_t size) +{ + unsigned int pgsize_idx; + size_t pgsize; + + /* Max page size that still fits into 'size' */ + pgsize_idx = __fls(size); + + if (likely(addr_merge)) { + /* Max page size allowed by address */ + unsigned int align_pgsize_idx = __ffs(addr_merge); + + pgsize_idx = min(pgsize_idx, align_pgsize_idx); + } + + pgsize = (1UL << (pgsize_idx + 1)) - 1; + pgsize &= pgsize_bitmap; + + WARN_ON(!pgsize); + + /* pick the biggest page */ + pgsize_idx = __fls(pgsize); + pgsize = 1UL << pgsize_idx; + + return pgsize; +} + +size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, + size_t size) +{ + size_t unmapped_page, unmapped = 0; + unsigned int min_pagesz; + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); + + /* + * The virtual address and the size of the mapping must be + * aligned (at least) to the size of the smallest page supported + * by the hardware + */ + if (!IS_ALIGNED(iova | size, min_pagesz)) { + dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", + iova, size, min_pagesz); + return -EINVAL; + } + + /* + * Keep iterating until we either unmap 'size' bytes (or more) + * or we hit an area that isn't mapped. 
+ */ + while (unmapped < size) { + size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap, + iova, size - unmapped); + + unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize); + if (!unmapped_page) + break; + + dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n", + iova, unmapped_page); + + iova += unmapped_page; + unmapped += unmapped_page; + } + + return unmapped; +} + +int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + unsigned long orig_iova = iova; + unsigned int min_pagesz; + size_t orig_size = size; + int ret = 0; + + if (mmu_info->pgsize_bitmap == 0UL) + return -ENODEV; + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); + + /* + * both the virtual address and the physical one, as well as + * the size of the mapping, must be aligned (at least) to the + * size of the smallest page supported by the hardware + */ + if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { + dev_err(mmu_info->dev, + "unaligned: iova %lx pa %pa size %zx min_pagesz %x\n", + iova, &paddr, size, min_pagesz); + return -EINVAL; + } + + dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n", + iova, &paddr, size); + + while (size) { + size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap, + iova | paddr, size); + + dev_dbg(mmu_info->dev, + "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", + iova, &paddr, pgsize); + + ret = __ipu6_mmu_map(mmu_info, iova, paddr, pgsize); + if (ret) + break; + + iova += pgsize; + paddr += pgsize; + size -= pgsize; + } + + /* unroll mapping in case something went wrong */ + if (ret) + ipu6_mmu_unmap(mmu_info, orig_iova, orig_size - size); + + return ret; +} + +static void ipu6_mmu_destroy(struct ipu6_mmu *mmu) +{ + struct ipu6_dma_mapping *dmap = mmu->dmap; + struct ipu6_mmu_info *mmu_info = dmap->mmu_info; + struct iova *iova; + u32 l1_idx; + + if (mmu->iova_trash_page) { + iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page)); + 
if (iova) { + /* unmap and free the trash buffer iova */ + ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo), + PFN_PHYS(iova_size(iova))); + __free_iova(&dmap->iovad, iova); + } else { + dev_err(mmu->dev, "trash buffer iova not found.\n"); + } + + mmu->iova_trash_page = 0; + dma_unmap_page(mmu_info->dev, mmu->pci_trash_page, + PAGE_SIZE, DMA_BIDIRECTIONAL); + mmu->pci_trash_page = 0; + __free_page(mmu->trash_page); + } + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { + if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) { + dma_unmap_single(mmu_info->dev, + TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]), + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->l2_pts[l1_idx]); + } + } + + vfree(mmu_info->l2_pts); + free_dummy_page(mmu_info); + dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma), + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)mmu_info->dummy_l2_pt); + free_page((unsigned long)mmu_info->l1_pt); + kfree(mmu_info); +} + +struct ipu6_mmu *ipu6_mmu_init(struct device *dev, + void __iomem *base, int mmid, + const struct ipu6_hw_variants *hw) +{ + struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev)); + struct ipu6_mmu_pdata *pdata; + struct ipu6_mmu *mmu; + unsigned int i; + + if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES) + return ERR_PTR(-EINVAL); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < hw->nr_mmus; i++) { + struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; + const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i]; + + if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS || + src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS) + return ERR_PTR(-EINVAL); + + *pdata_mmu = *src_mmu; + pdata_mmu->base = base + src_mmu->offset; + } + + mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return ERR_PTR(-ENOMEM); + + mmu->mmid = mmid; + mmu->mmu_hw = pdata->mmu_hw; + mmu->nr_mmus = hw->nr_mmus; + mmu->tlb_invalidate = 
tlb_invalidate; + mmu->ready = false; + INIT_LIST_HEAD(&mmu->vma_list); + spin_lock_init(&mmu->ready_lock); + + mmu->dmap = alloc_dma_mapping(isp); + if (!mmu->dmap) { + dev_err(dev, "can't alloc dma mapping\n"); + return ERR_PTR(-ENOMEM); + } + + return mmu; +} + +void ipu6_mmu_cleanup(struct ipu6_mmu *mmu) +{ + struct ipu6_dma_mapping *dmap = mmu->dmap; + + ipu6_mmu_destroy(mmu); + mmu->dmap = NULL; + iova_cache_put(); + put_iova_domain(&dmap->iovad); + kfree(dmap); +} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h new file mode 100644 index 0000000..21cdb0f --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_MMU_H +#define IPU6_MMU_H + +#define ISYS_MMID 1 +#define PSYS_MMID 0 + +#include +#include +#include + +struct device; +struct page; +struct ipu6_hw_variants; + +struct ipu6_mmu_info { + struct device *dev; + + u32 *l1_pt; + u32 l1_pt_dma; + u32 **l2_pts; + + u32 *dummy_l2_pt; + u32 dummy_l2_pteval; + void *dummy_page; + u32 dummy_page_pteval; + + dma_addr_t aperture_start; + dma_addr_t aperture_end; + unsigned long pgsize_bitmap; + + spinlock_t lock; /* Serialize access to users */ + struct ipu6_dma_mapping *dmap; +}; + +struct ipu6_mmu { + struct list_head node; + + struct ipu6_mmu_hw *mmu_hw; + unsigned int nr_mmus; + unsigned int mmid; + + phys_addr_t pgtbl; + struct device *dev; + + struct ipu6_dma_mapping *dmap; + struct list_head vma_list; + + struct page *trash_page; + dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ + dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */ + + bool ready; + spinlock_t ready_lock; /* Serialize access to bool ready */ + + void (*tlb_invalidate)(struct ipu6_mmu *mmu); +}; + +struct ipu6_mmu *ipu6_mmu_init(struct device *dev, + void __iomem *base, int mmid, + const struct 
ipu6_hw_variants *hw); +void ipu6_mmu_cleanup(struct ipu6_mmu *mmu); +int ipu6_mmu_hw_init(struct ipu6_mmu *mmu); +void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu); +int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size); +size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, + size_t size); +phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, + dma_addr_t iova); +#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h new file mode 100644 index 0000000..20f2701 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023--2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H +#define IPU6_PLATFORM_BUTTRESS_REGS_H + +#include + +/* IS_WORKPOINT_REQ */ +#define IPU6_BUTTRESS_REG_IS_FREQ_CTL 0x34 +/* PS_WORKPOINT_REQ */ +#define IPU6_BUTTRESS_REG_PS_FREQ_CTL 0x38 + +/* should be tuned for real silicon */ +#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO 0x08 +#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a +#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO 0x0d + +#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10 +#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708 + +#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 +#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK GENMASK(4, 3) + +#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 +#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK GENMASK(7, 6) + +#define IPU6_BUTTRESS_PWR_STATE_DN_DONE 0x0 +#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 +#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 +#define IPU6_BUTTRESS_PWR_STATE_UP_DONE 0x3 + +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0 0x270 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1 0x274 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2 0x278 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c +#define 
IPU6_BUTTRESS_REG_FPGA_SUPPORT_4 0x280 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5 0x284 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6 0x288 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c + +#define BUTTRESS_REG_WDT 0x8 +#define BUTTRESS_REG_BTRS_CTRL 0xc +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) +#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8) + +#define BUTTRESS_REG_FW_RESET_CTL 0x30 +#define BUTTRESS_FW_RESET_CTL_START BIT(0) +#define BUTTRESS_FW_RESET_CTL_DONE BIT(1) + +#define BUTTRESS_REG_IS_FREQ_CTL 0x34 +#define BUTTRESS_REG_PS_FREQ_CTL 0x38 + +#define BUTTRESS_FREQ_CTL_START BIT(31) +#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL GENMASK(19, 16) +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK GENMASK(15, 8) +#define BUTTRESS_FREQ_CTL_RATIO_MASK GENMASK(7, 0) + +#define BUTTRESS_REG_PWR_STATE 0x5c + +#define BUTTRESS_PWR_STATE_RESET 0x0 +#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 +#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 +#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 + +#define BUTTRESS_PWR_STATE_HH_STATUS_MASK GENMASK(12, 11) + +enum { + BUTTRESS_PWR_STATE_HH_STATE_IDLE, + BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, + BUTTRESS_PWR_STATE_HH_STATE_DONE, + BUTTRESS_PWR_STATE_HH_STATE_ERR, +}; + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK GENMASK(23, 19) + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa +#define 
BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK GENMASK(28, 24) + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 + +#define BUTTRESS_REG_SECURITY_CTL 0x300 +#define BUTTRESS_REG_SKU 0x314 +#define BUTTRESS_REG_SECURITY_TOUCH 0x318 +#define BUTTRESS_REG_CAMERA_MASK 0x84 + +#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) +#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) + +#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) +#define 
BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) +#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) + +#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 +#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C +#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80 + +#define BUTTRESS_REG_ISR_STATUS 0x90 +#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 +#define BUTTRESS_REG_ISR_ENABLE 0x98 +#define BUTTRESS_REG_ISR_CLEAR 0x9C + +#define BUTTRESS_ISR_IS_IRQ BIT(0) +#define BUTTRESS_ISR_PS_IRQ BIT(1) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) +#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) +#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) +#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) +#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) +#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) +#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) +#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) +#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) +#define BUTTRESS_ISR_HW_ASSERTION BIT(12) +#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13) +#define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14) +#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15) +#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16) +#define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17) +#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18) +#define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19) +#define BUTTRESS_ISR_UFI_ERROR BIT(20) + +#define BUTTRESS_REG_IU2CSEDB0 0x100 + +#define BUTTRESS_IU2CSEDB0_BUSY BIT(31) +#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 + +#define BUTTRESS_REG_IU2CSEDATA0 0x104 + +#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 +#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 + +#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) +#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) +#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) +#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) + +#define 
BUTTRESS_REG_IU2CSECSR 0x108 + +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) +#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) +#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) + +#define BUTTRESS_REG_CSE2IUDB0 0x304 +#define BUTTRESS_REG_CSE2IUCSR 0x30C +#define BUTTRESS_REG_CSE2IUDATA0 0x308 + +/* 0x20 == NACK, 0xf == unknown command */ +#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 +#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0) + +#define BUTTRESS_REG_ISH2IUCSR 0x50 +#define BUTTRESS_REG_ISH2IUDB0 0x54 +#define BUTTRESS_REG_ISH2IUDATA0 0x58 + +#define BUTTRESS_REG_IU2ISHDB0 0x10C +#define BUTTRESS_REG_IU2ISHDATA0 0x110 +#define BUTTRESS_REG_IU2ISHDATA1 0x114 +#define BUTTRESS_REG_IU2ISHCSR 0x118 + +#define BUTTRESS_REG_FABRIC_CMD 0x88 + +#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) +#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) + +#define BUTTRESS_REG_TSW_CTL 0x120 +#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) + +#define BUTTRESS_REG_TSC_LO 0x164 +#define BUTTRESS_REG_TSC_HI 0x168 + +#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ + BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ) + +#define BUTTRESS_EVENT (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ + BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | \ + BUTTRESS_ISR_SAI_VIOLATION) +#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h new file mode 100644 index 0000000..cc58377 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h @@ -0,0 +1,172 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023--2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_ISYS_CSI2_REG_H +#define IPU6_PLATFORM_ISYS_CSI2_REG_H + +#include + +#define CSI_REG_BASE 0x220000 +#define CSI_REG_PORT_BASE(id) (CSI_REG_BASE + (id) * 0x1000) + +/* CSI Port Genral Purpose Registers */ +#define CSI_REG_PORT_GPREG_SRST 0x0 +#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4 +#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8 + +/* + * Port IRQs mapping events: + * IRQ0 - CSI_FE event + * IRQ1 - CSI_SYNC + * IRQ2 - S2M_SIDS0TO7 + * IRQ3 - S2M_SIDS8TO15 + */ +#define CSI_PORT_REG_BASE_IRQ_CSI 0x80 +#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0 +#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0 +#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0 + +#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0 +#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4 +#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8 +#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc +#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10 +#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14 + +#define IPU6SE_CSI_RX_ERROR_IRQ_MASK GENMASK(18, 0) +#define IPU6_CSI_RX_ERROR_IRQ_MASK GENMASK(19, 0) + +#define CSI_RX_NUM_ERRORS_IN_IRQ 20 +#define CSI_RX_NUM_IRQ 32 + +#define IPU_CSI_RX_IRQ_FS_VC(chn) (1 << ((chn) * 2)) +#define IPU_CSI_RX_IRQ_FE_VC(chn) (2 << ((chn) * 2)) + +/* PPI2CSI */ +#define CSI_REG_PPI2CSI_ENABLE 0x200 +#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204 +#define PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK GENMASK(4, 3) +#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208 + +enum CSI_PPI2CSI_CTRL { + CSI_PPI2CSI_DISABLE = 0, + CSI_PPI2CSI_ENABLE = 1, +}; + +/* CSI_FE */ +#define CSI_REG_CSI_FE_ENABLE 0x280 +#define CSI_REG_CSI_FE_MODE 0x284 +#define CSI_REG_CSI_FE_MUX_CTRL 0x288 +#define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290 + +enum CSI_FE_ENABLE_TYPE { + CSI_FE_DISABLE = 0, + CSI_FE_ENABLE = 1, +}; + +enum CSI_FE_MODE_TYPE { + CSI_FE_DPHY_MODE = 0, + CSI_FE_CPHY_MODE = 1, +}; + +enum 
CSI_FE_INPUT_SELECTOR { + CSI_SENSOR_INPUT = 0, + CSI_MIPIGEN_INPUT = 1, +}; + +enum CSI_FE_SYNC_CNTR_SEL_TYPE { + CSI_CNTR_SENSOR_LINE_ID = BIT(0), + CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID, + CSI_CNTR_SENSOR_FRAME_ID = BIT(1), + CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID, +}; + +/* CSI HUB General Purpose Registers */ +#define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000) +#define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004) + +#define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4) +#define CSI_REG_HUB_FW_ACCESS_PORT_OFS 0x17000 +#define CSI_REG_HUB_FW_ACCESS_PORT_V6OFS 0x16000 +#define CSI_REG_HUB_FW_ACCESS_PORT(ofs, id) \ + (CSI_REG_BASE + (ofs) + (id) * 4) + +enum CSI_PORT_CLK_GATING_SWITCH { + CSI_PORT_CLK_GATING_OFF = 0, + CSI_PORT_CLK_GATING_ON = 1, +}; + +#define CSI_REG_BASE_HUB_IRQ 0x18200 + +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210 +#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214 + +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230 +#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234 + +/* MTL IPU6V6 irq ctrl0 & ctrl1 */ +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238700 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238704 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238708 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23870c +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238710 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238714 + +#define 
IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238720 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238724 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238728 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23872c +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238730 +#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238734 + +/* + * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits) + * [0] CSI_PORT.IRQ_CTRL0_csi + * [1] CSI_PORT.IRQ_CTRL1_csi_sync + * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7 + * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15 + */ +#define IPU6_ISYS_UNISPART_IRQ_CSI2(port) \ + (0x3 << ((port) * IPU6_CSI_IRQ_NUM_PER_PIPE)) + +/* + * ipu6se support 2 front ends, 2 port per front end, 4 ports 0..3 + * sip0 - 0, 1 + * sip1 - 2, 3 + * 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes + * all offset are base from isys base address + */ + +#define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4) +#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4) + +#define CSI2_HUB_GPREG_DPHY_TIMER_INCR 0x238040 +#define CSI2_HUB_GPREG_HPLL_FREQ 0x238044 +#define CSI2_HUB_GPREG_IS_CLK_RATIO 0x238048 +#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE 0x23804c +#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE 0x238058 +#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL 0x23805c +#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL 0x238088 +#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL 0x2380a4 +#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL 0x2380d0 + +#define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48) +#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48) +#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48) + +/* offset from port base */ +#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL 0x0 +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE 0x4 +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE 0x8 +#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8) +#define 
CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8) + +#endif /* IPU6_ISYS_CSI2_REG_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h new file mode 100644 index 0000000..b883385 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2018 - 2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_REGS_H +#define IPU6_PLATFORM_REGS_H + +#include + +/* + * IPU6 uses uniform address within IPU6, therefore all subsystem registers + * locates in one single space starts from 0 but in different sctions with + * different addresses, the subsystem offsets are defined to 0 as the + * register definition will have the address offset to 0. + */ +#define IPU6_UNIFIED_OFFSET 0 + +#define IPU6_ISYS_IOMMU0_OFFSET 0x2e0000 +#define IPU6_ISYS_IOMMU1_OFFSET 0x2e0500 +#define IPU6_ISYS_IOMMUI_OFFSET 0x2e0a00 + +#define IPU6_PSYS_IOMMU0_OFFSET 0x1b0000 +#define IPU6_PSYS_IOMMU1_OFFSET 0x1b0700 +#define IPU6_PSYS_IOMMU1R_OFFSET 0x1b0e00 +#define IPU6_PSYS_IOMMUI_OFFSET 0x1b1500 + +/* the offset from IOMMU base register */ +#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET 0x4c +#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c + +#define IPU6_MMU_INFO_OFFSET 0x8 + +#define IPU6_ISYS_SPC_OFFSET 0x210000 + +#define IPU6SE_PSYS_SPC_OFFSET 0x110000 +#define IPU6_PSYS_SPC_OFFSET 0x118000 + +#define IPU6_ISYS_DMEM_OFFSET 0x200000 +#define IPU6_PSYS_DMEM_OFFSET 0x100000 + +#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000 +#define IPU6_REG_ISYS_UNISPART_IRQ_MASK 0x27c004 +#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008 +#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c +#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010 +#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014 +#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414 +#define 
IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418 +#define IPU6_ISYS_UNISPART_IRQ_CSI0 BIT(2) +#define IPU6_ISYS_UNISPART_IRQ_CSI1 BIT(3) +#define IPU6_ISYS_UNISPART_IRQ_SW BIT(22) + +#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c +#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214 + +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114 + +/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ +#define IPU6_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4)) + +#define IPU6_CSI_IRQ_NUM_PER_PIPE 4 +#define IPU6SE_ISYS_CSI_PORT_NUM 4 +#define IPU6_ISYS_CSI_PORT_NUM 8 + +#define IPU6_ISYS_CSI_PORT_IRQ(irq_num) BIT(irq_num) + +/* PKG DIR OFFSET in IMR in secure mode */ +#define IPU6_PKG_DIR_IMR_OFFSET 0x40 + +#define IPU6_ISYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU6_ISYS_SPC_STATUS_START BIT(1) +#define IPU6_ISYS_SPC_STATUS_RUN BIT(3) +#define IPU6_ISYS_SPC_STATUS_READY BIT(5) +#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU6_PSYS_REG_SPC_STATUS_CTRL 0x0 +#define IPU6_PSYS_REG_SPC_START_PC 0x4 +#define IPU6_PSYS_REG_SPC_ICACHE_BASE 0x10 +#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 + +#define IPU6_PSYS_SPC_STATUS_START BIT(1) +#define IPU6_PSYS_SPC_STATUS_RUN BIT(3) +#define IPU6_PSYS_SPC_STATUS_READY BIT(5) +#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU6_PSYS_REG_SPP0_STATUS_CTRL 0x20000 + +#define IPU6_INFO_ENABLE_SNOOP 
BIT(0) +#define IPU6_INFO_DEC_FORCE_FLUSH BIT(1) +#define IPU6_INFO_DEC_PASS_THROUGH BIT(2) +#define IPU6_INFO_ZLW BIT(3) +#define IPU6_INFO_REQUEST_DESTINATION_IOSF BIT(9) +#define IPU6_INFO_IMR_BASE BIT(10) +#define IPU6_INFO_IMR_DESTINED BIT(11) + +#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF + +/* + * s2m_pixel_soc_pixel_remapping is dedicated for the enabling of the + * pixel s2m remp ability.Remap here means that s2m rearange the order + * of the pixels in each 4 pixels group. + * For examle, mirroring remping means that if input's 4 first pixels + * are 1 2 3 4 then in output we should see 4 3 2 1 in this 4 first pixels. + * 0xE4 is from s2m MAS document. It means no remapping. + */ +#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 +/* + * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping. + * This remapping is exactly like the stream2mmio remapping. + */ +#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 + +#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 +#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 +#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) +#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) +#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) + +enum ipu6_device_ab_group1_target_id { + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, + 
IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, + IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, +}; + +enum nci_ab_access_mode { + NCI_AB_ACCESS_MODE_RW, /* read & write */ + NCI_AB_ACCESS_MODE_RO, /* read only */ + NCI_AB_ACCESS_MODE_WO, /* write only */ + NCI_AB_ACCESS_MODE_NA, /* No access at all */ +}; + +/* IRQ-related registers in PSYS */ +#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 +#define IPU6_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 +#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 +#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c +#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 +#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 +/* There are 8 FW interrupts, n = 0..7 */ +#define IPU6_PSYS_GPDEV_FWIRQ0 5 +#define IPU6_PSYS_GPDEV_FWIRQ1 6 +#define IPU6_PSYS_GPDEV_FWIRQ2 7 +#define IPU6_PSYS_GPDEV_FWIRQ3 8 +#define IPU6_PSYS_GPDEV_FWIRQ4 9 +#define IPU6_PSYS_GPDEV_FWIRQ5 10 +#define IPU6_PSYS_GPDEV_FWIRQ6 11 +#define IPU6_PSYS_GPDEV_FWIRQ7 12 +#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) +#define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) + +#endif /* IPU6_PLATFORM_REGS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c new file mode 100644 index 0000000..91718ea --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c @@ -0,0 +1,850 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013--2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu6.h" +#include "ipu6-bus.h" +#include "ipu6-buttress.h" +#include "ipu6-cpd.h" +#include "ipu6-isys.h" +#include "ipu6-mmu.h" +#include "ipu6-platform-buttress-regs.h" +#include 
"ipu6-platform-isys-csi2-reg.h" +#include "ipu6-platform-regs.h" + +#define IPU6_PCI_BAR 0 + +struct ipu6_cell_program { + u32 magic_number; + + u32 blob_offset; + u32 blob_size; + + u32 start[3]; + + u32 icache_source; + u32 icache_target; + u32 icache_size; + + u32 pmem_source; + u32 pmem_target; + u32 pmem_size; + + u32 data_source; + u32 data_target; + u32 data_size; + + u32 bss_target; + u32 bss_size; + + u32 cell_id; + u32 regs_addr; + + u32 cell_pmem_data_bus_address; + u32 cell_dmem_data_bus_address; + u32 cell_pmem_control_bus_address; + u32 cell_dmem_control_bus_address; + + u32 next; + u32 dummy[2]; +}; + +static struct ipu6_isys_internal_pdata isys_ipdata = { + .hw_variant = { + .offset = IPU6_UNIFIED_OFFSET, + .nr_mmus = 3, + .mmu_hw = { + { + .offset = IPU6_ISYS_IOMMU0_OFFSET, + .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF, + .nr_l1streams = 16, + .l1_block_sz = { + 3, 8, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 1, 1 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_ISYS_IOMMU1_OFFSET, + .info_bits = 0, + .nr_l1streams = 16, + .l1_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 1, 1, 4 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_ISYS_IOMMUI_OFFSET, + .info_bits = 0, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = false, + }, + }, + .cdc_fifos = 3, + .cdc_fifo_threshold = {6, 8, 2}, + .dmem_offset = IPU6_ISYS_DMEM_OFFSET, + .spc_offset = IPU6_ISYS_SPC_OFFSET, + }, + .isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN, +}; + 
+static struct ipu6_psys_internal_pdata psys_ipdata = { + .hw_variant = { + .offset = IPU6_UNIFIED_OFFSET, + .nr_mmus = 4, + .mmu_hw = { + { + .offset = IPU6_PSYS_IOMMU0_OFFSET, + .info_bits = + IPU6_INFO_REQUEST_DESTINATION_IOSF, + .nr_l1streams = 16, + .l1_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_PSYS_IOMMU1_OFFSET, + .info_bits = 0, + .nr_l1streams = 32, + .l1_block_sz = { + 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 10, + 5, 4, 14, 6, 4, 14, 6, 4, 8, + 4, 2, 1, 1, 1, 1, 14 + }, + .nr_l2streams = 32, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_PSYS_IOMMU1R_OFFSET, + .info_bits = 0, + .nr_l1streams = 16, + .l1_block_sz = { + 1, 4, 4, 4, 4, 16, 8, 4, 32, + 16, 16, 2, 2, 2, 1, 12 + }, + .nr_l2streams = 16, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .l1_stream_id_reg_offset = + IPU6_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU6_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU6_PSYS_IOMMUI_OFFSET, + .info_bits = 0, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = false, + }, + }, + .dmem_offset = IPU6_PSYS_DMEM_OFFSET, + }, +}; + +static const struct ipu6_buttress_ctrl isys_buttress_ctrl = { + .ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, + .freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL, + 
.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, + .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK, + .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, + .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, +}; + +static const struct ipu6_buttress_ctrl psys_buttress_ctrl = { + .ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, + .freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL, + .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, + .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK, + .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, + .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, +}; + +static void +ipu6_pkg_dir_configure_spc(struct ipu6_device *isp, + const struct ipu6_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, + u64 *pkg_dir, dma_addr_t pkg_dir_vied_address) +{ + struct ipu6_cell_program *prog; + void __iomem *spc_base; + u32 server_fw_addr; + dma_addr_t dma_addr; + u32 pg_offset; + + server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2)); + if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX) + dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl); + else + dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl); + + pg_offset = server_fw_addr - dma_addr; + prog = (struct ipu6_cell_program *)((u64)isp->cpd_fw->data + pg_offset); + spc_base = base + prog->regs_addr; + if (spc_base != (base + hw_variant->spc_offset)) + dev_warn(&isp->pdev->dev, + "SPC reg addr %p not matching value from CPD %p\n", + base + hw_variant->spc_offset, spc_base); + writel(server_fw_addr + prog->blob_offset + + prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE); + writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, + spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC); + writel(pkg_dir_vied_address, base + hw_variant->dmem_offset); +} + +void ipu6_configure_spc(struct ipu6_device *isp, + const struct ipu6_hw_variants *hw_variant, + int pkg_dir_idx, 
void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr) +{ + void __iomem *dmem_base = base + hw_variant->dmem_offset; + void __iomem *spc_regs_base = base + hw_variant->spc_offset; + u32 val; + + val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); + val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); + + if (isp->secure_mode) + writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base); + else + ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base, + pkg_dir, pkg_dir_dma_addr); +} +EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6); + +#define IPU6_ISYS_CSI2_NPORTS 4 +#define IPU6SE_ISYS_CSI2_NPORTS 4 +#define IPU6_TGL_ISYS_CSI2_NPORTS 8 +#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6 + +static void ipu6_internal_pdata_init(struct ipu6_device *isp) +{ + u8 hw_ver = isp->hw_ver; + + isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS; + isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT; + isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE; + isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE; + isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START; + isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END; + isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS; + isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES; + isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX; + isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE; + isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS; + isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK; + isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; + isys_ipdata.csi2.ctrl0_irq_clear = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; + isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; + isys_ipdata.csi2.ctrl0_irq_enable = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; + isys_ipdata.csi2.ctrl0_irq_status = + IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; + isys_ipdata.csi2.ctrl0_irq_lnp = + 
IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; + isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver); + psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET; + isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS; + + if (is_ipu6ep(hw_ver)) { + isys_ipdata.ltr = IPU6EP_LTR_VALUE; + isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH; + } + + if (is_ipu6_tgl(hw_ver)) + isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS; + + if (is_ipu6ep_mtl(hw_ver)) { + isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS; + + isys_ipdata.csi2.ctrl0_irq_edge = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; + isys_ipdata.csi2.ctrl0_irq_clear = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; + isys_ipdata.csi2.ctrl0_irq_mask = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; + isys_ipdata.csi2.ctrl0_irq_enable = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; + isys_ipdata.csi2.ctrl0_irq_lnp = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; + isys_ipdata.csi2.ctrl0_irq_status = + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; + isys_ipdata.csi2.fw_access_port_ofs = + CSI_REG_HUB_FW_ACCESS_PORT_V6OFS; + isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE; + isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH; + } + + if (is_ipu6se(hw_ver)) { + isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS; + isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK; + isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS; + isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT; + isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE; + isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE; + isys_ipdata.sensor_type_start = + IPU6SE_FW_ISYS_SENSOR_TYPE_START; + isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END; + isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS; + isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES; + isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX; + isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE; + 
psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET; + } +} + +static struct ipu6_bus_device * +ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_isys_internal_pdata *ipdata) +{ + struct device *dev = &pdev->dev; + struct ipu6_bus_device *isys_adev; + struct ipu6_isys_pdata *pdata; + int ret; + + ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); + if (ret) { + dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); + return ERR_PTR(ret); + } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_ISYS_NAME); + if (IS_ERR(isys_adev)) { + kfree(pdata); + return dev_err_cast_probe(dev, isys_adev, + "ipu6_bus_initialize_device isys failed\n"); + } + + isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(isys_adev->mmu)) { + put_device(&isys_adev->auxdev.dev); + kfree(pdata); + return dev_err_cast_probe(dev, isys_adev->mmu, + "ipu6_mmu_init(isys_adev->mmu) failed\n"); + } + + isys_adev->mmu->dev = &isys_adev->auxdev.dev; + + ret = ipu6_bus_add_device(isys_adev); + if (ret) { + kfree(pdata); + return ERR_PTR(ret); + } + + return isys_adev; +} + +static struct ipu6_bus_device * +ipu6_psys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_psys_internal_pdata *ipdata) +{ + struct ipu6_bus_device *psys_adev; + struct ipu6_psys_pdata *pdata; + int ret; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + + psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_PSYS_NAME); + if (IS_ERR(psys_adev)) { + kfree(pdata); + return dev_err_cast_probe(&pdev->dev, psys_adev, + "ipu6_bus_initialize_device psys failed\n"); + } 
+ + psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(psys_adev->mmu)) { + put_device(&psys_adev->auxdev.dev); + kfree(pdata); + return dev_err_cast_probe(&pdev->dev, psys_adev->mmu, + "ipu6_mmu_init(psys_adev->mmu) failed\n"); + } + + psys_adev->mmu->dev = &psys_adev->auxdev.dev; + + ret = ipu6_bus_add_device(psys_adev); + if (ret) { + kfree(pdata); + return ERR_PTR(ret); + } + + return psys_adev; +} + +static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) +{ + int ret; + + /* disable IPU6 PCI ATS on mtl ES2 */ + if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && + pci_ats_supported(dev)) + pci_disable_ats(dev); + + /* No PCI msi capability for IPU6EP */ + if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { + /* likely do nothing as msi not enabled by default */ + pci_disable_msi(dev); + return 0; + } + + ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) + return dev_err_probe(&dev->dev, ret, "Request msi failed"); + + return 0; +} + +static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) +{ + u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); + + if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + + if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); +} + +static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; + struct device *dev = &pdev->dev; + void __iomem *isys_base = NULL; + void __iomem *psys_base = NULL; + struct ipu6_device *isp; + phys_addr_t phys; + u32 val, version, sku_id; + int ret; + + isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL); + if (!isp) + return -ENOMEM; + + 
isp->pdev = pdev; + INIT_LIST_HEAD(&isp->devices); + + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "Enable PCI device failed\n"); + + phys = pci_resource_start(pdev, IPU6_PCI_BAR); + dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys); + + ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev)); + if (ret) + return dev_err_probe(dev, ret, "Failed to I/O mem remapping\n"); + + isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR]; + pci_set_drvdata(pdev, isp); + pci_set_master(pdev); + + isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt); + switch (id->device) { + case PCI_DEVICE_ID_INTEL_IPU6: + isp->hw_ver = IPU6_VER_6; + isp->cpd_fw_name = IPU6_FIRMWARE_NAME; + break; + case PCI_DEVICE_ID_INTEL_IPU6SE: + isp->hw_ver = IPU6_VER_6SE; + isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME; + isp->cpd_metadata_cmpnt_size = + sizeof(struct ipu6se_cpd_metadata_cmpnt); + break; + case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP: + case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP: + isp->hw_ver = IPU6_VER_6EP; + isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME; + break; + case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN: + isp->hw_ver = IPU6_VER_6EP; + isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME; + break; + case PCI_DEVICE_ID_INTEL_IPU6EP_MTL: + isp->hw_ver = IPU6_VER_6EP_MTL; + isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME; + break; + default: + return dev_err_probe(dev, -ENODEV, + "Unsupported IPU6 device %x\n", + id->device); + } + + ipu6_internal_pdata_init(isp); + + isys_base = isp->base + isys_ipdata.hw_variant.offset; + psys_base = isp->base + psys_ipdata.hw_variant.offset; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39)); + if (ret) + return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); + + dma_set_max_seg_size(dev, UINT_MAX); + + ret = ipu6_pci_config_setup(pdev, isp->hw_ver); + if (ret) + return ret; + + ret = ipu6_buttress_init(isp); + if (ret) + return ret; + + ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev); + if (ret) { + 
dev_err_probe(&isp->pdev->dev, ret, + "Requesting signed firmware %s failed\n", + isp->cpd_fw_name); + goto buttress_exit; + } + + ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "Failed to validate cpd\n"); + goto out_ipu6_bus_del_devices; + } + + isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, + sizeof(isys_buttress_ctrl), GFP_KERNEL); + if (!isys_ctrl) { + ret = -ENOMEM; + goto out_ipu6_bus_del_devices; + } + + isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, + &isys_ipdata); + if (IS_ERR(isp->isys)) { + ret = PTR_ERR(isp->isys); + goto out_ipu6_bus_del_devices; + } + + psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl, + sizeof(psys_buttress_ctrl), GFP_KERNEL); + if (!psys_ctrl) { + ret = -ENOMEM; + goto out_ipu6_bus_del_devices; + } + + isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl, + psys_base, &psys_ipdata); + if (IS_ERR(isp->psys)) { + ret = PTR_ERR(isp->psys); + goto out_ipu6_bus_del_devices; + } + + ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); + if (ret < 0) + goto out_ipu6_bus_del_devices; + + ret = ipu6_mmu_hw_init(isp->psys->mmu); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "Failed to set MMU hardware\n"); + goto out_ipu6_bus_del_devices; + } + + ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw, + &isp->psys->fw_sgt); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n"); + goto out_ipu6_bus_del_devices; + } + + ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data); + if (ret) { + dev_err_probe(&isp->pdev->dev, ret, + "failed to create pkg dir\n"); + goto out_ipu6_bus_del_devices; + } + + ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr, + ipu6_buttress_isr_threaded, + IRQF_SHARED, IPU6_NAME, isp); + if (ret) { + dev_err_probe(dev, ret, "Requesting irq failed\n"); + goto out_ipu6_bus_del_devices; + } + + ret = ipu6_buttress_authenticate(isp); + if (ret) { + 
dev_err_probe(&isp->pdev->dev, ret, + "FW authentication failed\n"); + goto out_free_irq; + } + + ipu6_mmu_hw_cleanup(isp->psys->mmu); + pm_runtime_put(&isp->psys->auxdev.dev); + + /* Configure the arbitration mechanisms for VC requests */ + ipu6_configure_vc_mechanism(isp); + + val = readl(isp->base + BUTTRESS_REG_SKU); + sku_id = FIELD_GET(GENMASK(6, 4), val); + version = FIELD_GET(GENMASK(3, 0), val); + dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id, + pdev->device, isp->hw_ver); + + pm_runtime_put_noidle(dev); + pm_runtime_allow(dev); + + isp->bus_ready_to_probe = true; + + return 0; + +out_free_irq: + devm_free_irq(dev, pdev->irq, isp); +out_ipu6_bus_del_devices: + if (isp->psys) { + ipu6_cpd_free_pkg_dir(isp->psys); + ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); + } + if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) + ipu6_mmu_cleanup(isp->psys->mmu); + if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) + ipu6_mmu_cleanup(isp->isys->mmu); + ipu6_bus_del_devices(pdev); + release_firmware(isp->cpd_fw); +buttress_exit: + ipu6_buttress_exit(isp); + + return ret; +} + +static void ipu6_pci_remove(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + struct ipu6_mmu *isys_mmu = isp->isys->mmu; + struct ipu6_mmu *psys_mmu = isp->psys->mmu; + + devm_free_irq(&pdev->dev, pdev->irq, isp); + ipu6_cpd_free_pkg_dir(isp->psys); + + ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); + ipu6_buttress_exit(isp); + + ipu6_bus_del_devices(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + release_firmware(isp->cpd_fw); + + ipu6_mmu_cleanup(psys_mmu); + ipu6_mmu_cleanup(isys_mmu); +} + +static void ipu6_pci_reset_prepare(struct pci_dev *pdev) +{ + struct ipu6_device *isp = pci_get_drvdata(pdev); + + pm_runtime_forbid(&isp->pdev->dev); +} + +static void ipu6_pci_reset_done(struct pci_dev *pdev) +{ + struct ipu6_device *isp = 
pci_get_drvdata(pdev); + + ipu6_buttress_restore(isp); + if (isp->secure_mode) + ipu6_buttress_reset_authentication(isp); + + isp->need_ipc_reset = true; + pm_runtime_allow(&isp->pdev->dev); +} + +/* + * PCI base driver code requires driver to provide these to enable + * PCI device level PM state transitions (D0<->D3) + */ +static int ipu6_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + synchronize_irq(pdev->irq); + return 0; +} + +static int ipu6_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu6_device *isp = pci_get_drvdata(pdev); + struct ipu6_buttress *b = &isp->buttress; + int ret; + + /* Configure the arbitration mechanisms for VC requests */ + ipu6_configure_vc_mechanism(isp); + + isp->secure_mode = ipu6_buttress_get_secure_mode(isp); + dev_info(dev, "IPU6 in %s mode\n", + isp->secure_mode ? "secure" : "non-secure"); + + ipu6_buttress_restore(isp); + + ret = ipu6_buttress_ipc_reset(isp, &b->cse); + if (ret) + dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); + + ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); + if (ret < 0) { + dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); + return 0; + } + + ret = ipu6_buttress_authenticate(isp); + if (ret) + dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret); + + pm_runtime_put(&isp->psys->auxdev.dev); + + return 0; +} + +static int ipu6_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu6_device *isp = pci_get_drvdata(pdev); + int ret; + + ipu6_configure_vc_mechanism(isp); + ipu6_buttress_restore(isp); + + if (isp->need_ipc_reset) { + struct ipu6_buttress *b = &isp->buttress; + + isp->need_ipc_reset = false; + ret = ipu6_buttress_ipc_reset(isp, &b->cse); + if (ret) + dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); + } + + return 0; +} + +static const struct dev_pm_ops ipu6_pm_ops = { + SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume) + 
RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL) +}; + +MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl); + +static const struct pci_error_handlers pci_err_handlers = { + .reset_prepare = ipu6_pci_reset_prepare, + .reset_done = ipu6_pci_reset_done, +}; + +static struct pci_driver ipu6_pci_driver = { + .name = IPU6_NAME, + .id_table = ipu6_pci_tbl, + .probe = ipu6_pci_probe, + .remove = ipu6_pci_remove, + .driver = { + .pm = pm_ptr(&ipu6_pm_ops), + }, + .err_handler = &pci_err_handlers, +}; + +module_pci_driver(ipu6_pci_driver); + +MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Qingwu Zhang "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Hongju Wang "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel IPU6 PCI driver"); diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h new file mode 100644 index 0000000..92e3c34 --- /dev/null +++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013 - 2024 Intel Corporation */ + +#ifndef IPU6_H +#define IPU6_H + +#include +#include +#include + +#include "ipu6-buttress.h" + +struct firmware; +struct pci_dev; +struct ipu6_bus_device; + +#define IPU6_NAME "intel-ipu6" +#define IPU6_MEDIA_DEV_MODEL_NAME "ipu6" + +#define IPU6SE_FIRMWARE_NAME "intel/ipu/ipu6se_fw.bin" +#define IPU6EP_FIRMWARE_NAME "intel/ipu/ipu6ep_fw.bin" +#define IPU6_FIRMWARE_NAME "intel/ipu/ipu6_fw.bin" +#define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" +#define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" + +enum ipu6_version { + IPU6_VER_INVALID = 0, + IPU6_VER_6 = 1, + IPU6_VER_6SE = 3, + IPU6_VER_6EP = 5, + IPU6_VER_6EP_MTL = 6, +}; + +/* + * IPU6 - TGL + * IPU6SE - JSL + * IPU6EP - ADL/RPL + * IPU6EP_MTL - MTL + */ +static inline bool is_ipu6se(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6SE; +} + +static inline 
bool is_ipu6ep(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6EP; +} + +static inline bool is_ipu6ep_mtl(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6EP_MTL; +} + +static inline bool is_ipu6_tgl(u8 hw_ver) +{ + return hw_ver == IPU6_VER_6; +} + +/* + * ISYS DMA can overshoot. For higher resolutions over allocation is one line + * but it must be at minimum 1024 bytes. Value could be different in + * different versions / generations thus provide it via platform data. + */ +#define IPU6_ISYS_OVERALLOC_MIN 1024 + +/* Physical pages in GDA is 128, page size is 2K for IPU6, 1K for others */ +#define IPU6_DEVICE_GDA_NR_PAGES 128 + +/* Virtualization factor to calculate the available virtual pages */ +#define IPU6_DEVICE_GDA_VIRT_FACTOR 32 + +struct ipu6_device { + struct pci_dev *pdev; + struct list_head devices; + struct ipu6_bus_device *isys; + struct ipu6_bus_device *psys; + struct ipu6_buttress buttress; + + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u32 cpd_metadata_cmpnt_size; + + void __iomem *base; + bool need_ipc_reset; + bool secure_mode; + u8 hw_ver; + bool bus_ready_to_probe; +}; + +#define IPU6_ISYS_NAME "isys" +#define IPU6_PSYS_NAME "psys" + +#define IPU6_MMU_MAX_DEVICES 4 +#define IPU6_MMU_ADDR_BITS 32 +/* The firmware is accessible within the first 2 GiB only in non-secure mode. */ +#define IPU6_MMU_ADDR_BITS_NON_SECURE 31 + +#define IPU6_MMU_MAX_TLB_L1_STREAMS 32 +#define IPU6_MMU_MAX_TLB_L2_STREAMS 32 +#define IPU6_MAX_LI_BLOCK_ADDR 128 +#define IPU6_MAX_L2_BLOCK_ADDR 64 + +#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX +#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX + +/* + * To maximize the IOSF utlization, IPU6 need to send requests in bursts. + * At the DMA interface with the buttress, there are CDC FIFOs with burst + * collection capability. CDC FIFO burst collectors have a configurable + * threshold and is configured based on the outcome of performance measurements. 
+ * + * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 + * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 + * + * Threshold values are pre-defined and are arrived at after performance + * evaluations on a type of IPU6 + */ +#define IPU6_MAX_VC_IOSF_PORTS 4 + +/* + * IPU6 must configure correct arbitration mechanism related to the IOSF VC + * requests. There are two options per VC0 and VC1 - > 0 means rearbitrate on + * stall and 1 means stall until the request is completed. + */ +#define IPU6_BTRS_ARB_MODE_TYPE_REARB 0 +#define IPU6_BTRS_ARB_MODE_TYPE_STALL 1 + +/* Currently chosen arbitration mechanism for VC0 */ +#define IPU6_BTRS_ARB_STALL_MODE_VC0 \ + IPU6_BTRS_ARB_MODE_TYPE_REARB + +/* Currently chosen arbitration mechanism for VC1 */ +#define IPU6_BTRS_ARB_STALL_MODE_VC1 \ + IPU6_BTRS_ARB_MODE_TYPE_REARB + +/* + * MMU Invalidation HW bug workaround by ZLW mechanism + * + * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in + * wrong translation or replication of the translation. This will cause data + * corruption. So we cannot directly use the MMU V2 invalidation registers + * to invalidate the MMU. Instead, whenever an invalidate is called, we need to + * clear the TLB by evicting all the valid translations by filling it with trash + * buffer (which is guaranteed not to be used by any other processes). ZLW is + * used to fill the L1 and L2 caches with the trash buffer translations. ZLW + * or Zero length write, is pre-fetch mechanism to pre-fetch the pages in + * advance to the L1 and L2 caches without triggering any memory operations. + * + * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream + * One L1 block has 16 entries, hence points to 16 * 4K pages + * L2 -> 16 streams and 32 blocks. 
2 blocks per streams + * One L2 block maps to 1024 L1 entries, hence points to 4MB address range + * 2 blocks per L2 stream means, 1 stream points to 8MB range + * + * As we need to clear the caches and 8MB being the biggest cache size, we need + * to have trash buffer which points to 8MB address range. As these trash + * buffers are not used for any memory transactions, we need only the least + * amount of physical memory. So we reserve 8MB IOVA address range but only + * one page is reserved from physical memory. Each of this 8MB IOVA address + * range is then mapped to the same physical memory page. + */ +/* One L2 entry maps 1024 L1 entries and one L1 entry per page */ +#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE) +/* Max L2 blocks per stream */ +#define IPU6_MMUV2_MAX_L2_BLOCKS 2 +/* Max L1 blocks per stream */ +#define IPU6_MMUV2_MAX_L1_BLOCKS 16 +#define IPU6_MMUV2_TRASH_RANGE (IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS) +/* Entries per L1 block */ +#define MMUV2_ENTRIES_PER_L1_BLOCK 16 +#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE) +#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU6_MMUV2_L2_RANGE + +/* + * In some of the IPU6 MMUs, there is provision to configure L1 and L2 page + * table caches. Both these L1 and L2 caches are divided into multiple sections + * called streams. There is maximum 16 streams for both caches. Each of these + * sections are subdivided into multiple blocks. When nr_l1streams = 0 and + * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support + * L1/L2 page table caches. + * + * L1 stream per block sizes are configurable and varies per usecase. + * L2 has constant block sizes - 2 blocks per stream. + * + * MMU1 support pre-fetching of the pages to have less cache lookup misses. To + * enable the pre-fetching, MMU1 AT (Address Translator) device registers + * need to be configured. + * + * There are four types of memory accesses which requires ZLW configuration. 
+ * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU. + * + * 1. Sequential Access or 1D mode + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 1 + * Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where + * N is pre-defined and hardcoded in the platform data + * Set ZLW_2D -> 0 + * + * 2. ZLW 2D mode + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 1, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 1 + * + * 3. ZLW Enable (no 1D or 2D mode) + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 0, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 0 + * + * 4. ZLW disable + * Set ZLW_EN -> 0 + * set ZLW_PAGE_CROSS_1D -> 0, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 0 + * + * To configure the ZLW for the above memory access, four registers are + * available. Hence to track these four settings, we have the following entries + * in the struct ipu6_mmu_hw. Each of these entries are per stream and + * available only for the L1 streams. + * + * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN) + * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary + * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted + * Insert ZLW request N pages ahead address. + * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D) + * + * + * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined + * as per the usecase specific calculations. Any change to this pre-defined + * table has to happen in sync with IPU6 FW. + */ +struct ipu6_mmu_hw { + union { + unsigned long offset; + void __iomem *base; + }; + u32 info_bits; + u8 nr_l1streams; + /* + * L1 has variable blocks per stream - total of 64 blocks and maximum of + * 16 blocks per stream. Configurable by using the block start address + * per stream. 
Block start address is calculated from the block size + */ + u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS]; + /* Is ZLW is enabled in each stream */ + bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; + u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; + + u32 l1_stream_id_reg_offset; + u32 l2_stream_id_reg_offset; + + u8 nr_l2streams; + /* + * L2 has fixed 2 blocks per stream. Block address is calculated + * from the block size + */ + u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS]; + /* flag to track if WA is needed for successive invalidate HW bug */ + bool insert_read_before_invalidate; +}; + +struct ipu6_mmu_pdata { + u32 nr_mmus; + struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; + int mmid; +}; + +struct ipu6_isys_csi2_pdata { + void __iomem *base; +}; + +struct ipu6_isys_internal_csi2_pdata { + u32 nports; + u32 irq_mask; + u32 ctrl0_irq_edge; + u32 ctrl0_irq_clear; + u32 ctrl0_irq_mask; + u32 ctrl0_irq_enable; + u32 ctrl0_irq_lnp; + u32 ctrl0_irq_status; + u32 fw_access_port_ofs; +}; + +struct ipu6_isys_internal_tpg_pdata { + u32 ntpgs; + u32 *offsets; + u32 *sels; +}; + +struct ipu6_hw_variants { + unsigned long offset; + u32 nr_mmus; + struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; + u8 cdc_fifos; + u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS]; + u32 dmem_offset; + u32 spc_offset; +}; + +struct ipu6_isys_internal_pdata { + struct ipu6_isys_internal_csi2_pdata csi2; + struct ipu6_hw_variants hw_variant; + u32 num_parallel_streams; + u32 isys_dma_overshoot; + u32 sram_gran_shift; + u32 sram_gran_size; + u32 max_sram_size; + u32 max_streams; + u32 max_send_queues; + u32 max_sram_blocks; + u32 max_devq_size; + u32 sensor_type_start; + u32 sensor_type_end; + u32 ltr; + u32 memopen_threshold; + bool enhanced_iwake; +}; + +struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; +}; + +struct ipu6_psys_internal_pdata { + 
struct ipu6_hw_variants hw_variant; +}; + +struct ipu6_psys_pdata { + void __iomem *base; + const struct ipu6_psys_internal_pdata *ipdata; +}; + +int ipu6_fw_authenticate(void *data, u64 val); +void ipu6_configure_spc(struct ipu6_device *isp, + const struct ipu6_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr); +#endif /* IPU6_H */ diff --git a/6.12.0/drivers/media/v4l2-core/Kconfig b/6.12.0/drivers/media/v4l2-core/Kconfig new file mode 100644 index 0000000..331b8e5 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/Kconfig @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Generic video config states +# + +config VIDEO_V4L2_I2C + bool + depends on I2C && VIDEO_DEV + default y + +config VIDEO_V4L2_SUBDEV_API + bool + depends on VIDEO_DEV && MEDIA_CONTROLLER + help + Enables the V4L2 sub-device pad-level userspace API used to configure + video format, size and frame rate between hardware blocks. + + This API is mostly used by camera interfaces in embedded platforms. + +config VIDEO_ADV_DEBUG + bool "Enable advanced debug functionality on V4L2 drivers" + help + Say Y here to enable advanced debugging functionality on some + V4L devices. + In doubt, say N. + +config VIDEO_FIXED_MINOR_RANGES + bool "Enable old-style fixed minor ranges on drivers/video devices" + help + Say Y here to enable the old-style fixed-range minor assignments. + Only useful if you rely on the old behavior and use mknod instead of udev. + + When in doubt, say N. 
+ +# Used by drivers that need tuner.ko +config VIDEO_TUNER + tristate + +# Used by drivers that need v4l2-jpeg.ko +config V4L2_JPEG_HELPER + tristate + +# Used by drivers that need v4l2-h264.ko +config V4L2_H264 + tristate + +# Used by drivers that need v4l2-vp9.ko +config V4L2_VP9 + tristate + +# Used by drivers that need v4l2-mem2mem.ko +config V4L2_MEM2MEM_DEV + tristate + depends on VIDEOBUF2_CORE + +# Used by LED subsystem flash drivers +config V4L2_FLASH_LED_CLASS + tristate "V4L2 flash API for LED flash class devices" + depends on VIDEO_DEV + depends on LEDS_CLASS_FLASH + select MEDIA_CONTROLLER + select V4L2_ASYNC + select VIDEO_V4L2_SUBDEV_API + help + Say Y here to enable V4L2 flash API support for LED flash + class drivers. + + When in doubt, say N. + +config V4L2_FWNODE + tristate + select V4L2_ASYNC + +config V4L2_ASYNC + tristate + +config V4L2_CCI + tristate + +config V4L2_CCI_I2C + tristate + depends on I2C + select REGMAP_I2C + select V4L2_CCI diff --git a/6.12.0/drivers/media/v4l2-core/Makefile b/6.12.0/drivers/media/v4l2-core/Makefile new file mode 100644 index 0000000..2177b9d --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the V4L2 core +# + +ccflags-y += -I$(srctree)/drivers/media/dvb-frontends +ccflags-y += -I$(srctree)/drivers/media/tuners + +tuner-objs := tuner-core.o + +videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \ + v4l2-event.o v4l2-subdev.o v4l2-common.o \ + v4l2-ctrls-core.o v4l2-ctrls-api.o \ + v4l2-ctrls-request.o v4l2-ctrls-defs.o + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. LC_ALL=C sort Makefile) +videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o +videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o +videodev-$(CONFIG_SPI) += v4l2-spi.o +videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o +videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. 
LC_ALL=C sort Makefile) + +obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o +obj-$(CONFIG_V4L2_CCI) += v4l2-cci.o +obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o +obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o +obj-$(CONFIG_V4L2_H264) += v4l2-h264.o +obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o +obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o +obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o + +obj-$(CONFIG_VIDEO_TUNER) += tuner.o +obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o diff --git a/6.12.0/drivers/media/v4l2-core/tuner-core.c b/6.12.0/drivers/media/v4l2-core/tuner-core.c new file mode 100644 index 0000000..5687089 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/tuner-core.c @@ -0,0 +1,1424 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * i2c tv tuner chip device driver + * core core, i.e. kernel interfaces, registering and so on + * + * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer + * + * Copyright(c) 2005-2011 by Mauro Carvalho Chehab + * - Added support for a separate Radio tuner + * - Major rework and cleanups at the code + * + * This driver supports many devices and the idea is to let the driver + * detect which device is present. So rather than listing all supported + * devices here, we pretend to support a single, fake device type that will + * handle both radio and analog TV tuning. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mt20xx.h" +#include "tda8290.h" +#include "tea5761.h" +#include "tea5767.h" +#include "xc2028.h" +#include "tuner-simple.h" +#include "tda9887.h" +#include "xc5000.h" +#include "tda18271.h" +#include "xc4000.h" + +#define UNSET (-1U) + +/* + * Driver modprobe parameters + */ + +/* insmod options used at init time => read/only */ +static unsigned int addr; +static unsigned int no_autodetect; +static unsigned int show_i2c; + +module_param(addr, int, 0444); +module_param(no_autodetect, int, 0444); +module_param(show_i2c, int, 0444); + +/* insmod options used at runtime => read/write */ +static int tuner_debug; +static unsigned int tv_range[2] = { 44, 958 }; +static unsigned int radio_range[2] = { 65, 108 }; +static char pal[] = "--"; +static char secam[] = "--"; +static char ntsc[] = "-"; + +module_param_named(debug, tuner_debug, int, 0644); +module_param_array(tv_range, int, NULL, 0644); +module_param_array(radio_range, int, NULL, 0644); +module_param_string(pal, pal, sizeof(pal), 0644); +module_param_string(secam, secam, sizeof(secam), 0644); +module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); + +/* + * Static vars + */ + +static LIST_HEAD(tuner_list); +static const struct v4l2_subdev_ops tuner_ops; + +/* + * Debug macros + */ + +#undef pr_fmt + +#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \ + i2c_adapter_id(t->i2c->adapter), t->i2c->addr + + +#define dprintk(fmt, arg...) do { \ + if (tuner_debug) \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \ +} while (0) + +/* + * Internal enums/struct used inside the driver + */ + +/** + * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER + * + * @TUNER_PAD_RF_INPUT: + * Radiofrequency (RF) sink pad, usually linked to a RF connector entity. + * @TUNER_PAD_OUTPUT: + * tuner video output source pad. 
Contains the video chrominance + * and luminance or the hole bandwidth of the signal converted to + * an Intermediate Frequency (IF) or to baseband (on zero-IF tuners). + * @TUNER_PAD_AUD_OUT: + * Tuner audio output source pad. Tuners used to decode analog TV + * signals have an extra pad for audio output. Old tuners use an + * analog stage with a saw filter for the audio IF frequency. The + * output of the pad is, in this case, the audio IF, with should be + * decoded either by the bridge chipset (that's the case of cx2388x + * chipsets) or may require an external IF sound processor, like + * msp34xx. On modern silicon tuners, the audio IF decoder is usually + * incorporated at the tuner. On such case, the output of this pad + * is an audio sampled data. + * @TUNER_NUM_PADS: + * Number of pads of the tuner. + */ +enum tuner_pad_index { + TUNER_PAD_RF_INPUT, + TUNER_PAD_OUTPUT, + TUNER_PAD_AUD_OUT, + TUNER_NUM_PADS +}; + +/** + * enum if_vid_dec_pad_index - video IF-PLL pad index + * for MEDIA_ENT_F_IF_VID_DECODER + * + * @IF_VID_DEC_PAD_IF_INPUT: + * video Intermediate Frequency (IF) sink pad + * @IF_VID_DEC_PAD_OUT: + * IF-PLL video output source pad. Contains the video chrominance + * and luminance IF signals. + * @IF_VID_DEC_PAD_NUM_PADS: + * Number of pads of the video IF-PLL. 
+ */ +enum if_vid_dec_pad_index { + IF_VID_DEC_PAD_IF_INPUT, + IF_VID_DEC_PAD_OUT, + IF_VID_DEC_PAD_NUM_PADS +}; + +struct tuner { + /* device */ + struct dvb_frontend fe; + struct i2c_client *i2c; + struct v4l2_subdev sd; + struct list_head list; + + /* keep track of the current settings */ + v4l2_std_id std; + unsigned int tv_freq; + unsigned int radio_freq; + unsigned int audmode; + + enum v4l2_tuner_type mode; + unsigned int mode_mask; /* Combination of allowable modes */ + + bool standby; /* Standby mode */ + + unsigned int type; /* chip type id */ + void *config; + const char *name; + +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_pad pad[TUNER_NUM_PADS]; +#endif +}; + +/* + * Function prototypes + */ + +static void set_tv_freq(struct i2c_client *c, unsigned int freq); +static void set_radio_freq(struct i2c_client *c, unsigned int freq); + +/* + * tuner attach/detach logic + */ + +/* This macro allows us to probe dynamically, avoiding static links */ +#ifdef CONFIG_MEDIA_ATTACH +#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \ + int __r = -EINVAL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (int) __a(ARGS); \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "TUNER: Unable to find " \ + "symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +static void tuner_detach(struct dvb_frontend *fe) +{ + if (fe->ops.tuner_ops.release) { + fe->ops.tuner_ops.release(fe); + symbol_put_addr(fe->ops.tuner_ops.release); + } + if (fe->ops.analog_ops.release) { + fe->ops.analog_ops.release(fe); + symbol_put_addr(fe->ops.analog_ops.release); + } +} +#else +#define tuner_symbol_probe(FUNCTION, ARGS...) 
({ \ + FUNCTION(ARGS); \ +}) + +static void tuner_detach(struct dvb_frontend *fe) +{ + if (fe->ops.tuner_ops.release) + fe->ops.tuner_ops.release(fe); + if (fe->ops.analog_ops.release) + fe->ops.analog_ops.release(fe); +} +#endif + + +static inline struct tuner *to_tuner(struct v4l2_subdev *sd) +{ + return container_of(sd, struct tuner, sd); +} + +/* + * struct analog_demod_ops callbacks + */ + +static void fe_set_params(struct dvb_frontend *fe, + struct analog_parameters *params) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct tuner *t = fe->analog_demod_priv; + + if (NULL == fe_tuner_ops->set_analog_params) { + pr_warn("Tuner frontend module has no way to set freq\n"); + return; + } + fe_tuner_ops->set_analog_params(fe, params); +} + +static void fe_standby(struct dvb_frontend *fe) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + + if (fe_tuner_ops->sleep) + fe_tuner_ops->sleep(fe); +} + +static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct tuner *t = fe->analog_demod_priv; + + if (fe_tuner_ops->set_config) + return fe_tuner_ops->set_config(fe, priv_cfg); + + pr_warn("Tuner frontend module has no way to set config\n"); + + return 0; +} + +static void tuner_status(struct dvb_frontend *fe); + +static const struct analog_demod_ops tuner_analog_ops = { + .set_params = fe_set_params, + .standby = fe_standby, + .set_config = fe_set_config, + .tuner_status = tuner_status +}; + +/* + * Functions to select between radio and TV and tuner probe/remove functions + */ + +/** + * set_type - Sets the tuner type for a given device + * + * @c: i2c_client descriptor + * @type: type of the tuner (e. g. 
tuner number) + * @new_mode_mask: Indicates if tuner supports TV and/or Radio + * @new_config: an optional parameter used by a few tuners to adjust + * internal parameters, like LNA mode + * @tuner_callback: an optional function to be called when switching + * to analog mode + * + * This function applies the tuner config to tuner specified + * by tun_setup structure. It contains several per-tuner initialization "magic" + */ +static void set_type(struct i2c_client *c, unsigned int type, + unsigned int new_mode_mask, void *new_config, + int (*tuner_callback) (void *dev, int component, int cmd, int arg)) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + unsigned char buffer[4]; + int tune_now = 1; + + if (type == UNSET || type == TUNER_ABSENT) { + dprintk("tuner 0x%02x: Tuner type absent\n", c->addr); + return; + } + + t->type = type; + t->config = new_config; + if (tuner_callback != NULL) { + dprintk("defining GPIO callback\n"); + t->fe.callback = tuner_callback; + } + + /* discard private data, in case set_type() was previously called */ + tuner_detach(&t->fe); + t->fe.analog_demod_priv = NULL; + + switch (t->type) { + case TUNER_MT2032: + if (!dvb_attach(microtune_attach, + &t->fe, t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + break; + case TUNER_PHILIPS_TDA8290: + { + if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter, + t->i2c->addr, t->config)) + goto attach_failed; + break; + } + case TUNER_TEA5767: + if (!dvb_attach(tea5767_attach, &t->fe, + t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + t->mode_mask = T_RADIO; + break; + case TUNER_TEA5761: + if (!dvb_attach(tea5761_attach, &t->fe, + t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + t->mode_mask = T_RADIO; + break; + case TUNER_PHILIPS_FMD1216ME_MK3: + case TUNER_PHILIPS_FMD1216MEX_MK3: + buffer[0] = 0x0b; + buffer[1] = 0xdc; + buffer[2] = 0x9c; + 
buffer[3] = 0x60; + i2c_master_send(c, buffer, 4); + mdelay(1); + buffer[2] = 0x86; + buffer[3] = 0x54; + i2c_master_send(c, buffer, 4); + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + break; + case TUNER_PHILIPS_TD1316: + buffer[0] = 0x0b; + buffer[1] = 0xdc; + buffer[2] = 0x86; + buffer[3] = 0xa4; + i2c_master_send(c, buffer, 4); + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + break; + case TUNER_XC2028: + { + struct xc2028_config cfg = { + .i2c_adap = t->i2c->adapter, + .i2c_addr = t->i2c->addr, + }; + if (!dvb_attach(xc2028_attach, &t->fe, &cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_TDA9887: + if (!dvb_attach(tda9887_attach, + &t->fe, t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + break; + case TUNER_XC5000: + { + struct xc5000_config xc5000_cfg = { + .i2c_address = t->i2c->addr, + /* if_khz will be set at dvb_attach() */ + .if_khz = 0, + }; + + if (!dvb_attach(xc5000_attach, + &t->fe, t->i2c->adapter, &xc5000_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_XC5000C: + { + struct xc5000_config xc5000c_cfg = { + .i2c_address = t->i2c->addr, + /* if_khz will be set at dvb_attach() */ + .if_khz = 0, + .chip_id = XC5000C, + }; + + if (!dvb_attach(xc5000_attach, + &t->fe, t->i2c->adapter, &xc5000c_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_NXP_TDA18271: + { + struct tda18271_config cfg = { + .small_i2c = TDA18271_03_BYTE_CHUNK_INIT, + }; + + if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr, + t->i2c->adapter, &cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_XC4000: + { + struct xc4000_config xc4000_cfg = { + .i2c_address = t->i2c->addr, + /* FIXME: the correct parameters will be set */ + /* only when the digital dvb_attach() occurs */ + .default_pm = 0, + .dvb_amplitude = 0, + .set_smoothedcvbs = 0, + .if_khz = 0 + }; + if 
(!dvb_attach(xc4000_attach, + &t->fe, t->i2c->adapter, &xc4000_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + default: + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + + break; + } + + if ((NULL == analog_ops->set_params) && + (fe_tuner_ops->set_analog_params)) { + + t->name = fe_tuner_ops->info.name; + + t->fe.analog_demod_priv = t; + memcpy(analog_ops, &tuner_analog_ops, + sizeof(struct analog_demod_ops)); + + if (fe_tuner_ops->get_rf_strength) + analog_ops->has_signal = fe_tuner_ops->get_rf_strength; + if (fe_tuner_ops->get_afc) + analog_ops->get_afc = fe_tuner_ops->get_afc; + + } else { + t->name = analog_ops->info.name; + } + +#ifdef CONFIG_MEDIA_CONTROLLER + t->sd.entity.name = t->name; +#endif + + dprintk("type set to %s\n", t->name); + + t->mode_mask = new_mode_mask; + + /* Some tuners require more initialization setup before use, + such as firmware download or device calibration. + trying to set a frequency here will just fail + FIXME: better to move set_freq to the tuner code. This is needed + on analog tuners for PLL to properly work + */ + if (tune_now) { + if (V4L2_TUNER_RADIO == t->mode) + set_radio_freq(c, t->radio_freq); + else + set_tv_freq(c, t->tv_freq); + } + + dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", + c->adapter->name, c->dev.driver->name, c->addr << 1, type, + t->mode_mask); + return; + +attach_failed: + dprintk("Tuner attach for type = %d failed.\n", t->type); + t->type = TUNER_ABSENT; + + return; +} + +/** + * tuner_s_type_addr - Sets the tuner type for a device + * + * @sd: subdev descriptor + * @tun_setup: type to be associated to a given tuner i2c address + * + * This function applies the tuner config to tuner specified + * by tun_setup structure. + * If tuner I2C address is UNSET, then it will only set the device + * if the tuner supports the mode specified in the call. 
+ * If the address is specified, the change will be applied only if + * tuner I2C address matches. + * The call can change the tuner number and the tuner mode. + */ +static int tuner_s_type_addr(struct v4l2_subdev *sd, + struct tuner_setup *tun_setup) +{ + struct tuner *t = to_tuner(sd); + struct i2c_client *c = v4l2_get_subdevdata(sd); + + dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n", + tun_setup->type, + tun_setup->addr, + tun_setup->mode_mask, + tun_setup->config); + + if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && + (t->mode_mask & tun_setup->mode_mask))) || + (tun_setup->addr == c->addr)) { + set_type(c, tun_setup->type, tun_setup->mode_mask, + tun_setup->config, tun_setup->tuner_callback); + } else + dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n", + t->type, t->mode_mask, + tun_setup->addr, tun_setup->mode_mask); + + return 0; +} + +/** + * tuner_s_config - Sets tuner configuration + * + * @sd: subdev descriptor + * @cfg: tuner configuration + * + * Calls tuner set_config() private function to set some tuner-internal + * parameters + */ +static int tuner_s_config(struct v4l2_subdev *sd, + const struct v4l2_priv_tun_config *cfg) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (t->type != cfg->tuner) + return 0; + + if (analog_ops->set_config) { + analog_ops->set_config(&t->fe, cfg->priv); + return 0; + } + + dprintk("Tuner frontend module has no way to set config\n"); + return 0; +} + +/** + * tuner_lookup - Seek for tuner adapters + * + * @adap: i2c_adapter struct + * @radio: pointer to be filled if the adapter is radio + * @tv: pointer to be filled if the adapter is TV + * + * Search for existing radio and/or TV tuners on the given I2C adapter, + * discarding demod-only adapters (tda9887). 
+ * + * Note that when this function is called from tuner_probe you can be + * certain no other devices will be added/deleted at the same time, I2C + * core protects against that. + */ +static void tuner_lookup(struct i2c_adapter *adap, + struct tuner **radio, struct tuner **tv) +{ + struct tuner *pos; + + *radio = NULL; + *tv = NULL; + + list_for_each_entry(pos, &tuner_list, list) { + int mode_mask; + + if (pos->i2c->adapter != adap || + strcmp(pos->i2c->dev.driver->name, "tuner")) + continue; + + mode_mask = pos->mode_mask; + if (*radio == NULL && mode_mask == T_RADIO) + *radio = pos; + /* Note: currently TDA9887 is the only demod-only + device. If other devices appear then we need to + make this test more general. */ + else if (*tv == NULL && pos->type != TUNER_TDA9887 && + (pos->mode_mask & T_ANALOG_TV)) + *tv = pos; + } +} + +/** + *tuner_probe - Probes the existing tuners on an I2C bus + * + * @client: i2c_client descriptor + * + * This routine probes for tuners at the expected I2C addresses. On most + * cases, if a device answers to a given I2C address, it assumes that the + * device is a tuner. On a few cases, however, an additional logic is needed + * to double check if the device is really a tuner, or to identify the tuner + * type, like on tea5767/5761 devices. + * + * During client attach, set_type is called by adapter's attach_inform callback. + * set_type must then be completed by tuner_probe. 
+ */ +static int tuner_probe(struct i2c_client *client) +{ + struct tuner *t; + struct tuner *radio; + struct tuner *tv; +#ifdef CONFIG_MEDIA_CONTROLLER + int ret; +#endif + + t = kzalloc(sizeof(struct tuner), GFP_KERNEL); + if (NULL == t) + return -ENOMEM; + v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops); + t->i2c = client; + t->name = "(tuner unset)"; + t->type = UNSET; + t->audmode = V4L2_TUNER_MODE_STEREO; + t->standby = true; + t->radio_freq = 87.5 * 16000; /* Initial freq range */ + t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */ + + if (show_i2c) { + unsigned char buffer[16]; + int rc; + + memset(buffer, 0, sizeof(buffer)); + rc = i2c_master_recv(client, buffer, sizeof(buffer)); + if (rc >= 0) + pr_info("I2C RECV = %*ph\n", rc, buffer); + } + + /* autodetection code based on the i2c addr */ + if (!no_autodetect) { + switch (client->addr) { + case 0x10: + if (tuner_symbol_probe(tea5761_autodetection, + t->i2c->adapter, + t->i2c->addr) >= 0) { + t->type = TUNER_TEA5761; + t->mode_mask = T_RADIO; + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv) + tv->mode_mask &= ~T_RADIO; + + goto register_client; + } + kfree(t); + return -ENODEV; + case 0x42: + case 0x43: + case 0x4a: + case 0x4b: + /* If chip is not tda8290, don't register. + since it can be tda9887*/ + if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter, + t->i2c->addr) >= 0) { + dprintk("tda829x detected\n"); + } else { + /* Default is being tda9887 */ + t->type = TUNER_TDA9887; + t->mode_mask = T_RADIO | T_ANALOG_TV; + goto register_client; + } + break; + case 0x60: + if (tuner_symbol_probe(tea5767_autodetection, + t->i2c->adapter, t->i2c->addr) + >= 0) { + t->type = TUNER_TEA5767; + t->mode_mask = T_RADIO; + /* Sets freq to FM range */ + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv) + tv->mode_mask &= ~T_RADIO; + + goto register_client; + } + break; + } + } + + /* Initializes only the first TV tuner on this adapter. Why only the + first? 
Because there are some devices (notably the ones with TI + tuners) that have more than one i2c address for the *same* device. + Experience shows that, except for just one case, the first + address is the right one. The exception is a Russian tuner + (ACORP_Y878F). So, the desired behavior is just to enable the + first found TV tuner. */ + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv == NULL) { + t->mode_mask = T_ANALOG_TV; + if (radio == NULL) + t->mode_mask |= T_RADIO; + dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask); + } + + /* Should be just before return */ +register_client: +#if defined(CONFIG_MEDIA_CONTROLLER) + t->sd.entity.name = t->name; + /* + * Handle the special case where the tuner has actually + * two stages: the PLL to tune into a frequency and the + * IF-PLL demodulator (tda988x). + */ + if (t->type == TUNER_TDA9887) { + t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG; + ret = media_entity_pads_init(&t->sd.entity, + IF_VID_DEC_PAD_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER; + } else { + t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO; + ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_TUNER; + } + + if (ret < 0) { + pr_err("failed to initialize media entity!\n"); + kfree(t); + return ret; + } +#endif + /* Sets a default mode */ + if (t->mode_mask & T_ANALOG_TV) + t->mode = V4L2_TUNER_ANALOG_TV; + else + t->mode = V4L2_TUNER_RADIO; + set_type(client, t->type, 
t->mode_mask, t->config, t->fe.callback); + list_add_tail(&t->list, &tuner_list); + + pr_info("Tuner %d found with type(s)%s%s.\n", + t->type, + t->mode_mask & T_RADIO ? " Radio" : "", + t->mode_mask & T_ANALOG_TV ? " TV" : ""); + return 0; +} + +/** + * tuner_remove - detaches a tuner + * + * @client: i2c_client descriptor + */ + +static void tuner_remove(struct i2c_client *client) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(client)); + + v4l2_device_unregister_subdev(&t->sd); + tuner_detach(&t->fe); + t->fe.analog_demod_priv = NULL; + + list_del(&t->list); + kfree(t); +} + +/* + * Functions to switch between Radio and TV + * + * A few cards have a separate I2C tuner for radio. Those routines + * take care of switching between TV/Radio mode, filtering only the + * commands that apply to the Radio or TV tuner. + */ + +/** + * check_mode - Verify if tuner supports the requested mode + * @t: a pointer to the module's internal struct_tuner + * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type. + * + * This function checks if the tuner is capable of tuning analog TV, + * digital TV or radio, depending on what the caller wants. If the + * tuner can't support that mode, it returns -EINVAL. Otherwise, it + * returns 0. + * This function is needed for boards that have a separate tuner for + * radio (like devices with tea5767). + * + * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to + * select a TV frequency. So, t_mode = T_ANALOG_TV could actually + * be used to represent a Digital TV too. + */ +static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode) +{ + int t_mode; + if (mode == V4L2_TUNER_RADIO) + t_mode = T_RADIO; + else + t_mode = T_ANALOG_TV; + + if ((t_mode & t->mode_mask) == 0) + return -EINVAL; + + return 0; +} + +/** + * set_mode - Switch tuner to other mode. 
+ * @t: a pointer to the module's internal struct_tuner + * @mode: enum v4l2_type (radio or TV) + * + * If tuner doesn't support the needed mode (radio or TV), prints a + * debug message and returns -EINVAL, changing its state to standby. + * Otherwise, changes the mode and returns 0. + */ +static int set_mode(struct tuner *t, enum v4l2_tuner_type mode) +{ + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (mode != t->mode) { + if (check_mode(t, mode) == -EINVAL) { + dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n", + mode); + t->standby = true; + if (analog_ops->standby) + analog_ops->standby(&t->fe); + return -EINVAL; + } + t->mode = mode; + dprintk("Changing to mode %d\n", mode); + } + return 0; +} + +/** + * set_freq - Set the tuner to the desired frequency. + * @t: a pointer to the module's internal struct_tuner + * @freq: frequency to set (0 means to use the current frequency) + */ +static void set_freq(struct tuner *t, unsigned int freq) +{ + struct i2c_client *client = v4l2_get_subdevdata(&t->sd); + + if (t->mode == V4L2_TUNER_RADIO) { + if (!freq) + freq = t->radio_freq; + set_radio_freq(client, freq); + } else { + if (!freq) + freq = t->tv_freq; + set_tv_freq(client, freq); + } +} + +/* + * Functions that are specific for TV mode + */ + +/** + * set_tv_freq - Set tuner frequency, freq in Units of 62.5 kHz = 1/16MHz + * + * @c: i2c_client descriptor + * @freq: frequency + */ +static void set_tv_freq(struct i2c_client *c, unsigned int freq) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + struct analog_parameters params = { + .mode = t->mode, + .audmode = t->audmode, + .std = t->std + }; + + if (t->type == UNSET) { + pr_warn("tuner type not set\n"); + return; + } + if (NULL == analog_ops->set_params) { + pr_warn("Tuner has no way to set tv freq\n"); + return; + } + if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) { + dprintk("TV freq 
(%d.%02d) out of range (%d-%d)\n", + freq / 16, freq % 16 * 100 / 16, tv_range[0], + tv_range[1]); + /* V4L2 spec: if the freq is not possible then the closest + possible value should be selected */ + if (freq < tv_range[0] * 16) + freq = tv_range[0] * 16; + else + freq = tv_range[1] * 16; + } + params.frequency = freq; + dprintk("tv freq set to %d.%02d\n", + freq / 16, freq % 16 * 100 / 16); + t->tv_freq = freq; + t->standby = false; + + analog_ops->set_params(&t->fe, ¶ms); +} + +/** + * tuner_fixup_std - force a given video standard variant + * + * @t: tuner internal struct + * @std: TV standard + * + * A few devices or drivers have problem to detect some standard variations. + * On other operational systems, the drivers generally have a per-country + * code, and some logic to apply per-country hacks. V4L2 API doesn't provide + * such hacks. Instead, it relies on a proper video standard selection from + * the userspace application. However, as some apps are buggy, not allowing + * to distinguish all video standard variations, a modprobe parameter can + * be used to force a video standard match. 
+ */ +static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std) +{ + if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) { + switch (pal[0]) { + case '6': + return V4L2_STD_PAL_60; + case 'b': + case 'B': + case 'g': + case 'G': + return V4L2_STD_PAL_BG; + case 'i': + case 'I': + return V4L2_STD_PAL_I; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_PAL_DK; + case 'M': + case 'm': + return V4L2_STD_PAL_M; + case 'N': + case 'n': + if (pal[1] == 'c' || pal[1] == 'C') + return V4L2_STD_PAL_Nc; + return V4L2_STD_PAL_N; + default: + pr_warn("pal= argument not recognised\n"); + break; + } + } + if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) { + switch (secam[0]) { + case 'b': + case 'B': + case 'g': + case 'G': + case 'h': + case 'H': + return V4L2_STD_SECAM_B | + V4L2_STD_SECAM_G | + V4L2_STD_SECAM_H; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_SECAM_DK; + case 'l': + case 'L': + if ((secam[1] == 'C') || (secam[1] == 'c')) + return V4L2_STD_SECAM_LC; + return V4L2_STD_SECAM_L; + default: + pr_warn("secam= argument not recognised\n"); + break; + } + } + + if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) { + switch (ntsc[0]) { + case 'm': + case 'M': + return V4L2_STD_NTSC_M; + case 'j': + case 'J': + return V4L2_STD_NTSC_M_JP; + case 'k': + case 'K': + return V4L2_STD_NTSC_M_KR; + default: + pr_info("ntsc= argument not recognised\n"); + break; + } + } + return std; +} + +/* + * Functions that are specific for Radio mode + */ + +/** + * set_radio_freq - Set tuner frequency, freq in Units of 62.5 Hz = 1/16kHz + * + * @c: i2c_client descriptor + * @freq: frequency + */ +static void set_radio_freq(struct i2c_client *c, unsigned int freq) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + struct analog_parameters params = { + .mode = t->mode, + .audmode = t->audmode, + .std = t->std + }; + + if (t->type == UNSET) { + 
pr_warn("tuner type not set\n"); + return; + } + if (NULL == analog_ops->set_params) { + pr_warn("tuner has no way to set radio frequency\n"); + return; + } + if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) { + dprintk("radio freq (%d.%02d) out of range (%d-%d)\n", + freq / 16000, freq % 16000 * 100 / 16000, + radio_range[0], radio_range[1]); + /* V4L2 spec: if the freq is not possible then the closest + possible value should be selected */ + if (freq < radio_range[0] * 16000) + freq = radio_range[0] * 16000; + else + freq = radio_range[1] * 16000; + } + params.frequency = freq; + dprintk("radio freq set to %d.%02d\n", + freq / 16000, freq % 16000 * 100 / 16000); + t->radio_freq = freq; + t->standby = false; + + analog_ops->set_params(&t->fe, ¶ms); + /* + * The tuner driver might decide to change the audmode if it only + * supports stereo, so update t->audmode. + */ + t->audmode = params.audmode; +} + +/* + * Debug function for reporting tuner status to userspace + */ + +/** + * tuner_status - Dumps the current tuner status at dmesg + * @fe: pointer to struct dvb_frontend + * + * This callback is used only for driver debug purposes, answering to + * VIDIOC_LOG_STATUS. No changes should happen on this call. 
+ */ +static void tuner_status(struct dvb_frontend *fe) +{ + struct tuner *t = fe->analog_demod_priv; + unsigned long freq, freq_fraction; + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct analog_demod_ops *analog_ops = &fe->ops.analog_ops; + const char *p; + + switch (t->mode) { + case V4L2_TUNER_RADIO: + p = "radio"; + break; + case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */ + p = "digital TV"; + break; + case V4L2_TUNER_ANALOG_TV: + default: + p = "analog TV"; + break; + } + if (t->mode == V4L2_TUNER_RADIO) { + freq = t->radio_freq / 16000; + freq_fraction = (t->radio_freq % 16000) * 100 / 16000; + } else { + freq = t->tv_freq / 16; + freq_fraction = (t->tv_freq % 16) * 100 / 16; + } + pr_info("Tuner mode: %s%s\n", p, + t->standby ? " on standby mode" : ""); + pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction); + pr_info("Standard: 0x%08lx\n", (unsigned long)t->std); + if (t->mode != V4L2_TUNER_RADIO) + return; + if (fe_tuner_ops->get_status) { + u32 tuner_status = 0; + + fe_tuner_ops->get_status(&t->fe, &tuner_status); + if (tuner_status & TUNER_STATUS_LOCKED) + pr_info("Tuner is locked.\n"); + if (tuner_status & TUNER_STATUS_STEREO) + pr_info("Stereo: yes\n"); + } + if (analog_ops->has_signal) { + u16 signal; + + if (!analog_ops->has_signal(fe, &signal)) + pr_info("Signal strength: %hu\n", signal); + } +} + +/* + * Function to splicitly change mode to radio. 
Probably not needed anymore
 */

static int tuner_s_radio(struct v4l2_subdev *sd)
{
	struct tuner *t = to_tuner(sd);

	if (set_mode(t, V4L2_TUNER_RADIO) == 0)
		set_freq(t, 0);
	return 0;
}

/*
 * Tuner callbacks to handle userspace ioctl's
 */

/**
 * tuner_standby - places the tuner in standby mode
 * @sd: pointer to struct v4l2_subdev
 */
static int tuner_standby(struct v4l2_subdev *sd)
{
	struct tuner *t = to_tuner(sd);
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;

	dprintk("Putting tuner to sleep\n");
	t->standby = true;
	if (analog_ops->standby)
		analog_ops->standby(&t->fe);
	return 0;
}

/* Set the TV standard; family-wide requests are narrowed by module options. */
static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct tuner *t = to_tuner(sd);

	if (set_mode(t, V4L2_TUNER_ANALOG_TV))
		return 0;

	t->std = tuner_fixup_std(t, std);
	if (t->std != std)
		dprintk("Fixup standard %llx to %llx\n", std, t->std);
	set_freq(t, 0);
	return 0;
}

static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f)
{
	struct tuner *t = to_tuner(sd);

	if (set_mode(t, f->type) == 0)
		set_freq(t, f->frequency);
	return 0;
}

/**
 * tuner_g_frequency - Get the tuned frequency for the tuner
 * @sd: pointer to struct v4l2_subdev
 * @f: pointer to struct v4l2_frequency
 *
 * At return, the structure f will be filled with tuner frequency
 * if the tuner matches the f->type.
 * Note: f->type should be initialized before calling it.
 * This is done by either video_ioctl2 or by the bridge driver.
 */
static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
	struct tuner *t = to_tuner(sd);
	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;

	if (check_mode(t, f->type) == -EINVAL)
		return 0;
	if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
		u32 abs_freq;

		fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
		/* Convert Hz to 1/16-kHz (radio) or 1/16-MHz (TV) units. */
		f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
			DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
			DIV_ROUND_CLOSEST(abs_freq, 62500);
	} else {
		/* Fall back to the last frequency the core cached. */
		f->frequency = (V4L2_TUNER_RADIO == f->type) ?
			t->radio_freq : t->tv_freq;
	}
	return 0;
}

/**
 * tuner_g_tuner - Fill in tuner information
 * @sd: pointer to struct v4l2_subdev
 * @vt: pointer to struct v4l2_tuner
 *
 * At return, the structure vt will be filled with tuner information
 * if the tuner matches vt->type.
 * Note: vt->type should be initialized before calling it.
 * This is done by either video_ioctl2 or by the bridge driver.
 */
static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct tuner *t = to_tuner(sd);
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;

	if (check_mode(t, vt->type) == -EINVAL)
		return 0;
	if (vt->type == t->mode && analog_ops->get_afc)
		analog_ops->get_afc(&t->fe, &vt->afc);
	if (vt->type == t->mode && analog_ops->has_signal) {
		u16 signal = (u16)vt->signal;

		if (!analog_ops->has_signal(&t->fe, &signal))
			vt->signal = signal;
	}
	if (vt->type != V4L2_TUNER_RADIO) {
		/* TV mode: range is in 1/16-MHz units. */
		vt->capability |= V4L2_TUNER_CAP_NORM;
		vt->rangelow = tv_range[0] * 16;
		vt->rangehigh = tv_range[1] * 16;
		return 0;
	}

	/* radio mode */
	if (vt->type == t->mode) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
		if (fe_tuner_ops->get_status) {
			u32 tuner_status = 0;

			fe_tuner_ops->get_status(&t->fe, &tuner_status);
			vt->rxsubchans =
				(tuner_status & TUNER_STATUS_STEREO) ?
				V4L2_TUNER_SUB_STEREO :
				V4L2_TUNER_SUB_MONO;
		}
		vt->audmode = t->audmode;
	}
	/* Radio ranges are in 1/16-kHz units (CAP_LOW). */
	vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
	vt->rangelow = radio_range[0] * 16000;
	vt->rangehigh = radio_range[1] * 16000;

	return 0;
}

/**
 * tuner_s_tuner - Set the tuner's audio mode
 * @sd: pointer to struct v4l2_subdev
 * @vt: pointer to struct v4l2_tuner
 *
 * Sets the audio mode if the tuner matches vt->type.
 * Note: vt->type should be initialized before calling it.
 * This is done by either video_ioctl2 or by the bridge driver.
 */
static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
{
	struct tuner *t = to_tuner(sd);

	if (set_mode(t, vt->type))
		return 0;

	if (t->mode == V4L2_TUNER_RADIO) {
		t->audmode = vt->audmode;
		/*
		 * For radio audmode can only be mono or stereo. Map any
		 * other values to stereo. The actual tuner driver that is
		 * called in set_radio_freq can decide to limit the audmode to
		 * mono if only mono is supported.
		 */
		if (t->audmode != V4L2_TUNER_MODE_MONO &&
		    t->audmode != V4L2_TUNER_MODE_STEREO)
			t->audmode = V4L2_TUNER_MODE_STEREO;
	}
	set_freq(t, 0);

	return 0;
}

static int tuner_log_status(struct v4l2_subdev *sd)
{
	struct tuner *t = to_tuner(sd);
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;

	if (analog_ops->tuner_status)
		analog_ops->tuner_status(&t->fe);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System suspend: prefer the tuner's own suspend op, else force standby. */
static int tuner_suspend(struct device *dev)
{
	struct i2c_client *c = to_i2c_client(dev);
	struct tuner *t = to_tuner(i2c_get_clientdata(c));
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;

	dprintk("suspend\n");

	if (t->fe.ops.tuner_ops.suspend)
		t->fe.ops.tuner_ops.suspend(&t->fe);
	else if (!t->standby && analog_ops->standby)
		analog_ops->standby(&t->fe);

	return 0;
}

/* System resume: restore the pre-suspend mode and frequency. */
static int tuner_resume(struct device *dev)
{
	struct i2c_client *c = to_i2c_client(dev);
	struct tuner *t = to_tuner(i2c_get_clientdata(c));

	dprintk("resume\n");

	if (t->fe.ops.tuner_ops.resume)
		t->fe.ops.tuner_ops.resume(&t->fe);
	else if (!t->standby)
		if (set_mode(t, t->mode) == 0)
			set_freq(t, 0);

	return 0;
}
#endif

static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	/* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have
	   to handle it here.
	   There must be a better way of doing this... */
	switch (cmd) {
	case TUNER_SET_CONFIG:
		return tuner_s_config(sd, arg);
	}
	return -ENOIOCTLCMD;
}

/*
 * Callback structs
 */

static const struct v4l2_subdev_core_ops tuner_core_ops = {
	.log_status = tuner_log_status,
};

static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
	.standby = tuner_standby,
	.s_radio = tuner_s_radio,
	.g_tuner = tuner_g_tuner,
	.s_tuner = tuner_s_tuner,
	.s_frequency = tuner_s_frequency,
	.g_frequency = tuner_g_frequency,
	.s_type_addr = tuner_s_type_addr,
	.s_config = tuner_s_config,
};

static const struct v4l2_subdev_video_ops tuner_video_ops = {
	.s_std = tuner_s_std,
};

static const struct v4l2_subdev_ops tuner_ops = {
	.core = &tuner_core_ops,
	.tuner = &tuner_tuner_ops,
	.video = &tuner_video_ops,
};

/*
 * I2C structs and module init functions
 */

static const struct dev_pm_ops tuner_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
};

static const struct i2c_device_id tuner_id[] = {
	{ "tuner", }, /* autodetect */
	{ }
};
MODULE_DEVICE_TABLE(i2c, tuner_id);

static struct i2c_driver tuner_driver = {
	.driver = {
		.name = "tuner",
		.pm = &tuner_pm_ops,
	},
	.probe = tuner_probe,
	.remove = tuner_remove,
	.command = tuner_command,
	.id_table = tuner_id,
};

module_i2c_driver(tuner_driver);

MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners");
MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
MODULE_LICENSE("GPL");
diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-async.c b/6.12.0/drivers/media/v4l2-core/v4l2-async.c
new file mode 100644
index 0000000..ee884a8
--- /dev/null
+++ b/6.12.0/drivers/media/v4l2-core/v4l2-async.c
@@ -0,0 +1,976 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski
 */

/* NOTE(review): the angle-bracketed header names below were lost during
 * extraction — restore them from upstream v4l2-async.c before applying. */
#include
#include
#include
#include
#include
#include
/* NOTE(review): angle-bracketed include targets lost in extraction —
 * restore from upstream v4l2-async.c before applying. */
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include
#include

#include "v4l2-subdev-priv.h"

/* Invoke the notifier's optional ->bound() op; absent op means success. */
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asc)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asc);
}

/* Invoke the notifier's optional ->unbind() op, if any. */
static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_connection *asc)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asc);
}

/* Invoke the notifier's optional ->complete() op; absent op means success. */
static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

/* Invoke the notifier's optional ->destroy() op for a connection, if any. */
static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
				       struct v4l2_async_connection *asc)
{
	if (!n->ops || !n->ops->destroy)
		return;

	n->ops->destroy(asc);
}

/* Match by I2C adapter number + address; always false without CONFIG_I2C. */
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd,
		      struct v4l2_async_match_desc *match)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
	       match->i2c.adapter_id == client->adapter->nr &&
	       match->i2c.address == client->addr;
#else
	return false;
#endif
}

/* Device backing the notifier: its subdev's, else its v4l2_dev's, else NULL. */
static struct device *notifier_dev(struct v4l2_async_notifier *notifier)
{
	if (notifier->sd)
		return notifier->sd->dev;

	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->dev;

	return NULL;
}

/* Compare one candidate subdev fwnode against one match descriptor. */
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_match_desc *match)
{
	struct fwnode_handle *asd_dev_fwnode;
	bool ret;

	dev_dbg(notifier_dev(notifier),
		"v4l2-async: fwnode match: need %pfw, trying %pfw\n",
		sd_fwnode, match->fwnode);

	if (sd_fwnode == match->fwnode) {
		dev_dbg(notifier_dev(notifier),
			"v4l2-async: direct match found\n");
		return true;
	}

	if (!fwnode_graph_is_endpoint(match->fwnode)) {
		dev_dbg(notifier_dev(notifier),
			"v4l2-async: direct match not found\n");
		return false;
	}

	/* Fall back: compare against the endpoint's port-parent device node. */
	asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode);

	ret = sd_fwnode == asd_dev_fwnode;

	fwnode_handle_put(asd_dev_fwnode);

	dev_dbg(notifier_dev(notifier),
		"v4l2-async: device--endpoint match %sfound\n",
		ret ? "" : "not ");

	return ret;
}

static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd,
			 struct v4l2_async_match_desc *match)
{
	dev_dbg(notifier_dev(notifier),
		"v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n",
		dev_fwnode(notifier_dev(notifier)), sd->fwnode);

	/* If the subdev published explicit endpoints, match only on those. */
	if (!list_empty(&sd->async_subdev_endpoint_list)) {
		struct v4l2_async_subdev_endpoint *ase;

		dev_dbg(sd->dev,
			"v4l2-async: endpoint fwnode list available, looking for %pfw\n",
			match->fwnode);

		list_for_each_entry(ase, &sd->async_subdev_endpoint_list,
				    async_subdev_endpoint_entry) {
			bool matched = ase->endpoint == match->fwnode;

			dev_dbg(sd->dev,
				"v4l2-async: endpoint-endpoint match %sfound with %pfw\n",
				matched ? "" : "not ", ase->endpoint);

			if (matched)
				return true;
		}

		dev_dbg(sd->dev, "async: no endpoint matched\n");

		return false;
	}

	if (match_fwnode_one(notifier, sd, sd->fwnode, match))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	dev_dbg(notifier_dev(notifier),
		"v4l2-async: trying secondary fwnode match\n");

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
}

/* Global registries (guarded by list_lock): unbound subdevs, all notifiers. */
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

/* Find the first waiting connection of a notifier that matches @sd. */
static struct v4l2_async_connection *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd,
		      struct v4l2_async_match_desc *match);
	struct v4l2_async_connection *asc;

	list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
		/* bus_type has been verified valid before */
		switch (asc->match.type) {
		case V4L2_ASYNC_MATCH_TYPE_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_TYPE_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, &asc->match))
			return asc;
	}

	return NULL;
}

/* Compare two async match descriptors for equivalence */
static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
				   struct v4l2_async_match_desc *match2)
{
	if (match1->type != match2->type)
		return false;

	switch (match1->type) {
	case V4L2_ASYNC_MATCH_TYPE_I2C:
		return match1->i2c.adapter_id == match2->i2c.adapter_id &&
		       match1->i2c.address == match2->i2c.address;
	case V4L2_ASYNC_MATCH_TYPE_FWNODE:
		return match1->fwnode == match2->fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, notifier_entry)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found.
 */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	/* Walk up to the root notifier; only the root carries a v4l2_dev. */
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc;

	if (!list_empty(&notifier->waiting_list))
		return false;

	/* Recurse into the notifier of every bound sub-device. */
	list_for_each_entry(asc, &notifier->done_list, asc_entry) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(asc->sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_notifier *__notifier = notifier;

	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting_list))
		return 0;

	if (notifier->sd)
		dev_dbg(notifier_dev(notifier),
			"v4l2-async: trying to complete\n");

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev) {
		dev_dbg(notifier_dev(__notifier),
			"v4l2-async: V4L2 device not available\n");
		return 0;
	}

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n");

	return v4l2_async_nf_call_complete(notifier);
}

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

/* Create an ancillary media link for lens/flash subdevs bound to a sensor. */
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
	struct media_link *link;

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	if (!n->sd) {
		dev_warn(notifier_dev(n),
			 "not a sub-device notifier, not creating an ancillary link for %s!\n",
			 dev_name(sd->dev));
		return 0;
	}

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

	return IS_ERR(link) ? PTR_ERR(link) : 0;
#else
	return 0;
#endif
}

/*
 * Bind one matched connection: register the subdev (on first connection),
 * call the notifier's bound op, create links, then recurse into any
 * notifier the newly-bound subdev itself registered.
 */
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_connection *asc)
{
	struct v4l2_async_notifier *subdev_notifier;
	bool registered = false;
	int ret;

	if (list_empty(&sd->asc_list)) {
		ret = __v4l2_device_register_subdev(v4l2_dev, sd, sd->owner);
		if (ret < 0)
			return ret;
		registered = true;
	}

	ret = v4l2_async_nf_call_bound(notifier, sd, asc);
	if (ret < 0) {
		if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
			dev_dbg(notifier_dev(notifier),
				"failed binding %pfw (%d)\n",
				asc->match.fwnode, ret);
		goto err_unregister_subdev;
	}

	if (registered) {
		/*
		 * Depending of the function of the entities involved, we may
		 * want to create links between them (for example between a
		 * sensor and its lens or between a sensor's source pad and the
		 * connected device's sink pad).
		 */
		ret = v4l2_async_create_ancillary_links(notifier, sd);
		if (ret) {
			if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
				dev_dbg(notifier_dev(notifier),
					"failed creating links for %pfw (%d)\n",
					asc->match.fwnode, ret);
			goto err_call_unbind;
		}
	}

	list_add(&asc->asc_subdev_entry, &sd->asc_list);
	asc->sd = sd;

	/* Move from the waiting list to notifier's done */
	list_move(&asc->asc_entry, &notifier->done_list);

	dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
		dev_name(sd->dev), ret);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);

err_call_unbind:
	v4l2_async_nf_call_unbind(notifier, sd, asc);
	list_del(&asc->asc_subdev_entry);

err_unregister_subdev:
	if (registered)
		v4l2_device_unregister_subdev(sd);

	return ret;
}

/* Test all async sub-devices in a notifier for a match.
 */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

	dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_connection *asc;
		int ret;

		asc = v4l2_async_find_match(notifier, sd);
		if (!asc)
			continue;

		dev_dbg(notifier_dev(notifier),
			"v4l2-async: match found, subdev %s\n", sd->name);

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

/* Undo one binding: move it back to waiting; unregister on last connection. */
static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_connection *asc)
{
	list_move_tail(&asc->asc_entry, &notifier->waiting_list);
	if (list_is_singular(&asc->asc_subdev_entry)) {
		v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
		v4l2_device_unregister_subdev(asc->sd);
		asc->sd = NULL;
	}
	list_del(&asc->asc_subdev_entry);
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc, *asc_tmp;

	list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list,
				 asc_entry) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(asc->sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

		v4l2_async_unbind_subdev_one(notifier, asc);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
				    struct v4l2_async_match_desc *match)
{
	struct v4l2_async_connection *asc;

	list_for_each_entry(asc, &notifier->waiting_list, asc_entry)
		if (v4l2_async_match_equal(&asc->match, match))
			return true;

	list_for_each_entry(asc, &notifier->done_list, asc_entry)
		if (v4l2_async_match_equal(&asc->match, match))
			return true;

	return false;
}

/*
 * Find out whether an async sub-device was set up already or whether it exists
 * in a given notifier.
 */
static bool
v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
			      struct v4l2_async_match_desc *match)
{
	struct list_head *heads[] = {
		&notifier->waiting_list,
		&notifier->done_list,
	};
	unsigned int i;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	for (i = 0; i < ARRAY_SIZE(heads); i++) {
		struct v4l2_async_connection *asc;

		list_for_each_entry(asc, heads[i], asc_entry) {
			if (&asc->match == match)
				continue;
			if (v4l2_async_match_equal(&asc->match, match))
				return true;
		}
	}

	/* Check that an asc does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, notifier_entry)
		if (v4l2_async_nf_has_async_match_entry(notifier, match))
			return true;

	return false;
}

/* Validate a match descriptor; -EEXIST on duplicates, -EINVAL on bad type. */
static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_match_desc *match)
{
	struct device *dev = notifier_dev(notifier);

	switch (match->type) {
	case V4L2_ASYNC_MATCH_TYPE_I2C:
	case V4L2_ASYNC_MATCH_TYPE_FWNODE:
		if (v4l2_async_nf_has_async_match(notifier, match)) {
			dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "v4l2-async: Invalid match type %u on %p\n",
			match->type, match);
		return -EINVAL;
	}

	return 0;
}

/* Initialize a root notifier owned by a v4l2_device. */
void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
			struct v4l2_device *v4l2_dev)
{
	INIT_LIST_HEAD(&notifier->waiting_list);
	INIT_LIST_HEAD(&notifier->done_list);
	INIT_LIST_HEAD(&notifier->notifier_entry);
	notifier->v4l2_dev = v4l2_dev;
}
EXPORT_SYMBOL(v4l2_async_nf_init);

/* Initialize a sub-device notifier owned by a subdev driver. */
void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd)
{
	INIT_LIST_HEAD(&notifier->waiting_list);
	INIT_LIST_HEAD(&notifier->done_list);
	INIT_LIST_HEAD(&notifier->notifier_entry);
	notifier->sd = sd;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);

static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc;
	int ret;

	mutex_lock(&list_lock);

	/* Reject duplicate or malformed match descriptors up front. */
	list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
		ret = v4l2_async_nf_match_valid(notifier, &asc->match);
		if (ret)
			goto err_unlock;
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->notifier_entry, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	/* Exactly one of v4l2_dev / sd must be set (root vs subdev notifier). */
	if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
		return -EINVAL;

	return __v4l2_async_nf_register(notifier);
}
EXPORT_SYMBOL(v4l2_async_nf_register);

static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier);

	list_del_init(&notifier->notifier_entry);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

/* Free all waiting connections; an uninitialized notifier is a no-op. */
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc, *tmp;

	if (!notifier || !notifier->waiting_list.next)
		return;

	WARN_ON(!list_empty(&notifier->done_list));

	list_for_each_entry_safe(asc, tmp, &notifier->waiting_list, asc_entry) {
		list_del(&asc->asc_entry);
		v4l2_async_nf_call_destroy(notifier, asc);

		if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
			fwnode_handle_put(asc->match.fwnode);

		kfree(asc);
	}

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier,
					   struct v4l2_async_connection *asc)
{
	mutex_lock(&list_lock);

	list_add_tail(&asc->asc_entry, &notifier->waiting_list);

	mutex_unlock(&list_lock);
}

struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asc_struct_size)
{
	struct v4l2_async_connection *asc;

	asc = kzalloc(asc_struct_size, GFP_KERNEL);
	if (!asc)
		return ERR_PTR(-ENOMEM);

	asc->notifier = notifier;
	asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE;
	/* Hold a reference on the fwnode; dropped in cleanup. */
	asc->match.fwnode = fwnode_handle_get(fwnode);

	__v4l2_async_nf_add_connection(notifier, asc);

	return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);

struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asc_struct_size)
{
	struct v4l2_async_connection *asc;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_port_parent.
	 */
	fwnode_handle_put(remote);
	return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asc_struct_size)
{
	struct v4l2_async_connection *asc;

	asc = kzalloc(asc_struct_size, GFP_KERNEL);
	if (!asc)
		return ERR_PTR(-ENOMEM);

	asc->notifier = notifier;
	asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C;
	asc->match.i2c.adapter_id = adapter_id;
	asc->match.i2c.address = address;

	__v4l2_async_nf_add_connection(notifier, asc);

	return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);

int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
				   struct fwnode_handle *fwnode)
{
	struct v4l2_async_subdev_endpoint *ase;

	ase = kmalloc(sizeof(*ase), GFP_KERNEL);
	if (!ase)
		return -ENOMEM;

	ase->endpoint = fwnode;
	list_add(&ase->async_subdev_endpoint_entry,
		 &sd->async_subdev_endpoint_list);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add);

struct
v4l2_async_connection *
v4l2_async_connection_unique(struct v4l2_subdev *sd)
{
	/* Only meaningful when the subdev has exactly one connection. */
	if (!list_is_singular(&sd->asc_list))
		return NULL;

	return list_first_entry(&sd->asc_list,
				struct v4l2_async_connection, asc_subdev_entry);
}
EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);

int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	struct v4l2_async_connection *asc;
	int ret;

	INIT_LIST_HEAD(&sd->asc_list);

	/*
	 * No reference taken. The reference is held by the device (struct
	 * v4l2_subdev.dev), and async sub-device does not exist independently
	 * of the device at any point of time.
	 *
	 * The async sub-device shall always be registered for its device node,
	 * not the endpoint node.
	 */
	if (!sd->fwnode && sd->dev) {
		sd->fwnode = dev_fwnode(sd->dev);
	} else if (fwnode_graph_is_endpoint(sd->fwnode)) {
		dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n");
		return -EINVAL;
	}

	sd->owner = module;

	mutex_lock(&list_lock);

	/* Try to bind this subdev against every registered notifier. */
	list_for_each_entry(notifier, &notifier_list, notifier_entry) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);

		if (!v4l2_dev)
			continue;

		while ((asc = v4l2_async_find_match(notifier, sd))) {
			ret = v4l2_async_match_notify(notifier, v4l2_dev, sd,
						      asc);
			if (ret)
				goto err_unbind;

			ret = v4l2_async_nf_try_complete(notifier);
			if (ret)
				goto err_unbind;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

	if (asc)
		v4l2_async_unbind_subdev_one(notifier, asc);

	mutex_unlock(&list_lock);

	sd->owner = NULL;

	return ret;
}
EXPORT_SYMBOL(__v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_connection *asc, *asc_tmp;

	/* Never registered (async_list not linked): nothing to do. */
	if (!sd->async_list.next)
		return;

	v4l2_subdev_put_privacy_led(sd);

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asc_list.next) {
		list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
					 asc_subdev_entry) {
			v4l2_async_unbind_subdev_one(asc->notifier, asc);
		}
	}

	list_del(&sd->async_list);
	sd->async_list.next = NULL;

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

/* debugfs helper: describe one waiting match descriptor. */
static void print_waiting_match(struct seq_file *s,
				struct v4l2_async_match_desc *match)
{
	switch (match->type) {
	case V4L2_ASYNC_MATCH_TYPE_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id,
			   match->i2c.address);
		break;
	case V4L2_ASYNC_MATCH_TYPE_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = match->fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

/* debugfs "pending_async_subdevices" dump: waiting matches per notifier. */
static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_connection *asc;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, notifier_entry) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asc, &notif->waiting_list, asc_entry)
			print_waiting_match(s, &asc->match);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

/* NOTE(review): author e-mail addresses were lost in extraction — restore
 * from upstream v4l2-async.c before applying. */
MODULE_AUTHOR("Guennadi Liakhovetski ");
MODULE_AUTHOR("Sakari Ailus ");
MODULE_AUTHOR("Ezequiel Garcia ");
MODULE_DESCRIPTION("V4L2 asynchronous subdevice registration API");
MODULE_LICENSE("GPL");
diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-cci.c b/6.12.0/drivers/media/v4l2-core/v4l2-cci.c
new file mode 100644
index 0000000..e9ecf47
--- /dev/null
+++ b/6.12.0/drivers/media/v4l2-core/v4l2-cci.c
@@ -0,0 +1,203 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * MIPI Camera Control Interface (CCI) register access helpers.
+ * + * Copyright (C) 2023 Hans de Goede + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +int cci_read(struct regmap *map, u32 reg, u64 *val, int *err) +{ + bool little_endian; + unsigned int len; + u8 buf[8]; + int ret; + + /* + * TODO: Fix smatch. Assign *val to 0 here in order to avoid + * failing a smatch check on caller when the caller proceeds to + * read *val without initialising it on caller's side. *val is set + * to a valid value whenever this function returns 0 but smatch + * can't figure that out currently. + */ + *val = 0; + + if (err && *err) + return *err; + + little_endian = reg & CCI_REG_LE; + len = CCI_REG_WIDTH_BYTES(reg); + reg = CCI_REG_ADDR(reg); + + ret = regmap_bulk_read(map, reg, buf, len); + if (ret) { + dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n", + reg, ret); + goto out; + } + + switch (len) { + case 1: + *val = buf[0]; + break; + case 2: + if (little_endian) + *val = get_unaligned_le16(buf); + else + *val = get_unaligned_be16(buf); + break; + case 3: + if (little_endian) + *val = get_unaligned_le24(buf); + else + *val = get_unaligned_be24(buf); + break; + case 4: + if (little_endian) + *val = get_unaligned_le32(buf); + else + *val = get_unaligned_be32(buf); + break; + case 8: + if (little_endian) + *val = get_unaligned_le64(buf); + else + *val = get_unaligned_be64(buf); + break; + default: + dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n", + len, reg); + ret = -EINVAL; + break; + } + +out: + if (ret && err) + *err = ret; + + return ret; +} +EXPORT_SYMBOL_GPL(cci_read); + +int cci_write(struct regmap *map, u32 reg, u64 val, int *err) +{ + bool little_endian; + unsigned int len; + u8 buf[8]; + int ret; + + if (err && *err) + return *err; + + little_endian = reg & CCI_REG_LE; + len = CCI_REG_WIDTH_BYTES(reg); + reg = CCI_REG_ADDR(reg); + + switch (len) { + case 1: + buf[0] = val; + break; + case 2: + if (little_endian) + put_unaligned_le16(val, 
buf); + else + put_unaligned_be16(val, buf); + break; + case 3: + if (little_endian) + put_unaligned_le24(val, buf); + else + put_unaligned_be24(val, buf); + break; + case 4: + if (little_endian) + put_unaligned_le32(val, buf); + else + put_unaligned_be32(val, buf); + break; + case 8: + if (little_endian) + put_unaligned_le64(val, buf); + else + put_unaligned_be64(val, buf); + break; + default: + dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n", + len, reg); + ret = -EINVAL; + goto out; + } + + ret = regmap_bulk_write(map, reg, buf, len); + if (ret) + dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n", + reg, ret); + +out: + if (ret && err) + *err = ret; + + return ret; +} +EXPORT_SYMBOL_GPL(cci_write); + +int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err) +{ + u64 readval; + int ret; + + ret = cci_read(map, reg, &readval, err); + if (ret) + return ret; + + val = (readval & ~mask) | (val & mask); + + return cci_write(map, reg, val, err); +} +EXPORT_SYMBOL_GPL(cci_update_bits); + +int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs, + unsigned int num_regs, int *err) +{ + unsigned int i; + int ret; + + for (i = 0; i < num_regs; i++) { + ret = cci_write(map, regs[i].reg, regs[i].val, err); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cci_multi_reg_write); + +#if IS_ENABLED(CONFIG_V4L2_CCI_I2C) +struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client, + int reg_addr_bits) +{ + struct regmap_config config = { + .reg_bits = reg_addr_bits, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .disable_locking = true, + }; + + return devm_regmap_init_i2c(client, &config); +} +EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c); +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support"); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-common.c 
b/6.12.0/drivers/media/v4l2-core/v4l2-common.c new file mode 100644 index 0000000..0a2f4f0 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-common.c @@ -0,0 +1,638 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video for Linux Two + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * This file replaces the videodev.c file that comes with the + * regular kernel distribution. + * + * Author: Bill Dirks + * based on code by Alan Cox, + */ + +/* + * Video capture interface for Linux + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * Author: Alan Cox, + * + * Fixes: + */ + +/* + * Video4linux 1/2 integration by Justin Schoeman + * + * 2.4 PROCFS support ported from 2.4 kernels by + * Iñaki García Etxebarria + * Makefile fix by "W. Michael Petullo" + * 2.4 devfs support ported from 2.4 kernels by + * Dan Merillat + * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + */ + +/* + * Video Standard Operations (contributed by Michael Schimek) + */ + +/* Helper functions for control handling */ + +/* Fill in a struct v4l2_queryctrl */ +int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def) +{ + const char *name; + s64 min = _min; + s64 max = _max; + u64 step = _step; + s64 def = _def; + + v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type, + &min, &max, &step, &def, &qctrl->flags); + + if (name == NULL) + return -EINVAL; + + qctrl->minimum = min; + qctrl->maximum = max; + qctrl->step = step; + qctrl->default_value = def; + qctrl->reserved[0] = qctrl->reserved[1] = 0; + strscpy(qctrl->name, name, sizeof(qctrl->name)); + return 0; +} 
+EXPORT_SYMBOL(v4l2_ctrl_query_fill); + +/* Clamp x to be between min and max, aligned to a multiple of 2^align. min + * and max don't have to be aligned, but there must be at least one valid + * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples + * of 16 between 17 and 31. */ +static unsigned int clamp_align(unsigned int x, unsigned int min, + unsigned int max, unsigned int align) +{ + /* Bits that must be zero to be aligned */ + unsigned int mask = ~((1 << align) - 1); + + /* Clamp to aligned min and max */ + x = clamp(x, (min + ~mask) & mask, max & mask); + + /* Round to nearest aligned value */ + if (align) + x = (x + (1 << (align - 1))) & mask; + + return x; +} + +static unsigned int clamp_roundup(unsigned int x, unsigned int min, + unsigned int max, unsigned int alignment) +{ + x = clamp(x, min, max); + if (alignment) + x = round_up(x, alignment); + + return x; +} + +void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, + unsigned int walign, + u32 *h, unsigned int hmin, unsigned int hmax, + unsigned int halign, unsigned int salign) +{ + *w = clamp_align(*w, wmin, wmax, walign); + *h = clamp_align(*h, hmin, hmax, halign); + + /* Usually we don't need to align the size and are done now. */ + if (!salign) + return; + + /* How much alignment do we have? */ + walign = __ffs(*w); + halign = __ffs(*h); + /* Enough to satisfy the image alignment? 
*/ + if (walign + halign < salign) { + /* Max walign where there is still a valid width */ + unsigned int wmaxa = __fls(wmax ^ (wmin - 1)); + /* Max halign where there is still a valid height */ + unsigned int hmaxa = __fls(hmax ^ (hmin - 1)); + + /* up the smaller alignment until we have enough */ + do { + if (halign >= hmaxa || + (walign <= halign && walign < wmaxa)) { + *w = clamp_align(*w, wmin, wmax, walign + 1); + walign = __ffs(*w); + } else { + *h = clamp_align(*h, hmin, hmax, halign + 1); + halign = __ffs(*h); + } + } while (halign + walign < salign); + } +} +EXPORT_SYMBOL_GPL(v4l_bound_align_image); + +const void * +__v4l2_find_nearest_size(const void *array, size_t array_size, + size_t entry_size, size_t width_offset, + size_t height_offset, s32 width, s32 height) +{ + u32 error, min_error = U32_MAX; + const void *best = NULL; + unsigned int i; + + if (!array) + return NULL; + + for (i = 0; i < array_size; i++, array += entry_size) { + const u32 *entry_width = array + width_offset; + const u32 *entry_height = array + height_offset; + + error = abs(*entry_width - width) + abs(*entry_height - height); + if (error > min_error) + continue; + + min_error = error; + best = array; + if (!error) + break; + } + + return best; +} +EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size); + +int v4l2_g_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a) +{ + struct v4l2_subdev_frame_interval ival = { 0 }; + int ret; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + + if (vdev->device_caps & V4L2_CAP_READWRITE) + a->parm.capture.readbuffers = 2; + if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival); + if (!ret) + a->parm.capture.timeperframe = ival.interval; + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_g_parm_cap); + +int v4l2_s_parm_cap(struct 
video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a) +{ + struct v4l2_subdev_frame_interval ival = { + .interval = a->parm.capture.timeperframe + }; + int ret; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + + memset(&a->parm, 0, sizeof(a->parm)); + if (vdev->device_caps & V4L2_CAP_READWRITE) + a->parm.capture.readbuffers = 2; + else + a->parm.capture.readbuffers = 0; + + if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival); + if (!ret) + a->parm.capture.timeperframe = ival.interval; + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_s_parm_cap); + +const struct v4l2_format_info *v4l2_format_info(u32 format) +{ + static const struct v4l2_format_info formats[] = { + /* RGB formats */ + { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB32, 
.pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, 
.comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* YUV packed formats */ + { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, 
.bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + + /* YUV planar formats */ + { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = 
V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, + { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, + { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* Tiled YUV formats */ + { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV15_4L4, 
.pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }}, + { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + /* YUV planar formats, non contiguous variant */ + { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 
1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + /* Bayer RGB formats */ + { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 
1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, 
.bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(formats); ++i) + if (formats[i].format == format) + return &formats[i]; + return NULL; +} +EXPORT_SYMBOL(v4l2_format_info); + +static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane) +{ + if (!info->block_w[plane]) + return 1; + return info->block_w[plane]; +} + +static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane) +{ + if (!info->block_h[plane]) + return 1; + return info->block_h[plane]; +} + +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize) +{ + if (!frmsize) + return; + + /* + * Clamp width/height to meet min/max constraints and round it up to + * macroblock alignment. + */ + *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width, + frmsize->step_width); + *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height, + frmsize->step_height); +} +EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints); + +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, + u32 pixelformat, u32 width, u32 height) +{ + const struct v4l2_format_info *info; + struct v4l2_plane_pix_format *plane; + int i; + + info = v4l2_format_info(pixelformat); + if (!info) + return -EINVAL; + + pixfmt->width = width; + pixfmt->height = height; + pixfmt->pixelformat = pixelformat; + pixfmt->num_planes = info->mem_planes; + + if (info->mem_planes == 1) { + plane = &pixfmt->plane_fmt[0]; + plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0]; + plane->sizeimage = 0; + + for (i = 0; i < info->comp_planes; i++) { + unsigned int hdiv = (i == 0) ? 1 : info->hdiv; + unsigned int vdiv = (i == 0) ? 
1 : info->vdiv; + unsigned int aligned_width; + unsigned int aligned_height; + + aligned_width = ALIGN(width, v4l2_format_block_width(info, i)); + aligned_height = ALIGN(height, v4l2_format_block_height(info, i)); + + plane->sizeimage += info->bpp[i] * + DIV_ROUND_UP(aligned_width, hdiv) * + DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i]; + } + } else { + for (i = 0; i < info->comp_planes; i++) { + unsigned int hdiv = (i == 0) ? 1 : info->hdiv; + unsigned int vdiv = (i == 0) ? 1 : info->vdiv; + unsigned int aligned_width; + unsigned int aligned_height; + + aligned_width = ALIGN(width, v4l2_format_block_width(info, i)); + aligned_height = ALIGN(height, v4l2_format_block_height(info, i)); + + plane = &pixfmt->plane_fmt[i]; + plane->bytesperline = + info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i]; + plane->sizeimage = + plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp); + +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height) +{ + const struct v4l2_format_info *info; + int i; + + info = v4l2_format_info(pixelformat); + if (!info) + return -EINVAL; + + /* Single planar API cannot be used for multi plane formats. */ + if (info->mem_planes > 1) + return -EINVAL; + + pixfmt->width = width; + pixfmt->height = height; + pixfmt->pixelformat = pixelformat; + pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0]; + pixfmt->sizeimage = 0; + + for (i = 0; i < info->comp_planes; i++) { + unsigned int hdiv = (i == 0) ? 1 : info->hdiv; + unsigned int vdiv = (i == 0) ? 
1 : info->vdiv; + unsigned int aligned_width; + unsigned int aligned_height; + + aligned_width = ALIGN(width, v4l2_format_block_width(info, i)); + aligned_height = ALIGN(height, v4l2_format_block_height(info, i)); + + pixfmt->sizeimage += info->bpp[i] * + DIV_ROUND_UP(aligned_width, hdiv) * + DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i]; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt); + +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, + unsigned int div) +{ + struct v4l2_ctrl *ctrl; + s64 freq; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ); + if (ctrl) { + struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ }; + int ret; + + qm.index = v4l2_ctrl_g_ctrl(ctrl); + + ret = v4l2_querymenu(handler, &qm); + if (ret) + return -ENOENT; + + freq = qm.value; + } else { + if (!mul || !div) + return -ENOENT; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE); + if (!ctrl) + return -ENOENT; + + freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div); + + pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", + __func__); + pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", + __func__); + } + + return freq > 0 ? freq : -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_get_link_freq); + +/* + * Simplify a fraction using a simple continued fraction decomposition. The + * idea here is to convert fractions such as 333333/10000000 to 1/30 using + * 32 bit arithmetic only. The algorithm is not perfect and relies upon two + * arbitrary parameters to remove non-significative terms from the simple + * continued fraction decomposition. Using 8 and 333 for n_terms and threshold + * respectively seems to give nice results. 
+ */ +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold) +{ + u32 *an; + u32 x, y, r; + unsigned int i, n; + + an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL); + if (an == NULL) + return; + + /* + * Convert the fraction to a simple continued fraction. See + * https://en.wikipedia.org/wiki/Continued_fraction + * Stop if the current term is bigger than or equal to the given + * threshold. + */ + x = *numerator; + y = *denominator; + + for (n = 0; n < n_terms && y != 0; ++n) { + an[n] = x / y; + if (an[n] >= threshold) { + if (n < 2) + n++; + break; + } + + r = x - an[n] * y; + x = y; + y = r; + } + + /* Expand the simple continued fraction back to an integer fraction. */ + x = 0; + y = 1; + + for (i = n; i > 0; --i) { + r = y; + y = an[i-1] * y + x; + x = r; + } + + *numerator = y; + *denominator = x; + kfree(an); +} +EXPORT_SYMBOL_GPL(v4l2_simplify_fraction); + +/* + * Convert a fraction to a frame interval in 100ns multiples. The idea here is + * to compute numerator / denominator * 10000000 using 32 bit fixed point + * arithmetic only. + */ +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator) +{ + u32 multiplier; + + /* Saturate the result if the operation would overflow. */ + if (denominator == 0 || + numerator/denominator >= ((u32)-1)/10000000) + return (u32)-1; + + /* + * Divide both the denominator and the multiplier by two until + * numerator * multiplier doesn't overflow. If anyone knows a better + * algorithm please let me know. + */ + multiplier = 10000000; + while (numerator > ((u32)-1)/multiplier) { + multiplier /= 2; + denominator /= 2; + } + + return denominator ? 
numerator * multiplier / denominator : 0; +} +EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval); + +int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, + unsigned int num_of_fw_link_freqs, + const s64 *driver_link_freqs, + unsigned int num_of_driver_link_freqs, + unsigned long *bitmap) +{ + unsigned int i; + + *bitmap = 0; + + if (!num_of_fw_link_freqs) { + dev_err(dev, "no link frequencies in firmware\n"); + return -ENODATA; + } + + for (i = 0; i < num_of_fw_link_freqs; i++) { + unsigned int j; + + for (j = 0; j < num_of_driver_link_freqs; j++) { + if (fw_link_freqs[i] != driver_link_freqs[j]) + continue; + + dev_dbg(dev, "enabling link frequency %lld Hz\n", + driver_link_freqs[j]); + *bitmap |= BIT(j); + break; + } + } + + if (!*bitmap) { + dev_err(dev, "no matching link frequencies found\n"); + + dev_dbg(dev, "specified in firmware:\n"); + for (i = 0; i < num_of_fw_link_freqs; i++) + dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]); + + dev_dbg(dev, "driver supported:\n"); + for (i = 0; i < num_of_driver_link_freqs; i++) + dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]); + + return -ENOENT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/6.12.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c new file mode 100644 index 0000000..8c07400 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -0,0 +1,1210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ioctl32.c: Conversion between 32bit and 64bit native ioctls. + * Separated from fs stuff by Arnd Bergmann + * + * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com) + * Copyright (C) 1998 Eddie C. 
Dost (ecd@skynet.be) + * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs + * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz) + * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be) + * Copyright (C) 2008 Hans Verkuil + * + * These routines maintain argument size conversion between 32bit and 64bit + * ioctls. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Per-ioctl data copy handlers. + * + * Those come in pairs, with a get_v4l2_foo() and a put_v4l2_foo() routine, + * where "v4l2_foo" is the name of the V4L2 struct. + * + * They basically get two __user pointers, one with a 32-bits struct that + * came from the userspace call and a 64-bits struct, also allocated as + * userspace, but filled internally by do_video_ioctl(). + * + * For ioctls that have pointers inside it, the functions will also + * receive an ancillary buffer with extra space, used to pass extra + * data to the routine. + */ + +struct v4l2_window32 { + struct v4l2_rect w; + __u32 field; /* enum v4l2_field */ + __u32 chromakey; + compat_caddr_t clips; /* always NULL */ + __u32 clipcount; /* always 0 */ + compat_caddr_t bitmap; /* always NULL */ + __u8 global_alpha; +}; + +static int get_v4l2_window32(struct v4l2_window *p64, + struct v4l2_window32 __user *p32) +{ + struct v4l2_window32 w32; + + if (copy_from_user(&w32, p32, sizeof(w32))) + return -EFAULT; + + *p64 = (struct v4l2_window) { + .w = w32.w, + .field = w32.field, + .chromakey = w32.chromakey, + .clips = NULL, + .clipcount = 0, + .bitmap = NULL, + .global_alpha = w32.global_alpha, + }; + + return 0; +} + +static int put_v4l2_window32(struct v4l2_window *p64, + struct v4l2_window32 __user *p32) +{ + struct v4l2_window32 w32; + + memset(&w32, 0, sizeof(w32)); + w32 = (struct v4l2_window32) { + .w = p64->w, + .field = p64->field, + .chromakey = p64->chromakey, + .clips = 0, + .clipcount = 0, + .bitmap = 0, + .global_alpha = p64->global_alpha, + }; + + if (copy_to_user(p32, &w32, sizeof(w32))) + 
return -EFAULT; + + return 0; +} + +struct v4l2_format32 { + __u32 type; /* enum v4l2_buf_type */ + union { + struct v4l2_pix_format pix; + struct v4l2_pix_format_mplane pix_mp; + struct v4l2_window32 win; + struct v4l2_vbi_format vbi; + struct v4l2_sliced_vbi_format sliced; + struct v4l2_sdr_format sdr; + struct v4l2_meta_format meta; + __u8 raw_data[200]; /* user-defined */ + } fmt; +}; + +/** + * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument + * @index: on return, index of the first created buffer + * @count: entry: number of requested buffers, + * return: number of created buffers + * @memory: buffer memory type + * @format: frame format, for which buffers are requested + * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and + * configured for MMAP streaming I/O). + * @max_num_buffers: if V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set + * this field indicate the maximum possible number of buffers + * for this queue. + * @reserved: future extensions + */ +struct v4l2_create_buffers32 { + __u32 index; + __u32 count; + __u32 memory; /* enum v4l2_memory */ + struct v4l2_format32 format; + __u32 capabilities; + __u32 flags; + __u32 max_num_buffers; + __u32 reserved[5]; +}; + +static int get_v4l2_format32(struct v4l2_format *p64, + struct v4l2_format32 __user *p32) +{ + if (get_user(p64->type, &p32->type)) + return -EFAULT; + + switch (p64->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return copy_from_user(&p64->fmt.pix, &p32->fmt.pix, + sizeof(p64->fmt.pix)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return copy_from_user(&p64->fmt.pix_mp, &p32->fmt.pix_mp, + sizeof(p64->fmt.pix_mp)) ? 
-EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return get_v4l2_window32(&p64->fmt.win, &p32->fmt.win); + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + return copy_from_user(&p64->fmt.vbi, &p32->fmt.vbi, + sizeof(p64->fmt.vbi)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return copy_from_user(&p64->fmt.sliced, &p32->fmt.sliced, + sizeof(p64->fmt.sliced)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + return copy_from_user(&p64->fmt.sdr, &p32->fmt.sdr, + sizeof(p64->fmt.sdr)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return copy_from_user(&p64->fmt.meta, &p32->fmt.meta, + sizeof(p64->fmt.meta)) ? -EFAULT : 0; + default: + return -EINVAL; + } +} + +static int get_v4l2_create32(struct v4l2_create_buffers *p64, + struct v4l2_create_buffers32 __user *p32) +{ + if (copy_from_user(p64, p32, + offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; + if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags))) + return -EFAULT; + if (copy_from_user(&p64->max_num_buffers, &p32->max_num_buffers, + sizeof(p32->max_num_buffers))) + return -EFAULT; + return get_v4l2_format32(&p64->format, &p32->format); +} + +static int put_v4l2_format32(struct v4l2_format *p64, + struct v4l2_format32 __user *p32) +{ + switch (p64->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return copy_to_user(&p32->fmt.pix, &p64->fmt.pix, + sizeof(p64->fmt.pix)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return copy_to_user(&p32->fmt.pix_mp, &p64->fmt.pix_mp, + sizeof(p64->fmt.pix_mp)) ? 
-EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return put_v4l2_window32(&p64->fmt.win, &p32->fmt.win); + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + return copy_to_user(&p32->fmt.vbi, &p64->fmt.vbi, + sizeof(p64->fmt.vbi)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return copy_to_user(&p32->fmt.sliced, &p64->fmt.sliced, + sizeof(p64->fmt.sliced)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + return copy_to_user(&p32->fmt.sdr, &p64->fmt.sdr, + sizeof(p64->fmt.sdr)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return copy_to_user(&p32->fmt.meta, &p64->fmt.meta, + sizeof(p64->fmt.meta)) ? -EFAULT : 0; + default: + return -EINVAL; + } +} + +static int put_v4l2_create32(struct v4l2_create_buffers *p64, + struct v4l2_create_buffers32 __user *p32) +{ + if (copy_to_user(p32, p64, + offsetof(struct v4l2_create_buffers32, format)) || + put_user(p64->capabilities, &p32->capabilities) || + put_user(p64->flags, &p32->flags) || + put_user(p64->max_num_buffers, &p32->max_num_buffers) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) + return -EFAULT; + return put_v4l2_format32(&p64->format, &p32->format); +} + +struct v4l2_standard32 { + __u32 index; + compat_u64 id; + __u8 name[24]; + struct v4l2_fract frameperiod; /* Frames, not fields */ + __u32 framelines; + __u32 reserved[4]; +}; + +static int get_v4l2_standard32(struct v4l2_standard *p64, + struct v4l2_standard32 __user *p32) +{ + /* other fields are not set by the user, nor used by the driver */ + return get_user(p64->index, &p32->index); +} + +static int put_v4l2_standard32(struct v4l2_standard *p64, + struct v4l2_standard32 __user *p32) +{ + if (put_user(p64->index, &p32->index) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->name, p64->name, sizeof(p32->name)) || + 
copy_to_user(&p32->frameperiod, &p64->frameperiod, + sizeof(p32->frameperiod)) || + put_user(p64->framelines, &p32->framelines) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} + +struct v4l2_plane32 { + __u32 bytesused; + __u32 length; + union { + __u32 mem_offset; + compat_long_t userptr; + __s32 fd; + } m; + __u32 data_offset; + __u32 reserved[11]; +}; + +/* + * This is correct for all architectures including i386, but not x32, + * which has different alignment requirements for timestamp + */ +struct v4l2_buffer32 { + __u32 index; + __u32 type; /* enum v4l2_buf_type */ + __u32 bytesused; + __u32 flags; + __u32 field; /* enum v4l2_field */ + struct { + compat_s64 tv_sec; + compat_s64 tv_usec; + } timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; /* enum v4l2_memory */ + union { + __u32 offset; + compat_long_t userptr; + compat_caddr_t planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + __s32 request_fd; +}; + +#ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_buffer32_time32 { + __u32 index; + __u32 type; /* enum v4l2_buf_type */ + __u32 bytesused; + __u32 flags; + __u32 field; /* enum v4l2_field */ + struct old_timeval32 timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; /* enum v4l2_memory */ + union { + __u32 offset; + compat_long_t userptr; + compat_caddr_t planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + __s32 request_fd; +}; +#endif + +static int get_v4l2_plane32(struct v4l2_plane *p64, + struct v4l2_plane32 __user *p32, + enum v4l2_memory memory) +{ + struct v4l2_plane32 plane32; + typeof(p64->m) m = {}; + + if (copy_from_user(&plane32, p32, sizeof(plane32))) + return -EFAULT; + + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + m.mem_offset = plane32.m.mem_offset; + break; + case V4L2_MEMORY_USERPTR: + m.userptr = (unsigned 
long)compat_ptr(plane32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + m.fd = plane32.m.fd; + break; + } + + memset(p64, 0, sizeof(*p64)); + *p64 = (struct v4l2_plane) { + .bytesused = plane32.bytesused, + .length = plane32.length, + .m = m, + .data_offset = plane32.data_offset, + }; + + return 0; +} + +static int put_v4l2_plane32(struct v4l2_plane *p64, + struct v4l2_plane32 __user *p32, + enum v4l2_memory memory) +{ + struct v4l2_plane32 plane32; + + memset(&plane32, 0, sizeof(plane32)); + plane32 = (struct v4l2_plane32) { + .bytesused = p64->bytesused, + .length = p64->length, + .data_offset = p64->data_offset, + }; + + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + plane32.m.mem_offset = p64->m.mem_offset; + break; + case V4L2_MEMORY_USERPTR: + plane32.m.userptr = (uintptr_t)(p64->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + plane32.m.fd = p64->m.fd; + break; + } + + if (copy_to_user(p32, &plane32, sizeof(plane32))) + return -EFAULT; + + return 0; +} + +static int get_v4l2_buffer32(struct v4l2_buffer *vb, + struct v4l2_buffer32 __user *arg) +{ + struct v4l2_buffer32 vb32; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + memset(vb, 0, sizeof(*vb)); + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.offset = vb32.m.offset, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb->m.offset = vb32.m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb->m.fd = vb32.m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb->m.planes = (void 
__force *) + compat_ptr(vb32.m.planes); + + return 0; +} + +#ifdef CONFIG_COMPAT_32BIT_TIME +static int get_v4l2_buffer32_time32(struct v4l2_buffer *vb, + struct v4l2_buffer32_time32 __user *arg) +{ + struct v4l2_buffer32_time32 vb32; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.offset = vb32.m.offset, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb->m.offset = vb32.m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb->m.fd = vb32.m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb->m.planes = (void __force *) + compat_ptr(vb32.m.planes); + + return 0; +} +#endif + +static int put_v4l2_buffer32(struct v4l2_buffer *vb, + struct v4l2_buffer32 __user *arg) +{ + struct v4l2_buffer32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + vb32 = (struct v4l2_buffer32) { + .index = vb->index, + .type = vb->type, + .bytesused = vb->bytesused, + .flags = vb->flags, + .field = vb->field, + .timestamp.tv_sec = vb->timestamp.tv_sec, + .timestamp.tv_usec = vb->timestamp.tv_usec, + .timecode = vb->timecode, + .sequence = vb->sequence, + .memory = vb->memory, + .m.offset = vb->m.offset, + .length = vb->length, + .request_fd = vb->request_fd, + }; + + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb32.m.offset = vb->m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb32.m.userptr = (uintptr_t)(vb->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb32.m.fd = vb->m.fd; + break; + } + + if 
(V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb32.m.planes = (uintptr_t)vb->m.planes; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_COMPAT_32BIT_TIME +static int put_v4l2_buffer32_time32(struct v4l2_buffer *vb, + struct v4l2_buffer32_time32 __user *arg) +{ + struct v4l2_buffer32_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + vb32 = (struct v4l2_buffer32_time32) { + .index = vb->index, + .type = vb->type, + .bytesused = vb->bytesused, + .flags = vb->flags, + .field = vb->field, + .timestamp.tv_sec = vb->timestamp.tv_sec, + .timestamp.tv_usec = vb->timestamp.tv_usec, + .timecode = vb->timecode, + .sequence = vb->sequence, + .memory = vb->memory, + .m.offset = vb->m.offset, + .length = vb->length, + .request_fd = vb->request_fd, + }; + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb32.m.offset = vb->m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb32.m.userptr = (uintptr_t)(vb->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb32.m.fd = vb->m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb32.m.planes = (uintptr_t)vb->m.planes; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + + return 0; +} +#endif + +struct v4l2_framebuffer32 { + __u32 capability; + __u32 flags; + compat_caddr_t base; + struct { + __u32 width; + __u32 height; + __u32 pixelformat; + __u32 field; + __u32 bytesperline; + __u32 sizeimage; + __u32 colorspace; + __u32 priv; + } fmt; +}; + +static int get_v4l2_framebuffer32(struct v4l2_framebuffer *p64, + struct v4l2_framebuffer32 __user *p32) +{ + if (get_user(p64->capability, &p32->capability) || + get_user(p64->flags, &p32->flags) || + copy_from_user(&p64->fmt, &p32->fmt, sizeof(p64->fmt))) + return -EFAULT; + p64->base = NULL; + + return 0; +} + +static int put_v4l2_framebuffer32(struct v4l2_framebuffer *p64, + struct v4l2_framebuffer32 __user *p32) +{ + if (put_user((uintptr_t)p64->base, &p32->base) || + put_user(p64->capability, 
&p32->capability) || + put_user(p64->flags, &p32->flags) || + copy_to_user(&p32->fmt, &p64->fmt, sizeof(p64->fmt))) + return -EFAULT; + + return 0; +} + +struct v4l2_input32 { + __u32 index; /* Which input */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of input */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 tuner; /* Associated tuner */ + compat_u64 std; + __u32 status; + __u32 capabilities; + __u32 reserved[3]; +}; + +/* + * The 64-bit v4l2_input struct has extra padding at the end of the struct. + * Otherwise it is identical to the 32-bit version. + */ +static inline int get_v4l2_input32(struct v4l2_input *p64, + struct v4l2_input32 __user *p32) +{ + if (copy_from_user(p64, p32, sizeof(*p32))) + return -EFAULT; + return 0; +} + +static inline int put_v4l2_input32(struct v4l2_input *p64, + struct v4l2_input32 __user *p32) +{ + if (copy_to_user(p32, p64, sizeof(*p32))) + return -EFAULT; + return 0; +} + +struct v4l2_ext_controls32 { + __u32 which; + __u32 count; + __u32 error_idx; + __s32 request_fd; + __u32 reserved[1]; + compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */ +}; + +struct v4l2_ext_control32 { + __u32 id; + __u32 size; + __u32 reserved2[1]; + union { + __s32 value; + __s64 value64; + compat_caddr_t string; /* actually char * */ + }; +} __attribute__ ((packed)); + +/* Return true if this control is a pointer type. 
*/ +static inline bool ctrl_is_pointer(struct file *file, u32 id) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_fh *fh = NULL; + struct v4l2_ctrl_handler *hdl = NULL; + struct v4l2_query_ext_ctrl qec = { id }; + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + fh = file->private_data; + + if (fh && fh->ctrl_handler) + hdl = fh->ctrl_handler; + else if (vdev->ctrl_handler) + hdl = vdev->ctrl_handler; + + if (hdl) { + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); + + return ctrl && ctrl->is_ptr; + } + + if (!ops || !ops->vidioc_query_ext_ctrl) + return false; + + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); +} + +static int get_v4l2_ext_controls32(struct v4l2_ext_controls *p64, + struct v4l2_ext_controls32 __user *p32) +{ + struct v4l2_ext_controls32 ec32; + + if (copy_from_user(&ec32, p32, sizeof(ec32))) + return -EFAULT; + + *p64 = (struct v4l2_ext_controls) { + .which = ec32.which, + .count = ec32.count, + .error_idx = ec32.error_idx, + .request_fd = ec32.request_fd, + .reserved[0] = ec32.reserved[0], + .controls = (void __force *)compat_ptr(ec32.controls), + }; + + return 0; +} + +static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64, + struct v4l2_ext_controls32 __user *p32) +{ + struct v4l2_ext_controls32 ec32; + + memset(&ec32, 0, sizeof(ec32)); + ec32 = (struct v4l2_ext_controls32) { + .which = p64->which, + .count = p64->count, + .error_idx = p64->error_idx, + .request_fd = p64->request_fd, + .reserved[0] = p64->reserved[0], + .controls = (uintptr_t)p64->controls, + }; + + if (copy_to_user(p32, &ec32, sizeof(ec32))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_X86_64 +/* + * x86 is the only compat architecture with different struct alignment + * between 32-bit and 64-bit tasks. 
+ */ +struct v4l2_event32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct { + compat_s64 tv_sec; + compat_s64 tv_nsec; + } timestamp; + __u32 id; + __u32 reserved[8]; +}; + +static int put_v4l2_event32(struct v4l2_event *p64, + struct v4l2_event32 __user *p32) +{ + if (put_user(p64->type, &p32->type) || + copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) || + put_user(p64->pending, &p32->pending) || + put_user(p64->sequence, &p32->sequence) || + put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) || + put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} + +#endif + +#ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_event32_time32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + +static int put_v4l2_event32_time32(struct v4l2_event *p64, + struct v4l2_event32_time32 __user *p32) +{ + if (put_user(p64->type, &p32->type) || + copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) || + put_user(p64->pending, &p32->pending) || + put_user(p64->sequence, &p32->sequence) || + put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) || + put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} +#endif + +struct v4l2_edid32 { + __u32 pad; + __u32 start_block; + __u32 blocks; + __u32 reserved[5]; + compat_caddr_t edid; +}; + +static int get_v4l2_edid32(struct v4l2_edid *p64, + struct v4l2_edid32 __user *p32) +{ + compat_uptr_t edid; + + if (copy_from_user(p64, p32, offsetof(struct v4l2_edid32, edid)) || + get_user(edid, &p32->edid)) + return -EFAULT; + + p64->edid = (void __force *)compat_ptr(edid); + return 0; +} + 
+static int put_v4l2_edid32(struct v4l2_edid *p64, + struct v4l2_edid32 __user *p32) +{ + if (copy_to_user(p32, p64, offsetof(struct v4l2_edid32, edid))) + return -EFAULT; + return 0; +} + +/* + * List of ioctls that require 32-bits/64-bits conversion + * + * The V4L2 ioctls that aren't listed there don't have pointer arguments + * and the struct size is identical for both 32 and 64 bits versions, so + * they don't need translations. + */ + +#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32) +#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32) +#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32) +#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32) +#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32) +#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32) +#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32) +#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32) +#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32) +#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32) +#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32) +#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32) +#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) +#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) +#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) +#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32) +#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32) +#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32) + +#ifdef CONFIG_COMPAT_32BIT_TIME +#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32) +#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32) +#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32) +#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32) +#define VIDIOC_PREPARE_BUF32_TIME32 
_IOWR('V', 93, struct v4l2_buffer32_time32) +#endif + +unsigned int v4l2_compat_translate_cmd(unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + return VIDIOC_G_FMT; + case VIDIOC_S_FMT32: + return VIDIOC_S_FMT; + case VIDIOC_TRY_FMT32: + return VIDIOC_TRY_FMT; + case VIDIOC_G_FBUF32: + return VIDIOC_G_FBUF; + case VIDIOC_S_FBUF32: + return VIDIOC_S_FBUF; +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF32_TIME32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF32_TIME32: + return VIDIOC_DQBUF; + case VIDIOC_PREPARE_BUF32_TIME32: + return VIDIOC_PREPARE_BUF; +#endif + case VIDIOC_QUERYBUF32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF32: + return VIDIOC_DQBUF; + case VIDIOC_CREATE_BUFS32: + return VIDIOC_CREATE_BUFS; + case VIDIOC_G_EXT_CTRLS32: + return VIDIOC_G_EXT_CTRLS; + case VIDIOC_S_EXT_CTRLS32: + return VIDIOC_S_EXT_CTRLS; + case VIDIOC_TRY_EXT_CTRLS32: + return VIDIOC_TRY_EXT_CTRLS; + case VIDIOC_PREPARE_BUF32: + return VIDIOC_PREPARE_BUF; + case VIDIOC_ENUMSTD32: + return VIDIOC_ENUMSTD; + case VIDIOC_ENUMINPUT32: + return VIDIOC_ENUMINPUT; + case VIDIOC_G_EDID32: + return VIDIOC_G_EDID; + case VIDIOC_S_EDID32: + return VIDIOC_S_EDID; +#ifdef CONFIG_X86_64 + case VIDIOC_DQEVENT32: + return VIDIOC_DQEVENT; +#endif +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_DQEVENT32_TIME32: + return VIDIOC_DQEVENT; +#endif + } + return cmd; +} + +int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: + return get_v4l2_format32(parg, arg); + + case VIDIOC_S_FBUF32: + return get_v4l2_framebuffer32(parg, arg); +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: + return get_v4l2_buffer32_time32(parg, arg); +#endif + case VIDIOC_QUERYBUF32: + case 
VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: + return get_v4l2_buffer32(parg, arg); + + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: + return get_v4l2_ext_controls32(parg, arg); + + case VIDIOC_CREATE_BUFS32: + return get_v4l2_create32(parg, arg); + + case VIDIOC_ENUMSTD32: + return get_v4l2_standard32(parg, arg); + + case VIDIOC_ENUMINPUT32: + return get_v4l2_input32(parg, arg); + + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: + return get_v4l2_edid32(parg, arg); + } + return 0; +} + +int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: + return put_v4l2_format32(parg, arg); + + case VIDIOC_G_FBUF32: + return put_v4l2_framebuffer32(parg, arg); +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: + return put_v4l2_buffer32_time32(parg, arg); +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: + return put_v4l2_buffer32(parg, arg); + + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: + return put_v4l2_ext_controls32(parg, arg); + + case VIDIOC_CREATE_BUFS32: + return put_v4l2_create32(parg, arg); + + case VIDIOC_ENUMSTD32: + return put_v4l2_standard32(parg, arg); + + case VIDIOC_ENUMINPUT32: + return put_v4l2_input32(parg, arg); + + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: + return put_v4l2_edid32(parg, arg); +#ifdef CONFIG_X86_64 + case VIDIOC_DQEVENT32: + return put_v4l2_event32(parg, arg); +#endif +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_DQEVENT32_TIME32: + return put_v4l2_event32_time32(parg, arg); +#endif + } + return 0; +} + +int v4l2_compat_get_array_args(struct file *file, void *mbuf, + void __user *user_ptr, size_t array_size, + unsigned int cmd, void *arg) +{ + int err = 0; 
+ + memset(mbuf, 0, array_size); + + switch (cmd) { +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: { + struct v4l2_buffer *b64 = arg; + struct v4l2_plane *p64 = mbuf; + struct v4l2_plane32 __user *p32 = user_ptr; + + if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) { + u32 num_planes = b64->length; + + if (num_planes == 0) + return 0; + + while (num_planes--) { + err = get_v4l2_plane32(p64, p32, b64->memory); + if (err) + return err; + ++p64; + ++p32; + } + } + break; + } + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: { + struct v4l2_ext_controls *ecs64 = arg; + struct v4l2_ext_control *ec64 = mbuf; + struct v4l2_ext_control32 __user *ec32 = user_ptr; + int n; + + for (n = 0; n < ecs64->count; n++) { + if (copy_from_user(ec64, ec32, sizeof(*ec32))) + return -EFAULT; + + if (ctrl_is_pointer(file, ec64->id)) { + compat_uptr_t p; + + if (get_user(p, &ec32->string)) + return -EFAULT; + ec64->string = compat_ptr(p); + } + ec32++; + ec64++; + } + break; + } + default: + if (copy_from_user(mbuf, user_ptr, array_size)) + err = -EFAULT; + break; + } + + return err; +} + +int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr, + void *mbuf, size_t array_size, + unsigned int cmd, void *arg) +{ + int err = 0; + + switch (cmd) { +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: { + struct v4l2_buffer *b64 = arg; + struct v4l2_plane *p64 = mbuf; + struct v4l2_plane32 __user *p32 = user_ptr; + + if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) { + u32 num_planes = b64->length; + + if (num_planes == 0) + 
return 0; + + while (num_planes--) { + err = put_v4l2_plane32(p64, p32, b64->memory); + if (err) + return err; + ++p64; + ++p32; + } + } + break; + } + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: { + struct v4l2_ext_controls *ecs64 = arg; + struct v4l2_ext_control *ec64 = mbuf; + struct v4l2_ext_control32 __user *ec32 = user_ptr; + int n; + + for (n = 0; n < ecs64->count; n++) { + unsigned int size = sizeof(*ec32); + /* + * Do not modify the pointer when copying a pointer + * control. The contents of the pointer was changed, + * not the pointer itself. + * The structures are otherwise compatible. + */ + if (ctrl_is_pointer(file, ec64->id)) + size -= sizeof(ec32->value64); + + if (copy_to_user(ec32, ec64, size)) + return -EFAULT; + + ec32++; + ec64++; + } + break; + } + default: + if (copy_to_user(user_ptr, mbuf, array_size)) + err = -EFAULT; + break; + } + + return err; +} + +/** + * v4l2_compat_ioctl32() - Handles a compat32 ioctl call + * + * @file: pointer to &struct file with the file handler + * @cmd: ioctl to be called + * @arg: arguments passed from/to the ioctl handler + * + * This function is meant to be used as .compat_ioctl fops at v4l2-dev.c + * in order to deal with 32-bit calls on a 64-bits Kernel. + * + * This function calls do_video_ioctl() for non-private V4L2 ioctls. + * If the function is a private one it calls vdev->fops->compat_ioctl32 + * instead. 
+ */ +long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct video_device *vdev = video_devdata(file); + long ret = -ENOIOCTLCMD; + + if (!file->f_op->unlocked_ioctl) + return ret; + + if (!video_is_registered(vdev)) + return -ENODEV; + + if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + ret = file->f_op->unlocked_ioctl(file, cmd, + (unsigned long)compat_ptr(arg)); + else if (vdev->fops->compat_ioctl32) + ret = vdev->fops->compat_ioctl32(file, cmd, arg); + + if (ret == -ENOIOCTLCMD) + pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n", + _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd); + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-api.c b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-api.c new file mode 100644 index 0000000..95a2202 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework uAPI implementation: + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#define pr_fmt(fmt) "v4l2-ctrls: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +/* Internal temporary helper struct, one for each v4l2_ext_control */ +struct v4l2_ctrl_helper { + /* Pointer to the control reference of the master control */ + struct v4l2_ctrl_ref *mref; + /* The control ref corresponding to the v4l2_ext_control ID field. */ + struct v4l2_ctrl_ref *ref; + /* + * v4l2_ext_control index of the next control belonging to the + * same cluster, or 0 if there isn't any. + */ + u32 next; +}; + +/* + * Helper functions to copy control payload data from kernel space to + * user space and vice versa. 
+ */ + +/* Helper function: copy the given control value back to the caller */ +static int ptr_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr) +{ + u32 len; + + if (ctrl->is_ptr && !ctrl->is_string) + return copy_to_user(c->ptr, ptr.p_const, c->size) ? + -EFAULT : 0; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_STRING: + len = strlen(ptr.p_char); + if (c->size < len + 1) { + c->size = ctrl->elem_size; + return -ENOSPC; + } + return copy_to_user(c->string, ptr.p_char, len + 1) ? + -EFAULT : 0; + case V4L2_CTRL_TYPE_INTEGER64: + c->value64 = *ptr.p_s64; + break; + default: + c->value = *ptr.p_s32; + break; + } + return 0; +} + +/* Helper function: copy the current control value back to the caller */ +static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + return ptr_to_user(c, ctrl, ctrl->p_cur); +} + +/* Helper function: copy the new control value back to the caller */ +static int new_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl *ctrl) +{ + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the request value back to the caller */ +static int req_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl_ref *ref) +{ + return ptr_to_user(c, ref->ctrl, ref->p_req); +} + +/* Helper function: copy the initial control value back to the caller */ +static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->init(ctrl, 0, ctrl->p_new); + + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the caller-provider value as the new control value */ +static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + int ret; + u32 size; + + ctrl->is_new = 0; + if (ctrl->is_dyn_array && + c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) { + void *old = ctrl->p_array; + void *tmp = kvzalloc(2 * c->size, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); + 
memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); + ctrl->p_new.p = tmp; + ctrl->p_cur.p = tmp + c->size; + ctrl->p_array = tmp; + ctrl->p_array_alloc_elems = c->size / ctrl->elem_size; + kvfree(old); + } + + if (ctrl->is_ptr && !ctrl->is_string) { + unsigned int elems = c->size / ctrl->elem_size; + + if (copy_from_user(ctrl->p_new.p, c->ptr, c->size)) + return -EFAULT; + ctrl->is_new = 1; + if (ctrl->is_dyn_array) + ctrl->new_elems = elems; + else if (ctrl->is_array) + ctrl->type_ops->init(ctrl, elems, ctrl->p_new); + return 0; + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER64: + *ctrl->p_new.p_s64 = c->value64; + break; + case V4L2_CTRL_TYPE_STRING: + size = c->size; + if (size == 0) + return -ERANGE; + if (size > ctrl->maximum + 1) + size = ctrl->maximum + 1; + ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0; + if (!ret) { + char last = ctrl->p_new.p_char[size - 1]; + + ctrl->p_new.p_char[size - 1] = 0; + /* + * If the string was longer than ctrl->maximum, + * then return an error. + */ + if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last) + return -ERANGE; + ctrl->is_new = 1; + } + return ret; + default: + *ctrl->p_new.p_s32 = c->value; + break; + } + ctrl->is_new = 1; + return 0; +} + +/* + * VIDIOC_G/TRY/S_EXT_CTRLS implementation + */ + +/* + * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: + * + * It is not a fully atomic operation, just best-effort only. After all, if + * multiple controls have to be set through multiple i2c writes (for example) + * then some initial writes may succeed while others fail. Thus leaving the + * system in an inconsistent state. The question is how much effort you are + * willing to spend on trying to make something atomic that really isn't. 
+ * + * From the point of view of an application the main requirement is that + * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an + * error should be returned without actually affecting any controls. + * + * If all the values are correct, then it is acceptable to just give up + * in case of low-level errors. + * + * It is important though that the application can tell when only a partial + * configuration was done. The way we do that is through the error_idx field + * of struct v4l2_ext_controls: if that is equal to the count field then no + * controls were affected. Otherwise all controls before that index were + * successful in performing their 'get' or 'set' operation, the control at + * the given index failed, and you don't know what happened with the controls + * after the failed one. Since if they were part of a control cluster they + * could have been successfully processed (if a cluster member was encountered + * at index < error_idx), they could have failed (if a cluster member was at + * error_idx), or they may not have been processed yet (if the first cluster + * member appeared after error_idx). + * + * It is all fairly theoretical, though. In practice all you can do is to + * bail out. If error_idx == count, then it is an application bug. If + * error_idx < count then it is only an application bug if the error code was + * EBUSY. That usually means that something started streaming just when you + * tried to set the controls. In all other cases it is a driver/hardware + * problem and all you can do is to retry or bail out. + * + * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that + * never modifies controls the error_idx is just set to whatever control + * has an invalid value. + */ + +/* + * Prepare for the extended g/s/try functions. + * Find the controls in the control array and do some basic checks. 
+ */ +static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct v4l2_ctrl_helper *helpers, + struct video_device *vdev, + bool get) +{ + struct v4l2_ctrl_helper *h; + bool have_clusters = false; + u32 i; + + for (i = 0, h = helpers; i < cs->count; i++, h++) { + struct v4l2_ext_control *c = &cs->controls[i]; + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl *ctrl; + u32 id = c->id & V4L2_CTRL_ID_MASK; + + cs->error_idx = i; + + if (cs->which && + cs->which != V4L2_CTRL_WHICH_DEF_VAL && + cs->which != V4L2_CTRL_WHICH_REQUEST_VAL && + V4L2_CTRL_ID2WHICH(id) != cs->which) { + dprintk(vdev, + "invalid which 0x%x or control id 0x%x\n", + cs->which, id); + return -EINVAL; + } + + /* + * Old-style private controls are not allowed for + * extended controls. + */ + if (id >= V4L2_CID_PRIVATE_BASE) { + dprintk(vdev, + "old-style private controls not allowed\n"); + return -EINVAL; + } + ref = find_ref_lock(hdl, id); + if (!ref) { + dprintk(vdev, "cannot find control id 0x%x\n", id); + return -EINVAL; + } + h->ref = ref; + ctrl = ref->ctrl; + if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) { + dprintk(vdev, "control id 0x%x is disabled\n", id); + return -EINVAL; + } + + if (ctrl->cluster[0]->ncontrols > 1) + have_clusters = true; + if (ctrl->cluster[0] != ctrl) + ref = find_ref_lock(hdl, ctrl->cluster[0]->id); + if (ctrl->is_dyn_array) { + unsigned int max_size = ctrl->dims[0] * ctrl->elem_size; + unsigned int tot_size = ctrl->elem_size; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + tot_size *= ref->p_req_elems; + else + tot_size *= ctrl->elems; + + c->size = ctrl->elem_size * (c->size / ctrl->elem_size); + if (get) { + if (c->size < tot_size) { + c->size = tot_size; + return -ENOSPC; + } + c->size = tot_size; + } else { + if (c->size > max_size) { + c->size = max_size; + return -ENOSPC; + } + if (!c->size) + return -EFAULT; + } + } else if (ctrl->is_ptr && !ctrl->is_string) { + unsigned int tot_size = ctrl->elems * ctrl->elem_size; + + 
if (c->size < tot_size) { + /* + * In the get case the application first + * queries to obtain the size of the control. + */ + if (get) { + c->size = tot_size; + return -ENOSPC; + } + dprintk(vdev, + "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n", + id, c->size, tot_size); + return -EFAULT; + } + c->size = tot_size; + } + /* Store the ref to the master control of the cluster */ + h->mref = ref; + /* + * Initially set next to 0, meaning that there is no other + * control in this helper array belonging to the same + * cluster. + */ + h->next = 0; + } + + /* + * We are done if there were no controls that belong to a multi- + * control cluster. + */ + if (!have_clusters) + return 0; + + /* + * The code below figures out in O(n) time which controls in the list + * belong to the same cluster. + */ + + /* This has to be done with the handler lock taken. */ + mutex_lock(hdl->lock); + + /* First zero the helper field in the master control references */ + for (i = 0; i < cs->count; i++) + helpers[i].mref->helper = NULL; + for (i = 0, h = helpers; i < cs->count; i++, h++) { + struct v4l2_ctrl_ref *mref = h->mref; + + /* + * If the mref->helper is set, then it points to an earlier + * helper that belongs to the same cluster. + */ + if (mref->helper) { + /* + * Set the next field of mref->helper to the current + * index: this means that the earlier helper now + * points to the next helper in the same cluster. + */ + mref->helper->next = i; + /* + * mref should be set only for the first helper in the + * cluster, clear the others. + */ + h->mref = NULL; + } + /* Point the mref helper to the current helper struct. */ + mref->helper = h; + } + mutex_unlock(hdl->lock); + return 0; +} + +/* + * Handles the corner case where cs->count == 0. It checks whether the + * specified control class exists. If that class ID is 0, then it checks + * whether there are any controls at all. 
+ */ +static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) +{ + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL || + which == V4L2_CTRL_WHICH_REQUEST_VAL) + return 0; + return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; +} + +/* + * Get extended controls. Allocates the helpers array if needed. + * + * Note that v4l2_g_ext_ctrls_common() with 'which' set to + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was + * completed, and in that case p_req_valid is true for all controls. + */ +int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev) +{ + struct v4l2_ctrl_helper helper[4]; + struct v4l2_ctrl_helper *helpers = helper; + int ret; + int i, j; + bool is_default, is_request; + + is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL); + is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL); + + cs->error_idx = cs->count; + cs->which = V4L2_CTRL_ID2WHICH(cs->which); + + if (!hdl) + return -EINVAL; + + if (cs->count == 0) + return class_check(hdl, cs->which); + + if (cs->count > ARRAY_SIZE(helper)) { + helpers = kvmalloc_array(cs->count, sizeof(helper[0]), + GFP_KERNEL); + if (!helpers) + return -ENOMEM; + } + + ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true); + cs->error_idx = cs->count; + + for (i = 0; !ret && i < cs->count; i++) + if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + ret = -EACCES; + + for (i = 0; !ret && i < cs->count; i++) { + struct v4l2_ctrl *master; + bool is_volatile = false; + u32 idx = i; + + if (!helpers[i].mref) + continue; + + master = helpers[i].mref->ctrl; + cs->error_idx = i; + + v4l2_ctrl_lock(master); + + /* + * g_volatile_ctrl will update the new control values. + * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and + * V4L2_CTRL_WHICH_REQUEST_VAL. 
In the case of requests + * it is v4l2_ctrl_request_complete() that copies the + * volatile controls at the time of request completion + * to the request, so you don't want to do that again. + */ + if (!is_default && !is_request && + ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || + (master->has_volatiles && !is_cur_manual(master)))) { + for (j = 0; j < master->ncontrols; j++) + cur_to_new(master->cluster[j]); + ret = call_op(master, g_volatile_ctrl); + is_volatile = true; + } + + if (ret) { + v4l2_ctrl_unlock(master); + break; + } + + /* + * Copy the default value (if is_default is true), the + * request value (if is_request is true and p_req is valid), + * the new volatile value (if is_volatile is true) or the + * current value. + */ + do { + struct v4l2_ctrl_ref *ref = helpers[idx].ref; + + if (is_default) + ret = def_to_user(cs->controls + idx, ref->ctrl); + else if (is_request && ref->p_req_array_enomem) + ret = -ENOMEM; + else if (is_request && ref->p_req_valid) + ret = req_to_user(cs->controls + idx, ref); + else if (is_volatile) + ret = new_to_user(cs->controls + idx, ref->ctrl); + else + ret = cur_to_user(cs->controls + idx, ref->ctrl); + idx = helpers[idx].next; + } while (!ret && idx); + + v4l2_ctrl_unlock(master); + } + + if (cs->count > ARRAY_SIZE(helper)) + kvfree(helpers); + return ret; +} + +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs) +{ + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs); + + return v4l2_g_ext_ctrls_common(hdl, cs, vdev); +} +EXPORT_SYMBOL(v4l2_g_ext_ctrls); + +/* Validate a new control */ +static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) +{ + return ctrl->type_ops->validate(ctrl, p_new); +} + +/* Validate controls. 
*/ +static int validate_ctrls(struct v4l2_ext_controls *cs, + struct v4l2_ctrl_helper *helpers, + struct video_device *vdev, + bool set) +{ + unsigned int i; + int ret = 0; + + cs->error_idx = cs->count; + for (i = 0; i < cs->count; i++) { + struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; + union v4l2_ctrl_ptr p_new; + + cs->error_idx = i; + + if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) { + dprintk(vdev, + "control id 0x%x is read-only\n", + ctrl->id); + return -EACCES; + } + /* + * This test is also done in try_set_control_cluster() which + * is called in atomic context, so that has the final say, + * but it makes sense to do an up-front check as well. Once + * an error occurs in try_set_control_cluster() some other + * controls may have been set already and we want to do a + * best-effort to avoid that. + */ + if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) { + dprintk(vdev, + "control id 0x%x is grabbed, cannot set\n", + ctrl->id); + return -EBUSY; + } + /* + * Skip validation for now if the payload needs to be copied + * from userspace into kernelspace. We'll validate those later. 
+ */ + if (ctrl->is_ptr) + continue; + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + p_new.p_s64 = &cs->controls[i].value64; + else + p_new.p_s32 = &cs->controls[i].value; + ret = validate_new(ctrl, p_new); + if (ret) + return ret; + } + return 0; +} + +/* Try or try-and-set controls */ +int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev, bool set) +{ + struct v4l2_ctrl_helper helper[4]; + struct v4l2_ctrl_helper *helpers = helper; + unsigned int i, j; + int ret; + + cs->error_idx = cs->count; + + /* Default value cannot be changed */ + if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) { + dprintk(vdev, "%s: cannot change default value\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + cs->which = V4L2_CTRL_ID2WHICH(cs->which); + + if (!hdl) { + dprintk(vdev, "%s: invalid null control handler\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + if (cs->count == 0) + return class_check(hdl, cs->which); + + if (cs->count > ARRAY_SIZE(helper)) { + helpers = kvmalloc_array(cs->count, sizeof(helper[0]), + GFP_KERNEL); + if (!helpers) + return -ENOMEM; + } + ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false); + if (!ret) + ret = validate_ctrls(cs, helpers, vdev, set); + if (ret && set) + cs->error_idx = cs->count; + for (i = 0; !ret && i < cs->count; i++) { + struct v4l2_ctrl *master; + u32 idx = i; + + if (!helpers[i].mref) + continue; + + cs->error_idx = i; + master = helpers[i].mref->ctrl; + v4l2_ctrl_lock(master); + + /* Reset the 'is_new' flags of the cluster */ + for (j = 0; j < master->ncontrols; j++) + if (master->cluster[j]) + master->cluster[j]->is_new = 0; + + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. 
+ * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + /* Pick an initial non-manual value */ + s32 new_auto_val = master->manual_mode_value + 1; + u32 tmp_idx = idx; + + do { + /* + * Check if the auto control is part of the + * list, and remember the new value. + */ + if (helpers[tmp_idx].ref->ctrl == master) + new_auto_val = cs->controls[tmp_idx].value; + tmp_idx = helpers[tmp_idx].next; + } while (tmp_idx); + /* + * If the new value == the manual value, then copy + * the current volatile values. + */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + /* + * Copy the new caller-supplied control values. + * user_to_new() sets 'is_new' to 1. + */ + do { + struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; + + ret = user_to_new(cs->controls + idx, ctrl); + if (!ret && ctrl->is_ptr) { + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + dprintk(vdev, + "failed to validate control %s (%d)\n", + v4l2_ctrl_get_name(ctrl->id), ret); + } + idx = helpers[idx].next; + } while (!ret && idx); + + if (!ret) + ret = try_or_set_cluster(fh, master, + !hdl->req_obj.req && set, 0); + if (!ret && hdl->req_obj.req && set) { + for (j = 0; j < master->ncontrols; j++) { + struct v4l2_ctrl_ref *ref = + find_ref(hdl, master->cluster[j]->id); + + new_to_req(ref); + } + } + + /* Copy the new values back to userspace. 
*/ + if (!ret) { + idx = i; + do { + ret = new_to_user(cs->controls + idx, + helpers[idx].ref->ctrl); + idx = helpers[idx].next; + } while (!ret && idx); + } + v4l2_ctrl_unlock(master); + } + + if (cs->count > ARRAY_SIZE(helper)) + kvfree(helpers); + return ret; +} + +static int try_set_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + int ret; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set); + + ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); + if (ret) + dprintk(vdev, + "%s: try_set_ext_ctrls_common failed (%d)\n", + video_device_node_name(vdev), ret); + + return ret; +} + +int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false); +} +EXPORT_SYMBOL(v4l2_try_ext_ctrls); + +int v4l2_s_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true); +} +EXPORT_SYMBOL(v4l2_s_ext_ctrls); + +/* + * VIDIOC_G/S_CTRL implementation + */ + +/* Helper function to get a single control */ +static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) +{ + struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret = 0; + int i; + + /* Compound controls are not supported. The new_to_user() and + * cur_to_user() calls below would need to be modified not to access + * userspace memory when called from get_ctrl(). 
+ */ + if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64) + return -EINVAL; + + if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + return -EACCES; + + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + ret = call_op(master, g_volatile_ctrl); + if (!ret) + ret = new_to_user(c, ctrl); + } else { + ret = cur_to_user(c, ctrl); + } + v4l2_ctrl_unlock(master); + return ret; +} + +int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); + struct v4l2_ext_control c; + int ret; + + if (!ctrl || !ctrl->is_int) + return -EINVAL; + ret = get_ctrl(ctrl, &c); + + if (!ret) + control->value = c.value; + + return ret; +} +EXPORT_SYMBOL(v4l2_g_ctrl); + +/* Helper function for VIDIOC_S_CTRL compatibility */ +static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) +{ + struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret; + int i; + + /* Reset the 'is_new' flags of the cluster */ + for (i = 0; i < master->ncontrols; i++) + if (master->cluster[i]) + master->cluster[i]->is_new = 0; + + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + return ret; + + /* + * For autoclusters with volatiles that are switched from auto to + * manual mode we have to update the current volatile values since + * those will become the initial manual values after such a switch. 
+ */ + if (master->is_auto && master->has_volatiles && ctrl == master && + !is_cur_manual(master) && ctrl->val == master->manual_mode_value) + update_from_auto_cluster(master); + + ctrl->is_new = 1; + return try_or_set_cluster(fh, master, true, ch_flags); +} + +/* Helper function for VIDIOC_S_CTRL compatibility */ +static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, + struct v4l2_ext_control *c) +{ + int ret; + + v4l2_ctrl_lock(ctrl); + ret = user_to_new(c, ctrl); + if (!ret) + ret = set_ctrl(fh, ctrl, 0); + if (!ret) + ret = cur_to_user(c, ctrl); + v4l2_ctrl_unlock(ctrl); + return ret; +} + +int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct v4l2_control *control) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); + struct v4l2_ext_control c = { control->id }; + int ret; + + if (!ctrl || !ctrl->is_int) + return -EINVAL; + + if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) + return -EACCES; + + c.value = control->value; + ret = set_ctrl_lock(fh, ctrl, &c); + control->value = c.value; + return ret; +} +EXPORT_SYMBOL(v4l2_s_ctrl); + +/* + * Helper functions for drivers to get/set controls. + */ + +s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl) +{ + struct v4l2_ext_control c; + + /* It's a driver bug if this happens. */ + if (WARN_ON(!ctrl->is_int)) + return 0; + c.value = 0; + get_ctrl(ctrl, &c); + return c.value; +} +EXPORT_SYMBOL(v4l2_ctrl_g_ctrl); + +s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl) +{ + struct v4l2_ext_control c; + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) + return 0; + c.value64 = 0; + get_ctrl(ctrl, &c); + return c.value64; +} +EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64); + +int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. 
*/ + if (WARN_ON(!ctrl->is_int)) + return -EINVAL; + ctrl->val = val; + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); + +int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) + return -EINVAL; + *ctrl->p_new.p_s64 = val; + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); + +int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING)) + return -EINVAL; + strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); + +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->type != type)) + return -EINVAL; + /* Setting dynamic arrays is not (yet?) supported. */ + if (WARN_ON(ctrl->is_dyn_array)) + return -EINVAL; + memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size); + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound); + +/* + * Modify the range of a control. 
+ */ +int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def) +{ + bool value_changed; + bool range_changed = false; + int ret; + + lockdep_assert_held(ctrl->handler->lock); + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER64: + case V4L2_CTRL_TYPE_BOOLEAN: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_BITMASK: + case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: + if (ctrl->is_array) + return -EINVAL; + ret = check_range(ctrl->type, min, max, step, def); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + if (ctrl->minimum != min || ctrl->maximum != max || + ctrl->step != step || ctrl->default_value != def) { + range_changed = true; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + } + cur_to_new(ctrl); + if (validate_new(ctrl, ctrl->p_new)) { + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + *ctrl->p_new.p_s64 = def; + else + *ctrl->p_new.p_s32 = def; + } + + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; + else + value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; + if (value_changed) + ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); + else if (range_changed) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); + return ret; +} +EXPORT_SYMBOL(__v4l2_ctrl_modify_range); + +int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]) +{ + unsigned int elems = 1; + unsigned int i; + void *p_array; + + lockdep_assert_held(ctrl->handler->lock); + + if (!ctrl->is_array || ctrl->is_dyn_array) + return -EINVAL; + + for (i = 0; i < ctrl->nr_of_dims; i++) + elems *= dims[i]; + if (elems == 0) + return -EINVAL; + p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL); + if (!p_array) + return -ENOMEM; + kvfree(ctrl->p_array); + ctrl->p_array_alloc_elems = elems; + 
ctrl->elems = elems; + ctrl->new_elems = elems; + ctrl->p_array = p_array; + ctrl->p_new.p = p_array; + ctrl->p_cur.p = p_array + elems * ctrl->elem_size; + for (i = 0; i < ctrl->nr_of_dims; i++) + ctrl->dims[i] = dims[i]; + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); + cur_to_new(ctrl); + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | + V4L2_EVENT_CTRL_CH_DIMENSIONS); + return 0; +} +EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions); + +/* Implement VIDIOC_QUERY_EXT_CTRL */ +int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc) +{ + const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND; + u32 id = qc->id & V4L2_CTRL_ID_MASK; + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl *ctrl; + + if (!hdl) + return -EINVAL; + + mutex_lock(hdl->lock); + + /* Try to find it */ + ref = find_ref(hdl, id); + + if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) { + bool is_compound; + /* Match any control that is not hidden */ + unsigned int mask = 1; + bool match = false; + + if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) { + /* Match any hidden control */ + match = true; + } else if ((qc->id & next_flags) == next_flags) { + /* Match any control, compound or not */ + mask = 0; + } + + /* Find the next control with ID > qc->id */ + + /* Did we reach the end of the control list? */ + if (id >= node2id(hdl->ctrl_refs.prev)) { + ref = NULL; /* Yes, so there is no next control */ + } else if (ref) { + struct v4l2_ctrl_ref *pos = ref; + + /* + * We found a control with the given ID, so just get + * the next valid one in the list. 
+ */ + ref = NULL; + list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) { + is_compound = pos->ctrl->is_array || + pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; + if (id < pos->ctrl->id && + (is_compound & mask) == match) { + ref = pos; + break; + } + } + } else { + struct v4l2_ctrl_ref *pos; + + /* + * No control with the given ID exists, so start + * searching for the next largest ID. We know there + * is one, otherwise the first 'if' above would have + * been true. + */ + list_for_each_entry(pos, &hdl->ctrl_refs, node) { + is_compound = pos->ctrl->is_array || + pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; + if (id < pos->ctrl->id && + (is_compound & mask) == match) { + ref = pos; + break; + } + } + } + } + mutex_unlock(hdl->lock); + + if (!ref) + return -EINVAL; + + ctrl = ref->ctrl; + memset(qc, 0, sizeof(*qc)); + if (id >= V4L2_CID_PRIVATE_BASE) + qc->id = id; + else + qc->id = ctrl->id; + strscpy(qc->name, ctrl->name, sizeof(qc->name)); + qc->flags = user_flags(ctrl); + qc->type = ctrl->type; + qc->elem_size = ctrl->elem_size; + qc->elems = ctrl->elems; + qc->nr_of_dims = ctrl->nr_of_dims; + memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0])); + qc->minimum = ctrl->minimum; + qc->maximum = ctrl->maximum; + qc->default_value = ctrl->default_value; + if (ctrl->type == V4L2_CTRL_TYPE_MENU || + ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) + qc->step = 1; + else + qc->step = ctrl->step; + return 0; +} +EXPORT_SYMBOL(v4l2_query_ext_ctrl); + +/* Implement VIDIOC_QUERYCTRL */ +int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc) +{ + struct v4l2_query_ext_ctrl qec = { qc->id }; + int rc; + + rc = v4l2_query_ext_ctrl(hdl, &qec); + if (rc) + return rc; + + qc->id = qec.id; + qc->type = qec.type; + qc->flags = qec.flags; + strscpy(qc->name, qec.name, sizeof(qc->name)); + switch (qc->type) { + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_BOOLEAN: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case 
V4L2_CTRL_TYPE_STRING: + case V4L2_CTRL_TYPE_BITMASK: + qc->minimum = qec.minimum; + qc->maximum = qec.maximum; + qc->step = qec.step; + qc->default_value = qec.default_value; + break; + default: + qc->minimum = 0; + qc->maximum = 0; + qc->step = 0; + qc->default_value = 0; + break; + } + return 0; +} +EXPORT_SYMBOL(v4l2_queryctrl); + +/* Implement VIDIOC_QUERYMENU */ +int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) +{ + struct v4l2_ctrl *ctrl; + u32 i = qm->index; + + ctrl = v4l2_ctrl_find(hdl, qm->id); + if (!ctrl) + return -EINVAL; + + qm->reserved = 0; + /* Sanity checks */ + switch (ctrl->type) { + case V4L2_CTRL_TYPE_MENU: + if (!ctrl->qmenu) + return -EINVAL; + break; + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (!ctrl->qmenu_int) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if (i < ctrl->minimum || i > ctrl->maximum) + return -EINVAL; + + /* Use mask to see if this menu item should be skipped */ + if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i))) + return -EINVAL; + /* Empty menu items should also be skipped */ + if (ctrl->type == V4L2_CTRL_TYPE_MENU) { + if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0') + return -EINVAL; + strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name)); + } else { + qm->value = ctrl->qmenu_int[i]; + } + return 0; +} +EXPORT_SYMBOL(v4l2_querymenu); + +/* + * VIDIOC_LOG_STATUS helpers + */ + +int v4l2_ctrl_log_status(struct file *file, void *fh) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_fh *vfh = file->private_data; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev) + v4l2_ctrl_handler_log_status(vfh->ctrl_handler, + vfd->v4l2_dev->name); + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_log_status); + +int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd) +{ + v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name); + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status); + +/* + * VIDIOC_(UN)SUBSCRIBE_EVENT implementation + */ + 
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, + unsigned int elems) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); + + if (!ctrl) + return -EINVAL; + + v4l2_ctrl_lock(ctrl); + list_add_tail(&sev->node, &ctrl->ev_subs); + if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS && + (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) + send_initial_event(sev->fh, ctrl); + v4l2_ctrl_unlock(ctrl); + return 0; +} + +static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); + + if (!ctrl) + return; + + v4l2_ctrl_lock(ctrl); + list_del(&sev->node); + v4l2_ctrl_unlock(ctrl); +} + +void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new) +{ + u32 old_changes = old->u.ctrl.changes; + + old->u.ctrl = new->u.ctrl; + old->u.ctrl.changes |= old_changes; +} +EXPORT_SYMBOL(v4l2_ctrl_replace); + +void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new) +{ + new->u.ctrl.changes |= old->u.ctrl.changes; +} +EXPORT_SYMBOL(v4l2_ctrl_merge); + +const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = { + .add = v4l2_ctrl_add_event, + .del = v4l2_ctrl_del_event, + .replace = v4l2_ctrl_replace, + .merge = v4l2_ctrl_merge, +}; +EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops); + +int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_CTRL) + return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); + return -EINVAL; +} +EXPORT_SYMBOL(v4l2_ctrl_subscribe_event); + +int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + if (!sd->ctrl_handler) + return -EINVAL; + return v4l2_ctrl_subscribe_event(fh, sub); +} +EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event); + +/* + * poll helper + */ +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) +{ + struct v4l2_fh *fh = 
file->private_data; + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + return EPOLLPRI; + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_poll); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-core.c b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-core.c new file mode 100644 index 0000000..eeab6a5 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -0,0 +1,2598 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework core implementation. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#include +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +static const union v4l2_ctrl_ptr ptr_null; + +static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, + u32 changes) +{ + memset(ev, 0, sizeof(*ev)); + ev->type = V4L2_EVENT_CTRL; + ev->id = ctrl->id; + ev->u.ctrl.changes = changes; + ev->u.ctrl.type = ctrl->type; + ev->u.ctrl.flags = user_flags(ctrl); + if (ctrl->is_ptr) + ev->u.ctrl.value64 = 0; + else + ev->u.ctrl.value64 = *ctrl->p_cur.p_s64; + ev->u.ctrl.minimum = ctrl->minimum; + ev->u.ctrl.maximum = ctrl->maximum; + if (ctrl->type == V4L2_CTRL_TYPE_MENU + || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) + ev->u.ctrl.step = 1; + else + ev->u.ctrl.step = ctrl->step; + ev->u.ctrl.default_value = ctrl->default_value; +} + +void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl) +{ + struct v4l2_event ev; + u32 changes = V4L2_EVENT_CTRL_CH_FLAGS; + + if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)) + changes |= V4L2_EVENT_CTRL_CH_VALUE; + fill_event(&ev, ctrl, changes); + v4l2_event_queue_fh(fh, &ev); +} + +void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) +{ + struct v4l2_event ev; + struct v4l2_subscribed_event *sev; + + if (list_empty(&ctrl->ev_subs)) + return; + fill_event(&ev, ctrl, changes); + + list_for_each_entry(sev, &ctrl->ev_subs, node) + if (sev->fh != fh || + (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)) + 
v4l2_event_queue_fh(sev->fh, &ev); +} + +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2) +{ + unsigned int i; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_BUTTON: + return false; + case V4L2_CTRL_TYPE_STRING: + for (i = 0; i < ctrl->elems; i++) { + unsigned int idx = i * ctrl->elem_size; + + /* strings are always 0-terminated */ + if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx)) + return false; + } + return true; + default: + return !memcmp(ptr1.p_const, ptr2.p_const, + ctrl->elems * ctrl->elem_size); + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_equal); + +/* Default intra MPEG-2 quantisation coefficients, from the specification. */ +static const u8 mpeg2_intra_quant_matrix[64] = { + 8, 16, 16, 19, 16, 19, 22, 22, + 22, 22, 22, 22, 26, 24, 26, 27, + 27, 27, 26, 26, 26, 26, 27, 27, + 27, 29, 29, 29, 34, 34, 34, 29, + 29, 29, 27, 27, 29, 29, 32, 32, + 34, 34, 37, 38, 37, 35, 35, 34, + 35, 38, 38, 40, 40, 40, 48, 48, + 46, 46, 56, 56, 58, 69, 69, 83 +}; + +static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_def.p_const) + memcpy(p, ctrl->p_def.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + p_mpeg2_sequence = p; + + /* 4:2:0 */ + p_mpeg2_sequence->chroma_format = 1; + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + p_mpeg2_picture = p; + + /* interlaced top field */ + p_mpeg2_picture->picture_structure = 
V4L2_MPEG2_PIC_TOP_FIELD; + p_mpeg2_picture->picture_coding_type = + V4L2_MPEG2_PIC_CODING_TYPE_I; + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + p_mpeg2_quant = p; + + memcpy(p_mpeg2_quant->intra_quantiser_matrix, + mpeg2_intra_quant_matrix, + ARRAY_SIZE(mpeg2_intra_quant_matrix)); + /* + * The default non-intra MPEG-2 quantisation + * coefficients are all 16, as per the specification. + */ + memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16, + sizeof(p_mpeg2_quant->non_intra_quantiser_matrix)); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + p_vp8_frame = p; + p_vp8_frame->num_dct_parts = 1; + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + p_vp9_frame = p; + p_vp9_frame->profile = 0; + p_vp9_frame->bit_depth = 8; + p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING; + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + p_av1_sequence = p; + p_av1_sequence->bit_depth = 8; + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + p_fwht_params = p; + p_fwht_params->version = V4L2_FWHT_VERSION; + p_fwht_params->width = 1280; + p_fwht_params->height = 720; + p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV | + (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + p_h264_scaling_matrix = p; + /* + * The default (flat) H.264 scaling matrix when none are + * specified in the bitstream, this is according to formulas + * (7-8) and (7-9) of the specification. 
+ */ + memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix)); + break; + } +} + +void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr) +{ + unsigned int i; + u32 tot_elems = ctrl->elems; + u32 elems = tot_elems - from_idx; + + if (from_idx >= tot_elems) + return; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_STRING: + for (i = from_idx; i < tot_elems; i++) { + unsigned int offset = i * ctrl->elem_size; + + memset(ptr.p_char + offset, ' ', ctrl->minimum); + ptr.p_char[offset + ctrl->minimum] = '\0'; + } + break; + case V4L2_CTRL_TYPE_INTEGER64: + if (ctrl->default_value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_s64[i] = ctrl->default_value; + } else { + memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64)); + } + break; + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_BITMASK: + case V4L2_CTRL_TYPE_BOOLEAN: + if (ctrl->default_value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_s32[i] = ctrl->default_value; + } else { + memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32)); + } + break; + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32)); + break; + case V4L2_CTRL_TYPE_U8: + memset(ptr.p_u8 + from_idx, ctrl->default_value, elems); + break; + case V4L2_CTRL_TYPE_U16: + if (ctrl->default_value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_u16[i] = ctrl->default_value; + } else { + memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16)); + } + break; + case V4L2_CTRL_TYPE_U32: + if (ctrl->default_value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_u32[i] = ctrl->default_value; + } else { + memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32)); + } + break; + default: + for (i = from_idx; i < tot_elems; i++) + std_init_compound(ctrl, i, ptr); + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_init); + +void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl) 
+{ + union v4l2_ctrl_ptr ptr = ctrl->p_cur; + + if (ctrl->is_array) { + unsigned i; + + for (i = 0; i < ctrl->nr_of_dims; i++) + pr_cont("[%u]", ctrl->dims[i]); + pr_cont(" "); + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + pr_cont("%d", *ptr.p_s32); + break; + case V4L2_CTRL_TYPE_BOOLEAN: + pr_cont("%s", *ptr.p_s32 ? "true" : "false"); + break; + case V4L2_CTRL_TYPE_MENU: + pr_cont("%s", ctrl->qmenu[*ptr.p_s32]); + break; + case V4L2_CTRL_TYPE_INTEGER_MENU: + pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]); + break; + case V4L2_CTRL_TYPE_BITMASK: + pr_cont("0x%08x", *ptr.p_s32); + break; + case V4L2_CTRL_TYPE_INTEGER64: + pr_cont("%lld", *ptr.p_s64); + break; + case V4L2_CTRL_TYPE_STRING: + pr_cont("%s", ptr.p_char); + break; + case V4L2_CTRL_TYPE_U8: + pr_cont("%u", (unsigned)*ptr.p_u8); + break; + case V4L2_CTRL_TYPE_U16: + pr_cont("%u", (unsigned)*ptr.p_u16); + break; + case V4L2_CTRL_TYPE_U32: + pr_cont("%u", (unsigned)*ptr.p_u32); + break; + case V4L2_CTRL_TYPE_AREA: + pr_cont("%ux%u", ptr.p_area->width, ptr.p_area->height); + break; + case V4L2_CTRL_TYPE_H264_SPS: + pr_cont("H264_SPS"); + break; + case V4L2_CTRL_TYPE_H264_PPS: + pr_cont("H264_PPS"); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + pr_cont("H264_SCALING_MATRIX"); + break; + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + pr_cont("H264_SLICE_PARAMS"); + break; + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + pr_cont("H264_DECODE_PARAMS"); + break; + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + pr_cont("H264_PRED_WEIGHTS"); + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + pr_cont("FWHT_PARAMS"); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + pr_cont("VP8_FRAME"); + break; + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + pr_cont("HDR10_CLL_INFO"); + break; + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + pr_cont("HDR10_MASTERING_DISPLAY"); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + pr_cont("MPEG2_QUANTISATION"); + break; + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + pr_cont("MPEG2_SEQUENCE"); + break; + 
case V4L2_CTRL_TYPE_MPEG2_PICTURE: + pr_cont("MPEG2_PICTURE"); + break; + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + pr_cont("VP9_COMPRESSED_HDR"); + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + pr_cont("VP9_FRAME"); + break; + case V4L2_CTRL_TYPE_HEVC_SPS: + pr_cont("HEVC_SPS"); + break; + case V4L2_CTRL_TYPE_HEVC_PPS: + pr_cont("HEVC_PPS"); + break; + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + pr_cont("HEVC_SLICE_PARAMS"); + break; + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + pr_cont("HEVC_SCALING_MATRIX"); + break; + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + pr_cont("HEVC_DECODE_PARAMS"); + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + pr_cont("AV1_SEQUENCE"); + break; + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + pr_cont("AV1_TILE_GROUP_ENTRY"); + break; + case V4L2_CTRL_TYPE_AV1_FRAME: + pr_cont("AV1_FRAME"); + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + pr_cont("AV1_FILM_GRAIN"); + break; + + default: + pr_cont("unknown type %d", ctrl->type); + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_log); + +/* + * Round towards the closest legal value. Be careful when we are + * close to the maximum range of the control type to prevent + * wrap-arounds. 
+ */ +#define ROUND_TO_RANGE(val, offset_type, ctrl) \ +({ \ + offset_type offset; \ + if ((ctrl)->maximum >= 0 && \ + val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \ + val = (ctrl)->maximum; \ + else \ + val += (s32)((ctrl)->step / 2); \ + val = clamp_t(typeof(val), val, \ + (ctrl)->minimum, (ctrl)->maximum); \ + offset = (val) - (ctrl)->minimum; \ + offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \ + val = (ctrl)->minimum + offset; \ + 0; \ +}) + +/* Validate a new control */ + +#define zero_padding(s) \ + memset(&(s).padding, 0, sizeof((s).padding)) +#define zero_reserved(s) \ + memset(&(s).reserved, 0, sizeof((s).reserved)) + +static int +validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf) +{ + unsigned int i; + + if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED | + V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE)) + return -EINVAL; + + /* That all values are in the accepted range. */ + if (lf->level > GENMASK(5, 0)) + return -EINVAL; + + if (lf->sharpness > GENMASK(2, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) + if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) + if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63) + return -EINVAL; + + zero_reserved(*lf); + return 0; +} + +static int +validate_vp9_quant_params(struct v4l2_vp9_quantization *quant) +{ + if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 || + quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 || + quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15) + return -EINVAL; + + zero_reserved(*quant); + return 0; +} + +static int +validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg) +{ + unsigned int i, j; + + if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED | + V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP | + V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE | + V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA | + V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE)) + return 
-EINVAL; + + for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) { + if (seg->feature_enabled[i] & + ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK) + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) { + static const int range[] = { 255, 63, 3, 0 }; + + for (j = 0; j < ARRAY_SIZE(seg->feature_data[j]); j++) { + if (seg->feature_data[i][j] < -range[j] || + seg->feature_data[i][j] > range[j]) + return -EINVAL; + } + } + + zero_reserved(*seg); + return 0; +} + +static int +validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr) +{ + if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT) + return -EINVAL; + + return 0; +} + +static int +validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame) +{ + int ret; + + /* Make sure we're not passed invalid flags. */ + if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME | + V4L2_VP9_FRAME_FLAG_SHOW_FRAME | + V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT | + V4L2_VP9_FRAME_FLAG_INTRA_ONLY | + V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV | + V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX | + V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE | + V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING)) + return -EINVAL; + + if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT && + frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX) + return -EINVAL; + + if (frame->profile > V4L2_VP9_PROFILE_MAX) + return -EINVAL; + + if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL) + return -EINVAL; + + if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX) + return -EINVAL; + + /* + * Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10 + * and 12 bit depths. + */ + if ((frame->profile < 2 && frame->bit_depth != 8) || + (frame->profile >= 2 && + (frame->bit_depth != 10 && frame->bit_depth != 12))) + return -EINVAL; + + /* Profile 0 and 2 only accept YUV 4:2:0. 
*/ + if ((frame->profile == 0 || frame->profile == 2) && + (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) || + !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) + return -EINVAL; + + /* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */ + if ((frame->profile == 1 || frame->profile == 3) && + ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) && + (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) + return -EINVAL; + + if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE) + return -EINVAL; + + /* + * According to the spec, tile_cols_log2 shall be less than or equal + * to 6. + */ + if (frame->tile_cols_log2 > 6) + return -EINVAL; + + if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT) + return -EINVAL; + + ret = validate_vp9_lf_params(&frame->lf); + if (ret) + return ret; + + ret = validate_vp9_quant_params(&frame->quant); + if (ret) + return ret; + + ret = validate_vp9_seg_params(&frame->seg); + if (ret) + return ret; + + zero_reserved(*frame); + return 0; +} + +static int validate_av1_quantization(struct v4l2_av1_quantization *q) +{ + if (q->flags > GENMASK(2, 0)) + return -EINVAL; + + if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 || + q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 || + q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 || + q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 || + q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 || + q->delta_q_res > GENMASK(1, 0)) + return -EINVAL; + + if (q->qm_y > GENMASK(3, 0) || + q->qm_u > GENMASK(3, 0) || + q->qm_v > GENMASK(3, 0)) + return -EINVAL; + + return 0; +} + +static int validate_av1_segmentation(struct v4l2_av1_segmentation *s) +{ + u32 i; + u32 j; + + if (s->flags > GENMASK(4, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) { + static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; + static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0}; + + for (j = 0; j < ARRAY_SIZE(s->feature_data[j]); 
j++) { + s32 limit = segmentation_feature_max[j]; + + if (segmentation_feature_signed[j]) { + if (s->feature_data[i][j] < -limit || + s->feature_data[i][j] > limit) + return -EINVAL; + } else { + if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit) + return -EINVAL; + } + } + } + + return 0; +} + +static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf) +{ + u32 i; + + if (lf->flags > GENMASK(3, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->level); i++) { + if (lf->level[i] > GENMASK(5, 0)) + return -EINVAL; + } + + if (lf->sharpness > GENMASK(2, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) { + if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63) + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) { + if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63) + return -EINVAL; + } + + return 0; +} + +static int validate_av1_cdef(struct v4l2_av1_cdef *cdef) +{ + u32 i; + + if (cdef->damping_minus_3 > GENMASK(1, 0) || + cdef->bits > GENMASK(1, 0)) + return -EINVAL; + + for (i = 0; i < 1 << cdef->bits; i++) { + if (cdef->y_pri_strength[i] > GENMASK(3, 0) || + cdef->y_sec_strength[i] > 4 || + cdef->uv_pri_strength[i] > GENMASK(3, 0) || + cdef->uv_sec_strength[i] > 4) + return -EINVAL; + } + + return 0; +} + +static int validate_av1_loop_restauration(struct v4l2_av1_loop_restoration *lr) +{ + if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1) + return -EINVAL; + + return 0; +} + +static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg) +{ + u32 i; + + if (fg->flags > GENMASK(4, 0)) + return -EINVAL; + + if (fg->film_grain_params_ref_idx > GENMASK(2, 0) || + fg->num_y_points > 14 || + fg->num_cb_points > 10 || + fg->num_cr_points > GENMASK(3, 0) || + fg->grain_scaling_minus_8 > GENMASK(1, 0) || + fg->ar_coeff_lag > GENMASK(1, 0) || + fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) || + fg->grain_scale_shift > GENMASK(1, 0)) + return -EINVAL; + + if (!(fg->flags & 
V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) + return 0; + + for (i = 1; i < fg->num_y_points; i++) + if (fg->point_y_value[i] <= fg->point_y_value[i - 1]) + return -EINVAL; + + for (i = 1; i < fg->num_cb_points; i++) + if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1]) + return -EINVAL; + + for (i = 1; i < fg->num_cr_points; i++) + if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1]) + return -EINVAL; + + return 0; +} + +static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f) +{ + int ret = 0; + + ret = validate_av1_quantization(&f->quantization); + if (ret) + return ret; + ret = validate_av1_segmentation(&f->segmentation); + if (ret) + return ret; + ret = validate_av1_loop_filter(&f->loop_filter); + if (ret) + return ret; + ret = validate_av1_cdef(&f->cdef); + if (ret) + return ret; + ret = validate_av1_loop_restauration(&f->loop_restoration); + if (ret) + return ret; + + if (f->flags & + ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME | + V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME | + V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE | + V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE | + V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS | + V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV | + V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC | + V4L2_AV1_FRAME_FLAG_USE_SUPERRES | + V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV | + V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE | + V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS | + V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF | + V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION | + V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT | + V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET | + V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED | + V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT | + V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE | + V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT | + V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING)) + return -EINVAL; + + if (f->superres_denom > GENMASK(2, 0) + 9) + return -EINVAL; + + return 0; +} + +static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s) +{ + if (s->flags & + 
~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE | + V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION | + V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME | + V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE | + V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X | + V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y | + V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT | + V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q)) + return -EINVAL; + + if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME) + return -EINVAL; + + /* reserved */ + if (s->seq_profile > 2) + return -EINVAL; + + /* TODO: PROFILES */ + return 0; +} + +/* + * Compound controls validation requires setting unused fields/flags to zero + * in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's + * memcmp. 
+ */ +static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_dec_params; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; + struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params; + struct v4l2_area *area; + void *p = ptr.p + idx * ctrl->elem_size; + unsigned int i; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + p_mpeg2_sequence = p; + + switch (p_mpeg2_sequence->chroma_format) { + case 1: /* 4:2:0 */ + case 2: /* 4:2:2 */ + case 3: /* 4:4:4 */ + break; + default: + return -EINVAL; + } + break; + + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + p_mpeg2_picture = p; + + switch (p_mpeg2_picture->intra_dc_precision) { + case 0: /* 8 bits */ + case 1: /* 9 bits */ + case 2: /* 10 bits */ + case 3: /* 11 bits */ + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_picture->picture_structure) { + case V4L2_MPEG2_PIC_TOP_FIELD: + case V4L2_MPEG2_PIC_BOTTOM_FIELD: + case V4L2_MPEG2_PIC_FRAME: + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_picture->picture_coding_type) { + case V4L2_MPEG2_PIC_CODING_TYPE_I: + case V4L2_MPEG2_PIC_CODING_TYPE_P: + case V4L2_MPEG2_PIC_CODING_TYPE_B: + break; + default: + return -EINVAL; + } + zero_reserved(*p_mpeg2_picture); + break; + + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + break; + + case V4L2_CTRL_TYPE_FWHT_PARAMS: + p_fwht_params = p; + if (p_fwht_params->version < V4L2_FWHT_VERSION) + return -EINVAL; + if 
(!p_fwht_params->width || !p_fwht_params->height) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SPS: + p_h264_sps = p; + + /* Some syntax elements are only conditionally valid */ + if (p_h264_sps->pic_order_cnt_type != 0) { + p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0; + } else if (p_h264_sps->pic_order_cnt_type != 1) { + p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0; + p_h264_sps->offset_for_non_ref_pic = 0; + p_h264_sps->offset_for_top_to_bottom_field = 0; + memset(&p_h264_sps->offset_for_ref_frame, 0, + sizeof(p_h264_sps->offset_for_ref_frame)); + } + + if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) { + p_h264_sps->chroma_format_idc = 1; + p_h264_sps->bit_depth_luma_minus8 = 0; + p_h264_sps->bit_depth_chroma_minus8 = 0; + + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS; + + if (p_h264_sps->chroma_format_idc < 3) + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE; + } + + if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD; + + /* + * Chroma 4:2:2 format require at least High 4:2:2 profile. + * + * The H264 specification and well-known parser implementations + * use profile-idc values directly, as that is clearer and + * less ambiguous. We do the same here. 
+ */ + if (p_h264_sps->profile_idc < 122 && + p_h264_sps->chroma_format_idc > 1) + return -EINVAL; + /* Chroma 4:4:4 format require at least High 4:2:2 profile */ + if (p_h264_sps->profile_idc < 244 && + p_h264_sps->chroma_format_idc > 2) + return -EINVAL; + if (p_h264_sps->chroma_format_idc > 3) + return -EINVAL; + + if (p_h264_sps->bit_depth_luma_minus8 > 6) + return -EINVAL; + if (p_h264_sps->bit_depth_chroma_minus8 > 6) + return -EINVAL; + if (p_h264_sps->log2_max_frame_num_minus4 > 12) + return -EINVAL; + if (p_h264_sps->pic_order_cnt_type > 2) + return -EINVAL; + if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12) + return -EINVAL; + if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_PPS: + p_h264_pps = p; + + if (p_h264_pps->num_slice_groups_minus1 > 7) + return -EINVAL; + if (p_h264_pps->num_ref_idx_l0_default_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_pps->num_ref_idx_l1_default_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_pps->weighted_bipred_idc > 2) + return -EINVAL; + /* + * pic_init_qp_minus26 shall be in the range of + * -(26 + QpBdOffset_y) to +25, inclusive, + * where QpBdOffset_y is 6 * bit_depth_luma_minus8 + */ + if (p_h264_pps->pic_init_qp_minus26 < -62 || + p_h264_pps->pic_init_qp_minus26 > 25) + return -EINVAL; + if (p_h264_pps->pic_init_qs_minus26 < -26 || + p_h264_pps->pic_init_qs_minus26 > 25) + return -EINVAL; + if (p_h264_pps->chroma_qp_index_offset < -12 || + p_h264_pps->chroma_qp_index_offset > 12) + return -EINVAL; + if (p_h264_pps->second_chroma_qp_index_offset < -12 || + p_h264_pps->second_chroma_qp_index_offset > 12) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + break; + + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + p_h264_pred_weights = p; + + if (p_h264_pred_weights->luma_log2_weight_denom > 7) + return -EINVAL; + if (p_h264_pred_weights->chroma_log2_weight_denom > 7) 
+ return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + p_h264_slice_params = p; + + if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) + p_h264_slice_params->flags &= + ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED; + + if (p_h264_slice_params->colour_plane_id > 2) + return -EINVAL; + if (p_h264_slice_params->cabac_init_idc > 2) + return -EINVAL; + if (p_h264_slice_params->disable_deblocking_filter_idc > 2) + return -EINVAL; + if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 || + p_h264_slice_params->slice_alpha_c0_offset_div2 > 6) + return -EINVAL; + if (p_h264_slice_params->slice_beta_offset_div2 < -6 || + p_h264_slice_params->slice_beta_offset_div2 > 6) + return -EINVAL; + + if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I || + p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI) + p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0; + if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) + p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0; + + if (p_h264_slice_params->num_ref_idx_l0_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_slice_params->num_ref_idx_l1_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + zero_reserved(*p_h264_slice_params); + break; + + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + p_h264_dec_params = p; + + if (p_h264_dec_params->nal_ref_idc > 3) + return -EINVAL; + for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { + struct v4l2_h264_dpb_entry *dpb_entry = + &p_h264_dec_params->dpb[i]; + + zero_reserved(*dpb_entry); + } + zero_reserved(*p_h264_dec_params); + break; + + case V4L2_CTRL_TYPE_VP8_FRAME: + p_vp8_frame = p; + + switch (p_vp8_frame->num_dct_parts) { + case 1: + case 2: + case 4: + case 8: + break; + default: + return -EINVAL; + } + zero_padding(p_vp8_frame->segment); + zero_padding(p_vp8_frame->lf); + zero_padding(p_vp8_frame->quant); + zero_padding(p_vp8_frame->entropy); + zero_padding(p_vp8_frame->coder_state); + 
break; + + case V4L2_CTRL_TYPE_HEVC_SPS: + p_hevc_sps = p; + + if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) { + p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0; + p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0; + p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0; + p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0; + } + + if (!(p_hevc_sps->flags & + V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT)) + p_hevc_sps->num_long_term_ref_pics_sps = 0; + break; + + case V4L2_CTRL_TYPE_HEVC_PPS: + p_hevc_pps = p; + + if (!(p_hevc_pps->flags & + V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED)) + p_hevc_pps->diff_cu_qp_delta_depth = 0; + + if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) { + p_hevc_pps->num_tile_columns_minus1 = 0; + p_hevc_pps->num_tile_rows_minus1 = 0; + memset(&p_hevc_pps->column_width_minus1, 0, + sizeof(p_hevc_pps->column_width_minus1)); + memset(&p_hevc_pps->row_height_minus1, 0, + sizeof(p_hevc_pps->row_height_minus1)); + + p_hevc_pps->flags &= + ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; + } + + if (p_hevc_pps->flags & + V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) { + p_hevc_pps->pps_beta_offset_div2 = 0; + p_hevc_pps->pps_tc_offset_div2 = 0; + } + break; + + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + p_hevc_decode_params = p; + + if (p_hevc_decode_params->num_active_dpb_entries > + V4L2_HEVC_DPB_ENTRIES_NUM_MAX) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + break; + + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + break; + + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + p_hdr10_mastering = p; + + for (i = 0; i < 3; ++i) { + if (p_hdr10_mastering->display_primaries_x[i] < + V4L2_HDR10_MASTERING_PRIMARIES_X_LOW || + p_hdr10_mastering->display_primaries_x[i] > + V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH || + p_hdr10_mastering->display_primaries_y[i] < + V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW || + p_hdr10_mastering->display_primaries_y[i] > + V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH) + 
return -EINVAL; + } + + if (p_hdr10_mastering->white_point_x < + V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW || + p_hdr10_mastering->white_point_x > + V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH || + p_hdr10_mastering->white_point_y < + V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW || + p_hdr10_mastering->white_point_y > + V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH) + return -EINVAL; + + if (p_hdr10_mastering->max_display_mastering_luminance < + V4L2_HDR10_MASTERING_MAX_LUMA_LOW || + p_hdr10_mastering->max_display_mastering_luminance > + V4L2_HDR10_MASTERING_MAX_LUMA_HIGH || + p_hdr10_mastering->min_display_mastering_luminance < + V4L2_HDR10_MASTERING_MIN_LUMA_LOW || + p_hdr10_mastering->min_display_mastering_luminance > + V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) + return -EINVAL; + + /* The following restriction comes from ITU-T Rec. H.265 spec */ + if (p_hdr10_mastering->max_display_mastering_luminance == + V4L2_HDR10_MASTERING_MAX_LUMA_LOW && + p_hdr10_mastering->min_display_mastering_luminance == + V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) + return -EINVAL; + + break; + + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + break; + + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + return validate_vp9_compressed_hdr(p); + + case V4L2_CTRL_TYPE_VP9_FRAME: + return validate_vp9_frame(p); + case V4L2_CTRL_TYPE_AV1_FRAME: + return validate_av1_frame(p); + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + return validate_av1_sequence(p); + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + return validate_av1_film_grain(p); + + case V4L2_CTRL_TYPE_AREA: + area = p; + if (!area->width || !area->height) + return -EINVAL; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + size_t len; + u64 offset; + s64 val; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); + case V4L2_CTRL_TYPE_INTEGER64: + /* + * We can't use the 
ROUND_TO_RANGE define here due to + * the u64 divide that needs special care. + */ + val = ptr.p_s64[idx]; + if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2)) + val = ctrl->maximum; + else + val += (s64)(ctrl->step / 2); + val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum); + offset = val - ctrl->minimum; + do_div(offset, ctrl->step); + ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step; + return 0; + case V4L2_CTRL_TYPE_U8: + return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl); + case V4L2_CTRL_TYPE_U16: + return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl); + case V4L2_CTRL_TYPE_U32: + return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl); + + case V4L2_CTRL_TYPE_BOOLEAN: + ptr.p_s32[idx] = !!ptr.p_s32[idx]; + return 0; + + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum) + return -ERANGE; + if (ptr.p_s32[idx] < BITS_PER_LONG_LONG && + (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx]))) + return -EINVAL; + if (ctrl->type == V4L2_CTRL_TYPE_MENU && + ctrl->qmenu[ptr.p_s32[idx]][0] == '\0') + return -EINVAL; + return 0; + + case V4L2_CTRL_TYPE_BITMASK: + ptr.p_s32[idx] &= ctrl->maximum; + return 0; + + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + ptr.p_s32[idx] = 0; + return 0; + + case V4L2_CTRL_TYPE_STRING: + idx *= ctrl->elem_size; + len = strlen(ptr.p_char + idx); + if (len < ctrl->minimum) + return -ERANGE; + if ((len - (u32)ctrl->minimum) % (u32)ctrl->step) + return -ERANGE; + return 0; + + default: + return std_validate_compound(ctrl, idx, ptr); + } +} + +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr) +{ + unsigned int i; + int ret = 0; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_U8: + if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + case V4L2_CTRL_TYPE_U16: + if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + 
case V4L2_CTRL_TYPE_U32: + if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32)); + return 0; + } + + for (i = 0; !ret && i < ctrl->new_elems; i++) + ret = std_validate_elem(ctrl, i, ptr); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_validate); + +static const struct v4l2_ctrl_type_ops std_type_ops = { + .equal = v4l2_ctrl_type_op_equal, + .init = v4l2_ctrl_type_op_init, + .log = v4l2_ctrl_type_op_log, + .validate = v4l2_ctrl_type_op_validate, +}; + +void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) +{ + if (!ctrl) + return; + if (!notify) { + ctrl->call_notify = 0; + return; + } + if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify)) + return; + ctrl->handler->notify = notify; + ctrl->handler->notify_priv = priv; + ctrl->call_notify = 1; +} +EXPORT_SYMBOL(v4l2_ctrl_notify); + +/* Copy the one value to another. */ +static void ptr_to_ptr(struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to, + unsigned int elems) +{ + if (ctrl == NULL) + return; + memcpy(to.p, from.p_const, elems * ctrl->elem_size); +} + +/* Copy the new value to the current value. */ +void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) +{ + bool changed; + + if (ctrl == NULL) + return; + + /* has_changed is set by cluster_changed */ + changed = ctrl->has_changed; + if (changed) { + if (ctrl->is_dyn_array) + ctrl->elems = ctrl->new_elems; + ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems); + } + + if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) { + /* Note: CH_FLAGS is only set for auto clusters. 
*/ + ctrl->flags &= + ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE); + if (!is_cur_manual(ctrl->cluster[0])) { + ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + if (ctrl->cluster[0]->has_volatiles) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + } + fh = NULL; + } + if (changed || ch_flags) { + /* If a control was changed that was not one of the controls + modified by the application, then send the event to all. */ + if (!ctrl->is_new) + fh = NULL; + send_event(fh, ctrl, + (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags); + if (ctrl->call_notify && changed && ctrl->handler->notify) + ctrl->handler->notify(ctrl, ctrl->handler->notify_priv); + } +} + +/* Copy the current value to the new value */ +void cur_to_new(struct v4l2_ctrl *ctrl) +{ + if (ctrl == NULL) + return; + if (ctrl->is_dyn_array) + ctrl->new_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); +} + +static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems) +{ + void *tmp; + + if (elems == ref->p_req_array_alloc_elems) + return true; + if (ref->ctrl->is_dyn_array && + elems < ref->p_req_array_alloc_elems) + return true; + + tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL); + + if (!tmp) { + ref->p_req_array_enomem = true; + return false; + } + ref->p_req_array_enomem = false; + kvfree(ref->p_req.p); + ref->p_req.p = tmp; + ref->p_req_array_alloc_elems = elems; + return true; +} + +/* Copy the new value to the request value */ +void new_to_req(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return; + + ctrl = ref->ctrl; + if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems)) + return; + + ref->p_req_elems = ctrl->new_elems; + ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems); + ref->p_req_valid = true; +} + +/* Copy the current value to the request value */ +void cur_to_req(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return; + + ctrl = ref->ctrl; + if (ctrl->is_array && 
!req_alloc_array(ref, ctrl->elems)) + return; + + ref->p_req_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems); + ref->p_req_valid = true; +} + +/* Copy the request value to the new value */ +int req_to_new(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return 0; + + ctrl = ref->ctrl; + + /* + * This control was never set in the request, so just use the current + * value. + */ + if (!ref->p_req_valid) { + if (ctrl->is_dyn_array) + ctrl->new_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); + return 0; + } + + /* Not an array, so just copy the request value */ + if (!ctrl->is_array) { + ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); + return 0; + } + + /* Sanity check, should never happen */ + if (WARN_ON(!ref->p_req_array_alloc_elems)) + return -ENOMEM; + + if (!ctrl->is_dyn_array && + ref->p_req_elems != ctrl->p_array_alloc_elems) + return -ENOMEM; + + /* + * Check if the number of elements in the request is more than the + * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array. + * Note that p_array is allocated with twice the number of elements + * in the dynamic array since it has to store both the current and + * new value of such a control. 
+ */ + if (ref->p_req_elems > ctrl->p_array_alloc_elems) { + unsigned int sz = ref->p_req_elems * ctrl->elem_size; + void *old = ctrl->p_array; + void *tmp = kvzalloc(2 * sz, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); + memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); + ctrl->p_new.p = tmp; + ctrl->p_cur.p = tmp + sz; + ctrl->p_array = tmp; + ctrl->p_array_alloc_elems = ref->p_req_elems; + kvfree(old); + } + + ctrl->new_elems = ref->p_req_elems; + ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); + return 0; +} + +/* Control range checking */ +int check_range(enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def) +{ + switch (type) { + case V4L2_CTRL_TYPE_BOOLEAN: + if (step != 1 || max > 1 || min < 0) + return -ERANGE; + fallthrough; + case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER64: + if (step == 0 || min > max || def < min || def > max) + return -ERANGE; + return 0; + case V4L2_CTRL_TYPE_BITMASK: + if (step || min || !max || (def & ~max)) + return -ERANGE; + return 0; + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (min > max || def < min || def > max || + min < 0 || (step && max >= BITS_PER_LONG_LONG)) + return -ERANGE; + /* Note: step == menu_skip_mask for menu controls. + So here we check if the default value is masked out. 
*/ + if (def < BITS_PER_LONG_LONG && (step & BIT_ULL(def))) + return -EINVAL; + return 0; + case V4L2_CTRL_TYPE_STRING: + if (min > max || min < 0 || step < 1 || def) + return -ERANGE; + return 0; + default: + return 0; + } +} + +/* Set the handler's error code if it wasn't set earlier already */ +static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err) +{ + if (hdl->error == 0) + hdl->error = err; + return err; +} + +/* Initialize the handler */ +int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, + unsigned nr_of_controls_hint, + struct lock_class_key *key, const char *name) +{ + mutex_init(&hdl->_lock); + hdl->lock = &hdl->_lock; + lockdep_set_class_and_name(hdl->lock, key, name); + INIT_LIST_HEAD(&hdl->ctrls); + INIT_LIST_HEAD(&hdl->ctrl_refs); + hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; + hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]), + GFP_KERNEL); + hdl->error = hdl->buckets ? 0 : -ENOMEM; + v4l2_ctrl_handler_init_request(hdl); + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_init_class); + +/* Free all controls and control refs */ +void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl_ref *ref, *next_ref; + struct v4l2_ctrl *ctrl, *next_ctrl; + struct v4l2_subscribed_event *sev, *next_sev; + + if (hdl == NULL || hdl->buckets == NULL) + return; + + v4l2_ctrl_handler_free_request(hdl); + + mutex_lock(hdl->lock); + /* Free all nodes */ + list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { + list_del(&ref->node); + if (ref->p_req_array_alloc_elems) + kvfree(ref->p_req.p); + kfree(ref); + } + /* Free all controls owned by the handler */ + list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) { + list_del(&ctrl->node); + list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node) + list_del(&sev->node); + kvfree(ctrl->p_array); + kvfree(ctrl); + } + kvfree(hdl->buckets); + hdl->buckets = NULL; + hdl->cached = NULL; + hdl->error = 0; + 
mutex_unlock(hdl->lock); + mutex_destroy(&hdl->_lock); +} +EXPORT_SYMBOL(v4l2_ctrl_handler_free); + +/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer + be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing + with applications that do not use the NEXT_CTRL flag. + + We just find the n-th private user control. It's O(N), but that should not + be an issue in this particular case. */ +static struct v4l2_ctrl_ref *find_private_ref( + struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref; + + id -= V4L2_CID_PRIVATE_BASE; + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + /* Search for private user controls that are compatible with + VIDIOC_G/S_CTRL. */ + if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && + V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) { + if (!ref->ctrl->is_int) + continue; + if (id == 0) + return ref; + id--; + } + } + return NULL; +} + +/* Find a control with the given ID. */ +struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref; + int bucket; + + id &= V4L2_CTRL_ID_MASK; + + /* Old-style private controls need special handling */ + if (id >= V4L2_CID_PRIVATE_BASE) + return find_private_ref(hdl, id); + bucket = id % hdl->nr_of_buckets; + + /* Simple optimization: cache the last control found */ + if (hdl->cached && hdl->cached->ctrl->id == id) + return hdl->cached; + + /* Not in cache, search the hash */ + ref = hdl->buckets ? hdl->buckets[bucket] : NULL; + while (ref && ref->ctrl->id != id) + ref = ref->next; + + if (ref) + hdl->cached = ref; /* cache it! */ + return ref; +} + +/* Find a control with the given ID. Take the handler's lock first. */ +struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = NULL; + + if (hdl) { + mutex_lock(hdl->lock); + ref = find_ref(hdl, id); + mutex_unlock(hdl->lock); + } + return ref; +} + +/* Find a control with the given ID. 
*/ +struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return ref ? ref->ctrl : NULL; +} +EXPORT_SYMBOL(v4l2_ctrl_find); + +/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */ +int handler_new_ref(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req) +{ + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl_ref *new_ref; + u32 id = ctrl->id; + u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; + int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ + unsigned int size_extra_req = 0; + + if (ctrl_ref) + *ctrl_ref = NULL; + + /* + * Automatically add the control class if it is not yet present and + * the new control is not a compound control. + */ + if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES && + id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL) + if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0)) + return hdl->error; + + if (hdl->error) + return hdl->error; + + if (allocate_req && !ctrl->is_array) + size_extra_req = ctrl->elems * ctrl->elem_size; + new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL); + if (!new_ref) + return handler_set_err(hdl, -ENOMEM); + new_ref->ctrl = ctrl; + new_ref->from_other_dev = from_other_dev; + if (size_extra_req) + new_ref->p_req.p = &new_ref[1]; + + INIT_LIST_HEAD(&new_ref->node); + + mutex_lock(hdl->lock); + + /* Add immediately at the end of the list if the list is empty, or if + the last element in the list has a lower ID. + This ensures that when elements are added in ascending order the + insertion is an O(1) operation. 
*/ + if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) { + list_add_tail(&new_ref->node, &hdl->ctrl_refs); + goto insert_in_hash; + } + + /* Find insert position in sorted list */ + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + if (ref->ctrl->id < id) + continue; + /* Don't add duplicates */ + if (ref->ctrl->id == id) { + kfree(new_ref); + goto unlock; + } + list_add(&new_ref->node, ref->node.prev); + break; + } + +insert_in_hash: + /* Insert the control node in the hash */ + new_ref->next = hdl->buckets[bucket]; + hdl->buckets[bucket] = new_ref; + if (ctrl_ref) + *ctrl_ref = new_ref; + if (ctrl->handler == hdl) { + /* By default each control starts in a cluster of its own. + * new_ref->ctrl is basically a cluster array with one + * element, so that's perfect to use as the cluster pointer. + * But only do this for the handler that owns the control. + */ + ctrl->cluster = &new_ref->ctrl; + ctrl->ncontrols = 1; + } + +unlock: + mutex_unlock(hdl->lock); + return 0; +} + +/* Add a new control */ +static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + const struct v4l2_ctrl_type_ops *type_ops, + u32 id, const char *name, enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def, + const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size, + u32 flags, const char * const *qmenu, + const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def, + void *priv) +{ + struct v4l2_ctrl *ctrl; + unsigned sz_extra; + unsigned nr_of_dims = 0; + unsigned elems = 1; + bool is_array; + unsigned tot_ctrl_size; + void *data; + int err; + + if (hdl->error) + return NULL; + + while (dims && dims[nr_of_dims]) { + elems *= dims[nr_of_dims]; + nr_of_dims++; + if (nr_of_dims == V4L2_CTRL_MAX_DIMS) + break; + } + is_array = nr_of_dims > 0; + + /* Prefill elem_size for all types handled by std_type_ops */ + switch ((u32)type) { + case V4L2_CTRL_TYPE_INTEGER64: + elem_size = sizeof(s64); + break; + case V4L2_CTRL_TYPE_STRING: + 
elem_size = max + 1; + break; + case V4L2_CTRL_TYPE_U8: + elem_size = sizeof(u8); + break; + case V4L2_CTRL_TYPE_U16: + elem_size = sizeof(u16); + break; + case V4L2_CTRL_TYPE_U32: + elem_size = sizeof(u32); + break; + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence); + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation); + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_fwht_params); + break; + case V4L2_CTRL_TYPE_H264_SPS: + elem_size = sizeof(struct v4l2_ctrl_h264_sps); + break; + case V4L2_CTRL_TYPE_H264_PPS: + elem_size = sizeof(struct v4l2_ctrl_h264_pps); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix); + break; + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_h264_slice_params); + break; + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_h264_decode_params); + break; + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + elem_size = sizeof(struct v4l2_ctrl_vp8_frame); + break; + case V4L2_CTRL_TYPE_HEVC_SPS: + elem_size = sizeof(struct v4l2_ctrl_hevc_sps); + break; + case V4L2_CTRL_TYPE_HEVC_PPS: + elem_size = sizeof(struct v4l2_ctrl_hevc_pps); + break; + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params); + break; + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix); + break; + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params); + break; + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info); + break; + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + 
elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display); + break; + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr); + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + elem_size = sizeof(struct v4l2_ctrl_vp9_frame); + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + elem_size = sizeof(struct v4l2_ctrl_av1_sequence); + break; + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry); + break; + case V4L2_CTRL_TYPE_AV1_FRAME: + elem_size = sizeof(struct v4l2_ctrl_av1_frame); + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + elem_size = sizeof(struct v4l2_ctrl_av1_film_grain); + break; + case V4L2_CTRL_TYPE_AREA: + elem_size = sizeof(struct v4l2_area); + break; + default: + if (type < V4L2_CTRL_COMPOUND_TYPES) + elem_size = sizeof(s32); + break; + } + + /* Sanity checks */ + if (id == 0 || name == NULL || !elem_size || + id >= V4L2_CID_PRIVATE_BASE || + (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) || + (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) { + handler_set_err(hdl, -ERANGE); + return NULL; + } + err = check_range(type, min, max, step, def); + if (err) { + handler_set_err(hdl, err); + return NULL; + } + if (is_array && + (type == V4L2_CTRL_TYPE_BUTTON || + type == V4L2_CTRL_TYPE_CTRL_CLASS)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) { + /* + * For now only support this for one-dimensional arrays only. + * + * This can be relaxed in the future, but this will + * require more effort. 
+ */ + if (nr_of_dims != 1) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + /* Start with just 1 element */ + elems = 1; + } + + tot_ctrl_size = elem_size * elems; + sz_extra = 0; + if (type == V4L2_CTRL_TYPE_BUTTON) + flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + else if (type == V4L2_CTRL_TYPE_CTRL_CLASS) + flags |= V4L2_CTRL_FLAG_READ_ONLY; + else if (!is_array && + (type == V4L2_CTRL_TYPE_INTEGER64 || + type == V4L2_CTRL_TYPE_STRING || + type >= V4L2_CTRL_COMPOUND_TYPES)) + sz_extra += 2 * tot_ctrl_size; + + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) + sz_extra += elem_size; + + ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL); + if (ctrl == NULL) { + handler_set_err(hdl, -ENOMEM); + return NULL; + } + + INIT_LIST_HEAD(&ctrl->node); + INIT_LIST_HEAD(&ctrl->ev_subs); + ctrl->handler = hdl; + ctrl->ops = ops; + ctrl->type_ops = type_ops ? type_ops : &std_type_ops; + ctrl->id = id; + ctrl->name = name; + ctrl->type = type; + ctrl->flags = flags; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING; + ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string; + ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64; + ctrl->is_array = is_array; + ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY); + ctrl->elems = elems; + ctrl->new_elems = elems; + ctrl->nr_of_dims = nr_of_dims; + if (nr_of_dims) + memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0])); + ctrl->elem_size = elem_size; + if (type == V4L2_CTRL_TYPE_MENU) + ctrl->qmenu = qmenu; + else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) + ctrl->qmenu_int = qmenu_int; + ctrl->priv = priv; + ctrl->cur.val = ctrl->val = def; + data = &ctrl[1]; + + if (ctrl->is_array) { + ctrl->p_array_alloc_elems = elems; + ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL); + if (!ctrl->p_array) { + kvfree(ctrl); + return 
NULL; + } + data = ctrl->p_array; + } + + if (!ctrl->is_int) { + ctrl->p_new.p = data; + ctrl->p_cur.p = data + tot_ctrl_size; + } else { + ctrl->p_new.p = &ctrl->val; + ctrl->p_cur.p = &ctrl->cur.val; + } + + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) { + if (ctrl->is_array) + ctrl->p_def.p = &ctrl[1]; + else + ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size; + memcpy(ctrl->p_def.p, p_def.p_const, elem_size); + } + + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); + cur_to_new(ctrl); + + if (handler_new_ref(hdl, ctrl, NULL, false, false)) { + kvfree(ctrl->p_array); + kvfree(ctrl); + return NULL; + } + mutex_lock(hdl->lock); + list_add_tail(&ctrl->node, &hdl->ctrls); + mutex_unlock(hdl->lock); + return ctrl; +} + +struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_config *cfg, void *priv) +{ + bool is_menu; + struct v4l2_ctrl *ctrl; + const char *name = cfg->name; + const char * const *qmenu = cfg->qmenu; + const s64 *qmenu_int = cfg->qmenu_int; + enum v4l2_ctrl_type type = cfg->type; + u32 flags = cfg->flags; + s64 min = cfg->min; + s64 max = cfg->max; + u64 step = cfg->step; + s64 def = cfg->def; + + if (name == NULL) + v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step, + &def, &flags); + + is_menu = (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU); + if (is_menu) + WARN_ON(step); + else + WARN_ON(cfg->menu_skip_mask); + if (type == V4L2_CTRL_TYPE_MENU && !qmenu) { + qmenu = v4l2_ctrl_get_menu(cfg->id); + } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + + ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name, + type, min, max, + is_menu ? 
cfg->menu_skip_mask : step, def, + cfg->dims, cfg->elem_size, + flags, qmenu, qmenu_int, cfg->p_def, priv); + if (ctrl) + ctrl->is_private = cfg->is_private; + return ctrl; +} +EXPORT_SYMBOL(v4l2_ctrl_new_custom); + +/* Helper function for standard non-menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, s64 min, s64 max, u64 step, s64 def) +{ + const char *name; + enum v4l2_ctrl_type type; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU || + type >= V4L2_CTRL_COMPOUND_TYPES) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + min, max, step, def, NULL, 0, + flags, NULL, NULL, ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std); + +/* Helper function for standard menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 _max, u64 mask, u8 _def) +{ + const char * const *qmenu = NULL; + const s64 *qmenu_int = NULL; + unsigned int qmenu_int_len = 0; + const char *name; + enum v4l2_ctrl_type type; + s64 min; + s64 max = _max; + s64 def = _def; + u64 step; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + + if (type == V4L2_CTRL_TYPE_MENU) + qmenu = v4l2_ctrl_get_menu(id); + else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) + qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len); + + if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, mask, def, NULL, 0, + flags, qmenu, qmenu_int, ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_menu); + +/* Helper function for standard menu controls with driver defined menu */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct 
v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, u32 id, u8 _max, + u64 mask, u8 _def, const char * const *qmenu) +{ + enum v4l2_ctrl_type type; + const char *name; + u32 flags; + u64 step; + s64 min; + s64 max = _max; + s64 def = _def; + + /* v4l2_ctrl_new_std_menu_items() should only be called for + * standard controls without a standard menu. + */ + if (v4l2_ctrl_get_menu(id)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, mask, def, NULL, 0, + flags, qmenu, NULL, ptr_null, NULL); + +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items); + +/* Helper function for standard compound controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, u32 id, + const union v4l2_ctrl_ptr p_def) +{ + const char *name; + enum v4l2_ctrl_type type; + u32 flags; + s64 min, max, step, def; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type < V4L2_CTRL_COMPOUND_TYPES) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + min, max, step, def, NULL, 0, + flags, NULL, NULL, p_def, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_compound); + +/* Helper function for standard integer menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 _max, u8 _def, const s64 *qmenu_int) +{ + const char *name; + enum v4l2_ctrl_type type; + s64 min; + u64 step; + s64 max = _max; + s64 def = _def; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type != V4L2_CTRL_TYPE_INTEGER_MENU) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 
0, max, 0, def, NULL, 0, + flags, NULL, qmenu_int, ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_int_menu); + +/* Add the controls from another handler to our own. */ +int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *add, + bool (*filter)(const struct v4l2_ctrl *ctrl), + bool from_other_dev) +{ + struct v4l2_ctrl_ref *ref; + int ret = 0; + + /* Do nothing if either handler is NULL or if they are the same */ + if (!hdl || !add || hdl == add) + return 0; + if (hdl->error) + return hdl->error; + mutex_lock(add->lock); + list_for_each_entry(ref, &add->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + + /* Skip handler-private controls. */ + if (ctrl->is_private) + continue; + /* And control classes */ + if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + continue; + /* Filter any unwanted controls */ + if (filter && !filter(ctrl)) + continue; + ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false); + if (ret) + break; + } + mutex_unlock(add->lock); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_add_handler); + +bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) +{ + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) + return true; + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) + return true; + switch (ctrl->id) { + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_AUDIO_LOUDNESS: + return true; + default: + break; + } + return false; +} +EXPORT_SYMBOL(v4l2_ctrl_radio_filter); + +/* Cluster controls */ +void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls) +{ + bool has_volatiles = false; + int i; + + /* The first control is the master control and it must not be NULL */ + if (WARN_ON(ncontrols == 0 || controls[0] == NULL)) + return; + + for (i = 0; i < ncontrols; i++) { + if (controls[i]) { + controls[i]->cluster = controls; + controls[i]->ncontrols = ncontrols; + if 
(controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE) + has_volatiles = true; + } + } + controls[0]->has_volatiles = has_volatiles; +} +EXPORT_SYMBOL(v4l2_ctrl_cluster); + +void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls, + u8 manual_val, bool set_volatile) +{ + struct v4l2_ctrl *master = controls[0]; + u32 flag = 0; + int i; + + v4l2_ctrl_cluster(ncontrols, controls); + WARN_ON(ncontrols <= 1); + WARN_ON(manual_val < master->minimum || manual_val > master->maximum); + WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl)); + master->is_auto = true; + master->has_volatiles = set_volatile; + master->manual_mode_value = manual_val; + master->flags |= V4L2_CTRL_FLAG_UPDATE; + + if (!is_cur_manual(master)) + flag = V4L2_CTRL_FLAG_INACTIVE | + (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0); + + for (i = 1; i < ncontrols; i++) + if (controls[i]) + controls[i]->flags |= flag; +} +EXPORT_SYMBOL(v4l2_ctrl_auto_cluster); + +/* + * Obtain the current volatile values of an autocluster and mark them + * as new. + */ +void update_from_auto_cluster(struct v4l2_ctrl *master) +{ + int i; + + for (i = 1; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + if (!call_op(master, g_volatile_ctrl)) + for (i = 1; i < master->ncontrols; i++) + if (master->cluster[i]) + master->cluster[i]->is_new = 1; +} + +/* + * Return non-zero if one or more of the controls in the cluster has a new + * value that differs from the current value. 
+ */ +static int cluster_changed(struct v4l2_ctrl *master) +{ + bool changed = false; + int i; + + for (i = 0; i < master->ncontrols; i++) { + struct v4l2_ctrl *ctrl = master->cluster[i]; + bool ctrl_changed = false; + + if (!ctrl) + continue; + + if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) { + changed = true; + ctrl_changed = true; + } + + /* + * Set has_changed to false to avoid generating + * the event V4L2_EVENT_CTRL_CH_VALUE + */ + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + ctrl->has_changed = false; + continue; + } + + if (ctrl->elems != ctrl->new_elems) + ctrl_changed = true; + if (!ctrl_changed) + ctrl_changed = !ctrl->type_ops->equal(ctrl, + ctrl->p_cur, ctrl->p_new); + ctrl->has_changed = ctrl_changed; + changed |= ctrl->has_changed; + } + return changed; +} + +/* + * Core function that calls try/s_ctrl and ensures that the new value is + * copied to the current value on a set. + * Must be called with ctrl->handler->lock held. + */ +int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, + bool set, u32 ch_flags) +{ + bool update_flag; + int ret; + int i; + + /* + * Go through the cluster and either validate the new value or + * (if no new value was set), copy the current value to the new + * value, ensuring a consistent view for the control ops when + * called. + */ + for (i = 0; i < master->ncontrols; i++) { + struct v4l2_ctrl *ctrl = master->cluster[i]; + + if (!ctrl) + continue; + + if (!ctrl->is_new) { + cur_to_new(ctrl); + continue; + } + /* + * Check again: it may have changed since the + * previous check in try_or_set_ext_ctrls(). + */ + if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) + return -EBUSY; + } + + ret = call_op(master, try_ctrl); + + /* Don't set if there is no change */ + if (ret || !set || !cluster_changed(master)) + return ret; + ret = call_op(master, s_ctrl); + if (ret) + return ret; + + /* If OK, then make the new values permanent. 
*/ + update_flag = is_cur_manual(master) != is_new_manual(master); + + for (i = 0; i < master->ncontrols; i++) { + /* + * If we switch from auto to manual mode, and this cluster + * contains volatile controls, then all non-master controls + * have to be marked as changed. The 'new' value contains + * the volatile value (obtained by update_from_auto_cluster), + * which now has to become the current value. + */ + if (i && update_flag && is_new_manual(master) && + master->has_volatiles && master->cluster[i]) + master->cluster[i]->has_changed = true; + + new_to_cur(fh, master->cluster[i], ch_flags | + ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0)); + } + return 0; +} + +/* Activate/deactivate a control. */ +void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active) +{ + /* invert since the actual flag is called 'inactive' */ + bool inactive = !active; + bool old; + + if (ctrl == NULL) + return; + + if (inactive) + /* set V4L2_CTRL_FLAG_INACTIVE */ + old = test_and_set_bit(4, &ctrl->flags); + else + /* clear V4L2_CTRL_FLAG_INACTIVE */ + old = test_and_clear_bit(4, &ctrl->flags); + if (old != inactive) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); +} +EXPORT_SYMBOL(v4l2_ctrl_activate); + +void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) +{ + bool old; + + if (ctrl == NULL) + return; + + lockdep_assert_held(ctrl->handler->lock); + + if (grabbed) + /* set V4L2_CTRL_FLAG_GRABBED */ + old = test_and_set_bit(1, &ctrl->flags); + else + /* clear V4L2_CTRL_FLAG_GRABBED */ + old = test_and_clear_bit(1, &ctrl->flags); + if (old != grabbed) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); +} +EXPORT_SYMBOL(__v4l2_ctrl_grab); + +/* Call s_ctrl for all controls owned by the handler */ +int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl *ctrl; + int ret = 0; + + if (hdl == NULL) + return 0; + + lockdep_assert_held(hdl->lock); + + list_for_each_entry(ctrl, &hdl->ctrls, node) + ctrl->done = false; + + 
list_for_each_entry(ctrl, &hdl->ctrls, node) { + struct v4l2_ctrl *master = ctrl->cluster[0]; + int i; + + /* Skip if this control was already handled by a cluster. */ + /* Skip button controls and read-only controls. */ + if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + cur_to_new(master->cluster[i]); + master->cluster[i]->is_new = 1; + master->cluster[i]->done = true; + } + } + ret = call_op(master, s_ctrl); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup); + +int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) +{ + int ret; + + if (hdl == NULL) + return 0; + + mutex_lock(hdl->lock); + ret = __v4l2_ctrl_handler_setup(hdl); + mutex_unlock(hdl->lock); + + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_setup); + +/* Log the control name and value */ +static void log_ctrl(const struct v4l2_ctrl *ctrl, + const char *prefix, const char *colon) +{ + if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY)) + return; + if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + return; + + pr_info("%s%s%s: ", prefix, colon, ctrl->name); + + ctrl->type_ops->log(ctrl); + + if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE | + V4L2_CTRL_FLAG_GRABBED | + V4L2_CTRL_FLAG_VOLATILE)) { + if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) + pr_cont(" inactive"); + if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED) + pr_cont(" grabbed"); + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) + pr_cont(" volatile"); + } + pr_cont("\n"); +} + +/* Log all controls owned by the handler */ +void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, + const char *prefix) +{ + struct v4l2_ctrl *ctrl; + const char *colon = ""; + int len; + + if (!hdl) + return; + if (!prefix) + prefix = ""; + len = strlen(prefix); + if (len && prefix[len - 1] != ' ') + colon = ": "; + mutex_lock(hdl->lock); + list_for_each_entry(ctrl, 
&hdl->ctrls, node) + if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED)) + log_ctrl(ctrl, prefix, colon); + mutex_unlock(hdl->lock); +} +EXPORT_SYMBOL(v4l2_ctrl_handler_log_status); + +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p) +{ + if (hdl->error) + return hdl->error; + + if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) { + u32 orientation_ctrl; + + switch (p->orientation) { + case V4L2_FWNODE_ORIENTATION_FRONT: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT; + break; + case V4L2_FWNODE_ORIENTATION_BACK: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK; + break; + case V4L2_FWNODE_ORIENTATION_EXTERNAL: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL; + break; + default: + return -EINVAL; + } + if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops, + V4L2_CID_CAMERA_ORIENTATION, + V4L2_CAMERA_ORIENTATION_EXTERNAL, 0, + orientation_ctrl)) + return hdl->error; + } + + if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) { + if (!v4l2_ctrl_new_std(hdl, ctrl_ops, + V4L2_CID_CAMERA_SENSOR_ROTATION, + p->rotation, p->rotation, 1, + p->rotation)) + return hdl->error; + } + + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c new file mode 100644 index 0000000..1ea5201 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c @@ -0,0 +1,1685 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework control definitions. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#include +#include + +/* + * Returns NULL or a character pointer array containing the menu for + * the given control ID. The pointer array ends with a NULL pointer. + * An empty string signifies a menu entry that is invalid. This allows + * drivers to disable certain options if it is not supported. 
+ */ +const char * const *v4l2_ctrl_get_menu(u32 id) +{ + static const char * const mpeg_audio_sampling_freq[] = { + "44.1 kHz", + "48 kHz", + "32 kHz", + NULL + }; + static const char * const mpeg_audio_encoding[] = { + "MPEG-1/2 Layer I", + "MPEG-1/2 Layer II", + "MPEG-1/2 Layer III", + "MPEG-2/4 AAC", + "AC-3", + NULL + }; + static const char * const mpeg_audio_l1_bitrate[] = { + "32 kbps", + "64 kbps", + "96 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "288 kbps", + "320 kbps", + "352 kbps", + "384 kbps", + "416 kbps", + "448 kbps", + NULL + }; + static const char * const mpeg_audio_l2_bitrate[] = { + "32 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + "384 kbps", + NULL + }; + static const char * const mpeg_audio_l3_bitrate[] = { + "32 kbps", + "40 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + NULL + }; + static const char * const mpeg_audio_ac3_bitrate[] = { + "32 kbps", + "40 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + "384 kbps", + "448 kbps", + "512 kbps", + "576 kbps", + "640 kbps", + NULL + }; + static const char * const mpeg_audio_mode[] = { + "Stereo", + "Joint Stereo", + "Dual", + "Mono", + NULL + }; + static const char * const mpeg_audio_mode_extension[] = { + "Bound 4", + "Bound 8", + "Bound 12", + "Bound 16", + NULL + }; + static const char * const mpeg_audio_emphasis[] = { + "No Emphasis", + "50/15 us", + "CCITT J17", + NULL + }; + static const char * const mpeg_audio_crc[] = { + "No CRC", + "16-bit CRC", + NULL + }; + static const char * const mpeg_audio_dec_playback[] = { + "Auto", + "Stereo", + "Left", + "Right", + "Mono", + "Swapped 
Stereo", + NULL + }; + static const char * const mpeg_video_encoding[] = { + "MPEG-1", + "MPEG-2", + "MPEG-4 AVC", + NULL + }; + static const char * const mpeg_video_aspect[] = { + "1x1", + "4x3", + "16x9", + "2.21x1", + NULL + }; + static const char * const mpeg_video_bitrate_mode[] = { + "Variable Bitrate", + "Constant Bitrate", + "Constant Quality", + NULL + }; + static const char * const mpeg_stream_type[] = { + "MPEG-2 Program Stream", + "MPEG-2 Transport Stream", + "MPEG-1 System Stream", + "MPEG-2 DVD-compatible Stream", + "MPEG-1 VCD-compatible Stream", + "MPEG-2 SVCD-compatible Stream", + NULL + }; + static const char * const mpeg_stream_vbi_fmt[] = { + "No VBI", + "Private Packet, IVTV Format", + NULL + }; + static const char * const camera_power_line_frequency[] = { + "Disabled", + "50 Hz", + "60 Hz", + "Auto", + NULL + }; + static const char * const camera_exposure_auto[] = { + "Auto Mode", + "Manual Mode", + "Shutter Priority Mode", + "Aperture Priority Mode", + NULL + }; + static const char * const camera_exposure_metering[] = { + "Average", + "Center Weighted", + "Spot", + "Matrix", + NULL + }; + static const char * const camera_auto_focus_range[] = { + "Auto", + "Normal", + "Macro", + "Infinity", + NULL + }; + static const char * const colorfx[] = { + "None", + "Black & White", + "Sepia", + "Negative", + "Emboss", + "Sketch", + "Sky Blue", + "Grass Green", + "Skin Whiten", + "Vivid", + "Aqua", + "Art Freeze", + "Silhouette", + "Solarization", + "Antique", + "Set Cb/Cr", + NULL + }; + static const char * const auto_n_preset_white_balance[] = { + "Manual", + "Auto", + "Incandescent", + "Fluorescent", + "Fluorescent H", + "Horizon", + "Daylight", + "Flash", + "Cloudy", + "Shade", + NULL, + }; + static const char * const camera_iso_sensitivity_auto[] = { + "Manual", + "Auto", + NULL + }; + static const char * const scene_mode[] = { + "None", + "Backlight", + "Beach/Snow", + "Candle Light", + "Dusk/Dawn", + "Fall Colors", + "Fireworks", + "Landscape", + 
"Night", + "Party/Indoor", + "Portrait", + "Sports", + "Sunset", + "Text", + NULL + }; + static const char * const tune_emphasis[] = { + "None", + "50 Microseconds", + "75 Microseconds", + NULL, + }; + static const char * const header_mode[] = { + "Separate Buffer", + "Joined With 1st Frame", + NULL, + }; + static const char * const multi_slice[] = { + "Single", + "Max Macroblocks", + "Max Bytes", + NULL, + }; + static const char * const entropy_mode[] = { + "CAVLC", + "CABAC", + NULL, + }; + static const char * const mpeg_h264_level[] = { + "1", + "1b", + "1.1", + "1.2", + "1.3", + "2", + "2.1", + "2.2", + "3", + "3.1", + "3.2", + "4", + "4.1", + "4.2", + "5", + "5.1", + "5.2", + "6.0", + "6.1", + "6.2", + NULL, + }; + static const char * const h264_loop_filter[] = { + "Enabled", + "Disabled", + "Disabled at Slice Boundary", + NULL, + }; + static const char * const h264_profile[] = { + "Baseline", + "Constrained Baseline", + "Main", + "Extended", + "High", + "High 10", + "High 422", + "High 444 Predictive", + "High 10 Intra", + "High 422 Intra", + "High 444 Intra", + "CAVLC 444 Intra", + "Scalable Baseline", + "Scalable High", + "Scalable High Intra", + "Stereo High", + "Multiview High", + "Constrained High", + NULL, + }; + static const char * const vui_sar_idc[] = { + "Unspecified", + "1:1", + "12:11", + "10:11", + "16:11", + "40:33", + "24:11", + "20:11", + "32:11", + "80:33", + "18:11", + "15:11", + "64:33", + "160:99", + "4:3", + "3:2", + "2:1", + "Extended SAR", + NULL, + }; + static const char * const h264_fp_arrangement_type[] = { + "Checkerboard", + "Column", + "Row", + "Side by Side", + "Top Bottom", + "Temporal", + NULL, + }; + static const char * const h264_fmo_map_type[] = { + "Interleaved Slices", + "Scattered Slices", + "Foreground with Leftover", + "Box Out", + "Raster Scan", + "Wipe Scan", + "Explicit", + NULL, + }; + static const char * const h264_decode_mode[] = { + "Slice-Based", + "Frame-Based", + NULL, + }; + static const char * const 
h264_start_code[] = { + "No Start Code", + "Annex B Start Code", + NULL, + }; + static const char * const h264_hierarchical_coding_type[] = { + "Hier Coding B", + "Hier Coding P", + NULL, + }; + static const char * const mpeg_mpeg2_level[] = { + "Low", + "Main", + "High 1440", + "High", + NULL, + }; + static const char * const mpeg2_profile[] = { + "Simple", + "Main", + "SNR Scalable", + "Spatially Scalable", + "High", + NULL, + }; + static const char * const mpeg_mpeg4_level[] = { + "0", + "0b", + "1", + "2", + "3", + "3b", + "4", + "5", + NULL, + }; + static const char * const mpeg4_profile[] = { + "Simple", + "Advanced Simple", + "Core", + "Simple Scalable", + "Advanced Coding Efficiency", + NULL, + }; + + static const char * const vpx_golden_frame_sel[] = { + "Use Previous Frame", + "Use Previous Specific Frame", + NULL, + }; + static const char * const vp8_profile[] = { + "0", + "1", + "2", + "3", + NULL, + }; + static const char * const vp9_profile[] = { + "0", + "1", + "2", + "3", + NULL, + }; + static const char * const vp9_level[] = { + "1", + "1.1", + "2", + "2.1", + "3", + "3.1", + "4", + "4.1", + "5", + "5.1", + "5.2", + "6", + "6.1", + "6.2", + NULL, + }; + + static const char * const flash_led_mode[] = { + "Off", + "Flash", + "Torch", + NULL, + }; + static const char * const flash_strobe_source[] = { + "Software", + "External", + NULL, + }; + + static const char * const jpeg_chroma_subsampling[] = { + "4:4:4", + "4:2:2", + "4:2:0", + "4:1:1", + "4:1:0", + "Gray", + NULL, + }; + static const char * const dv_tx_mode[] = { + "DVI-D", + "HDMI", + NULL, + }; + static const char * const dv_rgb_range[] = { + "Automatic", + "RGB Limited Range (16-235)", + "RGB Full Range (0-255)", + NULL, + }; + static const char * const dv_it_content_type[] = { + "Graphics", + "Photo", + "Cinema", + "Game", + "No IT Content", + NULL, + }; + static const char * const detect_md_mode[] = { + "Disabled", + "Global", + "Threshold Grid", + "Region Grid", + NULL, + }; + + static 
const char * const av1_profile[] = { + "Main", + "High", + "Professional", + NULL, + }; + static const char * const av1_level[] = { + "2.0", + "2.1", + "2.2", + "2.3", + "3.0", + "3.1", + "3.2", + "3.3", + "4.0", + "4.1", + "4.2", + "4.3", + "5.0", + "5.1", + "5.2", + "5.3", + "6.0", + "6.1", + "6.2", + "6.3", + "7.0", + "7.1", + "7.2", + "7.3", + NULL, + }; + + static const char * const hevc_profile[] = { + "Main", + "Main Still Picture", + "Main 10", + NULL, + }; + static const char * const hevc_level[] = { + "1", + "2", + "2.1", + "3", + "3.1", + "4", + "4.1", + "5", + "5.1", + "5.2", + "6", + "6.1", + "6.2", + NULL, + }; + static const char * const hevc_hierarchial_coding_type[] = { + "B", + "P", + NULL, + }; + static const char * const hevc_refresh_type[] = { + "None", + "CRA", + "IDR", + NULL, + }; + static const char * const hevc_size_of_length_field[] = { + "0", + "1", + "2", + "4", + NULL, + }; + static const char * const hevc_tier[] = { + "Main", + "High", + NULL, + }; + static const char * const hevc_loop_filter_mode[] = { + "Disabled", + "Enabled", + "Disabled at slice boundary", + "NULL", + }; + static const char * const hevc_decode_mode[] = { + "Slice-Based", + "Frame-Based", + NULL, + }; + static const char * const hevc_start_code[] = { + "No Start Code", + "Annex B Start Code", + NULL, + }; + static const char * const camera_orientation[] = { + "Front", + "Back", + "External", + NULL, + }; + static const char * const mpeg_video_frame_skip[] = { + "Disabled", + "Level Limit", + "VBV/CPB Limit", + NULL, + }; + static const char * const intra_refresh_period_type[] = { + "Random", + "Cyclic", + NULL, + }; + + switch (id) { + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + return mpeg_audio_sampling_freq; + case V4L2_CID_MPEG_AUDIO_ENCODING: + return mpeg_audio_encoding; + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + return mpeg_audio_l1_bitrate; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + return mpeg_audio_l2_bitrate; + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + return 
mpeg_audio_l3_bitrate; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + return mpeg_audio_ac3_bitrate; + case V4L2_CID_MPEG_AUDIO_MODE: + return mpeg_audio_mode; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + return mpeg_audio_mode_extension; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + return mpeg_audio_emphasis; + case V4L2_CID_MPEG_AUDIO_CRC: + return mpeg_audio_crc; + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: + case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: + return mpeg_audio_dec_playback; + case V4L2_CID_MPEG_VIDEO_ENCODING: + return mpeg_video_encoding; + case V4L2_CID_MPEG_VIDEO_ASPECT: + return mpeg_video_aspect; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + return mpeg_video_bitrate_mode; + case V4L2_CID_MPEG_STREAM_TYPE: + return mpeg_stream_type; + case V4L2_CID_MPEG_STREAM_VBI_FMT: + return mpeg_stream_vbi_fmt; + case V4L2_CID_POWER_LINE_FREQUENCY: + return camera_power_line_frequency; + case V4L2_CID_EXPOSURE_AUTO: + return camera_exposure_auto; + case V4L2_CID_EXPOSURE_METERING: + return camera_exposure_metering; + case V4L2_CID_AUTO_FOCUS_RANGE: + return camera_auto_focus_range; + case V4L2_CID_COLORFX: + return colorfx; + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: + return auto_n_preset_white_balance; + case V4L2_CID_ISO_SENSITIVITY_AUTO: + return camera_iso_sensitivity_auto; + case V4L2_CID_SCENE_MODE: + return scene_mode; + case V4L2_CID_TUNE_PREEMPHASIS: + return tune_emphasis; + case V4L2_CID_TUNE_DEEMPHASIS: + return tune_emphasis; + case V4L2_CID_FLASH_LED_MODE: + return flash_led_mode; + case V4L2_CID_FLASH_STROBE_SOURCE: + return flash_strobe_source; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + return header_mode; + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: + return mpeg_video_frame_skip; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + return multi_slice; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + return entropy_mode; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + return mpeg_h264_level; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + return 
h264_loop_filter; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + return h264_profile; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: + return vui_sar_idc; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: + return h264_fp_arrangement_type; + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: + return h264_fmo_map_type; + case V4L2_CID_STATELESS_H264_DECODE_MODE: + return h264_decode_mode; + case V4L2_CID_STATELESS_H264_START_CODE: + return h264_start_code; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: + return h264_hierarchical_coding_type; + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: + return mpeg_mpeg2_level; + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: + return mpeg2_profile; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + return mpeg_mpeg4_level; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + return mpeg4_profile; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: + return vpx_golden_frame_sel; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + return vp8_profile; + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + return vp9_profile; + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + return vp9_level; + case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: + return jpeg_chroma_subsampling; + case V4L2_CID_DV_TX_MODE: + return dv_tx_mode; + case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_RX_RGB_RANGE: + return dv_rgb_range; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + return dv_it_content_type; + case V4L2_CID_DETECT_MD_MODE: + return detect_md_mode; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + return hevc_profile; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + return hevc_level; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: + return hevc_hierarchial_coding_type; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: + return hevc_refresh_type; + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: + return hevc_size_of_length_field; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + return hevc_tier; + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: + return hevc_loop_filter_mode; + case 
V4L2_CID_MPEG_VIDEO_AV1_PROFILE: + return av1_profile; + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: + return av1_level; + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: + return hevc_decode_mode; + case V4L2_CID_STATELESS_HEVC_START_CODE: + return hevc_start_code; + case V4L2_CID_CAMERA_ORIENTATION: + return camera_orientation; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + return intra_refresh_period_type; + default: + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_menu); + +#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); }) +/* + * Returns NULL or an s64 type array containing the menu for given + * control ID. The total number of the menu items is returned in @len. + */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) +{ + static const s64 qmenu_int_vpx_num_partitions[] = { + 1, 2, 4, 8, + }; + + static const s64 qmenu_int_vpx_num_ref_frames[] = { + 1, 2, 3, + }; + + switch (id) { + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: + return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len); + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: + return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len); + default: + *len = 0; + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); + +/* Return the control name. */ +const char *v4l2_ctrl_get_name(u32 id) +{ + switch (id) { + /* USER controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_USER_CLASS: return "User Controls"; + case V4L2_CID_BRIGHTNESS: return "Brightness"; + case V4L2_CID_CONTRAST: return "Contrast"; + case V4L2_CID_SATURATION: return "Saturation"; + case V4L2_CID_HUE: return "Hue"; + case V4L2_CID_AUDIO_VOLUME: return "Volume"; + case V4L2_CID_AUDIO_BALANCE: return "Balance"; + case V4L2_CID_AUDIO_BASS: return "Bass"; + case V4L2_CID_AUDIO_TREBLE: return "Treble"; + case V4L2_CID_AUDIO_MUTE: return "Mute"; + case V4L2_CID_AUDIO_LOUDNESS: return "Loudness"; + case V4L2_CID_BLACK_LEVEL: return "Black Level"; + case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic"; + case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance"; + case V4L2_CID_RED_BALANCE: return "Red Balance"; + case V4L2_CID_BLUE_BALANCE: return "Blue Balance"; + case V4L2_CID_GAMMA: return "Gamma"; + case V4L2_CID_EXPOSURE: return "Exposure"; + case V4L2_CID_AUTOGAIN: return "Gain, Automatic"; + case V4L2_CID_GAIN: return "Gain"; + case V4L2_CID_HFLIP: return "Horizontal Flip"; + case V4L2_CID_VFLIP: return "Vertical Flip"; + case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency"; + case V4L2_CID_HUE_AUTO: return "Hue, Automatic"; + case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature"; + case V4L2_CID_SHARPNESS: return "Sharpness"; + case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation"; + case V4L2_CID_CHROMA_AGC: return "Chroma AGC"; + case V4L2_CID_COLOR_KILLER: return "Color Killer"; + case V4L2_CID_COLORFX: return "Color Effects"; + case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic"; + case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter"; + case V4L2_CID_ROTATE: return "Rotate"; + case V4L2_CID_BG_COLOR: return "Background Color"; + case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; + case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; + case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture 
Buffers"; + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers"; + case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; + case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr"; + case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB"; + + /* + * Codec controls + * + * The MPEG controls are applicable to all codec controls + * and the 'MPEG' part of the define is historical. + * + * Keep the order of the 'case's the same as in videodev2.h! + */ + case V4L2_CID_CODEC_CLASS: return "Codec Controls"; + case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type"; + case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID"; + case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID"; + case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID"; + case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID"; + case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID"; + case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID"; + case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format"; + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency"; + case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding"; + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate"; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate"; + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate"; + case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode"; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension"; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis"; + case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC"; + case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute"; + case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate"; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate"; + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback"; + case 
V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback"; + case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding"; + case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect"; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames"; + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size"; + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure"; + case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown"; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode"; + case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality"; + case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate"; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate"; + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation"; + case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute"; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; + case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; + case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; + case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period"; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; + case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics"; + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode"; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay"; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable"; + case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters"; + case 
V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; + case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode"; + case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period"; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode"; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. 
Frame as Frame0"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type"; + case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs"; + case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering"; + case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP: + return "H264 Set QP Value for HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: + return "H264 Constrained Intra Pred"; + case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate"; + case 
V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate"; + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level"; + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile"; + case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; + case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method"; + case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; + case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS"; + case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count"; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color"; + case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control"; + case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range"; + case 
V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; + case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; + case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID"; + case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count"; + case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index"; + case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames"; + case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: return "Average QP Value"; + case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value"; + case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value"; + + /* VPX controls */ + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; + case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable"; + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame"; + case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range"; + case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control"; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period"; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator"; + case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile"; + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile"; + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level"; + + /* HEVC controls */ + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value"; + case 
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile"; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level"; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier"; + case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth"; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type"; + case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction"; + case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding"; + case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront"; + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC 
Hierarchical Layer 2 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB"; + case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID"; + case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing"; + case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split"; + case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs"; + case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode"; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR"; + case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset"; + case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset"; + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field"; + case 
V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame"; + case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR"; + + /* AV1 controls */ + case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile"; + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level"; + + /* CAMERA controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_CAMERA_CLASS: return "Camera Controls"; + case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure"; + case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute"; + case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate"; + case V4L2_CID_PAN_RELATIVE: return "Pan, Relative"; + case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative"; + case V4L2_CID_PAN_RESET: return "Pan, Reset"; + case V4L2_CID_TILT_RESET: return "Tilt, Reset"; + case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute"; + case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute"; + case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute"; + case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative"; + case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous"; + case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute"; + case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative"; + case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous"; + case V4L2_CID_PRIVACY: return "Privacy"; + case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute"; + case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative"; + case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias"; + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset"; + case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range"; + case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization"; + case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity"; + case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto"; + case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode"; + case 
V4L2_CID_SCENE_MODE: return "Scene Mode"; + case V4L2_CID_3A_LOCK: return "3A Lock"; + case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start"; + case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop"; + case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status"; + case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range"; + case V4L2_CID_PAN_SPEED: return "Pan, Speed"; + case V4L2_CID_TILT_SPEED: return "Tilt, Speed"; + case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size"; + case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation"; + case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation"; + case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode"; + + /* FM Radio Modulator controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls"; + case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation"; + case V4L2_CID_RDS_TX_PI: return "RDS Program ID"; + case V4L2_CID_RDS_TX_PTY: return "RDS Program Type"; + case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name"; + case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text"; + case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo"; + case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head"; + case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed"; + case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY"; + case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; + case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; + case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music"; + case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies"; + case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies"; + case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; + case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; + case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; + case 
V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled"; + case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; + case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; + case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; + case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time"; + case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; + case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; + case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; + case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis"; + case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; + case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; + + /* Flash controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_FLASH_CLASS: return "Flash Controls"; + case V4L2_CID_FLASH_LED_MODE: return "LED Mode"; + case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source"; + case V4L2_CID_FLASH_STROBE: return "Strobe"; + case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe"; + case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status"; + case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout"; + case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode"; + case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode"; + case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator"; + case V4L2_CID_FLASH_FAULT: return "Faults"; + case V4L2_CID_FLASH_CHARGE: return "Charge"; + case V4L2_CID_FLASH_READY: return "Ready to Strobe"; + + /* JPEG encoder controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls"; + case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling"; + case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval"; + case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality"; + case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers"; + + /* Image source controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls"; + case V4L2_CID_VBLANK: return "Vertical Blanking"; + case V4L2_CID_HBLANK: return "Horizontal Blanking"; + case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain"; + case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value"; + case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value"; + case V4L2_CID_NOTIFY_GAINS: return "Notify Gains"; + + /* Image processing controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls"; + case V4L2_CID_LINK_FREQ: return "Link Frequency"; + case V4L2_CID_PIXEL_RATE: return "Pixel Rate"; + case V4L2_CID_TEST_PATTERN: return "Test Pattern"; + case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode"; + case V4L2_CID_DIGITAL_GAIN: return "Digital Gain"; + + /* DV controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_DV_CLASS: return "Digital Video Controls"; + case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present"; + case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present"; + case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present"; + case V4L2_CID_DV_TX_MODE: return "Transmit Mode"; + case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range"; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type"; + case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present"; + case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range"; + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type"; + + case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls"; + case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis"; + case V4L2_CID_RDS_RECEPTION: return "RDS Reception"; + case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls"; + case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain"; + case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto"; + case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain"; + case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto"; + case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain"; + case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto"; + case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain"; + case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto"; + case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth"; + case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock"; + case V4L2_CID_RDS_RX_PTY: return "RDS Program Type"; + case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name"; + case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text"; + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; + case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music"; + + /* Detection controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_DETECT_CLASS: return "Detection Controls"; + case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode"; + case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold"; + case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid"; + case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid"; + + /* Stateless Codec controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls"; + case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode"; + case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code"; + case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set"; + case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set"; + case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix"; + case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table"; + case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters"; + case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters"; + case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters"; + case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters"; + case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header"; + case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header"; + case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices"; + case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates"; + case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters"; + case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set"; + case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set"; + case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters"; + case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix"; + case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: 
return "HEVC Decode Parameters"; + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode"; + case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code"; + case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets"; + case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters"; + case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry"; + case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters"; + case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain"; + + /* Colorimetry controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls"; + case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info"; + case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display"; + default: + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_name); + +void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags) +{ + *name = v4l2_ctrl_get_name(id); + *flags = 0; + + switch (id) { + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_LOUDNESS: + case V4L2_CID_AUTO_WHITE_BALANCE: + case V4L2_CID_AUTOGAIN: + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + case V4L2_CID_HUE_AUTO: + case V4L2_CID_CHROMA_AGC: + case V4L2_CID_COLOR_KILLER: + case V4L2_CID_AUTOBRIGHTNESS: + case V4L2_CID_MPEG_AUDIO_MUTE: + case V4L2_CID_MPEG_VIDEO_MUTE: + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + case V4L2_CID_MPEG_VIDEO_PULLDOWN: + case V4L2_CID_EXPOSURE_AUTO_PRIORITY: + case V4L2_CID_FOCUS_AUTO: + case V4L2_CID_PRIVACY: + case V4L2_CID_AUDIO_LIMITER_ENABLED: + case V4L2_CID_AUDIO_COMPRESSION_ENABLED: + case V4L2_CID_PILOT_TONE_ENABLED: + case V4L2_CID_ILLUMINATORS_1: + case V4L2_CID_ILLUMINATORS_2: + case V4L2_CID_FLASH_STROBE_STATUS: + case V4L2_CID_FLASH_CHARGE: + case V4L2_CID_FLASH_READY: + case 
V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: + case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: + case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: + case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: + case V4L2_CID_WIDE_DYNAMIC_RANGE: + case V4L2_CID_IMAGE_STABILIZATION: + case V4L2_CID_RDS_RECEPTION: + case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: + case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: + case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: + case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: + case V4L2_CID_RF_TUNER_PLL_LOCK: + case V4L2_CID_RDS_TX_MONO_STEREO: + case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: + case V4L2_CID_RDS_TX_COMPRESSED: + case V4L2_CID_RDS_TX_DYNAMIC_PTY: + case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_TX_MUSIC_SPEECH: + case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_RX_MUSIC_SPEECH: + *type = V4L2_CTRL_TYPE_BOOLEAN; + *min = 0; + *max = *step = 1; + break; + case V4L2_CID_ROTATE: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; + break; + case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: + case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + case V4L2_CID_MPEG_VIDEO_LTR_COUNT: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: + *type = V4L2_CTRL_TYPE_BITMASK; + *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: + 
case V4L2_CID_PAN_RESET: + case V4L2_CID_TILT_RESET: + case V4L2_CID_FLASH_STROBE: + case V4L2_CID_FLASH_STROBE_STOP: + case V4L2_CID_AUTO_FOCUS_START: + case V4L2_CID_AUTO_FOCUS_STOP: + case V4L2_CID_DO_WHITE_BALANCE: + *type = V4L2_CTRL_TYPE_BUTTON; + *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + *min = *max = *step = *def = 0; + break; + case V4L2_CID_POWER_LINE_FREQUENCY: + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + case V4L2_CID_MPEG_AUDIO_ENCODING: + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + case V4L2_CID_MPEG_AUDIO_MODE: + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + case V4L2_CID_MPEG_AUDIO_CRC: + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: + case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: + case V4L2_CID_MPEG_VIDEO_ENCODING: + case V4L2_CID_MPEG_VIDEO_ASPECT: + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_STREAM_TYPE: + case V4L2_CID_MPEG_STREAM_VBI_FMT: + case V4L2_CID_EXPOSURE_AUTO: + case V4L2_CID_AUTO_FOCUS_RANGE: + case V4L2_CID_COLORFX: + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: + case V4L2_CID_TUNE_PREEMPHASIS: + case V4L2_CID_FLASH_LED_MODE: + case V4L2_CID_FLASH_STROBE_SOURCE: + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case 
V4L2_CID_JPEG_CHROMA_SUBSAMPLING: + case V4L2_CID_ISO_SENSITIVITY_AUTO: + case V4L2_CID_EXPOSURE_METERING: + case V4L2_CID_SCENE_MODE: + case V4L2_CID_DV_TX_MODE: + case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_RGB_RANGE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + case V4L2_CID_TEST_PATTERN: + case V4L2_CID_DEINTERLACING_MODE: + case V4L2_CID_TUNE_DEEMPHASIS: + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + case V4L2_CID_DETECT_MD_MODE: + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: + case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: + case V4L2_CID_STATELESS_HEVC_START_CODE: + case V4L2_CID_STATELESS_H264_DECODE_MODE: + case V4L2_CID_STATELESS_H264_START_CODE: + case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + case V4L2_CID_HDR_SENSOR_MODE: + *type = V4L2_CTRL_TYPE_MENU; + break; + case V4L2_CID_LINK_FREQ: + *type = V4L2_CTRL_TYPE_INTEGER_MENU; + break; + case V4L2_CID_RDS_TX_PS_NAME: + case V4L2_CID_RDS_TX_RADIO_TEXT: + case V4L2_CID_RDS_RX_PS_NAME: + case V4L2_CID_RDS_RX_RADIO_TEXT: + *type = V4L2_CTRL_TYPE_STRING; + break; + case V4L2_CID_ISO_SENSITIVITY: + case V4L2_CID_AUTO_EXPOSURE_BIAS: + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: + *type = V4L2_CTRL_TYPE_INTEGER_MENU; + break; + case V4L2_CID_USER_CLASS: + case V4L2_CID_CAMERA_CLASS: + case V4L2_CID_CODEC_CLASS: + case V4L2_CID_FM_TX_CLASS: + case V4L2_CID_FLASH_CLASS: + case V4L2_CID_JPEG_CLASS: + case 
V4L2_CID_IMAGE_SOURCE_CLASS: + case V4L2_CID_IMAGE_PROC_CLASS: + case V4L2_CID_DV_CLASS: + case V4L2_CID_FM_RX_CLASS: + case V4L2_CID_RF_TUNER_CLASS: + case V4L2_CID_DETECT_CLASS: + case V4L2_CID_CODEC_STATELESS_CLASS: + case V4L2_CID_COLORIMETRY_CLASS: + *type = V4L2_CTRL_TYPE_CTRL_CLASS; + /* You can neither read nor write these */ + *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; + *min = *max = *step = *def = 0; + break; + case V4L2_CID_BG_COLOR: + case V4L2_CID_COLORFX_RGB: + *type = V4L2_CTRL_TYPE_INTEGER; + *step = 1; + *min = 0; + /* Max is calculated as RGB888 that is 2^24 - 1 */ + *max = 0xffffff; + break; + case V4L2_CID_COLORFX_CBCR: + *type = V4L2_CTRL_TYPE_INTEGER; + *step = 1; + *min = 0; + *max = 0xffff; + break; + case V4L2_CID_FLASH_FAULT: + case V4L2_CID_JPEG_ACTIVE_MARKER: + case V4L2_CID_3A_LOCK: + case V4L2_CID_AUTO_FOCUS_STATUS: + case V4L2_CID_DV_TX_HOTPLUG: + case V4L2_CID_DV_TX_RXSENSE: + case V4L2_CID_DV_TX_EDID_PRESENT: + case V4L2_CID_DV_RX_POWER_PRESENT: + *type = V4L2_CTRL_TYPE_BITMASK; + break; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_MPEG_VIDEO_DEC_PTS: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; + *min = *def = 0; + *max = 0x1ffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_DEC_FRAME: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; + *min = *def = 0; + *max = 0x7fffffffffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: + *type = V4L2_CTRL_TYPE_INTEGER64; + *min = 0; + /* default for 8 bit black, luma is 16, chroma is 128 */ + *def = 0x8000800010LL; + *max = 0xffffffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case 
V4L2_CID_PIXEL_RATE: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_DETECT_MD_REGION_GRID: + *type = V4L2_CTRL_TYPE_U8; + break; + case V4L2_CID_DETECT_MD_THRESHOLD_GRID: + *type = V4L2_CTRL_TYPE_U16; + break; + case V4L2_CID_RDS_TX_ALT_FREQS: + *type = V4L2_CTRL_TYPE_U32; + break; + case V4L2_CID_STATELESS_MPEG2_SEQUENCE: + *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE; + break; + case V4L2_CID_STATELESS_MPEG2_PICTURE: + *type = V4L2_CTRL_TYPE_MPEG2_PICTURE; + break; + case V4L2_CID_STATELESS_MPEG2_QUANTISATION: + *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION; + break; + case V4L2_CID_STATELESS_FWHT_PARAMS: + *type = V4L2_CTRL_TYPE_FWHT_PARAMS; + break; + case V4L2_CID_STATELESS_H264_SPS: + *type = V4L2_CTRL_TYPE_H264_SPS; + break; + case V4L2_CID_STATELESS_H264_PPS: + *type = V4L2_CTRL_TYPE_H264_PPS; + break; + case V4L2_CID_STATELESS_H264_SCALING_MATRIX: + *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX; + break; + case V4L2_CID_STATELESS_H264_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS; + break; + case V4L2_CID_STATELESS_H264_DECODE_PARAMS: + *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS; + break; + case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: + *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS; + break; + case V4L2_CID_STATELESS_VP8_FRAME: + *type = V4L2_CTRL_TYPE_VP8_FRAME; + break; + case V4L2_CID_STATELESS_HEVC_SPS: + *type = V4L2_CTRL_TYPE_HEVC_SPS; + break; + case V4L2_CID_STATELESS_HEVC_PPS: + *type = V4L2_CTRL_TYPE_HEVC_PPS; + break; + case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: + *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX; + break; + case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: + *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS; + break; + case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: + *type = V4L2_CTRL_TYPE_U32; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case 
V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: + *type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR; + break; + case V4L2_CID_STATELESS_VP9_FRAME: + *type = V4L2_CTRL_TYPE_VP9_FRAME; + break; + case V4L2_CID_STATELESS_AV1_SEQUENCE: + *type = V4L2_CTRL_TYPE_AV1_SEQUENCE; + break; + case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: + *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case V4L2_CID_STATELESS_AV1_FRAME: + *type = V4L2_CTRL_TYPE_AV1_FRAME; + break; + case V4L2_CID_STATELESS_AV1_FILM_GRAIN: + *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN; + break; + case V4L2_CID_UNIT_CELL_SIZE: + *type = V4L2_CTRL_TYPE_AREA; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: + *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO; + break; + case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: + *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY; + break; + default: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + } + switch (id) { + case V4L2_CID_MPEG_AUDIO_ENCODING: + case V4L2_CID_MPEG_AUDIO_MODE: + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + case V4L2_CID_MPEG_STREAM_TYPE: + *flags |= V4L2_CTRL_FLAG_UPDATE; + break; + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_BRIGHTNESS: + case V4L2_CID_CONTRAST: + case V4L2_CID_SATURATION: + case V4L2_CID_HUE: + case V4L2_CID_RED_BALANCE: + case V4L2_CID_BLUE_BALANCE: + case V4L2_CID_GAMMA: + case V4L2_CID_SHARPNESS: + case V4L2_CID_CHROMA_GAIN: + case V4L2_CID_RDS_TX_DEVIATION: + case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: + case V4L2_CID_AUDIO_LIMITER_DEVIATION: + case V4L2_CID_AUDIO_COMPRESSION_GAIN: + case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: + case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: + case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: + case V4L2_CID_PILOT_TONE_DEVIATION: + case V4L2_CID_PILOT_TONE_FREQUENCY: + case V4L2_CID_TUNE_POWER_LEVEL: + case 
V4L2_CID_TUNE_ANTENNA_CAPACITOR: + case V4L2_CID_RF_TUNER_RF_GAIN: + case V4L2_CID_RF_TUNER_LNA_GAIN: + case V4L2_CID_RF_TUNER_MIXER_GAIN: + case V4L2_CID_RF_TUNER_IF_GAIN: + case V4L2_CID_RF_TUNER_BANDWIDTH: + case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: + *flags |= V4L2_CTRL_FLAG_SLIDER; + break; + case V4L2_CID_PAN_RELATIVE: + case V4L2_CID_TILT_RELATIVE: + case V4L2_CID_FOCUS_RELATIVE: + case V4L2_CID_IRIS_RELATIVE: + case V4L2_CID_ZOOM_RELATIVE: + *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_FLASH_STROBE_STATUS: + case V4L2_CID_AUTO_FOCUS_STATUS: + case V4L2_CID_FLASH_READY: + case V4L2_CID_DV_TX_HOTPLUG: + case V4L2_CID_DV_TX_RXSENSE: + case V4L2_CID_DV_TX_EDID_PRESENT: + case V4L2_CID_DV_RX_POWER_PRESENT: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + case V4L2_CID_RDS_RX_PTY: + case V4L2_CID_RDS_RX_PS_NAME: + case V4L2_CID_RDS_RX_RADIO_TEXT: + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_RX_MUSIC_SPEECH: + case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_CAMERA_SENSOR_ROTATION: + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_RF_TUNER_PLL_LOCK: + *flags |= V4L2_CTRL_FLAG_VOLATILE; + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_fill); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h new file mode 100644 index 0000000..aba6176 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 controls framework private header. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#ifndef _V4L2_CTRLS_PRIV_H_ +#define _V4L2_CTRLS_PRIV_H_ + +#define dprintk(vdev, fmt, arg...) 
do { \ + if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \ + printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \ + __func__, video_device_node_name(vdev), ##arg); \ +} while (0) + +#define has_op(master, op) \ + ((master)->ops && (master)->ops->op) +#define call_op(master, op) \ + (has_op(master, op) ? (master)->ops->op(master) : 0) + +static inline u32 node2id(struct list_head *node) +{ + return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id; +} + +/* + * Small helper function to determine if the autocluster is set to manual + * mode. + */ +static inline bool is_cur_manual(const struct v4l2_ctrl *master) +{ + return master->is_auto && master->cur.val == master->manual_mode_value; +} + +/* + * Small helper function to determine if the autocluster will be set to manual + * mode. + */ +static inline bool is_new_manual(const struct v4l2_ctrl *master) +{ + return master->is_auto && master->val == master->manual_mode_value; +} + +static inline u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + +/* v4l2-ctrls-core.c */ +void cur_to_new(struct v4l2_ctrl *ctrl); +void cur_to_req(struct v4l2_ctrl_ref *ref); +void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags); +void new_to_req(struct v4l2_ctrl_ref *ref); +int req_to_new(struct v4l2_ctrl_ref *ref); +void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl); +void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes); +int handler_new_ref(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req); +struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id); +struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id); +int check_range(enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def); +void update_from_auto_cluster(struct v4l2_ctrl *master); 
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, + bool set, u32 ch_flags); + +/* v4l2-ctrls-api.c */ +int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev); +int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev, bool set); + +/* v4l2-ctrls-request.c */ +void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl); +void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl); +int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs); +int try_set_ext_ctrls_request(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set); + +#endif diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-request.c b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-request.c new file mode 100644 index 0000000..c637049 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-ctrls-request.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework Request API implementation. 
+ * + * Copyright (C) 2018-2021 Hans Verkuil + */ + +#define pr_fmt(fmt) "v4l2-ctrls: " fmt + +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +/* Initialize the request-related fields in a control handler */ +void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl) +{ + INIT_LIST_HEAD(&hdl->requests); + INIT_LIST_HEAD(&hdl->requests_queued); + hdl->request_is_queued = false; + media_request_object_init(&hdl->req_obj); +} + +/* Free the request-related fields in a control handler */ +void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl_handler *req, *next_req; + + /* + * Do nothing if this isn't the main handler or the main + * handler is not used in any request. + * + * The main handler can be identified by having a NULL ops pointer in + * the request object. + */ + if (hdl->req_obj.ops || list_empty(&hdl->requests)) + return; + + /* + * If the main handler is freed and it is used by handler objects in + * outstanding requests, then unbind and put those objects before + * freeing the main handler. 
+ */ + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { + media_request_object_unbind(&req->req_obj); + media_request_object_put(&req->req_obj); + } +} + +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_handler *from) +{ + struct v4l2_ctrl_ref *ref; + int err = 0; + + if (WARN_ON(!hdl || hdl == from)) + return -EINVAL; + + if (hdl->error) + return hdl->error; + + WARN_ON(hdl->lock != &hdl->_lock); + + mutex_lock(from->lock); + list_for_each_entry(ref, &from->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl_ref *new_ref; + + /* Skip refs inherited from other devices */ + if (ref->from_other_dev) + continue; + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); + if (err) + break; + } + mutex_unlock(from->lock); + return err; +} + +static void v4l2_ctrl_request_queue(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + + mutex_lock(main_hdl->lock); + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); + hdl->request_is_queued = true; + mutex_unlock(main_hdl->lock); +} + +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + + mutex_lock(main_hdl->lock); + list_del_init(&hdl->requests); + if (hdl->request_is_queued) { + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + } + mutex_unlock(main_hdl->lock); +} + +static void v4l2_ctrl_request_release(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + v4l2_ctrl_handler_free(hdl); + kfree(hdl); +} + +static const struct media_request_object_ops req_ops = { + .queue = v4l2_ctrl_request_queue, + .unbind = v4l2_ctrl_request_unbind, + 
.release = v4l2_ctrl_request_release, +}; + +struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, + struct v4l2_ctrl_handler *parent) +{ + struct media_request_object *obj; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING && + req->state != MEDIA_REQUEST_STATE_QUEUED)) + return NULL; + + obj = media_request_object_find(req, &req_ops, parent); + if (obj) + return container_of(obj, struct v4l2_ctrl_handler, req_obj); + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find); + +struct v4l2_ctrl * +v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return (ref && ref->p_req_valid) ? ref->ctrl : NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find); + +static int v4l2_ctrl_request_bind(struct media_request *req, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *from) +{ + int ret; + + ret = v4l2_ctrl_request_clone(hdl, from); + + if (!ret) { + ret = media_request_object_bind(req, &req_ops, + from, false, &hdl->req_obj); + if (!ret) { + mutex_lock(from->lock); + list_add_tail(&hdl->requests, &from->requests); + mutex_unlock(from->lock); + } + } + return ret; +} + +static struct media_request_object * +v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl, + struct media_request *req, bool set) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *new_hdl; + int ret; + + if (IS_ERR(req)) + return ERR_CAST(req); + + if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) + return ERR_PTR(-EBUSY); + + obj = media_request_object_find(req, &req_ops, hdl); + if (obj) + return obj; + /* + * If there are no controls in this completed request, + * then that can only happen if: + * + * 1) no controls were present in the queued request, and + * 2) v4l2_ctrl_request_complete() could not allocate a + * control handler object to store the completed state in. 
+ * + * So return ENOMEM to indicate that there was an out-of-memory + * error. + */ + if (!set) + return ERR_PTR(-ENOMEM); + + new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL); + if (!new_hdl) + return ERR_PTR(-ENOMEM); + + obj = &new_hdl->req_obj; + ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, new_hdl, hdl); + if (ret) { + v4l2_ctrl_handler_free(new_hdl); + kfree(new_hdl); + return ERR_PTR(ret); + } + + media_request_object_get(obj); + return obj; +} + +int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (!mdev || cs->request_fd < 0) + return -EINVAL; + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) + return PTR_ERR(req); + + if (req->state != MEDIA_REQUEST_STATE_COMPLETE) { + media_request_put(req); + return -EACCES; + } + + ret = media_request_lock_for_access(req); + if (ret) { + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, false); + if (IS_ERR(obj)) { + media_request_unlock_for_access(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev); + + media_request_unlock_for_access(req); + media_request_object_put(obj); + media_request_put(req); + return ret; +} + +int try_set_ext_ctrls_request(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (!mdev) { + dprintk(vdev, "%s: missing media device\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + if (cs->request_fd < 0) { + dprintk(vdev, "%s: invalid request fd 
%d\n", + video_device_node_name(vdev), cs->request_fd); + return -EINVAL; + } + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) { + dprintk(vdev, "%s: cannot find request fd %d\n", + video_device_node_name(vdev), cs->request_fd); + return PTR_ERR(req); + } + + ret = media_request_lock_for_update(req); + if (ret) { + dprintk(vdev, "%s: cannot lock request fd %d\n", + video_device_node_name(vdev), cs->request_fd); + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, set); + if (IS_ERR(obj)) { + dprintk(vdev, + "%s: cannot find request object for request fd %d\n", + video_device_node_name(vdev), + cs->request_fd); + media_request_unlock_for_update(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); + if (ret) + dprintk(vdev, + "%s: try_set_ext_ctrls_common failed (%d)\n", + video_device_node_name(vdev), ret); + + media_request_unlock_for_update(req); + media_request_object_put(obj); + media_request_put(req); + + return ret; +} + +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + /* + * Note that it is valid if nothing was found. It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. 
+ */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) { + int ret; + + /* Create a new request so the driver can return controls */ + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) + return; + + ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, hdl, main_hdl); + if (ret) { + v4l2_ctrl_handler_free(hdl); + kfree(hdl); + return; + } + hdl->request_is_queued = true; + obj = media_request_object_find(req, &req_ops, main_hdl); + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + unsigned int i; + + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + call_op(master, g_volatile_ctrl); + new_to_req(ref); + v4l2_ctrl_unlock(master); + continue; + } + if (ref->p_req_valid) + continue; + + /* Copy the current control value into the request */ + v4l2_ctrl_lock(ctrl); + cur_to_req(ref); + v4l2_ctrl_unlock(ctrl); + } + + mutex_lock(main_hdl->lock); + WARN_ON(!hdl->request_is_queued); + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + mutex_unlock(main_hdl->lock); + media_request_object_complete(obj); + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_complete); + +int v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + int ret = 0; + + if (!req || !main_hdl) + return 0; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) + return -EBUSY; + + /* + * Note that it is valid if nothing was found. 
It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. + */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return 0; + if (obj->completed) { + media_request_object_put(obj); + return -EBUSY; + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) + ref->req_done = false; + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + bool have_new_data = false; + int i; + + /* + * Skip if this control was already handled by a cluster. + * Skip button controls and read-only controls. + */ + if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + v4l2_ctrl_lock(master); + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + if (r->p_req_valid) { + have_new_data = true; + break; + } + } + } + if (!have_new_data) { + v4l2_ctrl_unlock(master); + continue; + } + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + ret = req_to_new(r); + if (ret) { + v4l2_ctrl_unlock(master); + goto error; + } + master->cluster[i]->is_new = 1; + r->req_done = true; + } + } + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. + * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + s32 new_auto_val = *master->p_new.p_s32; + + /* + * If the new value == the manual value, then copy + * the current volatile values. 
+ */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + ret = try_or_set_cluster(NULL, master, true, 0); + v4l2_ctrl_unlock(master); + + if (ret) + break; + } + +error: + media_request_object_put(obj); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_request_setup); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-dev.c b/6.12.0/drivers/media/v4l2-core/v4l2-dev.c new file mode 100644 index 0000000..3d7711c --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-dev.c @@ -0,0 +1,1233 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video capture interface for Linux version 2 + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * Authors: Alan Cox, (version 1) + * Mauro Carvalho Chehab (version 2) + * + * Fixes: 20000516 Claudio Matsuoka + * - Added procfs support + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define VIDEO_NUM_DEVICES 256 +#define VIDEO_NAME "video4linux" + +#define dprintk(fmt, arg...) 
do { \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), \ + __func__, ##arg); \ +} while (0) + +/* + * sysfs stuff + */ + +static ssize_t index_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%i\n", vdev->index); +} +static DEVICE_ATTR_RO(index); + +static ssize_t dev_debug_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%i\n", vdev->dev_debug); +} + +static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct video_device *vdev = to_video_device(cd); + int res = 0; + u16 value; + + res = kstrtou16(buf, 0, &value); + if (res) + return res; + + vdev->dev_debug = value; + return len; +} +static DEVICE_ATTR_RW(dev_debug); + +static ssize_t name_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name); +} +static DEVICE_ATTR_RO(name); + +static struct attribute *video_device_attrs[] = { + &dev_attr_name.attr, + &dev_attr_dev_debug.attr, + &dev_attr_index.attr, + NULL, +}; +ATTRIBUTE_GROUPS(video_device); + +/* + * Active devices + */ +static struct video_device *video_devices[VIDEO_NUM_DEVICES]; +static DEFINE_MUTEX(videodev_lock); +static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES); + +/* Device node utility functions */ + +/* Note: these utility functions all assume that vfl_type is in the range + [0, VFL_TYPE_MAX-1]. */ + +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES +/* Return the bitmap corresponding to vfl_type. 
*/ +static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type) +{ + /* Any types not assigned to fixed minor ranges must be mapped to + one single bitmap for the purposes of finding a free node number + since all those unassigned types use the same minor range. */ + int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type; + + return devnode_nums[idx]; +} +#else +/* Return the bitmap corresponding to vfl_type. */ +static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type) +{ + return devnode_nums[vfl_type]; +} +#endif + +/* Mark device node number vdev->num as used */ +static inline void devnode_set(struct video_device *vdev) +{ + set_bit(vdev->num, devnode_bits(vdev->vfl_type)); +} + +/* Mark device node number vdev->num as unused */ +static inline void devnode_clear(struct video_device *vdev) +{ + clear_bit(vdev->num, devnode_bits(vdev->vfl_type)); +} + +/* Try to find a free device node number in the range [from, to> */ +static inline int devnode_find(struct video_device *vdev, int from, int to) +{ + return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from); +} + +struct video_device *video_device_alloc(void) +{ + return kzalloc(sizeof(struct video_device), GFP_KERNEL); +} +EXPORT_SYMBOL(video_device_alloc); + +void video_device_release(struct video_device *vdev) +{ + kfree(vdev); +} +EXPORT_SYMBOL(video_device_release); + +void video_device_release_empty(struct video_device *vdev) +{ + /* Do nothing */ + /* Only valid when the video_device struct is a static. */ +} +EXPORT_SYMBOL(video_device_release_empty); + +static inline void video_get(struct video_device *vdev) +{ + get_device(&vdev->dev); +} + +static inline void video_put(struct video_device *vdev) +{ + put_device(&vdev->dev); +} + +/* Called when the last user of the video device exits. 
*/ +static void v4l2_device_release(struct device *cd) +{ + struct video_device *vdev = to_video_device(cd); + struct v4l2_device *v4l2_dev = vdev->v4l2_dev; + + mutex_lock(&videodev_lock); + if (WARN_ON(video_devices[vdev->minor] != vdev)) { + /* should not happen */ + mutex_unlock(&videodev_lock); + return; + } + + /* Free up this device for reuse */ + video_devices[vdev->minor] = NULL; + + /* Delete the cdev on this minor as well */ + cdev_del(vdev->cdev); + /* Just in case some driver tries to access this from + the release() callback. */ + vdev->cdev = NULL; + + /* Mark device node number as free */ + devnode_clear(vdev); + + mutex_unlock(&videodev_lock); + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) { + /* Remove interfaces and interface links */ + media_devnode_remove(vdev->intf_devnode); + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) + media_device_unregister_entity(&vdev->entity); + } +#endif + + /* Do not call v4l2_device_put if there is no release callback set. + * Drivers that have no v4l2_device release callback might free the + * v4l2_dev instance in the video_device release callback below, so we + * must perform this check here. + * + * TODO: In the long run all drivers that use v4l2_device should use the + * v4l2_device release callback. This check will then be unnecessary. + */ + if (v4l2_dev->release == NULL) + v4l2_dev = NULL; + + /* Release video_device and perform other + cleanups as needed. 
*/ + vdev->release(vdev); + + /* Decrease v4l2_device refcount */ + if (v4l2_dev) + v4l2_device_put(v4l2_dev); +} + +static struct class video_class = { + .name = VIDEO_NAME, + .dev_groups = video_device_groups, +}; + +struct video_device *video_devdata(struct file *file) +{ + return video_devices[iminor(file_inode(file))]; +} +EXPORT_SYMBOL(video_devdata); + + +/* Priority handling */ + +static inline bool prio_is_valid(enum v4l2_priority prio) +{ + return prio == V4L2_PRIORITY_BACKGROUND || + prio == V4L2_PRIORITY_INTERACTIVE || + prio == V4L2_PRIORITY_RECORD; +} + +void v4l2_prio_init(struct v4l2_prio_state *global) +{ + memset(global, 0, sizeof(*global)); +} +EXPORT_SYMBOL(v4l2_prio_init); + +int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, + enum v4l2_priority new) +{ + if (!prio_is_valid(new)) + return -EINVAL; + if (*local == new) + return 0; + + atomic_inc(&global->prios[new]); + if (prio_is_valid(*local)) + atomic_dec(&global->prios[*local]); + *local = new; + return 0; +} +EXPORT_SYMBOL(v4l2_prio_change); + +void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local) +{ + v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT); +} +EXPORT_SYMBOL(v4l2_prio_open); + +void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local) +{ + if (prio_is_valid(local)) + atomic_dec(&global->prios[local]); +} +EXPORT_SYMBOL(v4l2_prio_close); + +enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global) +{ + if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0) + return V4L2_PRIORITY_RECORD; + if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0) + return V4L2_PRIORITY_INTERACTIVE; + if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0) + return V4L2_PRIORITY_BACKGROUND; + return V4L2_PRIORITY_UNSET; +} +EXPORT_SYMBOL(v4l2_prio_max); + +int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local) +{ + return (local < v4l2_prio_max(global)) ? 
-EBUSY : 0; +} +EXPORT_SYMBOL(v4l2_prio_check); + + +static ssize_t v4l2_read(struct file *filp, char __user *buf, + size_t sz, loff_t *off) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->read) + return -EINVAL; + if (video_is_registered(vdev)) + ret = vdev->fops->read(filp, buf, sz, off); + if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) && + (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING)) + dprintk("%s: read: %zd (%d)\n", + video_device_node_name(vdev), sz, ret); + return ret; +} + +static ssize_t v4l2_write(struct file *filp, const char __user *buf, + size_t sz, loff_t *off) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->write) + return -EINVAL; + if (video_is_registered(vdev)) + ret = vdev->fops->write(filp, buf, sz, off); + if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) && + (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING)) + dprintk("%s: write: %zd (%d)\n", + video_device_node_name(vdev), sz, ret); + return ret; +} + +static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll) +{ + struct video_device *vdev = video_devdata(filp); + __poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI; + + if (video_is_registered(vdev)) { + if (!vdev->fops->poll) + res = DEFAULT_POLLMASK; + else + res = vdev->fops->poll(filp, poll); + } + if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL) + dprintk("%s: poll: %08x %08x\n", + video_device_node_name(vdev), res, + poll_requested_events(poll)); + return res; +} + +static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (vdev->fops->unlocked_ioctl) { + if (video_is_registered(vdev)) + ret = vdev->fops->unlocked_ioctl(filp, cmd, arg); + } else + ret = -ENOTTY; + + return ret; +} + +#ifdef CONFIG_MMU +#define v4l2_get_unmapped_area NULL +#else +static unsigned long v4l2_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, 
unsigned long pgoff, + unsigned long flags) +{ + struct video_device *vdev = video_devdata(filp); + int ret; + + if (!vdev->fops->get_unmapped_area) + return -ENOSYS; + if (!video_is_registered(vdev)) + return -ENODEV; + ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags); + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: get_unmapped_area (%d)\n", + video_device_node_name(vdev), ret); + return ret; +} +#endif + +static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->mmap) + return -ENODEV; + if (video_is_registered(vdev)) + ret = vdev->fops->mmap(filp, vm); + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: mmap (%d)\n", + video_device_node_name(vdev), ret); + return ret; +} + +/* Override for the open function */ +static int v4l2_open(struct inode *inode, struct file *filp) +{ + struct video_device *vdev; + int ret = 0; + + /* Check if the video device is available */ + mutex_lock(&videodev_lock); + vdev = video_devdata(filp); + /* return ENODEV if the video device has already been removed. */ + if (vdev == NULL || !video_is_registered(vdev)) { + mutex_unlock(&videodev_lock); + return -ENODEV; + } + /* and increase the device refcount */ + video_get(vdev); + mutex_unlock(&videodev_lock); + if (vdev->fops->open) { + if (video_is_registered(vdev)) + ret = vdev->fops->open(filp); + else + ret = -ENODEV; + } + + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: open (%d)\n", + video_device_node_name(vdev), ret); + /* decrease the refcount in case of an error */ + if (ret) + video_put(vdev); + return ret; +} + +/* Override for the release function */ +static int v4l2_release(struct inode *inode, struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + int ret = 0; + + /* + * We need to serialize the release() with queueing new requests. 
+ * The release() may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. + */ + if (vdev->fops->release) { + if (v4l2_device_supports_requests(vdev->v4l2_dev)) { + mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex); + ret = vdev->fops->release(filp); + mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex); + } else { + ret = vdev->fops->release(filp); + } + } + + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: release\n", + video_device_node_name(vdev)); + + /* decrease the refcount unconditionally since the release() + return value is ignored. */ + video_put(vdev); + return ret; +} + +static const struct file_operations v4l2_fops = { + .owner = THIS_MODULE, + .read = v4l2_read, + .write = v4l2_write, + .open = v4l2_open, + .get_unmapped_area = v4l2_get_unmapped_area, + .mmap = v4l2_mmap, + .unlocked_ioctl = v4l2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = v4l2_compat_ioctl32, +#endif + .release = v4l2_release, + .poll = v4l2_poll, +}; + +/** + * get_index - assign stream index number based on v4l2_dev + * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned + * + * Note that when this is called the new device has not yet been registered + * in the video_device array, but it was able to obtain a minor number. + * + * This means that we can always obtain a free stream index number since + * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots in + * use of the video_device array. + * + * Returns a free index number. + */ +static int get_index(struct video_device *vdev) +{ + /* This can be static since this function is called with the global + videodev_lock held. 
*/ + static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES); + int i; + + bitmap_zero(used, VIDEO_NUM_DEVICES); + + for (i = 0; i < VIDEO_NUM_DEVICES; i++) { + if (video_devices[i] != NULL && + video_devices[i]->v4l2_dev == vdev->v4l2_dev) { + __set_bit(video_devices[i]->index, used); + } + } + + return find_first_zero_bit(used, VIDEO_NUM_DEVICES); +} + +#define SET_VALID_IOCTL(ops, cmd, op) \ + do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0) + +/* This determines which ioctls are actually implemented in the driver. + It's a one-time thing which simplifies video_ioctl2 as it can just do + a bit test. + + Note that drivers can override this by setting bits to 1 in + vdev->valid_ioctls. If an ioctl is marked as 1 when this function is + called, then that ioctl will actually be marked as unimplemented. + + It does that by first setting up the local valid_ioctls bitmap, and + at the end do a: + + vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls) + */ +static void determine_valid_ioctls(struct video_device *vdev) +{ + const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; + const u32 meta_caps = V4L2_CAP_META_CAPTURE | + V4L2_CAP_META_OUTPUT; + DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO && + (vdev->device_caps & vid_caps); + bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI; + bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO; + bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR; + bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH; + bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO && + (vdev->device_caps & meta_caps); + bool is_rx = vdev->vfl_dir != VFL_DIR_TX; + bool is_tx = vdev->vfl_dir != VFL_DIR_RX; + bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC; + bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING; + 
bool is_edid = vdev->device_caps & V4L2_CAP_EDID; + + bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE); + + /* vfl_type and vfl_dir independent ioctls */ + + SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap); + __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls); + + /* Note: the control handler can also be passed through the filehandle, + and that can't be tested here. If the bit for these control ioctls + is set, then the ioctl is valid. But if it is 0, then it can still + be valid if the filehandle passed the control handler. */ + if (vdev->ctrl_handler || ops->vidioc_queryctrl) + __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) + __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_querymenu) + __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls); + if (!is_tch) { + SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency); + SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency); + } + SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status); +#ifdef CONFIG_VIDEO_ADV_DEBUG + __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls); +#endif + /* yes, really vidioc_subscribe_event */ + SET_VALID_IOCTL(ops, 
VIDIOC_DQEVENT, vidioc_subscribe_event); + SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event); + SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event); + if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator) + __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls); + + if (is_vid) { + /* video specific ioctls */ + if ((is_rx && (ops->vidioc_enum_fmt_vid_cap || + ops->vidioc_enum_fmt_vid_overlay)) || + (is_tx && ops->vidioc_enum_fmt_vid_out)) + __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_g_fmt_vid_cap || + ops->vidioc_g_fmt_vid_cap_mplane || + ops->vidioc_g_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_g_fmt_vid_out || + ops->vidioc_g_fmt_vid_out_mplane || + ops->vidioc_g_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_s_fmt_vid_cap || + ops->vidioc_s_fmt_vid_cap_mplane || + ops->vidioc_s_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_s_fmt_vid_out || + ops->vidioc_s_fmt_vid_out_mplane || + ops->vidioc_s_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_try_fmt_vid_cap || + ops->vidioc_try_fmt_vid_cap_mplane || + ops->vidioc_try_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_try_fmt_vid_out || + ops->vidioc_try_fmt_vid_out_mplane || + ops->vidioc_try_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay); + SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf); + SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf); + SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp); + SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp); + SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index); + SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, 
vidioc_decoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); + if (ops->vidioc_g_selection && + !test_bit(_IOC_NR(VIDIOC_G_SELECTION), vdev->valid_ioctls)) { + __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls); + } + if (ops->vidioc_s_selection && + !test_bit(_IOC_NR(VIDIOC_S_SELECTION), vdev->valid_ioctls)) + __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection); + SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection); + } + if (is_meta && is_rx) { + /* metadata capture specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap); + } else if (is_meta && is_tx) { + /* metadata output specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out); + } + if (is_vbi) { + /* vbi specific ioctls */ + if ((is_rx && (ops->vidioc_g_fmt_vbi_cap || + ops->vidioc_g_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_g_fmt_vbi_out || + ops->vidioc_g_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_s_fmt_vbi_cap || + ops->vidioc_s_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_s_fmt_vbi_out || + ops->vidioc_s_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_try_fmt_vbi_cap || + ops->vidioc_try_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_try_fmt_vbi_out || + 
ops->vidioc_try_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap); + } else if (is_tch) { + /* touch specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm); + SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); + } else if (is_sdr && is_rx) { + /* SDR receiver specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap); + } else if (is_sdr && is_tx) { + /* SDR transmitter specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out); + } + + if (has_streaming) { + /* ioctls valid for streaming I/O */ + SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs); + SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf); + SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf); + SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf); + SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf); + SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs); + SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, 
vidioc_prepare_buf); + SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon); + SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff); + /* VIDIOC_CREATE_BUFS support is mandatory to enable VIDIOC_REMOVE_BUFS */ + if (ops->vidioc_create_bufs) + SET_VALID_IOCTL(ops, VIDIOC_REMOVE_BUFS, vidioc_remove_bufs); + } + + if (is_vid || is_vbi || is_meta) { + /* ioctls valid for video, vbi and metadata */ + if (ops->vidioc_s_std) + __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std); + SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std); + if (is_rx) { + SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd); + if (is_io_mc) { + __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls); + } else { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + } + SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio); + SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio); + SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio); + SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + if (is_tx) { + if (is_io_mc) { + __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls); + } else { + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + } + SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout); + SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout); + SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout); + } + if (ops->vidioc_g_parm || ops->vidioc_g_std) + __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls); + 
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); + SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + } + if (is_tx && (is_radio || is_sdr)) { + /* radio transmitter only ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator); + SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator); + } + if (is_rx && !is_tch) { + /* receiver only ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner); + SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner); + SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek); + } + if (is_edid) { + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + if (is_tx) { + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + } + if (is_rx) { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + } + + bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls, + BASE_VIDIOC_PRIVATE); +} + +static int video_register_media_controller(struct video_device *vdev) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + u32 intf_type; + int ret; + + /* Memory-to-memory devices are more complex and use + * their own function to register its mc entities. 
+ */ + if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M) + return 0; + + vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE; + vdev->entity.function = MEDIA_ENT_F_UNKNOWN; + + switch (vdev->vfl_type) { + case VFL_TYPE_VIDEO: + intf_type = MEDIA_INTF_T_V4L_VIDEO; + vdev->entity.function = MEDIA_ENT_F_IO_V4L; + break; + case VFL_TYPE_VBI: + intf_type = MEDIA_INTF_T_V4L_VBI; + vdev->entity.function = MEDIA_ENT_F_IO_VBI; + break; + case VFL_TYPE_SDR: + intf_type = MEDIA_INTF_T_V4L_SWRADIO; + vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO; + break; + case VFL_TYPE_TOUCH: + intf_type = MEDIA_INTF_T_V4L_TOUCH; + vdev->entity.function = MEDIA_ENT_F_IO_V4L; + break; + case VFL_TYPE_RADIO: + intf_type = MEDIA_INTF_T_V4L_RADIO; + /* + * Radio doesn't have an entity at the V4L2 side to represent + * radio input or output. Instead, the audio input/output goes + * via either physical wires or ALSA. + */ + break; + case VFL_TYPE_SUBDEV: + intf_type = MEDIA_INTF_T_V4L_SUBDEV; + /* Entity will be created via v4l2_device_register_subdev() */ + break; + default: + return 0; + } + + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { + vdev->entity.name = vdev->name; + + /* Needed just for backward compatibility with legacy MC API */ + vdev->entity.info.dev.major = VIDEO_MAJOR; + vdev->entity.info.dev.minor = vdev->minor; + + ret = media_device_register_entity(vdev->v4l2_dev->mdev, + &vdev->entity); + if (ret < 0) { + pr_warn("%s: media_device_register_entity failed\n", + __func__); + return ret; + } + } + + vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev, + intf_type, + 0, VIDEO_MAJOR, + vdev->minor); + if (!vdev->intf_devnode) { + media_device_unregister_entity(&vdev->entity); + return -ENOMEM; + } + + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { + struct media_link *link; + + link = media_create_intf_link(&vdev->entity, + &vdev->intf_devnode->intf, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (!link) { + 
media_devnode_remove(vdev->intf_devnode); + media_device_unregister_entity(&vdev->entity); + return -ENOMEM; + } + } + + /* FIXME: how to create the other interface links? */ + +#endif + return 0; +} + +int __video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr, int warn_if_nr_in_use, + struct module *owner) +{ + int i = 0; + int ret; + int minor_offset = 0; + int minor_cnt = VIDEO_NUM_DEVICES; + const char *name_base; + + /* A minor value of -1 marks this video device as never + having been registered */ + vdev->minor = -1; + + /* the release callback MUST be present */ + if (WARN_ON(!vdev->release)) + return -EINVAL; + /* the v4l2_dev pointer MUST be present */ + if (WARN_ON(!vdev->v4l2_dev)) + return -EINVAL; + /* the device_caps field MUST be set for all but subdevs */ + if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps)) + return -EINVAL; + + /* v4l2_fh support */ + spin_lock_init(&vdev->fh_lock); + INIT_LIST_HEAD(&vdev->fh_list); + + /* Part 1: check device type */ + switch (type) { + case VFL_TYPE_VIDEO: + name_base = "video"; + break; + case VFL_TYPE_VBI: + name_base = "vbi"; + break; + case VFL_TYPE_RADIO: + name_base = "radio"; + break; + case VFL_TYPE_SUBDEV: + name_base = "v4l-subdev"; + break; + case VFL_TYPE_SDR: + /* Use device name 'swradio' because 'sdr' was already taken. */ + name_base = "swradio"; + break; + case VFL_TYPE_TOUCH: + name_base = "v4l-touch"; + break; + default: + pr_err("%s called with unknown type: %d\n", + __func__, type); + return -EINVAL; + } + + vdev->vfl_type = type; + vdev->cdev = NULL; + if (vdev->dev_parent == NULL) + vdev->dev_parent = vdev->v4l2_dev->dev; + if (vdev->ctrl_handler == NULL) + vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler; + /* If the prio state pointer is NULL, then use the v4l2_device + prio state. */ + if (vdev->prio == NULL) + vdev->prio = &vdev->v4l2_dev->prio; + + /* Part 2: find a free minor, device node number and device index. 
*/ +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES + /* Keep the ranges for the first four types for historical + * reasons. + * Newer devices (not yet in place) should use the range + * of 128-191 and just pick the first free minor there + * (new style). */ + switch (type) { + case VFL_TYPE_VIDEO: + minor_offset = 0; + minor_cnt = 64; + break; + case VFL_TYPE_RADIO: + minor_offset = 64; + minor_cnt = 64; + break; + case VFL_TYPE_VBI: + minor_offset = 224; + minor_cnt = 32; + break; + default: + minor_offset = 128; + minor_cnt = 64; + break; + } +#endif + + /* Pick a device node number */ + mutex_lock(&videodev_lock); + nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt); + if (nr == minor_cnt) + nr = devnode_find(vdev, 0, minor_cnt); + if (nr == minor_cnt) { + pr_err("could not get a free device node number\n"); + mutex_unlock(&videodev_lock); + return -ENFILE; + } +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES + /* 1-on-1 mapping of device node number to minor number */ + i = nr; +#else + /* The device node number and minor numbers are independent, so + we just find the first free minor number. 
*/ + for (i = 0; i < VIDEO_NUM_DEVICES; i++) + if (video_devices[i] == NULL) + break; + if (i == VIDEO_NUM_DEVICES) { + mutex_unlock(&videodev_lock); + pr_err("could not get a free minor\n"); + return -ENFILE; + } +#endif + vdev->minor = i + minor_offset; + vdev->num = nr; + + /* Should not happen since we thought this minor was free */ + if (WARN_ON(video_devices[vdev->minor])) { + mutex_unlock(&videodev_lock); + pr_err("video_device not empty!\n"); + return -ENFILE; + } + devnode_set(vdev); + vdev->index = get_index(vdev); + video_devices[vdev->minor] = vdev; + mutex_unlock(&videodev_lock); + + if (vdev->ioctl_ops) + determine_valid_ioctls(vdev); + + /* Part 3: Initialize the character device */ + vdev->cdev = cdev_alloc(); + if (vdev->cdev == NULL) { + ret = -ENOMEM; + goto cleanup; + } + vdev->cdev->ops = &v4l2_fops; + vdev->cdev->owner = owner; + ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1); + if (ret < 0) { + pr_err("%s: cdev_add failed\n", __func__); + kfree(vdev->cdev); + vdev->cdev = NULL; + goto cleanup; + } + + /* Part 4: register the device with sysfs */ + vdev->dev.class = &video_class; + vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); + vdev->dev.parent = vdev->dev_parent; + dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); + mutex_lock(&videodev_lock); + ret = device_register(&vdev->dev); + if (ret < 0) { + mutex_unlock(&videodev_lock); + pr_err("%s: device_register failed\n", __func__); + goto cleanup; + } + /* Register the release callback that will be called when the last + reference to the device goes away. */ + vdev->dev.release = v4l2_device_release; + + if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) + pr_warn("%s: requested %s%d, got %s\n", __func__, + name_base, nr, video_device_node_name(vdev)); + + /* Increase v4l2_device refcount */ + v4l2_device_get(vdev->v4l2_dev); + + /* Part 5: Register the entity. */ + ret = video_register_media_controller(vdev); + + /* Part 6: Activate this minor. 
The char device can now be used. */ + set_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); + + return 0; + +cleanup: + mutex_lock(&videodev_lock); + if (vdev->cdev) + cdev_del(vdev->cdev); + video_devices[vdev->minor] = NULL; + devnode_clear(vdev); + mutex_unlock(&videodev_lock); + /* Mark this video device as never having been registered. */ + vdev->minor = -1; + return ret; +} +EXPORT_SYMBOL(__video_register_device); + +/** + * video_unregister_device - unregister a video4linux device + * @vdev: the device to unregister + * + * This unregisters the passed device. Future open calls will + * be met with errors. + */ +void video_unregister_device(struct video_device *vdev) +{ + /* Check if vdev was ever registered at all */ + if (!vdev || !video_is_registered(vdev)) + return; + + mutex_lock(&videodev_lock); + /* This must be in a critical section to prevent a race with v4l2_open. + * Once this bit has been cleared video_get may never be called again. + */ + clear_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + v4l2_event_wake_all(vdev); + device_unregister(&vdev->dev); +} +EXPORT_SYMBOL(video_unregister_device); + +#if defined(CONFIG_MEDIA_CONTROLLER) + +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_start); + +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return __media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_start); + +void video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = 
&vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_stop); + +void __video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return __media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_stop); + +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_alloc_start(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start); + +struct media_pipeline *video_device_pipeline(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return NULL; + + return media_pad_pipeline(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/* + * Initialise video for linux + */ +static int __init videodev_init(void) +{ + dev_t dev = MKDEV(VIDEO_MAJOR, 0); + int ret; + + pr_info("Linux video capture interface: v2.00\n"); + ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME); + if (ret < 0) { + pr_warn("videodev: unable to get major %d\n", + VIDEO_MAJOR); + return ret; + } + + ret = class_register(&video_class); + if (ret < 0) { + unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); + pr_warn("video_dev: class_register failed\n"); + return -EIO; + } + + return 0; +} + +static void __exit videodev_exit(void) +{ + dev_t dev = MKDEV(VIDEO_MAJOR, 0); + + class_unregister(&video_class); + unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); +} + +subsys_initcall(videodev_init); +module_exit(videodev_exit) + +MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, Justin Schoeman, Gerd Knorr"); +MODULE_DESCRIPTION("Video4Linux2 core driver"); +MODULE_LICENSE("GPL"); 
+MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-device.c b/6.12.0/drivers/media/v4l2-core/v4l2-device.c new file mode 100644 index 0000000..5e53745 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-device.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + V4L2 device support. + + Copyright (C) 2008 Hans Verkuil + + */ + +#include +#include +#include +#include +#include +#include +#include + +int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) +{ + if (v4l2_dev == NULL) + return -EINVAL; + + INIT_LIST_HEAD(&v4l2_dev->subdevs); + spin_lock_init(&v4l2_dev->lock); + v4l2_prio_init(&v4l2_dev->prio); + kref_init(&v4l2_dev->ref); + get_device(dev); + v4l2_dev->dev = dev; + if (dev == NULL) { + /* If dev == NULL, then name must be filled in by the caller */ + if (WARN_ON(!v4l2_dev->name[0])) + return -EINVAL; + return 0; + } + + /* Set name to driver name + device name if it is empty. */ + if (!v4l2_dev->name[0]) + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s", + dev->driver->name, dev_name(dev)); + if (!dev_get_drvdata(dev)) + dev_set_drvdata(dev, v4l2_dev); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_device_register); + +static void v4l2_device_release(struct kref *ref) +{ + struct v4l2_device *v4l2_dev = + container_of(ref, struct v4l2_device, ref); + + if (v4l2_dev->release) + v4l2_dev->release(v4l2_dev); +} + +int v4l2_device_put(struct v4l2_device *v4l2_dev) +{ + return kref_put(&v4l2_dev->ref, v4l2_device_release); +} +EXPORT_SYMBOL_GPL(v4l2_device_put); + +int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, + atomic_t *instance) +{ + int num = atomic_inc_return(instance) - 1; + int len = strlen(basename); + + if (basename[len - 1] >= '0' && basename[len - 1] <= '9') + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), + "%s-%d", basename, num); + else + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), + "%s%d", basename, num); + return num; +} 
+EXPORT_SYMBOL_GPL(v4l2_device_set_name); + +void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) +{ + if (v4l2_dev->dev == NULL) + return; + + if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) + dev_set_drvdata(v4l2_dev->dev, NULL); + put_device(v4l2_dev->dev); + v4l2_dev->dev = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_device_disconnect); + +void v4l2_device_unregister(struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd, *next; + + /* Just return if v4l2_dev is NULL or if it was already + * unregistered before. */ + if (v4l2_dev == NULL || !v4l2_dev->name[0]) + return; + v4l2_device_disconnect(v4l2_dev); + + /* Unregister subdevs */ + list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) { + v4l2_device_unregister_subdev(sd); + if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) + v4l2_i2c_subdev_unregister(sd); + else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) + v4l2_spi_subdev_unregister(sd); + } + /* Mark as unregistered, thus preventing duplicate unregistrations */ + v4l2_dev->name[0] = '\0'; +} +EXPORT_SYMBOL_GPL(v4l2_device_unregister); + +int __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, struct module *module) +{ + int err; + + /* Check for valid input */ + if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0]) + return -EINVAL; + + /* + * The reason to acquire the module here is to avoid unloading + * a module of sub-device which is registered to a media + * device. To make it possible to unload modules for media + * devices that also register sub-devices, do not + * try_module_get() such sub-device owners. 
+ */ + sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver && + module == v4l2_dev->dev->driver->owner; + + if (!sd->owner_v4l2_dev && !try_module_get(module)) + return -ENODEV; + + sd->v4l2_dev = v4l2_dev; + /* This just returns 0 if either of the two args is NULL */ + err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, + NULL, true); + if (err) + goto error_module; + +#if defined(CONFIG_MEDIA_CONTROLLER) + /* Register the entity. */ + if (v4l2_dev->mdev) { + err = media_device_register_entity(v4l2_dev->mdev, &sd->entity); + if (err < 0) + goto error_module; + } +#endif + + if (sd->internal_ops && sd->internal_ops->registered) { + err = sd->internal_ops->registered(sd); + if (err) + goto error_unregister; + } + + sd->owner = module; + + spin_lock(&v4l2_dev->lock); + list_add_tail(&sd->list, &v4l2_dev->subdevs); + spin_unlock(&v4l2_dev->lock); + + return 0; + +error_unregister: +#if defined(CONFIG_MEDIA_CONTROLLER) + media_device_unregister_entity(&sd->entity); +#endif +error_module: + if (!sd->owner_v4l2_dev) + module_put(sd->owner); + sd->v4l2_dev = NULL; + return err; +} +EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev); + +static void v4l2_subdev_release(struct v4l2_subdev *sd) +{ + struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL; + + if (sd->internal_ops && sd->internal_ops->release) + sd->internal_ops->release(sd); + sd->devnode = NULL; + module_put(owner); +} + +static void v4l2_device_release_subdev_node(struct video_device *vdev) +{ + v4l2_subdev_release(video_get_drvdata(vdev)); + kfree(vdev); +} + +int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only) +{ + struct video_device *vdev; + struct v4l2_subdev *sd; + int err; + + /* Register a device node for every subdev marked with the + * V4L2_SUBDEV_FL_HAS_DEVNODE flag. 
+ */ + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) + continue; + + if (sd->devnode) + continue; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + err = -ENOMEM; + goto clean_up; + } + + video_set_drvdata(vdev, sd); + strscpy(vdev->name, sd->name, sizeof(vdev->name)); + vdev->dev_parent = sd->dev; + vdev->v4l2_dev = v4l2_dev; + vdev->fops = &v4l2_subdev_fops; + vdev->release = v4l2_device_release_subdev_node; + vdev->ctrl_handler = sd->ctrl_handler; + if (read_only) + set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); + sd->devnode = vdev; + err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1, + sd->owner); + if (err < 0) { + sd->devnode = NULL; + kfree(vdev); + goto clean_up; + } +#if defined(CONFIG_MEDIA_CONTROLLER) + sd->entity.info.dev.major = VIDEO_MAJOR; + sd->entity.info.dev.minor = vdev->minor; + + /* Interface is created by __video_register_device() */ + if (vdev->v4l2_dev->mdev) { + struct media_link *link; + + link = media_create_intf_link(&sd->entity, + &vdev->intf_devnode->intf, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (!link) { + err = -ENOMEM; + goto clean_up; + } + } +#endif + } + return 0; + +clean_up: + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (!sd->devnode) + break; + video_unregister_device(sd->devnode); + } + + return err; +} +EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes); + +void v4l2_device_unregister_subdev(struct v4l2_subdev *sd) +{ + struct v4l2_device *v4l2_dev; + + /* return if it isn't registered */ + if (sd == NULL || sd->v4l2_dev == NULL) + return; + + v4l2_dev = sd->v4l2_dev; + + spin_lock(&v4l2_dev->lock); + list_del(&sd->list); + spin_unlock(&v4l2_dev->lock); + + if (sd->internal_ops && sd->internal_ops->unregistered) + sd->internal_ops->unregistered(sd); + sd->v4l2_dev = NULL; + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (v4l2_dev->mdev) { + /* + * No need to explicitly remove links, as both pads and + * links are 
removed by the function below, in the right order + */ + media_device_unregister_entity(&sd->entity); + } +#endif + if (sd->devnode) + video_unregister_device(sd->devnode); + else + v4l2_subdev_release(sd); +} +EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-dv-timings.c b/6.12.0/drivers/media/v4l2-core/v4l2-dv-timings.c new file mode 100644 index 0000000..4d05873 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -0,0 +1,1168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-dv-timings - dv-timings helper functions + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Hans Verkuil"); +MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions"); +MODULE_LICENSE("GPL"); + +const struct v4l2_dv_timings v4l2_dv_timings_presets[] = { + V4L2_DV_BT_CEA_640X480P59_94, + V4L2_DV_BT_CEA_720X480I59_94, + V4L2_DV_BT_CEA_720X480P59_94, + V4L2_DV_BT_CEA_720X576I50, + V4L2_DV_BT_CEA_720X576P50, + V4L2_DV_BT_CEA_1280X720P24, + V4L2_DV_BT_CEA_1280X720P25, + V4L2_DV_BT_CEA_1280X720P30, + V4L2_DV_BT_CEA_1280X720P50, + V4L2_DV_BT_CEA_1280X720P60, + V4L2_DV_BT_CEA_1920X1080P24, + V4L2_DV_BT_CEA_1920X1080P25, + V4L2_DV_BT_CEA_1920X1080P30, + V4L2_DV_BT_CEA_1920X1080I50, + V4L2_DV_BT_CEA_1920X1080P50, + V4L2_DV_BT_CEA_1920X1080I60, + V4L2_DV_BT_CEA_1920X1080P60, + V4L2_DV_BT_DMT_640X350P85, + V4L2_DV_BT_DMT_640X400P85, + V4L2_DV_BT_DMT_720X400P85, + V4L2_DV_BT_DMT_640X480P72, + V4L2_DV_BT_DMT_640X480P75, + V4L2_DV_BT_DMT_640X480P85, + V4L2_DV_BT_DMT_800X600P56, + V4L2_DV_BT_DMT_800X600P60, + V4L2_DV_BT_DMT_800X600P72, + V4L2_DV_BT_DMT_800X600P75, + V4L2_DV_BT_DMT_800X600P85, + V4L2_DV_BT_DMT_800X600P120_RB, + V4L2_DV_BT_DMT_848X480P60, + V4L2_DV_BT_DMT_1024X768I43, + V4L2_DV_BT_DMT_1024X768P60, + V4L2_DV_BT_DMT_1024X768P70, + 
V4L2_DV_BT_DMT_1024X768P75, + V4L2_DV_BT_DMT_1024X768P85, + V4L2_DV_BT_DMT_1024X768P120_RB, + V4L2_DV_BT_DMT_1152X864P75, + V4L2_DV_BT_DMT_1280X768P60_RB, + V4L2_DV_BT_DMT_1280X768P60, + V4L2_DV_BT_DMT_1280X768P75, + V4L2_DV_BT_DMT_1280X768P85, + V4L2_DV_BT_DMT_1280X768P120_RB, + V4L2_DV_BT_DMT_1280X800P60_RB, + V4L2_DV_BT_DMT_1280X800P60, + V4L2_DV_BT_DMT_1280X800P75, + V4L2_DV_BT_DMT_1280X800P85, + V4L2_DV_BT_DMT_1280X800P120_RB, + V4L2_DV_BT_DMT_1280X960P60, + V4L2_DV_BT_DMT_1280X960P85, + V4L2_DV_BT_DMT_1280X960P120_RB, + V4L2_DV_BT_DMT_1280X1024P60, + V4L2_DV_BT_DMT_1280X1024P75, + V4L2_DV_BT_DMT_1280X1024P85, + V4L2_DV_BT_DMT_1280X1024P120_RB, + V4L2_DV_BT_DMT_1360X768P60, + V4L2_DV_BT_DMT_1360X768P120_RB, + V4L2_DV_BT_DMT_1366X768P60, + V4L2_DV_BT_DMT_1366X768P60_RB, + V4L2_DV_BT_DMT_1400X1050P60_RB, + V4L2_DV_BT_DMT_1400X1050P60, + V4L2_DV_BT_DMT_1400X1050P75, + V4L2_DV_BT_DMT_1400X1050P85, + V4L2_DV_BT_DMT_1400X1050P120_RB, + V4L2_DV_BT_DMT_1440X900P60_RB, + V4L2_DV_BT_DMT_1440X900P60, + V4L2_DV_BT_DMT_1440X900P75, + V4L2_DV_BT_DMT_1440X900P85, + V4L2_DV_BT_DMT_1440X900P120_RB, + V4L2_DV_BT_DMT_1600X900P60_RB, + V4L2_DV_BT_DMT_1600X1200P60, + V4L2_DV_BT_DMT_1600X1200P65, + V4L2_DV_BT_DMT_1600X1200P70, + V4L2_DV_BT_DMT_1600X1200P75, + V4L2_DV_BT_DMT_1600X1200P85, + V4L2_DV_BT_DMT_1600X1200P120_RB, + V4L2_DV_BT_DMT_1680X1050P60_RB, + V4L2_DV_BT_DMT_1680X1050P60, + V4L2_DV_BT_DMT_1680X1050P75, + V4L2_DV_BT_DMT_1680X1050P85, + V4L2_DV_BT_DMT_1680X1050P120_RB, + V4L2_DV_BT_DMT_1792X1344P60, + V4L2_DV_BT_DMT_1792X1344P75, + V4L2_DV_BT_DMT_1792X1344P120_RB, + V4L2_DV_BT_DMT_1856X1392P60, + V4L2_DV_BT_DMT_1856X1392P75, + V4L2_DV_BT_DMT_1856X1392P120_RB, + V4L2_DV_BT_DMT_1920X1200P60_RB, + V4L2_DV_BT_DMT_1920X1200P60, + V4L2_DV_BT_DMT_1920X1200P75, + V4L2_DV_BT_DMT_1920X1200P85, + V4L2_DV_BT_DMT_1920X1200P120_RB, + V4L2_DV_BT_DMT_1920X1440P60, + V4L2_DV_BT_DMT_1920X1440P75, + V4L2_DV_BT_DMT_1920X1440P120_RB, + V4L2_DV_BT_DMT_2048X1152P60_RB, + 
V4L2_DV_BT_DMT_2560X1600P60_RB, + V4L2_DV_BT_DMT_2560X1600P60, + V4L2_DV_BT_DMT_2560X1600P75, + V4L2_DV_BT_DMT_2560X1600P85, + V4L2_DV_BT_DMT_2560X1600P120_RB, + V4L2_DV_BT_CEA_3840X2160P24, + V4L2_DV_BT_CEA_3840X2160P25, + V4L2_DV_BT_CEA_3840X2160P30, + V4L2_DV_BT_CEA_3840X2160P50, + V4L2_DV_BT_CEA_3840X2160P60, + V4L2_DV_BT_CEA_4096X2160P24, + V4L2_DV_BT_CEA_4096X2160P25, + V4L2_DV_BT_CEA_4096X2160P30, + V4L2_DV_BT_CEA_4096X2160P50, + V4L2_DV_BT_DMT_4096X2160P59_94_RB, + V4L2_DV_BT_CEA_4096X2160P60, + { } +}; +EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets); + +bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *dvcap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + const struct v4l2_bt_timings *bt = &t->bt; + const struct v4l2_bt_timings_cap *cap = &dvcap->bt; + u32 caps = cap->capabilities; + const u32 max_vert = 10240; + u32 max_hor = 3 * bt->width; + + if (t->type != V4L2_DV_BT_656_1120) + return false; + if (t->type != dvcap->type || + bt->height < cap->min_height || + bt->height > cap->max_height || + bt->width < cap->min_width || + bt->width > cap->max_width || + bt->pixelclock < cap->min_pixelclock || + bt->pixelclock > cap->max_pixelclock || + (!(caps & V4L2_DV_BT_CAP_CUSTOM) && + cap->standards && bt->standards && + !(bt->standards & cap->standards)) || + (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || + (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) + return false; + + /* sanity checks for the blanking timings */ + if (!bt->interlaced && + (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch)) + return false; + /* + * Some video receivers cannot properly separate the frontporch, + * backporch and sync values, and instead they only have the total + * blanking. That can be assigned to any of these three fields. + * So just check that none of these are way out of range. 
+ */ + if (bt->hfrontporch > max_hor || + bt->hsync > max_hor || bt->hbackporch > max_hor) + return false; + if (bt->vfrontporch > max_vert || + bt->vsync > max_vert || bt->vbackporch > max_vert) + return false; + if (bt->interlaced && (bt->il_vfrontporch > max_vert || + bt->il_vsync > max_vert || bt->il_vbackporch > max_vert)) + return false; + return fnc == NULL || fnc(t, fnc_handle); +} +EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings); + +int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + u32 i, idx; + + memset(t->reserved, 0, sizeof(t->reserved)); + for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, + fnc, fnc_handle) && + idx++ == t->index) { + t->timings = v4l2_dv_timings_presets[i]; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap); + +bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + unsigned pclock_delta, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + int i; + + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle)) + return false; + + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, + fnc, fnc_handle) && + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i, + pclock_delta, false)) { + u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS; + + *t = v4l2_dv_timings_presets[i]; + if (can_reduce_fps(&t->bt)) + t->bt.flags |= flags; + + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap); + +bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic) +{ + unsigned int i; + + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + const struct v4l2_bt_timings *bt = + &v4l2_dv_timings_presets[i].bt; + + if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) && + bt->cea861_vic == vic) { + *t = 
v4l2_dv_timings_presets[i]; + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic); + +/** + * v4l2_match_dv_timings - check if two timings match + * @t1: compare this v4l2_dv_timings struct... + * @t2: with this struct. + * @pclock_delta: the allowed pixelclock deviation. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. + * + * Compare t1 with t2 with a given margin of error for the pixelclock. + */ +bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1, + const struct v4l2_dv_timings *t2, + unsigned pclock_delta, bool match_reduced_fps) +{ + if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120) + return false; + if (t1->bt.width == t2->bt.width && + t1->bt.height == t2->bt.height && + t1->bt.interlaced == t2->bt.interlaced && + t1->bt.polarities == t2->bt.polarities && + t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta && + t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta && + t1->bt.hfrontporch == t2->bt.hfrontporch && + t1->bt.hsync == t2->bt.hsync && + t1->bt.hbackporch == t2->bt.hbackporch && + t1->bt.vfrontporch == t2->bt.vfrontporch && + t1->bt.vsync == t2->bt.vsync && + t1->bt.vbackporch == t2->bt.vbackporch && + (!match_reduced_fps || + (t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) == + (t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) && + (!t1->bt.interlaced || + (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch && + t1->bt.il_vsync == t2->bt.il_vsync && + t1->bt.il_vbackporch == t2->bt.il_vbackporch))) + return true; + return false; +} +EXPORT_SYMBOL_GPL(v4l2_match_dv_timings); + +void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, + const struct v4l2_dv_timings *t, bool detailed) +{ + const struct v4l2_bt_timings *bt = &t->bt; + u32 htot, vtot; + u32 fps; + + if (t->type != V4L2_DV_BT_656_1120) + return; + + htot = V4L2_DV_BT_FRAME_WIDTH(bt); + vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); + if (bt->interlaced) + vtot /= 2; + + fps = (htot * vtot) > 0 ? 
div_u64((100 * (u64)bt->pixelclock), + (htot * vtot)) : 0; + + if (prefix == NULL) + prefix = ""; + + pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix, + bt->width, bt->height, bt->interlaced ? "i" : "p", + fps / 100, fps % 100, htot, vtot); + + if (!detailed) + return; + + pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->hfrontporch, + (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-", + bt->hsync, bt->hbackporch); + pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->vfrontporch, + (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-", + bt->vsync, bt->vbackporch); + if (bt->interlaced) + pr_info("%s: vertical bottom field: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->il_vfrontporch, + (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-", + bt->il_vsync, bt->il_vbackporch); + pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock); + pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n", + dev_prefix, bt->flags, + (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ? + " REDUCED_BLANKING" : "", + ((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) && + bt->vsync == 8) ? " (V2)" : "", + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ? + " CAN_REDUCE_FPS" : "", + (bt->flags & V4L2_DV_FL_REDUCED_FPS) ? + " REDUCED_FPS" : "", + (bt->flags & V4L2_DV_FL_HALF_LINE) ? + " HALF_LINE" : "", + (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ? + " CE_VIDEO" : "", + (bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ? + " FIRST_FIELD_EXTRA_LINE" : "", + (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ? + " HAS_PICTURE_ASPECT" : "", + (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ? + " HAS_CEA861_VIC" : "", + (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ? + " HAS_HDMI_VIC" : ""); + pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards, + (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "", + (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "", + (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "", + (bt->standards & V4L2_DV_BT_STD_GTF) ? 
" GTF" : "", + (bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : ""); + if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) + pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix, + bt->picture_aspect.numerator, + bt->picture_aspect.denominator); + if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) + pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic); + if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) + pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic); +} +EXPORT_SYMBOL_GPL(v4l2_print_dv_timings); + +struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t) +{ + struct v4l2_fract ratio = { 1, 1 }; + unsigned long n, d; + + if (t->type != V4L2_DV_BT_656_1120) + return ratio; + if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)) + return ratio; + + ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator; + ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator; + + rational_best_approximation(ratio.numerator, ratio.denominator, + ratio.numerator, ratio.denominator, &n, &d); + ratio.numerator = n; + ratio.denominator = d; + return ratio; +} +EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio); + +/** v4l2_calc_timeperframe - helper function to calculate timeperframe based + * v4l2_dv_timings fields. + * @t - Timings for the video mode. + * + * Calculates the expected timeperframe using the pixel clock value and + * horizontal/vertical measures. This means that v4l2_dv_timings structure + * must be correctly and fully filled. 
+ */ +struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t) +{ + const struct v4l2_bt_timings *bt = &t->bt; + struct v4l2_fract fps_fract = { 1, 1 }; + unsigned long n, d; + u32 htot, vtot, fps; + u64 pclk; + + if (t->type != V4L2_DV_BT_656_1120) + return fps_fract; + + htot = V4L2_DV_BT_FRAME_WIDTH(bt); + vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); + pclk = bt->pixelclock; + + if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) && + (bt->flags & V4L2_DV_FL_REDUCED_FPS)) + pclk = div_u64(pclk * 1000ULL, 1001); + + fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0; + if (!fps) + return fps_fract; + + rational_best_approximation(fps, 100, fps, 100, &n, &d); + + fps_fract.numerator = d; + fps_fract.denominator = n; + return fps_fract; +} +EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe); + +/* + * CVT defines + * Based on Coordinated Video Timings Standard + * version 1.1 September 10, 2003 + */ + +#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */ +#define CVT_PXL_CLK_GRAN_RB_V2 1000 /* granularity for reduced blanking v2*/ + +/* Normal blanking */ +#define CVT_MIN_V_BPORCH 7 /* lines */ +#define CVT_MIN_V_PORCH_RND 3 /* lines */ +#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */ +#define CVT_HSYNC_PERCENT 8 /* nominal hsync as percentage of line */ + +/* Normal blanking for CVT uses GTF to calculate horizontal blanking */ +#define CVT_CELL_GRAN 8 /* character cell granularity */ +#define CVT_M 600 /* blanking formula gradient */ +#define CVT_C 40 /* blanking formula offset */ +#define CVT_K 128 /* blanking formula scaling factor */ +#define CVT_J 20 /* blanking formula scaling factor */ +#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J) +#define CVT_M_PRIME (CVT_K * CVT_M / 256) + +/* Reduced Blanking */ +#define CVT_RB_MIN_V_BPORCH 7 /* lines */ +#define CVT_RB_V_FPORCH 3 /* lines */ +#define CVT_RB_MIN_V_BLANK 460 /* us */ +#define CVT_RB_H_SYNC 32 /* pixels */ +#define CVT_RB_H_BLANK 160 /* pixels 
*/ +/* Reduce blanking Version 2 */ +#define CVT_RB_V2_H_BLANK 80 /* pixels */ +#define CVT_RB_MIN_V_FPORCH 3 /* lines */ +#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */ +#define CVT_RB_V_BPORCH 6 /* lines */ + +/** v4l2_detect_cvt - detect if the given timings follow the CVT standard + * @frame_height - the total height of the frame (including blanking) in lines. + * @hfreq - the horizontal frequency in Hz. + * @vsync - the height of the vertical sync in lines. + * @active_width - active width of image (does not include blanking). This + * information is needed only in case of version 2 of reduced blanking. + * In other cases, this parameter does not have any effect on timings. + * @polarities - the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced - if this flag is true, it indicates interlaced format + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid CVT format. If so, then it will return true, and fmt will be filled + * in with the found CVT timings. 
+ */ +bool v4l2_detect_cvt(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + unsigned int active_width, + u32 polarities, + bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) +{ + struct v4l2_dv_timings t = {}; + int v_fp, v_bp, h_fp, h_bp, hsync; + int frame_width, image_height, image_width; + bool reduced_blanking; + bool rb_v2 = false; + unsigned int pix_clk; + + if (vsync < 4 || vsync > 8) + return false; + + if (polarities == V4L2_DV_VSYNC_POS_POL) + reduced_blanking = false; + else if (polarities == V4L2_DV_HSYNC_POS_POL) + reduced_blanking = true; + else + return false; + + if (reduced_blanking && vsync == 8) + rb_v2 = true; + + if (rb_v2 && active_width == 0) + return false; + + if (!rb_v2 && vsync > 7) + return false; + + if (hfreq == 0) + return false; + + /* Vertical */ + if (reduced_blanking) { + if (rb_v2) { + v_bp = CVT_RB_V_BPORCH; + v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1; + v_fp -= vsync + v_bp; + + if (v_fp < CVT_RB_V2_MIN_V_FPORCH) + v_fp = CVT_RB_V2_MIN_V_FPORCH; + } else { + v_fp = CVT_RB_V_FPORCH; + v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1; + v_bp -= vsync + v_fp; + + if (v_bp < CVT_RB_MIN_V_BPORCH) + v_bp = CVT_RB_MIN_V_BPORCH; + } + } else { + v_fp = CVT_MIN_V_PORCH_RND; + v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync; + + if (v_bp < CVT_MIN_V_BPORCH) + v_bp = CVT_MIN_V_BPORCH; + } + + if (interlaced) + image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1; + else + image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1; + + if (image_height < 0) + return false; + + /* Aspect ratio based on vsync */ + switch (vsync) { + case 4: + image_width = (image_height * 4) / 3; + break; + case 5: + image_width = (image_height * 16) / 9; + break; + case 6: + image_width = (image_height * 16) / 10; + break; + case 7: + /* special case */ + if (image_height == 1024) + image_width = (image_height * 5) / 4; + else if (image_height == 768) + 
image_width = (image_height * 15) / 9; + else + return false; + break; + case 8: + image_width = active_width; + break; + default: + return false; + } + + if (!rb_v2) + image_width = image_width & ~7; + + /* Horizontal */ + if (reduced_blanking) { + int h_blank; + int clk_gran; + + h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK; + clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = (pix_clk / clk_gran) * clk_gran; + + h_bp = h_blank / 2; + hsync = CVT_RB_H_SYNC; + h_fp = h_blank - h_bp - hsync; + + frame_width = image_width + h_blank; + } else { + unsigned ideal_duty_cycle_per_myriad = + 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq; + int h_blank; + + if (ideal_duty_cycle_per_myriad < 2000) + ideal_duty_cycle_per_myriad = 2000; + + h_blank = image_width * ideal_duty_cycle_per_myriad / + (10000 - ideal_duty_cycle_per_myriad); + h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN; + + h_bp = h_blank / 2; + frame_width = image_width + h_blank; + + hsync = frame_width * CVT_HSYNC_PERCENT / 100; + hsync = (hsync / CVT_CELL_GRAN) * CVT_CELL_GRAN; + h_fp = h_blank - hsync - h_bp; + } + + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; + + if (!interlaced) { + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; + } else { + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - + 2 * vsync) / 2; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + 
t.bt.interlaced = V4L2_DV_INTERLACED; + } + + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_CVT; + + if (reduced_blanking) + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; + return true; +} +EXPORT_SYMBOL_GPL(v4l2_detect_cvt); + +/* + * GTF defines + * Based on Generalized Timing Formula Standard + * Version 1.1 September 2, 1999 + */ + +#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */ + +#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */ +#define GTF_V_FP 1 /* vertical front porch (lines) */ +#define GTF_CELL_GRAN 8 /* character cell granularity */ + +/* Default */ +#define GTF_D_M 600 /* blanking formula gradient */ +#define GTF_D_C 40 /* blanking formula offset */ +#define GTF_D_K 128 /* blanking formula scaling factor */ +#define GTF_D_J 20 /* blanking formula scaling factor */ +#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J) +#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256) + +/* Secondary */ +#define GTF_S_M 3600 /* blanking formula gradient */ +#define GTF_S_C 40 /* blanking formula offset */ +#define GTF_S_K 128 /* blanking formula scaling factor */ +#define GTF_S_J 35 /* blanking formula scaling factor */ +#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J) +#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256) + +/** v4l2_detect_gtf - detect if the given timings follow the GTF standard + * @frame_height - the total height of the frame (including blanking) in lines. + * @hfreq - the horizontal frequency in Hz. + * @vsync - the height of the vertical sync in lines. + * @polarities - the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced - if this flag is true, it indicates interlaced format + * @aspect - preferred aspect ratio. 
GTF has no method of determining the + * aspect ratio in order to derive the image width from the + * image height, so it has to be passed explicitly. Usually + * the native screen aspect ratio is used for this. If it + * is not filled in correctly, then 16:9 will be assumed. + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid GTF format. If so, then it will return true, and fmt will be filled + * in with the found GTF timings. + */ +bool v4l2_detect_gtf(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + u32 polarities, + bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) +{ + struct v4l2_dv_timings t = {}; + int pix_clk; + int v_fp, v_bp, h_fp, hsync; + int frame_width, image_height, image_width; + bool default_gtf; + int h_blank; + + if (vsync != 3) + return false; + + if (polarities == V4L2_DV_VSYNC_POS_POL) + default_gtf = true; + else if (polarities == V4L2_DV_HSYNC_POS_POL) + default_gtf = false; + else + return false; + + if (hfreq == 0) + return false; + + /* Vertical */ + v_fp = GTF_V_FP; + v_bp = (GTF_MIN_VSYNC_BP * hfreq + 500000) / 1000000 - vsync; + if (interlaced) + image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1; + else + image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1; + + if (image_height < 0) + return false; + + if (aspect.numerator == 0 || aspect.denominator == 0) { + aspect.numerator = 16; + aspect.denominator = 9; + } + image_width = ((image_height * aspect.numerator) / aspect.denominator); + image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1); + + /* Horizontal */ + if (default_gtf) { + u64 num; + u32 den; + + num = (((u64)image_width * GTF_D_C_PRIME * hfreq) - + ((u64)image_width * GTF_D_M_PRIME * 1000)); + den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) * + (2 
* GTF_CELL_GRAN); + h_blank = div_u64((num + (den >> 1)), den); + h_blank *= (2 * GTF_CELL_GRAN); + } else { + u64 num; + u32 den; + + num = (((u64)image_width * GTF_S_C_PRIME * hfreq) - + ((u64)image_width * GTF_S_M_PRIME * 1000)); + den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) * + (2 * GTF_CELL_GRAN); + h_blank = div_u64((num + (den >> 1)), den); + h_blank *= (2 * GTF_CELL_GRAN); + } + + frame_width = image_width + h_blank; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN; + + hsync = (frame_width * 8 + 50) / 100; + hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN; + + h_fp = h_blank / 2 - hsync; + + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; + + if (!interlaced) { + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; + } else { + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - + 2 * vsync) / 2; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + t.bt.interlaced = V4L2_DV_INTERLACED; + } + + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_GTF; + + if (!default_gtf) + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; + return true; +} +EXPORT_SYMBOL_GPL(v4l2_detect_gtf); + +/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes + * 0x15 and 0x16 from the EDID. + * @hor_landscape - byte 0x15 from the EDID. + * @vert_portrait - byte 0x16 from the EDID. + * + * Determines the aspect ratio from the EDID. 
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: + * "Horizontal and Vertical Screen Size or Aspect Ratio" + */ +struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait) +{ + struct v4l2_fract aspect = { 16, 9 }; + u8 ratio; + + /* Nothing filled in, fallback to 16:9 */ + if (!hor_landscape && !vert_portrait) + return aspect; + /* Both filled in, so they are interpreted as the screen size in cm */ + if (hor_landscape && vert_portrait) { + aspect.numerator = hor_landscape; + aspect.denominator = vert_portrait; + return aspect; + } + /* Only one is filled in, so interpret them as a ratio: + (val + 99) / 100 */ + ratio = hor_landscape | vert_portrait; + /* Change some rounded values into the exact aspect ratio */ + if (ratio == 79) { + aspect.numerator = 16; + aspect.denominator = 9; + } else if (ratio == 34) { + aspect.numerator = 4; + aspect.denominator = 3; + } else if (ratio == 68) { + aspect.numerator = 15; + aspect.denominator = 9; + } else { + aspect.numerator = hor_landscape + 99; + aspect.denominator = 100; + } + if (hor_landscape) + return aspect; + /* The aspect ratio is for portrait, so swap numerator and denominator */ + swap(aspect.denominator, aspect.numerator); + return aspect; +} +EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio); + +/** v4l2_hdmi_rx_colorimetry - determine HDMI colorimetry information + * based on various InfoFrames. + * @avi: the AVI InfoFrame + * @hdmi: the HDMI Vendor InfoFrame, may be NULL + * @height: the frame height + * + * Determines the HDMI colorimetry information, i.e. how the HDMI + * pixel color data should be interpreted. + * + * Note that some of the newer features (DCI-P3, HDR) are not yet + * implemented: the hdmi.h header needs to be updated to the HDMI 2.0 + * and CTA-861-G standards. 
+ */ +struct v4l2_hdmi_colorimetry +v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, + const struct hdmi_vendor_infoframe *hdmi, + unsigned int height) +{ + struct v4l2_hdmi_colorimetry c = { + V4L2_COLORSPACE_SRGB, + V4L2_YCBCR_ENC_DEFAULT, + V4L2_QUANTIZATION_FULL_RANGE, + V4L2_XFER_FUNC_SRGB + }; + bool is_ce = avi->video_code || (hdmi && hdmi->vic); + bool is_sdtv = height <= 576; + bool default_is_lim_range_rgb = avi->video_code > 1; + + switch (avi->colorspace) { + case HDMI_COLORSPACE_RGB: + /* RGB pixel encoding */ + switch (avi->colorimetry) { + case HDMI_COLORIMETRY_EXTENDED: + switch (avi->extended_colorimetry) { + case HDMI_EXTENDED_COLORIMETRY_OPRGB: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + default: + break; + } + break; + default: + break; + } + switch (avi->quantization_range) { + case HDMI_QUANTIZATION_RANGE_LIMITED: + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + break; + case HDMI_QUANTIZATION_RANGE_FULL: + break; + default: + if (default_is_lim_range_rgb) + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + break; + } + break; + + default: + /* YCbCr pixel encoding */ + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + switch (avi->colorimetry) { + case HDMI_COLORIMETRY_NONE: + if (!is_ce) + break; + if (is_sdtv) { + c.colorspace = V4L2_COLORSPACE_SMPTE170M; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + } else { + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + } + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_COLORIMETRY_ITU_601: + c.colorspace = V4L2_COLORSPACE_SMPTE170M; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_COLORIMETRY_ITU_709: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case 
HDMI_COLORIMETRY_EXTENDED: + switch (avi->extended_colorimetry) { + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_XV709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_XV601; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_S_YCC_601: + c.colorspace = V4L2_COLORSPACE_SRGB; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_SRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + default: /* fall back to ITU_709 */ + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + } + break; + default: + break; + } + /* + * YCC Quantization Range signaling is more-or-less broken, + * let's just ignore this. + */ + break; + } + return c; +} +EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry); + +/** + * v4l2_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. 
+ */ +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + + if (offset) + *offset = loc; + if (loc == 0) + return CEC_PHYS_ADDR_INVALID; + return (edid[loc] << 8) | edid[loc + 1]; +} +EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr); + +/** + * v4l2_set_edid_phys_addr() - find and set the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @phys_addr: the new physical address + * + * This function finds the location of the physical address in the EDID + * and fills in the given physical address and updates the checksum + * at the end of the EDID block. It does nothing if the EDID doesn't + * contain a physical address. + */ +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + u8 sum = 0; + unsigned int i; + + if (loc == 0) + return; + edid[loc] = phys_addr >> 8; + edid[loc + 1] = phys_addr & 0xff; + loc &= ~0x7f; + + /* update the checksum */ + for (i = loc; i < loc + 127; i++) + sum += edid[i]; + edid[i] = 256 - sum; +} +EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr); + +/** + * v4l2_phys_addr_for_input() - calculate the PA for an input + * + * @phys_addr: the physical address of the parent + * @input: the number of the input port, must be between 1 and 15 + * + * This function calculates a new physical address based on the input + * port number. For example: + * + * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0 + * + * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0 + * + * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5 + * + * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth. + * + * Return: the new physical address or CEC_PHYS_ADDR_INVALID. 
+ */ +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input) +{ + /* Check if input is sane */ + if (WARN_ON(input == 0 || input > 0xf)) + return CEC_PHYS_ADDR_INVALID; + + if (phys_addr == 0) + return input << 12; + + if ((phys_addr & 0x0fff) == 0) + return phys_addr | (input << 8); + + if ((phys_addr & 0x00ff) == 0) + return phys_addr | (input << 4); + + if ((phys_addr & 0x000f) == 0) + return phys_addr | input; + + /* + * All nibbles are used so no valid physical addresses can be assigned + * to the input. + */ + return CEC_PHYS_ADDR_INVALID; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input); + +/** + * v4l2_phys_addr_validate() - validate a physical address from an EDID + * + * @phys_addr: the physical address to validate + * @parent: if not %NULL, then this is filled with the parents PA. + * @port: if not %NULL, then this is filled with the input port. + * + * This validates a physical address as read from an EDID. If the + * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end), + * then it will return -EINVAL. + * + * The parent PA is passed into %parent and the input port is passed into + * %port. For example: + * + * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0. + * + * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1. + * + * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2. + * + * PA = f.f.f.f: has parent f.f.f.f and input port 0. + * + * Return: 0 if the PA is valid, -EINVAL if not. 
+ */ +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port) +{ + int i; + + if (parent) + *parent = phys_addr; + if (port) + *port = 0; + if (phys_addr == CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + if (parent) + *parent = phys_addr & (0xfff0 << i); + if (port) + *port = (phys_addr >> i) & 0xf; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-event.c b/6.12.0/drivers/media/v4l2-core/v4l2-event.c new file mode 100644 index 0000000..3898ff7 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-event.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-event.c + * + * V4L2 events. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#include +#include +#include + +#include +#include +#include +#include + +static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx) +{ + idx += sev->first; + return idx >= sev->elems ? 
idx - sev->elems : idx; +} + +static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) +{ + struct v4l2_kevent *kev; + struct timespec64 ts; + unsigned long flags; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + + if (list_empty(&fh->available)) { + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + return -ENOENT; + } + + WARN_ON(fh->navailable == 0); + + kev = list_first_entry(&fh->available, struct v4l2_kevent, list); + list_del(&kev->list); + fh->navailable--; + + kev->event.pending = fh->navailable; + *event = kev->event; + ts = ns_to_timespec64(kev->ts); + event->timestamp.tv_sec = ts.tv_sec; + event->timestamp.tv_nsec = ts.tv_nsec; + kev->sev->first = sev_pos(kev->sev, 1); + kev->sev->in_use--; + + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + return 0; +} + +int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, + int nonblocking) +{ + int ret; + + if (nonblocking) + return __v4l2_event_dequeue(fh, event); + + /* Release the vdev lock while waiting */ + if (fh->vdev->lock) + mutex_unlock(fh->vdev->lock); + + do { + ret = wait_event_interruptible(fh->wait, + fh->navailable != 0); + if (ret < 0) + break; + + ret = __v4l2_event_dequeue(fh, event); + } while (ret == -ENOENT); + + if (fh->vdev->lock) + mutex_lock(fh->vdev->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_event_dequeue); + +/* Caller must hold fh->vdev->fh_lock! */ +static struct v4l2_subscribed_event *v4l2_event_subscribed( + struct v4l2_fh *fh, u32 type, u32 id) +{ + struct v4l2_subscribed_event *sev; + + assert_spin_locked(&fh->vdev->fh_lock); + + list_for_each_entry(sev, &fh->subscribed, list) + if (sev->type == type && sev->id == id) + return sev; + + return NULL; +} + +static void __v4l2_event_queue_fh(struct v4l2_fh *fh, + const struct v4l2_event *ev, u64 ts) +{ + struct v4l2_subscribed_event *sev; + struct v4l2_kevent *kev; + bool copy_payload = true; + + /* Are we subscribed? 
*/ + sev = v4l2_event_subscribed(fh, ev->type, ev->id); + if (sev == NULL) + return; + + /* Increase event sequence number on fh. */ + fh->sequence++; + + /* Do we have any free events? */ + if (sev->in_use == sev->elems) { + /* no, remove the oldest one */ + kev = sev->events + sev_pos(sev, 0); + list_del(&kev->list); + sev->in_use--; + sev->first = sev_pos(sev, 1); + fh->navailable--; + if (sev->elems == 1) { + if (sev->ops && sev->ops->replace) { + sev->ops->replace(&kev->event, ev); + copy_payload = false; + } + } else if (sev->ops && sev->ops->merge) { + struct v4l2_kevent *second_oldest = + sev->events + sev_pos(sev, 0); + sev->ops->merge(&kev->event, &second_oldest->event); + } + } + + /* Take one and fill it. */ + kev = sev->events + sev_pos(sev, sev->in_use); + kev->event.type = ev->type; + if (copy_payload) + kev->event.u = ev->u; + kev->event.id = ev->id; + kev->ts = ts; + kev->event.sequence = fh->sequence; + sev->in_use++; + list_add_tail(&kev->list, &fh->available); + + fh->navailable++; + + wake_up_all(&fh->wait); +} + +void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) +{ + struct v4l2_fh *fh; + unsigned long flags; + u64 ts; + + if (vdev == NULL) + return; + + ts = ktime_get_ns(); + + spin_lock_irqsave(&vdev->fh_lock, flags); + + list_for_each_entry(fh, &vdev->fh_list, list) + __v4l2_event_queue_fh(fh, ev, ts); + + spin_unlock_irqrestore(&vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_queue); + +void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) +{ + unsigned long flags; + u64 ts = ktime_get_ns(); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_queue_fh(fh, ev, ts); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_queue_fh); + +int v4l2_event_pending(struct v4l2_fh *fh) +{ + return fh->navailable; +} +EXPORT_SYMBOL_GPL(v4l2_event_pending); + +void v4l2_event_wake_all(struct video_device *vdev) +{ + struct v4l2_fh *fh; + unsigned long 
flags; + + if (!vdev) + return; + + spin_lock_irqsave(&vdev->fh_lock, flags); + + list_for_each_entry(fh, &vdev->fh_list, list) + wake_up_all(&fh->wait); + + spin_unlock_irqrestore(&vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_wake_all); + +static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev) +{ + struct v4l2_fh *fh = sev->fh; + unsigned int i; + + lockdep_assert_held(&fh->subscribe_lock); + assert_spin_locked(&fh->vdev->fh_lock); + + /* Remove any pending events for this subscription */ + for (i = 0; i < sev->in_use; i++) { + list_del(&sev->events[sev_pos(sev, i)].list); + fh->navailable--; + } + list_del(&sev->list); +} + +int v4l2_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub, unsigned int elems, + const struct v4l2_subscribed_event_ops *ops) +{ + struct v4l2_subscribed_event *sev, *found_ev; + unsigned long flags; + unsigned int i; + int ret = 0; + + if (sub->type == V4L2_EVENT_ALL) + return -EINVAL; + + if (elems < 1) + elems = 1; + + sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL); + if (!sev) + return -ENOMEM; + sev->elems = elems; + for (i = 0; i < elems; i++) + sev->events[i].sev = sev; + sev->type = sub->type; + sev->id = sub->id; + sev->flags = sub->flags; + sev->fh = fh; + sev->ops = ops; + + mutex_lock(&fh->subscribe_lock); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (!found_ev) + list_add(&sev->list, &fh->subscribed); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + if (found_ev) { + /* Already listening */ + kvfree(sev); + } else if (sev->ops && sev->ops->add) { + ret = sev->ops->add(sev, elems); + if (ret) { + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_unsubscribe(sev); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + kvfree(sev); + } + } + + mutex_unlock(&fh->subscribe_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_event_subscribe); + +void 
v4l2_event_unsubscribe_all(struct v4l2_fh *fh) +{ + struct v4l2_event_subscription sub; + struct v4l2_subscribed_event *sev; + unsigned long flags; + + do { + sev = NULL; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + if (!list_empty(&fh->subscribed)) { + sev = list_first_entry(&fh->subscribed, + struct v4l2_subscribed_event, list); + sub.type = sev->type; + sub.id = sev->id; + } + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + if (sev) + v4l2_event_unsubscribe(fh, &sub); + } while (sev); +} +EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all); + +int v4l2_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct v4l2_subscribed_event *sev; + unsigned long flags; + + if (sub->type == V4L2_EVENT_ALL) { + v4l2_event_unsubscribe_all(fh); + return 0; + } + + mutex_lock(&fh->subscribe_lock); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + + sev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (sev != NULL) + __v4l2_event_unsubscribe(sev); + + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + if (sev && sev->ops && sev->ops->del) + sev->ops->del(sev); + + mutex_unlock(&fh->subscribe_lock); + + kvfree(sev); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe); + +int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return v4l2_event_unsubscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe); + +static void v4l2_event_src_replace(struct v4l2_event *old, + const struct v4l2_event *new) +{ + u32 old_changes = old->u.src_change.changes; + + old->u.src_change = new->u.src_change; + old->u.src_change.changes |= old_changes; +} + +static void v4l2_event_src_merge(const struct v4l2_event *old, + struct v4l2_event *new) +{ + new->u.src_change.changes |= old->u.src_change.changes; +} + +static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = { + .replace = v4l2_event_src_replace, + .merge = v4l2_event_src_merge, +}; + +int 
v4l2_src_change_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_SOURCE_CHANGE) + return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe); + +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, struct v4l2_event_subscription *sub) +{ + return v4l2_src_change_event_subscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-fh.c b/6.12.0/drivers/media/v4l2-core/v4l2-fh.c new file mode 100644 index 0000000..90eec79 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-fh.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-fh.c + * + * V4L2 file handles. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev) +{ + fh->vdev = vdev; + /* Inherit from video_device. May be overridden by the driver. */ + fh->ctrl_handler = vdev->ctrl_handler; + INIT_LIST_HEAD(&fh->list); + set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags); + /* + * determine_valid_ioctls() does not know if struct v4l2_fh + * is used by this driver, but here we do. So enable the + * prio ioctls here. 
+ */ + set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls); + set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls); + fh->prio = V4L2_PRIORITY_UNSET; + init_waitqueue_head(&fh->wait); + INIT_LIST_HEAD(&fh->available); + INIT_LIST_HEAD(&fh->subscribed); + fh->sequence = -1; + mutex_init(&fh->subscribe_lock); +} +EXPORT_SYMBOL_GPL(v4l2_fh_init); + +void v4l2_fh_add(struct v4l2_fh *fh) +{ + unsigned long flags; + + v4l2_prio_open(fh->vdev->prio, &fh->prio); + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_add(&fh->list, &fh->vdev->fh_list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_fh_add); + +int v4l2_fh_open(struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL); + + filp->private_data = fh; + if (fh == NULL) + return -ENOMEM; + v4l2_fh_init(fh, vdev); + v4l2_fh_add(fh); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fh_open); + +void v4l2_fh_del(struct v4l2_fh *fh) +{ + unsigned long flags; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_del_init(&fh->list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + v4l2_prio_close(fh->vdev->prio, fh->prio); +} +EXPORT_SYMBOL_GPL(v4l2_fh_del); + +void v4l2_fh_exit(struct v4l2_fh *fh) +{ + if (fh->vdev == NULL) + return; + v4l_disable_media_source(fh->vdev); + v4l2_event_unsubscribe_all(fh); + mutex_destroy(&fh->subscribe_lock); + fh->vdev = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_fh_exit); + +int v4l2_fh_release(struct file *filp) +{ + struct v4l2_fh *fh = filp->private_data; + + if (fh) { + v4l2_fh_del(fh); + v4l2_fh_exit(fh); + kfree(fh); + filp->private_data = NULL; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fh_release); + +int v4l2_fh_is_singular(struct v4l2_fh *fh) +{ + unsigned long flags; + int is_singular; + + if (fh == NULL || fh->vdev == NULL) + return 0; + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + is_singular = list_is_singular(&fh->list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, 
flags); + return is_singular; +} +EXPORT_SYMBOL_GPL(v4l2_fh_is_singular); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-flash-led-class.c b/6.12.0/drivers/media/v4l2-core/v4l2-flash-led-class.c new file mode 100644 index 0000000..355595a --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 flash LED sub-device registration helpers. + * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski + */ + +#include +#include +#include +#include +#include +#include +#include + +#define has_flash_op(v4l2_flash, op) \ + (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op) + +#define call_flash_op(v4l2_flash, op, arg) \ + (has_flash_op(v4l2_flash, op) ? \ + v4l2_flash->ops->op(v4l2_flash, arg) : \ + -EINVAL) + +enum ctrl_init_data_id { + LED_MODE, + TORCH_INTENSITY, + FLASH_INTENSITY, + INDICATOR_INTENSITY, + FLASH_TIMEOUT, + STROBE_SOURCE, + /* + * Only above values are applicable to + * the 'ctrls' array in the struct v4l2_flash. + */ + FLASH_STROBE, + STROBE_STOP, + STROBE_STATUS, + FLASH_FAULT, + NUM_FLASH_CTRLS, +}; + +static enum led_brightness __intensity_to_led_brightness( + struct v4l2_ctrl *ctrl, s32 intensity) +{ + intensity -= ctrl->minimum; + intensity /= (u32) ctrl->step; + + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Therefore it must be possible to set it to 0 level which in + * the LED subsystem reflects LED_OFF state. + */ + if (ctrl->minimum) + ++intensity; + + return intensity; +} + +static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl, + enum led_brightness brightness) +{ + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Do not decrement brightness read from the LED subsystem for + * indicator LED as it may equal 0. 
For torch LEDs this function + * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the + * brightness read is guaranteed to be greater than 0. In the mode + * V4L2_FLASH_LED_MODE_NONE the cached torch intensity value is used. + */ + if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY) + --brightness; + + return (brightness * ctrl->step) + ctrl->minimum; +} + +static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + enum led_brightness brightness; + + if (has_flash_op(v4l2_flash, intensity_to_led_brightness)) + brightness = call_flash_op(v4l2_flash, + intensity_to_led_brightness, + ctrl->val); + else + brightness = __intensity_to_led_brightness(ctrl, ctrl->val); + /* + * In case a LED Flash class driver provides ops for custom + * brightness <-> intensity conversion, it also must have defined + * related v4l2 control step == 1. In such a case a backward conversion + * from led brightness to v4l2 intensity is required to find out the + * aligned intensity value. + */ + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + brightness); + + if (ctrl == ctrls[TORCH_INTENSITY]) { + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + + if (WARN_ON_ONCE(!v4l2_flash->fled_cdev)) + return -EINVAL; + + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + if (WARN_ON_ONCE(!v4l2_flash->iled_cdev)) + return -EINVAL; + + led_cdev = v4l2_flash->iled_cdev; + } + + return led_set_brightness_sync(led_cdev, brightness); +} + +static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + int ret; + + if (ctrl == ctrls[TORCH_INTENSITY]) { + /* + * Update torch brightness only if in TORCH_MODE. 
In other modes + * torch led is turned off, which would spuriously inform the + * user space that V4L2_CID_FLASH_TORCH_INTENSITY control value + * has changed to 0. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + + if (WARN_ON_ONCE(!v4l2_flash->fled_cdev)) + return -EINVAL; + + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + if (WARN_ON_ONCE(!v4l2_flash->iled_cdev)) + return -EINVAL; + + led_cdev = v4l2_flash->iled_cdev; + } + + ret = led_update_brightness(led_cdev); + if (ret < 0) + return ret; + + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + led_cdev->brightness); + else + ctrl->val = __led_brightness_to_intensity(ctrl, + led_cdev->brightness); + + return 0; +} + +static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + bool is_strobing; + int ret; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_update_led_brightness(v4l2_flash, c); + } + + if (!fled_cdev) + return -EINVAL; + + switch (c->id) { + case V4L2_CID_FLASH_INTENSITY: + ret = led_update_flash_brightness(fled_cdev); + if (ret < 0) + return ret; + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. 
+ */ + c->val = fled_cdev->brightness.val; + return 0; + case V4L2_CID_FLASH_STROBE_STATUS: + ret = led_get_flash_strobe(fled_cdev, &is_strobing); + if (ret < 0) + return ret; + c->val = is_strobing; + return 0; + case V4L2_CID_FLASH_FAULT: + /* LED faults map directly to V4L2 flash faults */ + return led_get_flash_fault(fled_cdev, &c->val); + default: + return -EINVAL; + } +} + +static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls) +{ + return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) || + (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val != + V4L2_FLASH_STROBE_SOURCE_SOFTWARE))); +} + +static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + bool external_strobe; + int ret = 0; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_set_led_brightness(v4l2_flash, c); + } + + if (!fled_cdev) + return -EINVAL; + + led_cdev = &fled_cdev->led_cdev; + + switch (c->id) { + case V4L2_CID_FLASH_LED_MODE: + switch (c->val) { + case V4L2_FLASH_LED_MODE_NONE: + led_set_brightness_sync(led_cdev, LED_OFF); + return led_set_flash_strobe(fled_cdev, false); + case V4L2_FLASH_LED_MODE_FLASH: + /* Turn the torch LED off */ + led_set_brightness_sync(led_cdev, LED_OFF); + if (ctrls[STROBE_SOURCE]) { + external_strobe = (ctrls[STROBE_SOURCE]->val == + V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + + ret = call_flash_op(v4l2_flash, + external_strobe_set, + external_strobe); + } + return ret; + case V4L2_FLASH_LED_MODE_TORCH: + if (ctrls[STROBE_SOURCE]) { + ret = call_flash_op(v4l2_flash, + external_strobe_set, + false); + if (ret < 0) + return ret; + } + /* Stop flash strobing */ + ret = led_set_flash_strobe(fled_cdev, false); + if (ret < 0) + return ret; + + return 
v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + } + break; + case V4L2_CID_FLASH_STROBE_SOURCE: + external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + /* + * For some hardware arrangements setting strobe source may + * affect torch mode. Therefore, if not in the flash mode, + * cache only this setting. It will be applied upon switching + * to flash mode. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) + return 0; + + return call_flash_op(v4l2_flash, external_strobe_set, + external_strobe); + case V4L2_CID_FLASH_STROBE: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, true); + case V4L2_CID_FLASH_STROBE_STOP: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, false); + case V4L2_CID_FLASH_TIMEOUT: + /* + * No conversion is needed as LED Flash class also uses + * microseconds for flash timeout units. + */ + return led_set_flash_timeout(fled_cdev, c->val); + case V4L2_CID_FLASH_INTENSITY: + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. + */ + return led_set_flash_brightness(fled_cdev, c->val); + } + + return -EINVAL; +} + +static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = { + .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl, + .s_ctrl = v4l2_flash_s_ctrl, +}; + +static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s, + struct v4l2_ctrl_config *c) +{ + c->min = s->min; + c->max = s->max; + c->step = s->step; + c->def = s->val; +} + +static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg, + struct v4l2_flash_ctrl_data *ctrl_init_data) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct v4l2_ctrl_config *ctrl_cfg; + u32 mask; + + /* Init INDICATOR_INTENSITY ctrl data */ + if (v4l2_flash->iled_cdev) { + ctrl_init_data[INDICATOR_INTENSITY].cid = + V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, + ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg->min = 0; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } + + if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH))) + return; + + /* Init FLASH_FAULT ctrl data */ + if (flash_cfg->flash_faults) { + ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT; + ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config; + ctrl_cfg->id = V4L2_CID_FLASH_FAULT; + ctrl_cfg->max = flash_cfg->flash_faults; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_LED_MODE ctrl data */ + mask = 1 << V4L2_FLASH_LED_MODE_NONE | + 1 << V4L2_FLASH_LED_MODE_TORCH; + if (led_cdev->flags & LED_DEV_CAP_FLASH) + mask |= 1 << V4L2_FLASH_LED_MODE_FLASH; + + ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg = &ctrl_init_data[LED_MODE].config; + ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE; + ctrl_cfg->flags = 0; + + /* Init TORCH_INTENSITY ctrl data */ + ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + + /* Init FLASH_STROBE ctrl data */ + ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE; + ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE; + + /* Init 
STROBE_STOP ctrl data */ + ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP; + ctrl_cfg = &ctrl_init_data[STROBE_STOP].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP; + + /* Init FLASH_STROBE_SOURCE ctrl data */ + if (flash_cfg->has_external_strobe) { + mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) | + (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + ctrl_init_data[STROBE_SOURCE].cid = + V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE; + } + + /* Init STROBE_STATUS ctrl data */ + if (has_flash_op(fled_cdev, strobe_get)) { + ctrl_init_data[STROBE_STATUS].cid = + V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_TIMEOUT ctrl data */ + if (has_flash_op(fled_cdev, timeout_set)) { + ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT; + ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT; + } + + /* Init FLASH_INTENSITY ctrl data */ + if (has_flash_op(fled_cdev, flash_brightness_set)) { + ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } +} + +static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg) + +{ + struct v4l2_flash_ctrl_data *ctrl_init_data; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_config *ctrl_cfg; + int i, ret, num_ctrls = 0; + + v4l2_flash->ctrls 
= devm_kcalloc(v4l2_flash->sd.dev, + STROBE_SOURCE + 1, + sizeof(*v4l2_flash->ctrls), + GFP_KERNEL); + if (!v4l2_flash->ctrls) + return -ENOMEM; + + /* allocate memory dynamically so as not to exceed stack frame size */ + ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data), + GFP_KERNEL); + if (!ctrl_init_data) + return -ENOMEM; + + __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) + if (ctrl_init_data[i].cid) + ++num_ctrls; + + v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) { + ctrl_cfg = &ctrl_init_data[i].config; + if (!ctrl_init_data[i].cid) + continue; + + if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE || + ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE) + ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->max, + ctrl_cfg->menu_skip_mask, + ctrl_cfg->def); + else + ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->min, + ctrl_cfg->max, + ctrl_cfg->step, + ctrl_cfg->def); + + if (ctrl) + ctrl->flags |= ctrl_cfg->flags; + + if (i <= STROBE_SOURCE) + v4l2_flash->ctrls[i] = ctrl; + } + + kfree(ctrl_init_data); + + if (v4l2_flash->hdl.error) { + ret = v4l2_flash->hdl.error; + goto error_free_handler; + } + + v4l2_ctrl_handler_setup(&v4l2_flash->hdl); + + v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl; + + return 0; + +error_free_handler: + v4l2_ctrl_handler_free(&v4l2_flash->hdl); + return ret; +} + +static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + int ret = 0; + + if (ctrls[TORCH_INTENSITY]) { + ret = v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + if (ret < 0) + return ret; + } + + if (ctrls[INDICATOR_INTENSITY]) { + ret = v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[INDICATOR_INTENSITY]); + if 
(ret < 0) + return ret; + } + + if (ctrls[FLASH_TIMEOUT]) { + if (WARN_ON_ONCE(!fled_cdev)) + return -EINVAL; + + ret = led_set_flash_timeout(fled_cdev, + ctrls[FLASH_TIMEOUT]->val); + if (ret < 0) + return ret; + } + + if (ctrls[FLASH_INTENSITY]) { + if (WARN_ON_ONCE(!fled_cdev)) + return -EINVAL; + + ret = led_set_flash_brightness(fled_cdev, + ctrls[FLASH_INTENSITY]->val); + if (ret < 0) + return ret; + } + + /* + * For some hardware arrangements setting strobe source may affect + * torch mode. Synchronize strobe source setting only if not in torch + * mode. For torch mode case it will get synchronized upon switching + * to flash mode. + */ + if (ctrls[STROBE_SOURCE] && + ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + ret = call_flash_op(v4l2_flash, external_strobe_set, + ctrls[STROBE_SOURCE]->val); + + return ret; +} + +/* + * V4L2 subdev internal operations + */ + +static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + + led_sysfs_disable(led_cdev); + led_trigger_remove(led_cdev); + + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + + led_sysfs_disable(led_cdev_ind); + led_trigger_remove(led_cdev_ind); + + mutex_unlock(&led_cdev_ind->led_access); + } + + ret = __sync_device_with_v4l2_controls(v4l2_flash); + if (ret < 0) + goto out_sync_device; + + return 0; +out_sync_device: + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + + if (v4l2_flash->ctrls[STROBE_SOURCE]) + ret = v4l2_ctrl_s_ctrl( + v4l2_flash->ctrls[STROBE_SOURCE], + V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + led_sysfs_enable(led_cdev); + + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = { + .open = v4l2_flash_open, + .close = v4l2_flash_close, +}; + +static const struct v4l2_subdev_ops v4l2_flash_subdev_ops; + +static struct v4l2_flash *__v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) +{ + struct v4l2_flash *v4l2_flash; + struct v4l2_subdev *sd; + int ret; + + if (!config) + return ERR_PTR(-EINVAL); + + v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL); + if (!v4l2_flash) + return ERR_PTR(-ENOMEM); + + sd = &v4l2_flash->sd; + v4l2_flash->fled_cdev = fled_cdev; + v4l2_flash->iled_cdev = iled_cdev; + v4l2_flash->ops = ops; + sd->dev = dev; + sd->fwnode = fwn ? 
fwn : dev_fwnode(dev); + v4l2_subdev_init(sd, &v4l2_flash_subdev_ops); + sd->internal_ops = &v4l2_flash_subdev_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + strscpy(sd->name, config->dev_name, sizeof(sd->name)); + + ret = media_entity_pads_init(&sd->entity, 0, NULL); + if (ret < 0) + return ERR_PTR(ret); + + sd->entity.function = MEDIA_ENT_F_FLASH; + + ret = v4l2_flash_init_controls(v4l2_flash, config); + if (ret < 0) + goto err_init_controls; + + fwnode_handle_get(sd->fwnode); + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto err_async_register_sd; + + return v4l2_flash; + +err_async_register_sd: + fwnode_handle_put(sd->fwnode); + v4l2_ctrl_handler_free(sd->ctrl_handler); +err_init_controls: + media_entity_cleanup(&sd->entity); + + return ERR_PTR(ret); +} + +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_init); + +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init); + +void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ + struct v4l2_subdev *sd; + + if (IS_ERR_OR_NULL(v4l2_flash)) + return; + + sd = &v4l2_flash->sd; + + v4l2_async_unregister_subdev(sd); + + fwnode_handle_put(sd->fwnode); + + v4l2_ctrl_handler_free(sd->ctrl_handler); + media_entity_cleanup(&sd->entity); +} +EXPORT_SYMBOL_GPL(v4l2_flash_release); + +MODULE_AUTHOR("Jacek Anaszewski "); +MODULE_DESCRIPTION("V4L2 Flash sub-device helpers"); +MODULE_LICENSE("GPL v2"); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-fwnode.c b/6.12.0/drivers/media/v4l2-core/v4l2-fwnode.c new file mode 100644 
index 0000000..f19c8ad --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-fwnode.c @@ -0,0 +1,1258 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 fwnode binding parsing library + * + * The origins of the V4L2 fwnode library are in V4L2 OF library that + * formerly was located in v4l2-of.c. + * + * Copyright (c) 2016 Intel Corporation. + * Author: Sakari Ailus + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "v4l2-subdev-priv.h" + +static const struct v4l2_fwnode_bus_conv { + enum v4l2_fwnode_bus_type fwnode_bus_type; + enum v4l2_mbus_type mbus_type; + const char *name; +} buses[] = { + { + V4L2_FWNODE_BUS_TYPE_GUESS, + V4L2_MBUS_UNKNOWN, + "not specified", + }, { + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_MBUS_CSI2_CPHY, + "MIPI CSI-2 C-PHY", + }, { + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_MBUS_CSI1, + "MIPI CSI-1", + }, { + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_MBUS_CCP2, + "compact camera port 2", + }, { + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_MBUS_CSI2_DPHY, + "MIPI CSI-2 D-PHY", + }, { + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_MBUS_PARALLEL, + "parallel", + }, { + V4L2_FWNODE_BUS_TYPE_BT656, + V4L2_MBUS_BT656, + "Bt.656", + }, { + V4L2_FWNODE_BUS_TYPE_DPI, + V4L2_MBUS_DPI, + "DPI", + } +}; + +static const struct v4l2_fwnode_bus_conv * +get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].fwnode_bus_type == type) + return &buses[i]; + + return NULL; +} + +static enum v4l2_mbus_type +v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + + return conv ? 
conv->mbus_type : V4L2_MBUS_INVALID; +} + +static const char * +v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + + return conv ? conv->name : "not found"; +} + +static const struct v4l2_fwnode_bus_conv * +get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].mbus_type == type) + return &buses[i]; + + return NULL; +} + +static const char * +v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_mbus(type); + + return conv ? conv->name : "not found"; +} + +static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2; + bool have_clk_lane = false, have_data_lanes = false, + have_lane_polarities = false; + unsigned int flags = 0, lanes_used = 0; + u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; + u32 clock_lane = 0; + unsigned int num_data_lanes = 0; + bool use_default_lane_mapping = false; + unsigned int i; + u32 v; + int rval; + + if (bus_type == V4L2_MBUS_CSI2_DPHY || + bus_type == V4L2_MBUS_CSI2_CPHY) { + use_default_lane_mapping = true; + + num_data_lanes = min_t(u32, bus->num_data_lanes, + V4L2_MBUS_CSI2_MAX_DATA_LANES); + + clock_lane = bus->clock_lane; + if (clock_lane) + use_default_lane_mapping = false; + + for (i = 0; i < num_data_lanes; i++) { + array[i] = bus->data_lanes[i]; + if (array[i]) + use_default_lane_mapping = false; + } + + if (use_default_lane_mapping) + pr_debug("no lane mapping given, using defaults\n"); + } + + rval = fwnode_property_count_u32(fwnode, "data-lanes"); + if (rval > 0) { + num_data_lanes = + min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval); + + fwnode_property_read_u32_array(fwnode, "data-lanes", array, + 
num_data_lanes); + + have_data_lanes = true; + if (use_default_lane_mapping) { + pr_debug("data-lanes property exists; disabling default mapping\n"); + use_default_lane_mapping = false; + } + } + + for (i = 0; i < num_data_lanes; i++) { + if (lanes_used & BIT(array[i])) { + if (have_data_lanes || !use_default_lane_mapping) + pr_warn("duplicated lane %u in data-lanes, using defaults\n", + array[i]); + use_default_lane_mapping = true; + } + lanes_used |= BIT(array[i]); + + if (have_data_lanes) + pr_debug("lane %u position %u\n", i, array[i]); + } + + rval = fwnode_property_count_u32(fwnode, "lane-polarities"); + if (rval > 0) { + if (rval != 1 + num_data_lanes /* clock+data */) { + pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n", + 1 + num_data_lanes, rval); + return -EINVAL; + } + + have_lane_polarities = true; + } + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { + clock_lane = v; + pr_debug("clock lane position %u\n", v); + have_clk_lane = true; + } + + if (have_clk_lane && lanes_used & BIT(clock_lane) && + !use_default_lane_mapping) { + pr_warn("duplicated lane %u in clock-lanes, using defaults\n", + v); + use_default_lane_mapping = true; + } + + if (fwnode_property_present(fwnode, "clock-noncontinuous")) { + flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK; + pr_debug("non-continuous clock\n"); + } + + if (bus_type == V4L2_MBUS_CSI2_DPHY || + bus_type == V4L2_MBUS_CSI2_CPHY || + lanes_used || have_clk_lane || flags) { + /* Only D-PHY has a clock lane. 
*/ + unsigned int dfl_data_lane_index = + bus_type == V4L2_MBUS_CSI2_DPHY; + + bus->flags = flags; + if (bus_type == V4L2_MBUS_UNKNOWN) + vep->bus_type = V4L2_MBUS_CSI2_DPHY; + bus->num_data_lanes = num_data_lanes; + + if (use_default_lane_mapping) { + bus->clock_lane = 0; + for (i = 0; i < num_data_lanes; i++) + bus->data_lanes[i] = dfl_data_lane_index + i; + } else { + bus->clock_lane = clock_lane; + for (i = 0; i < num_data_lanes; i++) + bus->data_lanes[i] = array[i]; + } + + if (have_lane_polarities) { + fwnode_property_read_u32_array(fwnode, + "lane-polarities", array, + 1 + num_data_lanes); + + for (i = 0; i < 1 + num_data_lanes; i++) { + bus->lane_polarities[i] = array[i]; + pr_debug("lane %u polarity %sinverted", + i, array[i] ? "" : "not "); + } + } else { + pr_debug("no lane polarities defined, assuming not inverted\n"); + } + } + + return 0; +} + +#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ + V4L2_MBUS_HSYNC_ACTIVE_LOW | \ + V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ + V4L2_MBUS_VSYNC_ACTIVE_LOW | \ + V4L2_MBUS_FIELD_EVEN_HIGH | \ + V4L2_MBUS_FIELD_EVEN_LOW) + +static void +v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_parallel *bus = &vep->bus.parallel; + unsigned int flags = 0; + u32 v; + + if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656) + flags = bus->flags; + + if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) { + flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH | + V4L2_MBUS_HSYNC_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH : + V4L2_MBUS_HSYNC_ACTIVE_LOW; + pr_debug("hsync-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) { + flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH | + V4L2_MBUS_VSYNC_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH : + V4L2_MBUS_VSYNC_ACTIVE_LOW; + pr_debug("vsync-active %s\n", v ? 
"high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) { + flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH | + V4L2_MBUS_FIELD_EVEN_LOW); + flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH : + V4L2_MBUS_FIELD_EVEN_LOW; + pr_debug("field-even-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) { + flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING | + V4L2_MBUS_PCLK_SAMPLE_FALLING | + V4L2_MBUS_PCLK_SAMPLE_DUALEDGE); + switch (v) { + case 0: + flags |= V4L2_MBUS_PCLK_SAMPLE_FALLING; + pr_debug("pclk-sample low\n"); + break; + case 1: + flags |= V4L2_MBUS_PCLK_SAMPLE_RISING; + pr_debug("pclk-sample high\n"); + break; + case 2: + flags |= V4L2_MBUS_PCLK_SAMPLE_DUALEDGE; + pr_debug("pclk-sample dual edge\n"); + break; + default: + pr_warn("invalid argument for pclk-sample"); + break; + } + } + + if (!fwnode_property_read_u32(fwnode, "data-active", &v)) { + flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH | + V4L2_MBUS_DATA_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH : + V4L2_MBUS_DATA_ACTIVE_LOW; + pr_debug("data-active %s\n", v ? "high" : "low"); + } + + if (fwnode_property_present(fwnode, "slave-mode")) { + pr_debug("slave mode\n"); + flags &= ~V4L2_MBUS_MASTER; + flags |= V4L2_MBUS_SLAVE; + } else { + flags &= ~V4L2_MBUS_SLAVE; + flags |= V4L2_MBUS_MASTER; + } + + if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) { + bus->bus_width = v; + pr_debug("bus-width %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) { + bus->data_shift = v; + pr_debug("data-shift %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) { + flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH | + V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH : + V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW; + pr_debug("sync-on-green-active %s\n", v ? 
"high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) { + flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH | + V4L2_MBUS_DATA_ENABLE_LOW); + flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH : + V4L2_MBUS_DATA_ENABLE_LOW; + pr_debug("data-enable-active %s\n", v ? "high" : "low"); + } + + switch (bus_type) { + default: + bus->flags = flags; + if (flags & PARALLEL_MBUS_FLAGS) + vep->bus_type = V4L2_MBUS_PARALLEL; + else + vep->bus_type = V4L2_MBUS_BT656; + break; + case V4L2_MBUS_PARALLEL: + vep->bus_type = V4L2_MBUS_PARALLEL; + bus->flags = flags; + break; + case V4L2_MBUS_BT656: + vep->bus_type = V4L2_MBUS_BT656; + bus->flags = flags & ~PARALLEL_MBUS_FLAGS; + break; + } +} + +static void +v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1; + u32 v; + + if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) { + bus->clock_inv = v; + pr_debug("clock-inv %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "strobe", &v)) { + bus->strobe = v; + pr_debug("strobe %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) { + bus->data_lane = v; + pr_debug("data-lanes %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { + bus->clock_lane = v; + pr_debug("clock-lanes %u\n", v); + } + + if (bus_type == V4L2_MBUS_CCP2) + vep->bus_type = V4L2_MBUS_CCP2; + else + vep->bus_type = V4L2_MBUS_CSI1; +} + +static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS; + enum v4l2_mbus_type mbus_type; + int rval; + + pr_debug("===== begin parsing endpoint %pfw\n", fwnode); + + fwnode_property_read_u32(fwnode, "bus-type", &bus_type); + pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n", + v4l2_fwnode_bus_type_to_string(bus_type), bus_type, + 
v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type); + if (mbus_type == V4L2_MBUS_INVALID) { + pr_debug("unsupported bus type %u\n", bus_type); + return -EINVAL; + } + + if (vep->bus_type != V4L2_MBUS_UNKNOWN) { + if (mbus_type != V4L2_MBUS_UNKNOWN && + vep->bus_type != mbus_type) { + pr_debug("expecting bus type %s\n", + v4l2_fwnode_mbus_type_to_string(vep->bus_type)); + return -ENXIO; + } + } else { + vep->bus_type = mbus_type; + } + + switch (vep->bus_type) { + case V4L2_MBUS_UNKNOWN: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep, + V4L2_MBUS_UNKNOWN); + if (rval) + return rval; + + if (vep->bus_type == V4L2_MBUS_UNKNOWN) + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep, + V4L2_MBUS_UNKNOWN); + + pr_debug("assuming media bus type %s (%u)\n", + v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + + break; + case V4L2_MBUS_CCP2: + case V4L2_MBUS_CSI1: + v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type); + + break; + case V4L2_MBUS_CSI2_DPHY: + case V4L2_MBUS_CSI2_CPHY: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep, + vep->bus_type); + if (rval) + return rval; + + break; + case V4L2_MBUS_PARALLEL: + case V4L2_MBUS_BT656: + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep, + vep->bus_type); + + break; + default: + pr_warn("unsupported bus type %u\n", mbus_type); + return -EINVAL; + } + + fwnode_graph_parse_endpoint(fwnode, &vep->base); + + return 0; +} + +int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + int ret; + + ret = __v4l2_fwnode_endpoint_parse(fwnode, vep); + + pr_debug("===== end parsing endpoint %pfw\n", fwnode); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse); + +void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep) +{ + if (IS_ERR_OR_NULL(vep)) + return; + + kfree(vep->link_frequencies); + vep->link_frequencies = NULL; +} 
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free); + +int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + int rval; + + rval = __v4l2_fwnode_endpoint_parse(fwnode, vep); + if (rval < 0) + return rval; + + rval = fwnode_property_count_u64(fwnode, "link-frequencies"); + if (rval > 0) { + unsigned int i; + + vep->link_frequencies = + kmalloc_array(rval, sizeof(*vep->link_frequencies), + GFP_KERNEL); + if (!vep->link_frequencies) + return -ENOMEM; + + vep->nr_of_link_frequencies = rval; + + rval = fwnode_property_read_u64_array(fwnode, + "link-frequencies", + vep->link_frequencies, + vep->nr_of_link_frequencies); + if (rval < 0) { + v4l2_fwnode_endpoint_free(vep); + return rval; + } + + for (i = 0; i < vep->nr_of_link_frequencies; i++) + pr_debug("link-frequencies %u value %llu\n", i, + vep->link_frequencies[i]); + } + + pr_debug("===== end parsing endpoint %pfw\n", fwnode); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse); + +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_link *link) +{ + struct fwnode_endpoint fwep; + + memset(link, 0, sizeof(*link)); + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->local_id = fwep.id; + link->local_port = fwep.port; + link->local_node = fwnode_graph_get_port_parent(fwnode); + if (!link->local_node) + return -ENOLINK; + + fwnode = fwnode_graph_get_remote_endpoint(fwnode); + if (!fwnode) + goto err_put_local_node; + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->remote_id = fwep.id; + link->remote_port = fwep.port; + link->remote_node = fwnode_graph_get_port_parent(fwnode); + if (!link->remote_node) + goto err_put_remote_endpoint; + + return 0; + +err_put_remote_endpoint: + fwnode_handle_put(fwnode); + +err_put_local_node: + fwnode_handle_put(link->local_node); + + return -ENOLINK; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link); + +void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link) +{ + 
fwnode_handle_put(link->local_node); + fwnode_handle_put(link->remote_node); +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link); + +static const struct v4l2_fwnode_connector_conv { + enum v4l2_connector_type type; + const char *compatible; +} connectors[] = { + { + .type = V4L2_CONN_COMPOSITE, + .compatible = "composite-video-connector", + }, { + .type = V4L2_CONN_SVIDEO, + .compatible = "svideo-connector", + }, +}; + +static enum v4l2_connector_type +v4l2_fwnode_string_to_connector_type(const char *con_str) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(connectors); i++) + if (!strcmp(con_str, connectors[i].compatible)) + return connectors[i].type; + + return V4L2_CONN_UNKNOWN; +} + +static void +v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *vc) +{ + u32 stds; + int ret; + + ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds); + + /* The property is optional. */ + vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds; +} + +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector) +{ + struct v4l2_connector_link *link, *tmp; + + if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN) + return; + + list_for_each_entry_safe(link, tmp, &connector->links, head) { + v4l2_fwnode_put_link(&link->fwnode_link); + list_del(&link->head); + kfree(link); + } + + kfree(connector->label); + connector->label = NULL; + connector->type = V4L2_CONN_UNKNOWN; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free); + +static enum v4l2_connector_type +v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode) +{ + const char *type_name; + int err; + + if (!fwnode) + return V4L2_CONN_UNKNOWN; + + /* The connector-type is stored within the compatible string. 
*/ + err = fwnode_property_read_string(fwnode, "compatible", &type_name); + if (err) + return V4L2_CONN_UNKNOWN; + + return v4l2_fwnode_string_to_connector_type(type_name); +} + +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_node; + enum v4l2_connector_type connector_type; + const char *label; + int err; + + if (!fwnode) + return -EINVAL; + + memset(connector, 0, sizeof(*connector)); + + INIT_LIST_HEAD(&connector->links); + + connector_node = fwnode_graph_get_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + if (connector_type == V4L2_CONN_UNKNOWN) { + fwnode_handle_put(connector_node); + connector_node = fwnode_graph_get_remote_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + } + + if (connector_type == V4L2_CONN_UNKNOWN) { + pr_err("Unknown connector type\n"); + err = -ENOTCONN; + goto out; + } + + connector->type = connector_type; + connector->name = fwnode_get_name(connector_node); + err = fwnode_property_read_string(connector_node, "label", &label); + connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL); + + /* Parse the connector specific properties. 
*/ + switch (connector->type) { + case V4L2_CONN_COMPOSITE: + case V4L2_CONN_SVIDEO: + v4l2_fwnode_connector_parse_analog(connector_node, connector); + break; + /* Avoid compiler warnings */ + case V4L2_CONN_UNKNOWN: + break; + } + +out: + fwnode_handle_put(connector_node); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse); + +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_ep; + struct v4l2_connector_link *link; + int err; + + if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN) + return -EINVAL; + + connector_ep = fwnode_graph_get_remote_endpoint(fwnode); + if (!connector_ep) + return -ENOTCONN; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto err; + } + + err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link); + if (err) + goto err; + + fwnode_handle_put(connector_ep); + + list_add(&link->head, &connector->links); + connector->nr_of_links++; + + return 0; + +err: + kfree(link); + fwnode_handle_put(connector_ep); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link); + +int v4l2_fwnode_device_parse(struct device *dev, + struct v4l2_fwnode_device_properties *props) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + u32 val; + int ret; + + memset(props, 0, sizeof(*props)); + + props->orientation = V4L2_FWNODE_PROPERTY_UNSET; + ret = fwnode_property_read_u32(fwnode, "orientation", &val); + if (!ret) { + switch (val) { + case V4L2_FWNODE_ORIENTATION_FRONT: + case V4L2_FWNODE_ORIENTATION_BACK: + case V4L2_FWNODE_ORIENTATION_EXTERNAL: + break; + default: + dev_warn(dev, "Unsupported device orientation: %u\n", val); + return -EINVAL; + } + + props->orientation = val; + dev_dbg(dev, "device orientation: %u\n", val); + } + + props->rotation = V4L2_FWNODE_PROPERTY_UNSET; + ret = fwnode_property_read_u32(fwnode, "rotation", &val); + if (!ret) { + if (val >= 360) { + dev_warn(dev, 
"Unsupported device rotation: %u\n", val); + return -EINVAL; + } + + props->rotation = val; + dev_dbg(dev, "device rotation: %u\n", val); + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse); + +/* + * v4l2_fwnode_reference_parse - parse references for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * @prop: the name of the property + * + * Return: 0 on success + * -ENOENT if no entries were found + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int v4l2_fwnode_reference_parse(struct device *dev, + struct v4l2_async_notifier *notifier, + const char *prop) +{ + struct fwnode_reference_args args; + unsigned int index; + int ret; + + for (index = 0; + !(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop, + NULL, 0, index, &args)); + index++) { + struct v4l2_async_connection *asd; + + asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode, + struct v4l2_async_connection); + fwnode_handle_put(args.fwnode); + if (IS_ERR(asd)) { + /* not an error if asd already exists */ + if (PTR_ERR(asd) == -EEXIST) + continue; + + return PTR_ERR(asd); + } + } + + /* -ENOENT here means successful parsing */ + if (ret != -ENOENT) + return ret; + + /* Return -ENOENT if no references were found */ + return index ? 0 : -ENOENT; +} + +/* + * v4l2_fwnode_reference_get_int_prop - parse a reference with integer + * arguments + * @fwnode: fwnode to read @prop from + * @notifier: notifier for @dev + * @prop: the name of the property + * @index: the index of the reference to get + * @props: the array of integer property names + * @nprops: the number of integer property names in @nprops + * + * First find an fwnode referred to by the reference at @index in @prop. 
+ * + * Then under that fwnode, @nprops times, for each property in @props, + * iteratively follow child nodes starting from fwnode such that they have the + * property in @props array at the index of the child node distance from the + * root node and the value of that property matching with the integer argument + * of the reference, at the same index. + * + * The child fwnode reached at the end of the iteration is then returned to the + * caller. + * + * The core reason for this is that you cannot refer to just any node in ACPI. + * So to refer to an endpoint (easy in DT) you need to refer to a device, then + * provide a list of (property name, property value) tuples where each tuple + * uniquely identifies a child node. The first tuple identifies a child directly + * underneath the device fwnode, the next tuple identifies a child node + * underneath the fwnode identified by the previous tuple, etc. until you + * reached the fwnode you need. + * + * THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A + * REFERENCE IN HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under + * Documentation/firmware-guide/acpi/dsd/ instead and especially graph.txt, + * data-node-references.txt and leds.txt . 
+ * + * Scope (\_SB.PCI0.I2C2) + * { + * Device (CAM0) + * { + * Name (_DSD, Package () { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { + * "compatible", + * Package () { "nokia,smia" } + * }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port0", "PRT0" }, + * } + * }) + * Name (PRT0, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 0 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP00" }, + * } + * }) + * Name (EP00, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package() { + * \_SB.PCI0.ISP, 4, 0 + * } + * }, + * } + * }) + * } + * } + * + * Scope (\_SB.PCI0) + * { + * Device (ISP) + * { + * Name (_DSD, Package () { + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port4", "PRT4" }, + * } + * }) + * + * Name (PRT4, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 4 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP40" }, + * } + * }) + * + * Name (EP40, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package () { + * \_SB.PCI0.I2C2.CAM0, + * 0, 0 + * } + * }, + * } + * }) + * } + * } + * + * From the EP40 node under ISP device, you could parse the graph remote + * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments: + * + * @fwnode: fwnode referring to EP40 under ISP. + * @prop: "remote-endpoint" + * @index: 0 + * @props: "port", "endpoint" + * @nprops: 2 + * + * And you'd get back fwnode referring to EP00 under CAM0. 
+ * + * The same works the other way around: if you use EP00 under CAM0 as the + * fwnode, you'll get fwnode referring to EP40 under ISP. + * + * The same example in DT syntax would look like this: + * + * cam: cam0 { + * compatible = "nokia,smia"; + * + * port { + * port = <0>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&isp 4 0>; + * }; + * }; + * }; + * + * isp: isp { + * ports { + * port@4 { + * port = <4>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&cam 0 0>; + * }; + * }; + * }; + * }; + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwise failed + * -ENOMEM if memory allocation failed + */ +static struct fwnode_handle * +v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode, + const char *prop, + unsigned int index, + const char * const *props, + unsigned int nprops) +{ + struct fwnode_reference_args fwnode_args; + u64 *args = fwnode_args.args; + struct fwnode_handle *child; + int ret; + + /* + * Obtain remote fwnode as well as the integer arguments. + * + * Note that right now both -ENODATA and -ENOENT may signal + * out-of-bounds access. Return -ENOENT in that case. + */ + ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops, + index, &fwnode_args); + if (ret) + return ERR_PTR(ret == -ENODATA ? -ENOENT : ret); + + /* + * Find a node in the tree under the referred fwnode corresponding to + * the integer arguments. + */ + fwnode = fwnode_args.fwnode; + while (nprops--) { + u32 val; + + /* Loop over all child nodes under fwnode. */ + fwnode_for_each_child_node(fwnode, child) { + if (fwnode_property_read_u32(child, *props, &val)) + continue; + + /* Found property, see if its value matches. */ + if (val == *args) + break; + } + + fwnode_handle_put(fwnode); + + /* No property found; return an error here. 
*/ + if (!child) { + fwnode = ERR_PTR(-ENOENT); + break; + } + + props++; + args++; + fwnode = child; + } + + return fwnode; +} + +struct v4l2_fwnode_int_props { + const char *name; + const char * const *props; + unsigned int nprops; +}; + +/* + * v4l2_fwnode_reference_parse_int_props - parse references for async + * sub-devices + * @dev: struct device pointer + * @notifier: notifier for @dev + * @prop: the name of the property + * @props: the array of integer property names + * @nprops: the number of integer properties + * + * Use v4l2_fwnode_reference_get_int_prop to find fwnodes through reference in + * property @prop with integer arguments with child nodes matching in properties + * @props. Then, set up V4L2 async sub-devices for those fwnodes in the notifier + * accordingly. + * + * While it is technically possible to use this function on DT, it is only + * meaningful on ACPI. On Device tree you can refer to any node in the tree but + * on ACPI the references are limited to devices. + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwisefailed + * -ENOMEM if memory allocation failed + */ +static int +v4l2_fwnode_reference_parse_int_props(struct device *dev, + struct v4l2_async_notifier *notifier, + const struct v4l2_fwnode_int_props *p) +{ + struct fwnode_handle *fwnode; + unsigned int index; + int ret; + const char *prop = p->name; + const char * const *props = p->props; + unsigned int nprops = p->nprops; + + index = 0; + do { + fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev), + prop, index, + props, nprops); + if (IS_ERR(fwnode)) { + /* + * Note that right now both -ENODATA and -ENOENT may + * signal out-of-bounds access. Return the error in + * cases other than that. 
+ */ + if (PTR_ERR(fwnode) != -ENOENT && + PTR_ERR(fwnode) != -ENODATA) + return PTR_ERR(fwnode); + break; + } + fwnode_handle_put(fwnode); + index++; + } while (1); + + for (index = 0; + !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev), + prop, index, + props, + nprops))); + index++) { + struct v4l2_async_connection *asd; + + asd = v4l2_async_nf_add_fwnode(notifier, fwnode, + struct v4l2_async_connection); + fwnode_handle_put(fwnode); + if (IS_ERR(asd)) { + ret = PTR_ERR(asd); + /* not an error if asd already exists */ + if (ret == -EEXIST) + continue; + + return PTR_ERR(asd); + } + } + + return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode); +} + +/** + * v4l2_async_nf_parse_fwnode_sensor - parse common references on + * sensors for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * + * Parse common sensor properties for remote devices related to the + * sensor and set up async sub-devices for them. + * + * Any notifier populated using this function must be released with a call to + * v4l2_async_nf_release() after it has been unregistered and the async + * sub-devices are no longer in use, even in the case the function returned an + * error. 
+ * + * Return: 0 on success + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int +v4l2_async_nf_parse_fwnode_sensor(struct device *dev, + struct v4l2_async_notifier *notifier) +{ + static const char * const led_props[] = { "led" }; + static const struct v4l2_fwnode_int_props props[] = { + { "flash-leds", led_props, ARRAY_SIZE(led_props) }, + { "mipi-img-flash-leds", }, + { "lens-focus", }, + { "mipi-img-lens-focus", }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + if (props[i].props && is_acpi_node(dev_fwnode(dev))) + ret = v4l2_fwnode_reference_parse_int_props(dev, + notifier, + &props[i]); + else + ret = v4l2_fwnode_reference_parse(dev, notifier, + props[i].name); + if (ret && ret != -ENOENT) { + dev_warn(dev, "parsing property \"%s\" failed (%d)\n", + props[i].name, ret); + return ret; + } + } + + return 0; +} + +int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd) +{ + struct v4l2_async_notifier *notifier; + int ret; + + if (WARN_ON(!sd->dev)) + return -ENODEV; + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) + return -ENOMEM; + + v4l2_async_subdev_nf_init(notifier, sd); + + ret = v4l2_subdev_get_privacy_led(sd); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_nf_register(notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto out_unregister; + + sd->subdev_notifier = notifier; + + return 0; + +out_unregister: + v4l2_async_nf_unregister(notifier); + +out_cleanup: + v4l2_subdev_put_privacy_led(sd); + v4l2_async_nf_cleanup(notifier); + kfree(notifier); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor); + +MODULE_DESCRIPTION("V4L2 fwnode binding parsing library"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Sylwester Nawrocki "); 
+MODULE_AUTHOR("Guennadi Liakhovetski "); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-h264.c b/6.12.0/drivers/media/v4l2-core/v4l2-h264.c new file mode 100644 index 0000000..c00197d --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-h264.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * V4L2 H264 helpers. + * + * Copyright (C) 2019 Collabora, Ltd. + * + * Author: Boris Brezillon + */ + +#include +#include + +#include + +/* + * Size of the tempory buffer allocated when printing reference lists. The + * output will be truncated if the size is too small. + */ +static const int tmp_str_size = 1024; + +/** + * v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list + * builder + * + * @b: the builder context to initialize + * @dec_params: decode parameters control + * @sps: SPS control + * @dpb: DPB to use when creating the reference list + */ +void +v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b, + const struct v4l2_ctrl_h264_decode_params *dec_params, + const struct v4l2_ctrl_h264_sps *sps, + const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]) +{ + int cur_frame_num, max_frame_num; + unsigned int i; + + max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4); + cur_frame_num = dec_params->frame_num; + + memset(b, 0, sizeof(*b)); + if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) { + b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt, + dec_params->top_field_order_cnt); + b->cur_pic_fields = V4L2_H264_FRAME_REF; + } else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD) { + b->cur_pic_order_count = dec_params->bottom_field_order_cnt; + b->cur_pic_fields = V4L2_H264_BOTTOM_FIELD_REF; + } else { + b->cur_pic_order_count = dec_params->top_field_order_cnt; + b->cur_pic_fields = V4L2_H264_TOP_FIELD_REF; + } + + for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { + if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) + continue; + + if (dpb[i].flags & 
V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) + b->refs[i].longterm = true; + + /* + * Handle frame_num wraparound as described in section + * '8.2.4.1 Decoding process for picture numbers' of the spec. + * For long term references, frame_num is set to + * long_term_frame_idx which requires no wrapping. + */ + if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num) + b->refs[i].frame_num = (int)dpb[i].frame_num - + max_frame_num; + else + b->refs[i].frame_num = dpb[i].frame_num; + + b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt; + b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt; + + if (b->cur_pic_fields == V4L2_H264_FRAME_REF) { + u8 fields = V4L2_H264_FRAME_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + continue; + } + + if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF) { + u8 fields = V4L2_H264_TOP_FIELD_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + } + + if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF) { + u8 fields = V4L2_H264_BOTTOM_FIELD_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + } + } + + for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++) + b->unordered_reflist[i].index = i; +} +EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder); + +static s32 v4l2_h264_get_poc(const struct v4l2_h264_reflist_builder *b, + const struct v4l2_h264_reference *ref) +{ + switch (ref->fields) { + case V4L2_H264_FRAME_REF: + return min(b->refs[ref->index].top_field_order_cnt, + b->refs[ref->index].bottom_field_order_cnt); + case V4L2_H264_TOP_FIELD_REF: + return b->refs[ref->index].top_field_order_cnt; + case V4L2_H264_BOTTOM_FIELD_REF: + return b->refs[ref->index].bottom_field_order_cnt; + } + + /* not reached */ + return 0; +} + +static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void 
*ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* + * For frames, short term pics are in descending pic num order and long + * term ones in ascending order. For fields, the same direction is used + * but with frame_num (wrapped). For frames, the value of pic_num and + * frame_num are the same (see formula (8-28) and (8-29)). For this + * reason we can use frame_num only and share this function between + * frames and fields reflist. + */ + if (!builder->refs[idxa].longterm) + return builder->refs[idxb].frame_num < + builder->refs[idxa].frame_num ? + -1 : 1; + + return builder->refs[idxa].frame_num < builder->refs[idxb].frame_num ? + -1 : 1; +} + +static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + s32 poca, pocb; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* Long term pics in ascending frame num order. */ + if (builder->refs[idxa].longterm) + return builder->refs[idxa].frame_num < + builder->refs[idxb].frame_num ? 
+ -1 : 1; + + poca = v4l2_h264_get_poc(builder, ptra); + pocb = v4l2_h264_get_poc(builder, ptrb); + + /* + * Short term pics with POC < cur POC first in POC descending order + * followed by short term pics with POC > cur POC in POC ascending + * order. + */ + if ((poca < builder->cur_pic_order_count) != + (pocb < builder->cur_pic_order_count)) + return poca < pocb ? -1 : 1; + else if (poca < builder->cur_pic_order_count) + return pocb < poca ? -1 : 1; + + return poca < pocb ? -1 : 1; +} + +static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + s32 poca, pocb; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* Long term pics in ascending frame num order. */ + if (builder->refs[idxa].longterm) + return builder->refs[idxa].frame_num < + builder->refs[idxb].frame_num ? + -1 : 1; + + poca = v4l2_h264_get_poc(builder, ptra); + pocb = v4l2_h264_get_poc(builder, ptrb); + + /* + * Short term pics with POC > cur POC first in POC ascending order + * followed by short term pics with POC < cur POC in POC descending + * order. + */ + if ((poca < builder->cur_pic_order_count) != + (pocb < builder->cur_pic_order_count)) + return pocb < poca ? -1 : 1; + else if (poca < builder->cur_pic_order_count) + return pocb < poca ? -1 : 1; + + return poca < pocb ? -1 : 1; +} + +/* + * The references need to be reordered so that references are alternating + * between top and bottom field references starting with the current picture + * parity. This has to be done for short term and long term references + * separately. 
+ */ +static void reorder_field_reflist(const struct v4l2_h264_reflist_builder *b, + struct v4l2_h264_reference *reflist) +{ + struct v4l2_h264_reference tmplist[V4L2_H264_REF_LIST_LEN]; + u8 lt, i = 0, j = 0, k = 0; + + memcpy(tmplist, reflist, sizeof(tmplist[0]) * b->num_valid); + + for (lt = 0; lt <= 1; lt++) { + do { + for (; i < b->num_valid && b->refs[tmplist[i].index].longterm == lt; i++) { + if (tmplist[i].fields == b->cur_pic_fields) { + reflist[k++] = tmplist[i++]; + break; + } + } + + for (; j < b->num_valid && b->refs[tmplist[j].index].longterm == lt; j++) { + if (tmplist[j].fields != b->cur_pic_fields) { + reflist[k++] = tmplist[j++]; + break; + } + } + } while ((i < b->num_valid && b->refs[tmplist[i].index].longterm == lt) || + (j < b->num_valid && b->refs[tmplist[j].index].longterm == lt)); + } +} + +static char ref_type_to_char(u8 ref_type) +{ + switch (ref_type) { + case V4L2_H264_FRAME_REF: + return 'f'; + case V4L2_H264_TOP_FIELD_REF: + return 't'; + case V4L2_H264_BOTTOM_FIELD_REF: + return 'b'; + } + + return '?'; +} + +static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, + char **out_str) +{ + int n = 0, i; + + *out_str = kmalloc(tmp_str_size, GFP_KERNEL); + if (!(*out_str)) + return NULL; + + n += snprintf(*out_str + n, tmp_str_size - n, "|"); + + for (i = 0; i < builder->num_valid; i++) { + /* this is pic_num for frame and frame_num (wrapped) for field, + * but for frame pic_num is equal to frame_num (wrapped). + */ + int frame_num = builder->refs[reflist[i].index].frame_num; + bool longterm = builder->refs[reflist[i].index].longterm; + + n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|", + frame_num, longterm ? 
'l' : 's', + ref_type_to_char(reflist[i].fields)); + } + + return *out_str; +} + +static void print_ref_list_p(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist) +{ + char *buf = NULL; + + pr_debug("ref_pic_list_p (cur_poc %u%c) %s\n", + builder->cur_pic_order_count, + ref_type_to_char(builder->cur_pic_fields), + format_ref_list_p(builder, reflist, &buf)); + + kfree(buf); +} + +static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, + char **out_str) +{ + int n = 0, i; + + *out_str = kmalloc(tmp_str_size, GFP_KERNEL); + if (!(*out_str)) + return NULL; + + n += snprintf(*out_str + n, tmp_str_size - n, "|"); + + for (i = 0; i < builder->num_valid; i++) { + int frame_num = builder->refs[reflist[i].index].frame_num; + u32 poc = v4l2_h264_get_poc(builder, reflist + i); + bool longterm = builder->refs[reflist[i].index].longterm; + + n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|", + longterm ? frame_num : poc, + longterm ? 'l' : 's', + ref_type_to_char(reflist[i].fields)); + } + + return *out_str; +} + +static void print_ref_list_b(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, u8 list_num) +{ + char *buf = NULL; + + pr_debug("ref_pic_list_b%u (cur_poc %u%c) %s", + list_num, builder->cur_pic_order_count, + ref_type_to_char(builder->cur_pic_fields), + format_ref_list_b(builder, reflist, &buf)); + + kfree(buf); +} + +/** + * v4l2_h264_build_p_ref_list() - Build the P reference list + * + * @builder: reference list builder context + * @reflist: 32 sized array used to store the P reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the P reference lists. This procedure is describe in + * section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. 
This function can be used by H264 decoder drivers that + * need to pass a P reference list to the hardware. + */ +void +v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist) +{ + memcpy(reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(reflist, builder->num_valid, sizeof(*reflist), + v4l2_h264_p_ref_list_cmp, NULL, builder); + + if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) + reorder_field_reflist(builder, reflist); + + print_ref_list_p(builder, reflist); +} +EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list); + +/** + * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists + * + * @builder: reference list builder context + * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry + * is a v4l2_h264_reference structure + * @b1_reflist: 32 sized array used to store the B1 reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the B0/B1 reference lists. This procedure is described + * in section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass B0/B1 reference lists to the hardware. 
+ */ +void +v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *b0_reflist, + struct v4l2_h264_reference *b1_reflist) +{ + memcpy(b0_reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist), + v4l2_h264_b0_ref_list_cmp, NULL, builder); + + memcpy(b1_reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist), + v4l2_h264_b1_ref_list_cmp, NULL, builder); + + if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) { + reorder_field_reflist(builder, b0_reflist); + reorder_field_reflist(builder, b1_reflist); + } + + if (builder->num_valid > 1 && + !memcmp(b1_reflist, b0_reflist, builder->num_valid)) + swap(b1_reflist[0], b1_reflist[1]); + + print_ref_list_b(builder, b0_reflist, 0); + print_ref_list_b(builder, b1_reflist, 1); +} +EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V4L2 H264 Helpers"); +MODULE_AUTHOR("Boris Brezillon "); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-i2c.c b/6.12.0/drivers/media/v4l2-core/v4l2-i2c.c new file mode 100644 index 0000000..586c465 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-i2c.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * v4l2-i2c - I2C helpers for Video4Linux2 + */ + +#include +#include +#include +#include + +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + /* + * We need to unregister the i2c client + * explicitly. We cannot rely on + * i2c_del_adapter to always unregister + * clients for us, since if the i2c bus is a + * platform bus, then it is never deleted. 
+ * + * Device tree or ACPI based devices must not + * be unregistered as they have not been + * registered by us, and would not be + * re-created by just probing the V4L2 driver. + */ + if (client && !client->dev.of_node && !client->dev.fwnode) + i2c_unregister_device(client); +} + +void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, + struct i2c_client *client, + const char *devname, const char *postfix) +{ + if (!devname) + devname = client->dev.driver->name; + if (!postfix) + postfix = ""; + + snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix, + i2c_adapter_id(client->adapter), client->addr); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name); + +void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_IS_I2C; + /* the owner is the same as the i2c_client's driver owner */ + sd->owner = client->dev.driver->owner; + sd->dev = &client->dev; + /* i2c_client and v4l2_subdev point to one another */ + v4l2_set_subdevdata(sd, client); + i2c_set_clientdata(client, sd); + v4l2_i2c_subdev_set_name(sd, client, NULL, NULL); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); + +/* Load an i2c sub-device. */ +struct v4l2_subdev +*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + struct v4l2_subdev *sd = NULL; + struct i2c_client *client; + + if (!v4l2_dev) + return NULL; + + request_module(I2C_MODULE_PREFIX "%s", info->type); + + /* Create the i2c client */ + if (info->addr == 0 && probe_addrs) + client = i2c_new_scanned_device(adapter, info, probe_addrs, + NULL); + else + client = i2c_new_client_device(adapter, info); + + /* + * Note: by loading the module first we are certain that c->driver + * will be set if the driver was found. 
If the module was not loaded + * first, then the i2c core tries to delay-load the module for us, + * and then c->driver is still NULL until the module is finally + * loaded. This delay-load mechanism doesn't work if other drivers + * want to use the i2c device, so explicitly loading the module + * is the best alternative. + */ + if (!i2c_client_has_driver(client)) + goto error; + + /* Lock the module so we can safely get the v4l2_subdev pointer */ + if (!try_module_get(client->dev.driver->owner)) + goto error; + sd = i2c_get_clientdata(client); + + /* + * Register with the v4l2_device which increases the module's + * use count as well. + */ + if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) + sd = NULL; + /* Decrease the module use count to match the first try_module_get. */ + module_put(client->dev.driver->owner); + +error: + /* + * If we have a client but no subdev, then something went wrong and + * we must unregister the client. + */ + if (!IS_ERR(client) && !sd) + i2c_unregister_device(client); + return sd; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); + +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + const char *client_type, + u8 addr, + const unsigned short *probe_addrs) +{ + struct i2c_board_info info; + + /* + * Setup the i2c board info with the device type and + * the device address. + */ + memset(&info, 0, sizeof(info)); + strscpy(info.type, client_type, sizeof(info.type)); + info.addr = addr; + + return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, + probe_addrs); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev); + +/* Return i2c client address of v4l2_subdev. */ +unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + return client ? client->addr : I2C_CLIENT_END; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr); + +/* + * Return a list of I2C tuner addresses to probe. Use only if the tuner + * addresses are unknown. 
+ */ +const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + static const unsigned short radio_addrs[] = { +#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761) + 0x10, +#endif + 0x60, + I2C_CLIENT_END + }; + static const unsigned short demod_addrs[] = { + 0x42, 0x43, 0x4a, 0x4b, + I2C_CLIENT_END + }; + static const unsigned short tv_addrs[] = { + 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */ + 0x60, 0x61, 0x62, 0x63, 0x64, + I2C_CLIENT_END + }; + + switch (type) { + case ADDRS_RADIO: + return radio_addrs; + case ADDRS_DEMOD: + return demod_addrs; + case ADDRS_TV: + return tv_addrs; + case ADDRS_TV_WITH_DEMOD: + return tv_addrs + 4; + } + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-ioctl.c b/6.12.0/drivers/media/v4l2-core/v4l2-ioctl.c new file mode 100644 index 0000000..e14db67 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-ioctl.c @@ -0,0 +1,3519 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video capture interface for Linux version 2 + * + * A generic framework to process V4L2 ioctl commands. 
+ * + * Authors: Alan Cox, (version 1) + * Mauro Carvalho Chehab (version 2) + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include /* for media_set_bus_info() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls) + +struct std_descr { + v4l2_std_id std; + const char *descr; +}; + +static const struct std_descr standards[] = { + { V4L2_STD_NTSC, "NTSC" }, + { V4L2_STD_NTSC_M, "NTSC-M" }, + { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" }, + { V4L2_STD_NTSC_M_KR, "NTSC-M-KR" }, + { V4L2_STD_NTSC_443, "NTSC-443" }, + { V4L2_STD_PAL, "PAL" }, + { V4L2_STD_PAL_BG, "PAL-BG" }, + { V4L2_STD_PAL_B, "PAL-B" }, + { V4L2_STD_PAL_B1, "PAL-B1" }, + { V4L2_STD_PAL_G, "PAL-G" }, + { V4L2_STD_PAL_H, "PAL-H" }, + { V4L2_STD_PAL_I, "PAL-I" }, + { V4L2_STD_PAL_DK, "PAL-DK" }, + { V4L2_STD_PAL_D, "PAL-D" }, + { V4L2_STD_PAL_D1, "PAL-D1" }, + { V4L2_STD_PAL_K, "PAL-K" }, + { V4L2_STD_PAL_M, "PAL-M" }, + { V4L2_STD_PAL_N, "PAL-N" }, + { V4L2_STD_PAL_Nc, "PAL-Nc" }, + { V4L2_STD_PAL_60, "PAL-60" }, + { V4L2_STD_SECAM, "SECAM" }, + { V4L2_STD_SECAM_B, "SECAM-B" }, + { V4L2_STD_SECAM_G, "SECAM-G" }, + { V4L2_STD_SECAM_H, "SECAM-H" }, + { V4L2_STD_SECAM_DK, "SECAM-DK" }, + { V4L2_STD_SECAM_D, "SECAM-D" }, + { V4L2_STD_SECAM_K, "SECAM-K" }, + { V4L2_STD_SECAM_K1, "SECAM-K1" }, + { V4L2_STD_SECAM_L, "SECAM-L" }, + { V4L2_STD_SECAM_LC, "SECAM-Lc" }, + { 0, "Unknown" } +}; + +/* video4linux standard ID conversion to standard name + */ +const char *v4l2_norm_to_name(v4l2_std_id id) +{ + u32 myid = id; + int i; + + /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle + 64 bit comparisons. So, on that architecture, with some gcc + variants, compilation fails. Currently, the max value is 30bit wide. 
+ */ + BUG_ON(myid != id); + + for (i = 0; standards[i].std; i++) + if (myid == standards[i].std) + break; + return standards[i].descr; +} +EXPORT_SYMBOL(v4l2_norm_to_name); + +/* Returns frame period for the given standard */ +void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod) +{ + if (id & V4L2_STD_525_60) { + frameperiod->numerator = 1001; + frameperiod->denominator = 30000; + } else { + frameperiod->numerator = 1; + frameperiod->denominator = 25; + } +} +EXPORT_SYMBOL(v4l2_video_std_frame_period); + +/* Fill in the fields of a v4l2_standard structure according to the + 'id' and 'transmission' parameters. Returns negative on error. */ +int v4l2_video_std_construct(struct v4l2_standard *vs, + int id, const char *name) +{ + vs->id = id; + v4l2_video_std_frame_period(id, &vs->frameperiod); + vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625; + strscpy(vs->name, name, sizeof(vs->name)); + return 0; +} +EXPORT_SYMBOL(v4l2_video_std_construct); + +/* Fill in the fields of a v4l2_standard structure according to the + * 'id' and 'vs->index' parameters. Returns negative on error. */ +int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id) +{ + v4l2_std_id curr_id = 0; + unsigned int index = vs->index, i, j = 0; + const char *descr = ""; + + /* Return -ENODATA if the id for the current input + or output is 0, meaning that it doesn't support this API. */ + if (id == 0) + return -ENODATA; + + /* Return norm array in a canonical way */ + for (i = 0; i <= index && id; i++) { + /* last std value in the standards array is 0, so this + while always ends there since (id & 0) == 0. 
*/ + while ((id & standards[j].std) != standards[j].std) + j++; + curr_id = standards[j].std; + descr = standards[j].descr; + j++; + if (curr_id == 0) + break; + if (curr_id != V4L2_STD_PAL && + curr_id != V4L2_STD_SECAM && + curr_id != V4L2_STD_NTSC) + id &= ~curr_id; + } + if (i <= index) + return -EINVAL; + + v4l2_video_std_construct(vs, curr_id, descr); + return 0; +} + +/* ----------------------------------------------------------------- */ +/* some arrays for pretty-printing debug messages of enum types */ + +const char *v4l2_field_names[] = { + [V4L2_FIELD_ANY] = "any", + [V4L2_FIELD_NONE] = "none", + [V4L2_FIELD_TOP] = "top", + [V4L2_FIELD_BOTTOM] = "bottom", + [V4L2_FIELD_INTERLACED] = "interlaced", + [V4L2_FIELD_SEQ_TB] = "seq-tb", + [V4L2_FIELD_SEQ_BT] = "seq-bt", + [V4L2_FIELD_ALTERNATE] = "alternate", + [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb", + [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt", +}; +EXPORT_SYMBOL(v4l2_field_names); + +const char *v4l2_type_names[] = { + [0] = "0", + [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap", + [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay", + [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out", + [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", + [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", + [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap", + [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay", + [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane", + [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap", + [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out", + [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap", + [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out", +}; +EXPORT_SYMBOL(v4l2_type_names); + +static const char *v4l2_memory_names[] = { + [V4L2_MEMORY_MMAP] = "mmap", + [V4L2_MEMORY_USERPTR] = "userptr", + [V4L2_MEMORY_OVERLAY] = "overlay", + [V4L2_MEMORY_DMABUF] = "dmabuf", +}; + +#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? 
arr[a] : "unknown") + +/* ------------------------------------------------------------------ */ +/* debug help functions */ + +static void v4l_print_querycap(const void *arg, bool write_only) +{ + const struct v4l2_capability *p = arg; + + pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n", + (int)sizeof(p->driver), p->driver, + (int)sizeof(p->card), p->card, + (int)sizeof(p->bus_info), p->bus_info, + p->version, p->capabilities, p->device_caps); +} + +static void v4l_print_enuminput(const void *arg, bool write_only) +{ + const struct v4l2_input *p = arg; + + pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, + p->tuner, (unsigned long long)p->std, p->status, + p->capabilities); +} + +static void v4l_print_enumoutput(const void *arg, bool write_only) +{ + const struct v4l2_output *p = arg; + + pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, + p->modulator, (unsigned long long)p->std, p->capabilities); +} + +static void v4l_print_audio(const void *arg, bool write_only) +{ + const struct v4l2_audio *p = arg; + + if (write_only) + pr_cont("index=%u, mode=0x%x\n", p->index, p->mode); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + p->capability, p->mode); +} + +static void v4l_print_audioout(const void *arg, bool write_only) +{ + const struct v4l2_audioout *p = arg; + + if (write_only) + pr_cont("index=%u\n", p->index); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + p->capability, p->mode); +} + +static void v4l_print_fmtdesc(const void *arg, bool write_only) +{ + const struct v4l2_fmtdesc *p = arg; + + pr_cont("index=%u, type=%s, flags=0x%x, 
pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n", + p->index, prt_names(p->type, v4l2_type_names), + p->flags, &p->pixelformat, p->mbus_code, + (int)sizeof(p->description), p->description); +} + +static void v4l_print_format(const void *arg, bool write_only) +{ + const struct v4l2_format *p = arg; + const struct v4l2_pix_format *pix; + const struct v4l2_pix_format_mplane *mp; + const struct v4l2_vbi_format *vbi; + const struct v4l2_sliced_vbi_format *sliced; + const struct v4l2_window *win; + const struct v4l2_meta_format *meta; + u32 pixelformat; + u32 planes; + unsigned i; + + pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + pix = &p->fmt.pix; + pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", + pix->width, pix->height, &pix->pixelformat, + prt_names(pix->field, v4l2_field_names), + pix->bytesperline, pix->sizeimage, + pix->colorspace, pix->flags, pix->ycbcr_enc, + pix->quantization, pix->xfer_func); + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + mp = &p->fmt.pix_mp; + pixelformat = mp->pixelformat; + pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", + mp->width, mp->height, &pixelformat, + prt_names(mp->field, v4l2_field_names), + mp->colorspace, mp->num_planes, mp->flags, + mp->ycbcr_enc, mp->quantization, mp->xfer_func); + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) + printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, + mp->plane_fmt[i].bytesperline, + mp->plane_fmt[i].sizeimage); + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + win = &p->fmt.win; + pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, 
chromakey=0x%08x, global_alpha=0x%02x\n", + win->w.width, win->w.height, win->w.left, win->w.top, + prt_names(win->field, v4l2_field_names), + win->chromakey, win->global_alpha); + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + vbi = &p->fmt.vbi; + pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n", + vbi->sampling_rate, vbi->offset, + vbi->samples_per_line, &vbi->sample_format, + vbi->start[0], vbi->start[1], + vbi->count[0], vbi->count[1]); + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + sliced = &p->fmt.sliced; + pr_cont(", service_set=0x%08x, io_size=%d\n", + sliced->service_set, sliced->io_size); + for (i = 0; i < 24; i++) + printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, + sliced->service_lines[0][i], + sliced->service_lines[1][i]); + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + pixelformat = p->fmt.sdr.pixelformat; + pr_cont(", pixelformat=%p4cc\n", &pixelformat); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + meta = &p->fmt.meta; + pixelformat = meta->dataformat; + pr_cont(", dataformat=%p4cc, buffersize=%u, width=%u, height=%u, bytesperline=%u\n", + &pixelformat, meta->buffersize, meta->width, + meta->height, meta->bytesperline); + break; + } +} + +static void v4l_print_framebuffer(const void *arg, bool write_only) +{ + const struct v4l2_framebuffer *p = arg; + + pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n", + p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, + &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage, + p->fmt.colorspace); +} + +static void v4l_print_buftype(const void *arg, bool write_only) +{ + pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names)); +} + +static void v4l_print_modulator(const void *arg, bool write_only) +{ + 
const struct v4l2_modulator *p = arg; + + if (write_only) + pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->capability, + p->rangelow, p->rangehigh, p->txsubchans); +} + +static void v4l_print_tuner(const void *arg, bool write_only) +{ + const struct v4l2_tuner *p = arg; + + if (write_only) + pr_cont("index=%u, audmode=%u\n", p->index, p->audmode); + else + pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n", + p->index, (int)sizeof(p->name), p->name, p->type, + p->capability, p->rangelow, + p->rangehigh, p->signal, p->afc, + p->rxsubchans, p->audmode); +} + +static void v4l_print_frequency(const void *arg, bool write_only) +{ + const struct v4l2_frequency *p = arg; + + pr_cont("tuner=%u, type=%u, frequency=%u\n", + p->tuner, p->type, p->frequency); +} + +static void v4l_print_standard(const void *arg, bool write_only) +{ + const struct v4l2_standard *p = arg; + + pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n", + p->index, + (unsigned long long)p->id, (int)sizeof(p->name), p->name, + p->frameperiod.numerator, + p->frameperiod.denominator, + p->framelines); +} + +static void v4l_print_std(const void *arg, bool write_only) +{ + pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg); +} + +static void v4l_print_hw_freq_seek(const void *arg, bool write_only) +{ + const struct v4l2_hw_freq_seek *p = arg; + + pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n", + p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, + p->rangelow, p->rangehigh); +} + +static void v4l_print_requestbuffers(const void *arg, bool write_only) +{ + const struct v4l2_requestbuffers *p = arg; + + pr_cont("count=%d, type=%s, memory=%s\n", + p->count, + prt_names(p->type, 
v4l2_type_names), + prt_names(p->memory, v4l2_memory_names)); +} + +static void v4l_print_buffer(const void *arg, bool write_only) +{ + const struct v4l2_buffer *p = arg; + const struct v4l2_timecode *tc = &p->timecode; + const struct v4l2_plane *plane; + int i; + + pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s", + (int)p->timestamp.tv_sec / 3600, + ((int)p->timestamp.tv_sec / 60) % 60, + ((int)p->timestamp.tv_sec % 60), + (long)p->timestamp.tv_usec, + p->index, + prt_names(p->type, v4l2_type_names), p->request_fd, + p->flags, prt_names(p->field, v4l2_field_names), + p->sequence, prt_names(p->memory, v4l2_memory_names)); + + if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) { + pr_cont("\n"); + for (i = 0; i < p->length; ++i) { + plane = &p->m.planes[i]; + printk(KERN_DEBUG + "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n", + i, plane->bytesused, plane->data_offset, + plane->m.userptr, plane->length); + } + } else { + pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", + p->bytesused, p->m.userptr, p->length); + } + + printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n", + tc->hours, tc->minutes, tc->seconds, + tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits); +} + +static void v4l_print_exportbuffer(const void *arg, bool write_only) +{ + const struct v4l2_exportbuffer *p = arg; + + pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n", + p->fd, prt_names(p->type, v4l2_type_names), + p->index, p->plane, p->flags); +} + +static void v4l_print_create_buffers(const void *arg, bool write_only) +{ + const struct v4l2_create_buffers *p = arg; + + pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ", + p->index, p->count, prt_names(p->memory, v4l2_memory_names), + p->capabilities, p->max_num_buffers); + v4l_print_format(&p->format, write_only); +} + +static void 
v4l_print_remove_buffers(const void *arg, bool write_only) +{ + const struct v4l2_remove_buffers *p = arg; + + pr_cont("type=%s, index=%u, count=%u\n", + prt_names(p->type, v4l2_type_names), p->index, p->count); +} + +static void v4l_print_streamparm(const void *arg, bool write_only) +{ + const struct v4l2_streamparm *p = arg; + + pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + const struct v4l2_captureparm *c = &p->parm.capture; + + pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n", + c->capability, c->capturemode, + c->timeperframe.numerator, c->timeperframe.denominator, + c->extendedmode, c->readbuffers); + } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + const struct v4l2_outputparm *c = &p->parm.output; + + pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n", + c->capability, c->outputmode, + c->timeperframe.numerator, c->timeperframe.denominator, + c->extendedmode, c->writebuffers); + } else { + pr_cont("\n"); + } +} + +static void v4l_print_queryctrl(const void *arg, bool write_only) +{ + const struct v4l2_queryctrl *p = arg; + + pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n", + p->id, p->type, (int)sizeof(p->name), p->name, + p->minimum, p->maximum, + p->step, p->default_value, p->flags); +} + +static void v4l_print_query_ext_ctrl(const void *arg, bool write_only) +{ + const struct v4l2_query_ext_ctrl *p = arg; + + pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n", + p->id, p->type, (int)sizeof(p->name), p->name, + p->minimum, p->maximum, + p->step, p->default_value, p->flags, + p->elem_size, p->elems, p->nr_of_dims, + p->dims[0], p->dims[1], 
p->dims[2], p->dims[3]); +} + +static void v4l_print_querymenu(const void *arg, bool write_only) +{ + const struct v4l2_querymenu *p = arg; + + pr_cont("id=0x%x, index=%d\n", p->id, p->index); +} + +static void v4l_print_control(const void *arg, bool write_only) +{ + const struct v4l2_control *p = arg; + const char *name = v4l2_ctrl_get_name(p->id); + + if (name) + pr_cont("name=%s, ", name); + pr_cont("id=0x%x, value=%d\n", p->id, p->value); +} + +static void v4l_print_ext_controls(const void *arg, bool write_only) +{ + const struct v4l2_ext_controls *p = arg; + int i; + + pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d", + p->which, p->count, p->error_idx, p->request_fd); + for (i = 0; i < p->count; i++) { + unsigned int id = p->controls[i].id; + const char *name = v4l2_ctrl_get_name(id); + + if (name) + pr_cont(", name=%s", name); + if (!p->controls[i].size) + pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value); + else + pr_cont(", id/size=0x%x/%u", id, p->controls[i].size); + } + pr_cont("\n"); +} + +static void v4l_print_cropcap(const void *arg, bool write_only) +{ + const struct v4l2_cropcap *p = arg; + + pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n", + prt_names(p->type, v4l2_type_names), + p->bounds.width, p->bounds.height, + p->bounds.left, p->bounds.top, + p->defrect.width, p->defrect.height, + p->defrect.left, p->defrect.top, + p->pixelaspect.numerator, p->pixelaspect.denominator); +} + +static void v4l_print_crop(const void *arg, bool write_only) +{ + const struct v4l2_crop *p = arg; + + pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n", + prt_names(p->type, v4l2_type_names), + p->c.width, p->c.height, + p->c.left, p->c.top); +} + +static void v4l_print_selection(const void *arg, bool write_only) +{ + const struct v4l2_selection *p = arg; + + pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n", + prt_names(p->type, v4l2_type_names), + p->target, p->flags, + p->r.width, p->r.height, 
p->r.left, p->r.top); +} + +static void v4l_print_jpegcompression(const void *arg, bool write_only) +{ + const struct v4l2_jpegcompression *p = arg; + + pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n", + p->quality, p->APPn, p->APP_len, + p->COM_len, p->jpeg_markers); +} + +static void v4l_print_enc_idx(const void *arg, bool write_only) +{ + const struct v4l2_enc_idx *p = arg; + + pr_cont("entries=%d, entries_cap=%d\n", + p->entries, p->entries_cap); +} + +static void v4l_print_encoder_cmd(const void *arg, bool write_only) +{ + const struct v4l2_encoder_cmd *p = arg; + + pr_cont("cmd=%d, flags=0x%x\n", + p->cmd, p->flags); +} + +static void v4l_print_decoder_cmd(const void *arg, bool write_only) +{ + const struct v4l2_decoder_cmd *p = arg; + + pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags); + + if (p->cmd == V4L2_DEC_CMD_START) + pr_info("speed=%d, format=%u\n", + p->start.speed, p->start.format); + else if (p->cmd == V4L2_DEC_CMD_STOP) + pr_info("pts=%llu\n", p->stop.pts); +} + +static void v4l_print_dbg_chip_info(const void *arg, bool write_only) +{ + const struct v4l2_dbg_chip_info *p = arg; + + pr_cont("type=%u, ", p->match.type); + if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER) + pr_cont("name=%.*s, ", + (int)sizeof(p->match.name), p->match.name); + else + pr_cont("addr=%u, ", p->match.addr); + pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name); +} + +static void v4l_print_dbg_register(const void *arg, bool write_only) +{ + const struct v4l2_dbg_register *p = arg; + + pr_cont("type=%u, ", p->match.type); + if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER) + pr_cont("name=%.*s, ", + (int)sizeof(p->match.name), p->match.name); + else + pr_cont("addr=%u, ", p->match.addr); + pr_cont("reg=0x%llx, val=0x%llx\n", + p->reg, p->val); +} + +static void v4l_print_dv_timings(const void *arg, bool write_only) +{ + const struct v4l2_dv_timings *p = arg; + + switch (p->type) { + case V4L2_DV_BT_656_1120: + pr_cont("type=bt-656/1120, 
interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n", + p->bt.interlaced, p->bt.pixelclock, + p->bt.width, p->bt.height, + p->bt.polarities, p->bt.hfrontporch, + p->bt.hsync, p->bt.hbackporch, + p->bt.vfrontporch, p->bt.vsync, + p->bt.vbackporch, p->bt.il_vfrontporch, + p->bt.il_vsync, p->bt.il_vbackporch, + p->bt.standards, p->bt.flags); + break; + default: + pr_cont("type=%d\n", p->type); + break; + } +} + +static void v4l_print_enum_dv_timings(const void *arg, bool write_only) +{ + const struct v4l2_enum_dv_timings *p = arg; + + pr_cont("index=%u, ", p->index); + v4l_print_dv_timings(&p->timings, write_only); +} + +static void v4l_print_dv_timings_cap(const void *arg, bool write_only) +{ + const struct v4l2_dv_timings_cap *p = arg; + + switch (p->type) { + case V4L2_DV_BT_656_1120: + pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n", + p->bt.min_width, p->bt.max_width, + p->bt.min_height, p->bt.max_height, + p->bt.min_pixelclock, p->bt.max_pixelclock, + p->bt.standards, p->bt.capabilities); + break; + default: + pr_cont("type=%u\n", p->type); + break; + } +} + +static void v4l_print_frmsizeenum(const void *arg, bool write_only) +{ + const struct v4l2_frmsizeenum *p = arg; + + pr_cont("index=%u, pixelformat=%p4cc, type=%u", + p->index, &p->pixel_format, p->type); + switch (p->type) { + case V4L2_FRMSIZE_TYPE_DISCRETE: + pr_cont(", wxh=%ux%u\n", + p->discrete.width, p->discrete.height); + break; + case V4L2_FRMSIZE_TYPE_STEPWISE: + pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", + p->stepwise.min_width, + p->stepwise.min_height, + p->stepwise.max_width, + p->stepwise.max_height, + p->stepwise.step_width, + p->stepwise.step_height); + break; + case V4L2_FRMSIZE_TYPE_CONTINUOUS: + default: + pr_cont("\n"); + break; + } +} + 
+static void v4l_print_frmivalenum(const void *arg, bool write_only) +{ + const struct v4l2_frmivalenum *p = arg; + + pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u", + p->index, &p->pixel_format, p->width, p->height, p->type); + switch (p->type) { + case V4L2_FRMIVAL_TYPE_DISCRETE: + pr_cont(", fps=%d/%d\n", + p->discrete.numerator, + p->discrete.denominator); + break; + case V4L2_FRMIVAL_TYPE_STEPWISE: + pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n", + p->stepwise.min.numerator, + p->stepwise.min.denominator, + p->stepwise.max.numerator, + p->stepwise.max.denominator, + p->stepwise.step.numerator, + p->stepwise.step.denominator); + break; + case V4L2_FRMIVAL_TYPE_CONTINUOUS: + default: + pr_cont("\n"); + break; + } +} + +static void v4l_print_event(const void *arg, bool write_only) +{ + const struct v4l2_event *p = arg; + const struct v4l2_event_ctrl *c; + + pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n", + p->type, p->pending, p->sequence, p->id, + p->timestamp.tv_sec, p->timestamp.tv_nsec); + switch (p->type) { + case V4L2_EVENT_VSYNC: + printk(KERN_DEBUG "field=%s\n", + prt_names(p->u.vsync.field, v4l2_field_names)); + break; + case V4L2_EVENT_CTRL: + c = &p->u.ctrl; + printk(KERN_DEBUG "changes=0x%x, type=%u, ", + c->changes, c->type); + if (c->type == V4L2_CTRL_TYPE_INTEGER64) + pr_cont("value64=%lld, ", c->value64); + else + pr_cont("value=%d, ", c->value); + pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n", + c->flags, c->minimum, c->maximum, + c->step, c->default_value); + break; + case V4L2_EVENT_FRAME_SYNC: + pr_cont("frame_sequence=%u\n", + p->u.frame_sync.frame_sequence); + break; + } +} + +static void v4l_print_event_subscription(const void *arg, bool write_only) +{ + const struct v4l2_event_subscription *p = arg; + + pr_cont("type=0x%x, id=0x%x, flags=0x%x\n", + p->type, p->id, p->flags); +} + +static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only) +{ + const struct 
v4l2_sliced_vbi_cap *p = arg; + int i; + + pr_cont("type=%s, service_set=0x%08x\n", + prt_names(p->type, v4l2_type_names), p->service_set); + for (i = 0; i < 24; i++) + printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, + p->service_lines[0][i], + p->service_lines[1][i]); +} + +static void v4l_print_freq_band(const void *arg, bool write_only) +{ + const struct v4l2_frequency_band *p = arg; + + pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n", + p->tuner, p->type, p->index, + p->capability, p->rangelow, + p->rangehigh, p->modulation); +} + +static void v4l_print_edid(const void *arg, bool write_only) +{ + const struct v4l2_edid *p = arg; + + pr_cont("pad=%u, start_block=%u, blocks=%u\n", + p->pad, p->start_block, p->blocks); +} + +static void v4l_print_u32(const void *arg, bool write_only) +{ + pr_cont("value=%u\n", *(const u32 *)arg); +} + +static void v4l_print_newline(const void *arg, bool write_only) +{ + pr_cont("\n"); +} + +static void v4l_print_default(const void *arg, bool write_only) +{ + pr_cont("driver-specific ioctl\n"); +} + +static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl) +{ + __u32 i; + + /* zero the reserved fields */ + c->reserved[0] = 0; + for (i = 0; i < c->count; i++) + c->controls[i].reserved2[0] = 0; + + switch (c->which) { + case V4L2_CID_PRIVATE_BASE: + /* + * V4L2_CID_PRIVATE_BASE cannot be used as control class + * when using extended controls. + * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL + * is it allowed for backwards compatibility. 
+ */ + if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL) + return false; + break; + case V4L2_CTRL_WHICH_DEF_VAL: + /* Default value cannot be changed */ + if (ioctl == VIDIOC_S_EXT_CTRLS || + ioctl == VIDIOC_TRY_EXT_CTRLS) { + c->error_idx = c->count; + return false; + } + return true; + case V4L2_CTRL_WHICH_CUR_VAL: + return true; + case V4L2_CTRL_WHICH_REQUEST_VAL: + c->error_idx = c->count; + return false; + } + + /* Check that all controls are from the same control class. */ + for (i = 0; i < c->count; i++) { + if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) { + c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i : + c->count; + return false; + } + } + return true; +} + +static int check_fmt(struct file *file, enum v4l2_buf_type type) +{ + const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; + const u32 meta_caps = V4L2_CAP_META_CAPTURE | + V4L2_CAP_META_OUTPUT; + struct video_device *vfd = video_devdata(file); + const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; + bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO && + (vfd->device_caps & vid_caps); + bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI; + bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; + bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; + bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO && + (vfd->device_caps & meta_caps); + bool is_rx = vfd->vfl_dir != VFL_DIR_TX; + bool is_tx = vfd->vfl_dir != VFL_DIR_RX; + + if (ops == NULL) + return -EINVAL; + + switch (type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if ((is_vid || is_tch) && is_rx && + (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane)) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay) + return 0; + break; + 
case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (is_vid && is_tx && + (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane)) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay) + return 0; + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap) + return 0; + break; + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out) + return 0; + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap) + return 0; + break; + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out) + return 0; + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap) + return 0; + break; + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out) + return 0; + break; + case V4L2_BUF_TYPE_META_CAPTURE: + if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap) + return 0; + break; + case V4L2_BUF_TYPE_META_OUTPUT: + if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out) + return 0; + break; + default: + break; + } + return -EINVAL; +} + +static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace, + u32 *encoding, u32 *quantization, + u32 *xfer_func) +{ + bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 || + pixelformat == V4L2_PIX_FMT_HSV32; + + if (!v4l2_is_colorspace_valid(*colorspace)) { + *colorspace = V4L2_COLORSPACE_DEFAULT; + *encoding = V4L2_YCBCR_ENC_DEFAULT; + *quantization = V4L2_QUANTIZATION_DEFAULT; + *xfer_func = V4L2_XFER_FUNC_DEFAULT; + } + + if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) || + (is_hsv && !v4l2_is_hsv_enc_valid(*encoding))) + *encoding = V4L2_YCBCR_ENC_DEFAULT; + + if (!v4l2_is_quant_valid(*quantization)) + *quantization = V4L2_QUANTIZATION_DEFAULT; + 
+ if (!v4l2_is_xfer_func_valid(*xfer_func)) + *xfer_func = V4L2_XFER_FUNC_DEFAULT; +} + +static void v4l_sanitize_format(struct v4l2_format *fmt) +{ + unsigned int offset; + + /* Make sure num_planes is not bogus */ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes, + VIDEO_MAX_PLANES); + + /* + * The v4l2_pix_format structure has been extended with fields that were + * not previously required to be set to zero by applications. The priv + * field, when set to a magic value, indicates that the extended fields + * are valid. Otherwise they will contain undefined values. To simplify + * the API towards drivers zero the extended fields and set the priv + * field to the magic value when the extended pixel format structure + * isn't used by applications. + */ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) { + fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + + offset = offsetof(struct v4l2_pix_format, priv) + + sizeof(fmt->fmt.pix.priv); + memset(((void *)&fmt->fmt.pix) + offset, 0, + sizeof(fmt->fmt.pix) - offset); + } + } + + /* Replace invalid colorspace values with defaults. 
*/ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat, + &fmt->fmt.pix.colorspace, + &fmt->fmt.pix.ycbcr_enc, + &fmt->fmt.pix.quantization, + &fmt->fmt.pix.xfer_func); + } else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc; + u32 quantization = fmt->fmt.pix_mp.quantization; + u32 xfer_func = fmt->fmt.pix_mp.xfer_func; + + v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat, + &fmt->fmt.pix_mp.colorspace, &ycbcr_enc, + &quantization, &xfer_func); + + fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc; + fmt->fmt.pix_mp.quantization = quantization; + fmt->fmt.pix_mp.xfer_func = xfer_func; + } +} + +static int v4l_querycap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_capability *cap = (struct v4l2_capability *)arg; + struct video_device *vfd = video_devdata(file); + int ret; + + cap->version = LINUX_VERSION_CODE; + cap->device_caps = vfd->device_caps; + cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS; + + media_set_bus_info(cap->bus_info, sizeof(cap->bus_info), + vfd->dev_parent); + + ret = ops->vidioc_querycap(file, fh, cap); + + /* + * Drivers must not change device_caps, so check for this and + * warn if this happened. 
+ */ + WARN_ON(cap->device_caps != vfd->device_caps); + /* + * Check that capabilities is a superset of + * vfd->device_caps | V4L2_CAP_DEVICE_CAPS + */ + WARN_ON((cap->capabilities & + (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) != + (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)); + cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT; + cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT; + + return ret; +} + +static int v4l_g_input(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + *(int *)arg = 0; + return 0; + } + + return ops->vidioc_g_input(file, fh, arg); +} + +static int v4l_g_output(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + *(int *)arg = 0; + return 0; + } + + return ops->vidioc_g_output(file, fh, arg); +} + +static int v4l_s_input(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + + if (vfd->device_caps & V4L2_CAP_IO_MC) + return *(int *)arg ? -EINVAL : 0; + + return ops->vidioc_s_input(file, fh, *(unsigned int *)arg); +} + +static int v4l_s_output(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->device_caps & V4L2_CAP_IO_MC) + return *(int *)arg ? 
-EINVAL : 0; + + return ops->vidioc_s_output(file, fh, *(unsigned int *)arg); +} + +static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd; + u32 *p = arg; + + vfd = video_devdata(file); + *p = v4l2_prio_max(vfd->prio); + return 0; +} + +static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd; + struct v4l2_fh *vfh; + u32 *p = arg; + + vfd = video_devdata(file); + if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) + return -ENOTTY; + vfh = file->private_data; + return v4l2_prio_change(vfd->prio, &vfh->prio, *p); +} + +static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_input *p = arg; + + /* + * We set the flags for CAP_DV_TIMINGS & + * CAP_STD here based on ioctl handler provided by the + * driver. If the driver doesn't support these + * for a specific input, it must override these flags. + */ + if (is_valid_ioctl(vfd, VIDIOC_S_STD)) + p->capabilities |= V4L2_IN_CAP_STD; + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + if (p->index) + return -EINVAL; + strscpy(p->name, vfd->name, sizeof(p->name)); + p->type = V4L2_INPUT_TYPE_CAMERA; + return 0; + } + + return ops->vidioc_enum_input(file, fh, p); +} + +static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_output *p = arg; + + /* + * We set the flags for CAP_DV_TIMINGS & + * CAP_STD here based on ioctl handler provided by the + * driver. If the driver doesn't support these + * for a specific output, it must override these flags. 
+ */ + if (is_valid_ioctl(vfd, VIDIOC_S_STD)) + p->capabilities |= V4L2_OUT_CAP_STD; + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + if (p->index) + return -EINVAL; + strscpy(p->name, vfd->name, sizeof(p->name)); + p->type = V4L2_OUTPUT_TYPE_ANALOG; + return 0; + } + + return ops->vidioc_enum_output(file, fh, p); +} + +static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) +{ + const unsigned sz = sizeof(fmt->description); + const char *descr = NULL; + u32 flags = 0; + + /* + * We depart from the normal coding style here since the descriptions + * should be aligned so it is easy to see which descriptions will be + * longer than 31 characters (the max length for a description). + * And frankly, this is easier to read anyway. + * + * Note that gcc will use O(log N) comparisons to find the right case. + */ + switch (fmt->pixelformat) { + /* Max description length mask: descr = "0123456789012345678901234567890" */ + case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break; + case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break; + case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break; + case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break; + case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break; + case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break; + case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break; + case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break; + case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break; + case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break; + case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break; + case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break; + case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break; + case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break; + case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break; + case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break; + case 
V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break; + case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break; + case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break; + case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break; + case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break; + case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break; + case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break; + case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break; + case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break; + case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break; + case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break; + case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break; + case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break; + case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break; + case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break; + case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break; + case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break; + case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break; + case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break; + case V4L2_PIX_FMT_RGB48: descr = "48-bit RGB 16-16-16"; break; + case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break; + case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break; + case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break; + case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break; + case 
V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; + case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break; + case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break; + case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; + case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break; + case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break; + case V4L2_PIX_FMT_Y12P: descr = "12-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_Y14P: descr = "14-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break; + case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break; + case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break; + case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break; + case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break; + case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break; + case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break; + case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break; + case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break; + case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break; + case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break; + case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break; + case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break; + case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break; + case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break; + case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break; + case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break; + case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break; + case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 
1-5-5-5"; break; + case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break; + case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break; + case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break; + case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break; + case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break; + case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break; + case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break; + case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break; + case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break; + case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break; + case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break; + case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break; + case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break; + case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break; + case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break; + case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break; + case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break; + case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_NV61M: descr = 
"Y/VU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break; + case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break; + case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_IPU3_SBGGR10: descr = "10-bit bayer BGGR IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SGBRG10: descr = "10-bit bayer GBRG IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SGRBG10: descr = "10-bit bayer GRBG IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SRGGB10: descr = "10-bit bayer RGGB IPU3 Packed"; break; + case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break; + case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer 
GBGB/RGRG (A-law)"; break; + case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break; + case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break; + case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break; + case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; + case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; + case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; + case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break; + case 
V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break; + case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break; + case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break; + case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break; + case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break; + case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break; + case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break; + case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break; + case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break; + case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break; + case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break; + case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break; + case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break; + case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break; + case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break; + case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break; + case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break; + case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break; + case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break; + case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break; + case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break; + case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break; + case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break; + case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break; + case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break; + case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break; + case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break; + case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break; + case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break; + case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break; + 
case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break; + case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break; + case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break; + case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break; + case V4L2_META_FMT_RPI_BE_CFG: descr = "RPi PiSP BE Config format"; break; + case V4L2_META_FMT_GENERIC_8: descr = "8-bit Generic Metadata"; break; + case V4L2_META_FMT_GENERIC_CSI2_10: descr = "8-bit Generic Meta, 10b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_12: descr = "8-bit Generic Meta, 12b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_14: descr = "8-bit Generic Meta, 14b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_16: descr = "8-bit Generic Meta, 16b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_20: descr = "8-bit Generic Meta, 20b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_24: descr = "8-bit Generic Meta, 24b CSI-2"; break; + + default: + /* Compressed formats */ + flags = V4L2_FMT_FLAG_COMPRESSED; + switch (fmt->pixelformat) { + /* Max description length mask: descr = "0123456789012345678901234567890" */ + case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break; + case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break; + case V4L2_PIX_FMT_DV: descr = "1394"; break; + case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break; + case V4L2_PIX_FMT_H264: descr = "H.264"; break; + case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break; + case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break; + case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break; + case V4L2_PIX_FMT_H263: descr = "H.263"; break; + case V4L2_PIX_FMT_MPEG1: descr = 
"MPEG-1 ES"; break; + case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break; + case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break; + case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break; + case V4L2_PIX_FMT_XVID: descr = "Xvid"; break; + case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; + case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break; + case V4L2_PIX_FMT_VP8: descr = "VP8"; break; + case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break; + case V4L2_PIX_FMT_VP9: descr = "VP9"; break; + case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break; + case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */ + case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break; + case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */ + case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */ + case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break; + case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break; + case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; break; + case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break; + case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break; + case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break; + case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break; + case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break; + case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break; + case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break; + case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break; + case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break; + case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break; + case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break; + case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break; + case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break; + case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break; + case V4L2_PIX_FMT_OV518: descr 
= "GSPCA OV518"; break; + case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break; + case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; + case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; + case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; + case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break; + case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break; + case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break; + case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break; + case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break; + case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break; + case V4L2_PIX_FMT_HEXTILE: descr = "Hextile Compressed Format"; break; + case V4L2_PIX_FMT_PISP_COMP1_RGGB: descr = "PiSP 8b RGRG/GBGB mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_GRBG: descr = "PiSP 8b GRGR/BGBG mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_GBRG: descr = "PiSP 8b GBGB/RGRG mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_BGGR: descr = "PiSP 8b BGBG/GRGR mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_MONO: descr = "PiSP 8b monochrome mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_RGGB: descr = "PiSP 8b RGRG/GBGB mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_GRBG: descr = "PiSP 8b GRGR/BGBG mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_GBRG: descr = "PiSP 8b GBGB/RGRG mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_BGGR: descr = "PiSP 8b BGBG/GRGR mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_MONO: descr = "PiSP 8b monochrome mode2 compr"; break; + default: + if (fmt->description[0]) + return; + WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat); + flags = 0; + snprintf(fmt->description, sz, "%p4cc", + &fmt->pixelformat); + break; + } + } + + if (fmt->type == V4L2_BUF_TYPE_META_CAPTURE) { + switch (fmt->pixelformat) { + case V4L2_META_FMT_GENERIC_8: + case V4L2_META_FMT_GENERIC_CSI2_10: + case 
V4L2_META_FMT_GENERIC_CSI2_12: + case V4L2_META_FMT_GENERIC_CSI2_14: + case V4L2_META_FMT_GENERIC_CSI2_16: + case V4L2_META_FMT_GENERIC_CSI2_20: + case V4L2_META_FMT_GENERIC_CSI2_24: + fmt->flags |= V4L2_FMT_FLAG_META_LINE_BASED; + break; + default: + fmt->flags &= ~V4L2_FMT_FLAG_META_LINE_BASED; + } + } + + if (descr) + WARN_ON(strscpy(fmt->description, descr, sz) < 0); + fmt->flags |= flags; +} + +static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_fmtdesc *p = arg; + int ret = check_fmt(file, p->type); + u32 mbus_code; + u32 cap_mask; + + if (ret) + return ret; + ret = -EINVAL; + + if (!(vdev->device_caps & V4L2_CAP_IO_MC)) + p->mbus_code = 0; + + mbus_code = p->mbus_code; + memset_after(p, 0, type); + p->mbus_code = mbus_code; + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE; + if (!!(vdev->device_caps & cap_mask) != + (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) + break; + + if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) + break; + ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) + break; + ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE; + if (!!(vdev->device_caps & cap_mask) != + (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) + break; + + if (unlikely(!ops->vidioc_enum_fmt_vid_out)) + break; + ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) + break; + ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); + break; + case V4L2_BUF_TYPE_SDR_OUTPUT: + if 
(unlikely(!ops->vidioc_enum_fmt_sdr_out)) + break; + ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) + break; + ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); + break; + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_enum_fmt_meta_out)) + break; + ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg); + break; + } + if (ret == 0) + v4l_fill_fmtdesc(p); + return ret; +} + +static void v4l_pix_format_touch(struct v4l2_pix_format *p) +{ + /* + * The v4l2_pix_format structure contains fields that make no sense for + * touch. Set them to default values in this case. + */ + + p->field = V4L2_FIELD_NONE; + p->colorspace = V4L2_COLORSPACE_RAW; + p->flags = 0; + p->ycbcr_enc = 0; + p->quantization = 0; + p->xfer_func = 0; +} + +static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + memset(&p->fmt, 0, sizeof(p->fmt)); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_g_fmt_vid_cap)) + break; + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_g_fmt_vid_out)) + break; + p->fmt.pix.priv = 
V4L2_PIX_FMT_PRIV_MAGIC; + ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + return ops->vidioc_g_fmt_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + return ops->vidioc_g_fmt_sdr_out(file, fh, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + return ops->vidioc_g_fmt_meta_cap(file, fh, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + return ops->vidioc_g_fmt_meta_out(file, fh, arg); + } + return -EINVAL; +} + +static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + unsigned int i; + + if (ret) + return ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + v4l_sanitize_format(p); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_vid_cap)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); + case 
V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_vid_out)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_vbi_out)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_s_fmt_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) + break; + memset_after(p, 0, 
fmt.sdr.buffersize); + return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_sdr_out)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_s_fmt_sdr_out(file, fh, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_meta_cap)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_s_fmt_meta_cap(file, fh, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_meta_out)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_s_fmt_meta_out(file, fh, arg); + } + return -EINVAL; +} + +static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + unsigned int i; + + if (ret) + return ret; + + v4l_sanitize_format(p); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_vid_cap)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) + break; + memset_after(p, 0, 
fmt.vbi.flags); + return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_vid_out)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_vbi_out)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_try_fmt_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_sdr_out)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_try_fmt_sdr_out(file, fh, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_meta_cap)) + break; + 
memset_after(p, 0, fmt.meta); + return ops->vidioc_try_fmt_meta_cap(file, fh, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_meta_out)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_try_fmt_meta_out(file, fh, arg); + } + return -EINVAL; +} + +static int v4l_streamon(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_streamon(file, fh, *(unsigned int *)arg); +} + +static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg); +} + +static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_tuner *p = arg; + int err; + + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + err = ops->vidioc_g_tuner(file, fh, p); + if (!err) + p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; + return err; +} + +static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_tuner *p = arg; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + return ops->vidioc_s_tuner(file, fh, p); +} + +static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_modulator *p = arg; + int err; + + if (vfd->vfl_type == VFL_TYPE_RADIO) + p->type = V4L2_TUNER_RADIO; + + err = ops->vidioc_g_modulator(file, fh, p); + if (!err) + p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; + return err; +} + +static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_modulator *p = arg; + + if (vfd->vfl_type == VFL_TYPE_RADIO) + p->type = V4L2_TUNER_RADIO; + + return ops->vidioc_s_modulator(file, fh, p); +} + +static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_frequency *p = arg; + + if (vfd->vfl_type == VFL_TYPE_SDR) + p->type = V4L2_TUNER_SDR; + else + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + return ops->vidioc_g_frequency(file, fh, p); +} + +static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + const struct v4l2_frequency *p = arg; + enum v4l2_tuner_type type; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + if (vfd->vfl_type == VFL_TYPE_SDR) { + if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) + return -EINVAL; + } else { + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (type != p->type) + return -EINVAL; + } + return ops->vidioc_s_frequency(file, fh, p); +} + +static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_standard *p = arg; + + return v4l_video_std_enumstd(p, vfd->tvnorms); +} + +static int v4l_s_std(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + v4l2_std_id id = *(v4l2_std_id *)arg, norm; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + norm = id & vfd->tvnorms; + if (vfd->tvnorms && !norm) /* Check if std is supported */ + return -EINVAL; + + /* Calls the specific handler */ + return ops->vidioc_s_std(file, fh, norm); +} + +static int v4l_querystd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + v4l2_std_id *p = arg; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + /* + * If no signal is detected, then the driver should return + * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with + * any standards that do not apply removed. + * + * This means that tuners, audio and video decoders can join + * their efforts to improve the standards detection. + */ + *p = vfd->tvnorms; + return ops->vidioc_querystd(file, fh, arg); +} + +static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_hw_freq_seek *p = arg; + enum v4l2_tuner_type type; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + /* s_hw_freq_seek is not supported for SDR for now */ + if (vfd->vfl_type == VFL_TYPE_SDR) + return -EINVAL; + + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (p->type != type) + return -EINVAL; + return ops->vidioc_s_hw_freq_seek(file, fh, p); +} + +static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_framebuffer *p = arg; + + p->base = NULL; + return ops->vidioc_s_fbuf(file, fh, p); +} + +static int v4l_overlay(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_overlay(file, fh, *(unsigned int *)arg); +} + +static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_requestbuffers *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + memset_after(p, 0, flags); + + p->capabilities = 0; + if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS)) + p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS; + + return ops->vidioc_reqbufs(file, fh, p); +} + +static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? ret : ops->vidioc_querybuf(file, fh, p); +} + +static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? ret : ops->vidioc_qbuf(file, fh, p); +} + +static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? 
ret : ops->vidioc_dqbuf(file, fh, p); +} + +static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_create_buffers *create = arg; + int ret = check_fmt(file, create->format.type); + + if (ret) + return ret; + + memset_after(create, 0, flags); + + v4l_sanitize_format(&create->format); + + create->capabilities = 0; + if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS)) + create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS; + + ret = ops->vidioc_create_bufs(file, fh, create); + + if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) + create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + + return ret; +} + +static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_buffer *b = arg; + int ret = check_fmt(file, b->type); + + return ret ? ret : ops->vidioc_prepare_buf(file, fh, b); +} + +static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_remove_buffers *remove = arg; + + if (ops->vidioc_remove_bufs) + return ops->vidioc_remove_bufs(file, fh, remove); + + return -ENOTTY; +} + +static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_streamparm *p = arg; + v4l2_std_id std; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + if (ops->vidioc_g_parm) + return ops->vidioc_g_parm(file, fh, p); + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + if (vfd->device_caps & V4L2_CAP_READWRITE) + p->parm.capture.readbuffers = 2; + ret = ops->vidioc_g_std(file, fh, &std); + if (ret == 0) + v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe); + return ret; +} + +static int v4l_s_parm(const struct 
v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_streamparm *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + /* Note: extendedmode is never used in drivers */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) { + memset(p->parm.output.reserved, 0, + sizeof(p->parm.output.reserved)); + p->parm.output.extendedmode = 0; + p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY; + } else { + memset(p->parm.capture.reserved, 0, + sizeof(p->parm.capture.reserved)); + p->parm.capture.extendedmode = 0; + p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY; + } + return ops->vidioc_s_parm(file, fh, p); +} + +static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_queryctrl *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + if (vfh && vfh->ctrl_handler) + return v4l2_queryctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_queryctrl(vfd->ctrl_handler, p); + if (ops->vidioc_queryctrl) + return ops->vidioc_queryctrl(file, fh, p); + return -ENOTTY; +} + +static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_query_ext_ctrl *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + if (vfh && vfh->ctrl_handler) + return v4l2_query_ext_ctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_query_ext_ctrl(vfd->ctrl_handler, p); + if (ops->vidioc_query_ext_ctrl) + return ops->vidioc_query_ext_ctrl(file, fh, p); + return -ENOTTY; +} + +static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_querymenu *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? 
fh : NULL; + + if (vfh && vfh->ctrl_handler) + return v4l2_querymenu(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_querymenu(vfd->ctrl_handler, p); + if (ops->vidioc_querymenu) + return ops->vidioc_querymenu(file, fh, p); + return -ENOTTY; +} + +static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_control *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + struct v4l2_ext_controls ctrls; + struct v4l2_ext_control ctrl; + + if (vfh && vfh->ctrl_handler) + return v4l2_g_ctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_g_ctrl(vfd->ctrl_handler, p); + if (ops->vidioc_g_ctrl) + return ops->vidioc_g_ctrl(file, fh, p); + if (ops->vidioc_g_ext_ctrls == NULL) + return -ENOTTY; + + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); + ctrls.count = 1; + ctrls.controls = &ctrl; + ctrl.id = p->id; + ctrl.value = p->value; + if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) { + int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls); + + if (ret == 0) + p->value = ctrl.value; + return ret; + } + return -EINVAL; +} + +static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_control *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? 
fh : NULL; + struct v4l2_ext_controls ctrls; + struct v4l2_ext_control ctrl; + int ret; + + if (vfh && vfh->ctrl_handler) + return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p); + if (ops->vidioc_s_ctrl) + return ops->vidioc_s_ctrl(file, fh, p); + if (ops->vidioc_s_ext_ctrls == NULL) + return -ENOTTY; + + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); + ctrls.count = 1; + ctrls.controls = &ctrl; + ctrl.id = p->id; + ctrl.value = p->value; + if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL)) + return -EINVAL; + ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls); + p->value = ctrl.value; + return ret; +} + +static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_g_ext_ctrls(vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_g_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ? + ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL; +} + +static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_s_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ? 
+ ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL; +} + +static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = + test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_try_ext_ctrls(vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_try_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ? + ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL; +} + +/* + * The selection API specified originally that the _MPLANE buffer types + * shouldn't be used. The reasons for this are lost in the mists of time + * (or just really crappy memories). Regardless, this is really annoying + * for userspace. So to keep things simple we map _MPLANE buffer types + * to their 'regular' counterparts before calling the driver. And we + * restore it afterwards. This way applications can use either buffer + * type and drivers don't need to check for both. 
+ */ +static int v4l_g_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_g_selection(file, fh, p); + p->type = old_type; + return ret; +} + +static int v4l_s_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_s_selection(file, fh, p); + p->type = old_type; + return ret; +} + +static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_crop *p = arg; + struct v4l2_selection s = { + .type = p->type, + }; + int ret; + + /* simulate capture crop using selection api */ + + /* crop means compose for output devices */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE; + else + s.target = V4L2_SEL_TGT_CROP; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE ? 
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; + + ret = v4l_g_selection(ops, file, fh, &s); + + /* copying results to old structure on success */ + if (!ret) + p->c = s.r; + return ret; +} + +static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_crop *p = arg; + struct v4l2_selection s = { + .type = p->type, + .r = p->c, + }; + + /* simulate capture crop using selection api */ + + /* crop means compose for output devices */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE; + else + s.target = V4L2_SEL_TGT_CROP; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE ? + V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; + + return v4l_s_selection(ops, file, fh, &s); +} + +static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_cropcap *p = arg; + struct v4l2_selection s = { .type = p->type }; + int ret = 0; + + /* setting trivial pixelaspect */ + p->pixelaspect.numerator = 1; + p->pixelaspect.denominator = 1; + + if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + /* + * The determine_valid_ioctls() call already should ensure + * that this can never happen, but just in case... + */ + if (WARN_ON(!ops->vidioc_g_selection)) + return -ENOTTY; + + if (ops->vidioc_g_pixelaspect) + ret = ops->vidioc_g_pixelaspect(file, fh, s.type, + &p->pixelaspect); + + /* + * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the + * square pixel aspect ratio in that case. 
+ */ + if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD) + return ret; + + /* Use g_selection() to fill in the bounds and defrect rectangles */ + + /* obtaining bounds */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS; + else + s.target = V4L2_SEL_TGT_CROP_BOUNDS; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ? + V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS; + + ret = v4l_g_selection(ops, file, fh, &s); + if (ret) + return ret; + p->bounds = s.r; + + /* obtaining defrect */ + if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS) + s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT; + else + s.target = V4L2_SEL_TGT_CROP_DEFAULT; + + ret = v4l_g_selection(ops, file, fh, &s); + if (ret) + return ret; + p->defrect = s.r; + + return 0; +} + +static int v4l_log_status(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + int ret; + + if (vfd->v4l2_dev) + pr_info("%s: ================= START STATUS =================\n", + vfd->v4l2_dev->name); + ret = ops->vidioc_log_status(file, fh); + if (vfd->v4l2_dev) + pr_info("%s: ================== END STATUS ==================\n", + vfd->v4l2_dev->name); + return ret; +} + +static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + struct v4l2_dbg_register *p = arg; + struct video_device *vfd = video_devdata(file); + struct v4l2_subdev *sd; + int idx = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { + if (vfd->v4l2_dev == NULL) + return -EINVAL; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) + if (p->match.addr == idx++) + return v4l2_subdev_call(sd, core, g_register, p); + return -EINVAL; + } + if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && + (ops->vidioc_g_chip_info || p->match.addr == 0)) + return 
ops->vidioc_g_register(file, fh, p); + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + const struct v4l2_dbg_register *p = arg; + struct video_device *vfd = video_devdata(file); + struct v4l2_subdev *sd; + int idx = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { + if (vfd->v4l2_dev == NULL) + return -EINVAL; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) + if (p->match.addr == idx++) + return v4l2_subdev_call(sd, core, s_register, p); + return -EINVAL; + } + if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && + (ops->vidioc_g_chip_info || p->match.addr == 0)) + return ops->vidioc_s_register(file, fh, p); + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + struct video_device *vfd = video_devdata(file); + struct v4l2_dbg_chip_info *p = arg; + struct v4l2_subdev *sd; + int idx = 0; + + switch (p->match.type) { + case V4L2_CHIP_MATCH_BRIDGE: + if (ops->vidioc_s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (ops->vidioc_g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name)); + if (ops->vidioc_g_chip_info) + return ops->vidioc_g_chip_info(file, fh, arg); + if (p->match.addr) + return -EINVAL; + return 0; + + case V4L2_CHIP_MATCH_SUBDEV: + if (vfd->v4l2_dev == NULL) + break; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) { + if (p->match.addr != idx++) + continue; + if (sd->ops->core && sd->ops->core->s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (sd->ops->core && sd->ops->core->g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, sd->name, sizeof(p->name)); + return 0; + } + break; + } + return -EINVAL; +#else + return 
-ENOTTY; +#endif +} + +static int v4l_dqevent(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK); +} + +static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_subscribe_event(fh, arg); +} + +static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_unsubscribe_event(fh, arg); +} + +static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_sliced_vbi_cap *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + /* Clear up to type, everything after type is zeroed already */ + memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type)); + + return ops->vidioc_g_sliced_vbi_cap(file, fh, p); +} + +static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_frequency_band *p = arg; + enum v4l2_tuner_type type; + int err; + + if (vfd->vfl_type == VFL_TYPE_SDR) { + if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) + return -EINVAL; + type = p->type; + } else { + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (type != p->type) + return -EINVAL; + } + if (ops->vidioc_enum_freq_bands) { + err = ops->vidioc_enum_freq_bands(file, fh, p); + if (err != -ENOTTY) + return err; + } + if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) { + struct v4l2_tuner t = { + .index = p->tuner, + .type = type, + }; + + if (p->index) + return -EINVAL; + err = ops->vidioc_g_tuner(file, fh, &t); + if (err) + return err; + p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS; + p->rangelow = t.rangelow; + p->rangehigh = t.rangehigh; + p->modulation = (type == V4L2_TUNER_RADIO) ? 
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB; + return 0; + } + if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) { + struct v4l2_modulator m = { + .index = p->tuner, + }; + + if (type != V4L2_TUNER_RADIO) + return -EINVAL; + if (p->index) + return -EINVAL; + err = ops->vidioc_g_modulator(file, fh, &m); + if (err) + return err; + p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS; + p->rangelow = m.rangelow; + p->rangehigh = m.rangehigh; + p->modulation = (type == V4L2_TUNER_RADIO) ? + V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB; + return 0; + } + return -ENOTTY; +} + +struct v4l2_ioctl_info { + unsigned int ioctl; + u32 flags; + const char * const name; + int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file, + void *fh, void *p); + void (*debug)(const void *arg, bool write_only); +}; + +/* This control needs a priority check */ +#define INFO_FL_PRIO (1 << 0) +/* This control can be valid if the filehandle passes a control handler. */ +#define INFO_FL_CTRL (1 << 1) +/* Queuing ioctl */ +#define INFO_FL_QUEUE (1 << 2) +/* Always copy back result, even on error */ +#define INFO_FL_ALWAYS_COPY (1 << 3) +/* Zero struct from after the field to the end */ +#define INFO_FL_CLEAR(v4l2_struct, field) \ + ((offsetof(struct v4l2_struct, field) + \ + sizeof_field(struct v4l2_struct, field)) << 16) +#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) + +#define DEFINE_V4L_STUB_FUNC(_vidioc) \ + static int v4l_stub_ ## _vidioc( \ + const struct v4l2_ioctl_ops *ops, \ + struct file *file, void *fh, void *p) \ + { \ + return ops->vidioc_ ## _vidioc(file, fh, p); \ + } + +#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \ + [_IOC_NR(_ioctl)] = { \ + .ioctl = _ioctl, \ + .flags = _flags, \ + .name = #_ioctl, \ + .func = _func, \ + .debug = _debug, \ + } + +DEFINE_V4L_STUB_FUNC(g_fbuf) +DEFINE_V4L_STUB_FUNC(expbuf) +DEFINE_V4L_STUB_FUNC(g_std) +DEFINE_V4L_STUB_FUNC(g_audio) +DEFINE_V4L_STUB_FUNC(s_audio) +DEFINE_V4L_STUB_FUNC(g_edid) 
+DEFINE_V4L_STUB_FUNC(s_edid) +DEFINE_V4L_STUB_FUNC(g_audout) +DEFINE_V4L_STUB_FUNC(s_audout) +DEFINE_V4L_STUB_FUNC(g_jpegcomp) +DEFINE_V4L_STUB_FUNC(s_jpegcomp) +DEFINE_V4L_STUB_FUNC(enumaudio) +DEFINE_V4L_STUB_FUNC(enumaudout) +DEFINE_V4L_STUB_FUNC(enum_framesizes) +DEFINE_V4L_STUB_FUNC(enum_frameintervals) +DEFINE_V4L_STUB_FUNC(g_enc_index) +DEFINE_V4L_STUB_FUNC(encoder_cmd) +DEFINE_V4L_STUB_FUNC(try_encoder_cmd) +DEFINE_V4L_STUB_FUNC(decoder_cmd) +DEFINE_V4L_STUB_FUNC(try_decoder_cmd) +DEFINE_V4L_STUB_FUNC(s_dv_timings) +DEFINE_V4L_STUB_FUNC(g_dv_timings) +DEFINE_V4L_STUB_FUNC(enum_dv_timings) +DEFINE_V4L_STUB_FUNC(query_dv_timings) +DEFINE_V4L_STUB_FUNC(dv_timings_cap) + +static const struct v4l2_ioctl_info v4l2_ioctls[] = { + IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0), + IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0), + IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0), + IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)), + IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0), + IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)), + IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, 
type)), + IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0), + IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)), + IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)), + IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)), + IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL), + IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)), + IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0), + IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)), + IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)), + IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)), + IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0), + IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)), + 
IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)), + IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)), + IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)), + IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0), + IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0), + IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0), + IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)), + IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)), + IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)), + IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0), + IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + 
IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), + IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), + IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0), + IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0), + IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0), + IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0), + IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)), + IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0), + IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0), + IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0), + IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0), + IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)), + IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, 
v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)), + IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0), + IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)), + IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)), + IOCTL_INFO(VIDIOC_REMOVE_BUFS, v4l_remove_bufs, v4l_print_remove_buffers, INFO_FL_PRIO | INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_remove_buffers, type)), +}; +#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) + +static bool v4l2_is_known_ioctl(unsigned int cmd) +{ + if (_IOC_NR(cmd) >= V4L2_IOCTLS) + return false; + return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd; +} + +static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, + struct v4l2_fh *vfh, unsigned int cmd, + void *arg) +{ + if (_IOC_NR(cmd) >= V4L2_IOCTLS) + return vdev->lock; + if (vfh && vfh->m2m_ctx && + (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) { + if (vfh->m2m_ctx->q_lock) + return vfh->m2m_ctx->q_lock; + } + if (vdev->queue && vdev->queue->lock && + (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) + return vdev->queue->lock; + return vdev->lock; +} + +/* Common ioctl debug function. 
This function can be used by + external ioctl messages as well as internal V4L ioctl */ +void v4l_printk_ioctl(const char *prefix, unsigned int cmd) +{ + const char *dir, *type; + + if (prefix) + printk(KERN_DEBUG "%s: ", prefix); + + switch (_IOC_TYPE(cmd)) { + case 'd': + type = "v4l2_int"; + break; + case 'V': + if (!v4l2_is_known_ioctl(cmd)) { + type = "v4l2"; + break; + } + pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name); + return; + default: + type = "unknown"; + break; + } + + switch (_IOC_DIR(cmd)) { + case _IOC_NONE: dir = "--"; break; + case _IOC_READ: dir = "r-"; break; + case _IOC_WRITE: dir = "-w"; break; + case _IOC_READ | _IOC_WRITE: dir = "rw"; break; + default: dir = "*ERR*"; break; + } + pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)", + type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd); +} +EXPORT_SYMBOL(v4l_printk_ioctl); + +static long __video_do_ioctl(struct file *file, + unsigned int cmd, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct mutex *req_queue_lock = NULL; + struct mutex *lock; /* ioctl serialization mutex */ + const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; + bool write_only = false; + struct v4l2_ioctl_info default_info; + const struct v4l2_ioctl_info *info; + void *fh = file->private_data; + struct v4l2_fh *vfh = NULL; + int dev_debug = vfd->dev_debug; + long ret = -ENOTTY; + + if (ops == NULL) { + pr_warn("%s: has no ioctl_ops.\n", + video_device_node_name(vfd)); + return ret; + } + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) + vfh = file->private_data; + + /* + * We need to serialize streamon/off with queueing new requests. + * These ioctls may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. 
+ */ + if (v4l2_device_supports_requests(vfd->v4l2_dev) && + (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) { + req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex; + + if (mutex_lock_interruptible(req_queue_lock)) + return -ERESTARTSYS; + } + + lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg); + + if (lock && mutex_lock_interruptible(lock)) { + if (req_queue_lock) + mutex_unlock(req_queue_lock); + return -ERESTARTSYS; + } + + if (!video_is_registered(vfd)) { + ret = -ENODEV; + goto unlock; + } + + if (v4l2_is_known_ioctl(cmd)) { + info = &v4l2_ioctls[_IOC_NR(cmd)]; + + if (!is_valid_ioctl(vfd, cmd) && + !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler)) + goto done; + + if (vfh && (info->flags & INFO_FL_PRIO)) { + ret = v4l2_prio_check(vfd->prio, vfh->prio); + if (ret) + goto done; + } + } else { + default_info.ioctl = cmd; + default_info.flags = 0; + default_info.debug = v4l_print_default; + info = &default_info; + } + + write_only = _IOC_DIR(cmd) == _IOC_WRITE; + if (info != &default_info) { + ret = info->func(ops, file, fh, arg); + } else if (!ops->vidioc_default) { + ret = -ENOTTY; + } else { + ret = ops->vidioc_default(file, fh, + vfh ? 
v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0, + cmd, arg); + } + +done: + if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) { + if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) && + (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF)) + goto unlock; + + v4l_printk_ioctl(video_device_node_name(vfd), cmd); + if (ret < 0) + pr_cont(": error %ld", ret); + if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG)) + pr_cont("\n"); + else if (_IOC_DIR(cmd) == _IOC_NONE) + info->debug(arg, write_only); + else { + pr_cont(": "); + info->debug(arg, write_only); + } + } + +unlock: + if (lock) + mutex_unlock(lock); + if (req_queue_lock) + mutex_unlock(req_queue_lock); + return ret; +} + +static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, + void __user **user_ptr, void ***kernel_ptr) +{ + int ret = 0; + + switch (cmd) { + case VIDIOC_PREPARE_BUF: + case VIDIOC_QUERYBUF: + case VIDIOC_QBUF: + case VIDIOC_DQBUF: { + struct v4l2_buffer *buf = parg; + + if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) { + if (buf->length > VIDEO_MAX_PLANES) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)buf->m.planes; + *kernel_ptr = (void **)&buf->m.planes; + *array_size = sizeof(struct v4l2_plane) * buf->length; + ret = 1; + } + break; + } + + case VIDIOC_G_EDID: + case VIDIOC_S_EDID: { + struct v4l2_edid *edid = parg; + + if (edid->blocks) { + if (edid->blocks > 256) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)edid->edid; + *kernel_ptr = (void **)&edid->edid; + *array_size = edid->blocks * 128; + ret = 1; + } + break; + } + + case VIDIOC_S_EXT_CTRLS: + case VIDIOC_G_EXT_CTRLS: + case VIDIOC_TRY_EXT_CTRLS: { + struct v4l2_ext_controls *ctrls = parg; + + if (ctrls->count != 0) { + if (ctrls->count > V4L2_CID_MAX_CTRLS) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)ctrls->controls; + *kernel_ptr = (void **)&ctrls->controls; + *array_size = sizeof(struct v4l2_ext_control) + * ctrls->count; + ret = 1; + } + break; + } + + 
case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *routing = parg; + + if (routing->len_routes > 256) + return -E2BIG; + + *user_ptr = u64_to_user_ptr(routing->routes); + *kernel_ptr = (void **)&routing->routes; + *array_size = sizeof(struct v4l2_subdev_route) + * routing->len_routes; + ret = 1; + break; + } + } + + return ret; +} + +static unsigned int video_translate_cmd(unsigned int cmd) +{ +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_DQEVENT_TIME32: + return VIDIOC_DQEVENT; + case VIDIOC_QUERYBUF_TIME32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF_TIME32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF_TIME32: + return VIDIOC_DQBUF; + case VIDIOC_PREPARE_BUF_TIME32: + return VIDIOC_PREPARE_BUF; + } +#endif + if (in_compat_syscall()) + return v4l2_compat_translate_cmd(cmd); + + return cmd; +} + +static int video_get_user(void __user *arg, void *parg, + unsigned int real_cmd, unsigned int cmd, + bool *always_copy) +{ + unsigned int n = _IOC_SIZE(real_cmd); + int err = 0; + + if (!(_IOC_DIR(cmd) & _IOC_WRITE)) { + /* read-only ioctl */ + memset(parg, 0, n); + return 0; + } + + /* + * In some cases, only a few fields are used as input, + * i.e. when the app sets "index" and then the driver + * fills in the rest of the structure for the thing + * with that index. We only need to copy up the first + * non-input field. 
+ */ + if (v4l2_is_known_ioctl(real_cmd)) { + u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags; + + if (flags & INFO_FL_CLEAR_MASK) + n = (flags & INFO_FL_CLEAR_MASK) >> 16; + *always_copy = flags & INFO_FL_ALWAYS_COPY; + } + + if (cmd == real_cmd) { + if (copy_from_user(parg, (void __user *)arg, n)) + err = -EFAULT; + } else if (in_compat_syscall()) { + memset(parg, 0, n); + err = v4l2_compat_get_user(arg, parg, cmd); + } else { + memset(parg, 0, n); +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_QUERYBUF_TIME32: + case VIDIOC_QBUF_TIME32: + case VIDIOC_DQBUF_TIME32: + case VIDIOC_PREPARE_BUF_TIME32: { + struct v4l2_buffer_time32 vb32; + struct v4l2_buffer *vb = parg; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.userptr = vb32.m.userptr, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + break; + } + } +#endif + } + + /* zero out anything we don't copy from userspace */ + if (!err && n < _IOC_SIZE(real_cmd)) + memset((u8 *)parg + n, 0, _IOC_SIZE(real_cmd) - n); + return err; +} + +static int video_put_user(void __user *arg, void *parg, + unsigned int real_cmd, unsigned int cmd) +{ + if (!(_IOC_DIR(cmd) & _IOC_READ)) + return 0; + + if (cmd == real_cmd) { + /* Copy results into user buffer */ + if (copy_to_user(arg, parg, _IOC_SIZE(cmd))) + return -EFAULT; + return 0; + } + + if (in_compat_syscall()) + return v4l2_compat_put_user(arg, parg, cmd); + +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_DQEVENT_TIME32: { + struct v4l2_event *ev = parg; + struct v4l2_event_time32 ev32; + + memset(&ev32, 0, 
sizeof(ev32)); + + ev32.type = ev->type; + ev32.pending = ev->pending; + ev32.sequence = ev->sequence; + ev32.timestamp.tv_sec = ev->timestamp.tv_sec; + ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec; + ev32.id = ev->id; + + memcpy(&ev32.u, &ev->u, sizeof(ev->u)); + memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved)); + + if (copy_to_user(arg, &ev32, sizeof(ev32))) + return -EFAULT; + break; + } + case VIDIOC_QUERYBUF_TIME32: + case VIDIOC_QBUF_TIME32: + case VIDIOC_DQBUF_TIME32: + case VIDIOC_PREPARE_BUF_TIME32: { + struct v4l2_buffer *vb = parg; + struct v4l2_buffer_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + + vb32.index = vb->index; + vb32.type = vb->type; + vb32.bytesused = vb->bytesused; + vb32.flags = vb->flags; + vb32.field = vb->field; + vb32.timestamp.tv_sec = vb->timestamp.tv_sec; + vb32.timestamp.tv_usec = vb->timestamp.tv_usec; + vb32.timecode = vb->timecode; + vb32.sequence = vb->sequence; + vb32.memory = vb->memory; + vb32.m.userptr = vb->m.userptr; + vb32.length = vb->length; + vb32.request_fd = vb->request_fd; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + break; + } + } +#endif + + return 0; +} + +long +video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, + v4l2_kioctl func) +{ + char sbuf[128]; + void *mbuf = NULL, *array_buf = NULL; + void *parg = (void *)arg; + long err = -EINVAL; + bool has_array_args; + bool always_copy = false; + size_t array_size = 0; + void __user *user_ptr = NULL; + void **kernel_ptr = NULL; + unsigned int cmd = video_translate_cmd(orig_cmd); + const size_t ioc_size = _IOC_SIZE(cmd); + + /* Copy arguments into temp kernel buffer */ + if (_IOC_DIR(cmd) != _IOC_NONE) { + if (ioc_size <= sizeof(sbuf)) { + parg = sbuf; + } else { + /* too big to allocate from stack */ + mbuf = kmalloc(ioc_size, GFP_KERNEL); + if (NULL == mbuf) + return -ENOMEM; + parg = mbuf; + } + + err = video_get_user((void __user *)arg, parg, cmd, + orig_cmd, &always_copy); + if (err) + goto out; 
+ } + + err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr); + if (err < 0) + goto out; + has_array_args = err; + + if (has_array_args) { + array_buf = kvmalloc(array_size, GFP_KERNEL); + err = -ENOMEM; + if (array_buf == NULL) + goto out; + if (in_compat_syscall()) + err = v4l2_compat_get_array_args(file, array_buf, + user_ptr, array_size, + orig_cmd, parg); + else + err = copy_from_user(array_buf, user_ptr, array_size) ? + -EFAULT : 0; + if (err) + goto out; + *kernel_ptr = array_buf; + } + + /* Handles IOCTL */ + err = func(file, cmd, parg); + if (err == -ENOTTY || err == -ENOIOCTLCMD) { + err = -ENOTTY; + goto out; + } + + if (err == 0) { + if (cmd == VIDIOC_DQBUF) + trace_v4l2_dqbuf(video_devdata(file)->minor, parg); + else if (cmd == VIDIOC_QBUF) + trace_v4l2_qbuf(video_devdata(file)->minor, parg); + } + + /* + * Some ioctls can return an error, but still have valid + * results that must be returned. + * + * FIXME: subdev IOCTLS are partially handled here and partially in + * v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS + * defined here as part of the 'v4l2_ioctls' array. As + * VIDIOC_SUBDEV_[GS]_ROUTING needs to return results to applications + * even in case of failure, but it is not defined here as part of the + * 'v4l2_ioctls' array, insert an ad-hoc check to address that. 
+ */ + if (cmd == VIDIOC_SUBDEV_G_ROUTING || cmd == VIDIOC_SUBDEV_S_ROUTING) + always_copy = true; + + if (err < 0 && !always_copy) + goto out; + + if (has_array_args) { + *kernel_ptr = (void __force *)user_ptr; + if (in_compat_syscall()) { + int put_err; + + put_err = v4l2_compat_put_array_args(file, user_ptr, + array_buf, + array_size, + orig_cmd, parg); + if (put_err) + err = put_err; + } else if (copy_to_user(user_ptr, array_buf, array_size)) { + err = -EFAULT; + } + } + + if (video_put_user((void __user *)arg, parg, cmd, orig_cmd)) + err = -EFAULT; +out: + kvfree(array_buf); + kfree(mbuf); + return err; +} + +long video_ioctl2(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return video_usercopy(file, cmd, arg, __video_do_ioctl); +} +EXPORT_SYMBOL(video_ioctl2); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-jpeg.c b/6.12.0/drivers/media/v4l2-core/v4l2-jpeg.c new file mode 100644 index 0000000..6e26473 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-jpeg.c @@ -0,0 +1,793 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 JPEG header parser helpers. 
+ * + * Copyright (C) 2019 Pengutronix, Philipp Zabel + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1] + * + * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf + */ + +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("V4L2 JPEG header parser helpers"); +MODULE_AUTHOR("Philipp Zabel "); +MODULE_LICENSE("GPL"); + +/* Table B.1 - Marker code assignments */ +#define SOF0 0xffc0 /* start of frame */ +#define SOF1 0xffc1 +#define SOF2 0xffc2 +#define SOF3 0xffc3 +#define SOF5 0xffc5 +#define SOF7 0xffc7 +#define JPG 0xffc8 /* extensions */ +#define SOF9 0xffc9 +#define SOF11 0xffcb +#define SOF13 0xffcd +#define SOF15 0xffcf +#define DHT 0xffc4 /* huffman table */ +#define DAC 0xffcc /* arithmetic coding conditioning */ +#define RST0 0xffd0 /* restart */ +#define RST7 0xffd7 +#define SOI 0xffd8 /* start of image */ +#define EOI 0xffd9 /* end of image */ +#define SOS 0xffda /* start of stream */ +#define DQT 0xffdb /* quantization table */ +#define DNL 0xffdc /* number of lines */ +#define DRI 0xffdd /* restart interval */ +#define DHP 0xffde /* hierarchical progression */ +#define EXP 0xffdf /* expand reference */ +#define APP0 0xffe0 /* application data */ +#define APP14 0xffee /* application data for colour encoding */ +#define APP15 0xffef +#define JPG0 0xfff0 /* extensions */ +#define JPG13 0xfffd +#define COM 0xfffe /* comment */ +#define TEM 0xff01 /* temporary */ + +/* Luma and chroma qp tables to achieve 50% compression quality + * This is as per example in Annex K.1 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_qt); + +const u8 
v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_qt); + +/* Zigzag scan pattern indexes */ +const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_zigzag_scan_index); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the luminance DC + * coefficient differences. The table represents Table K.3 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = { + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_dc_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the luminance AC + * coefficients. 
The table represents Table K.5 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = { + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, + 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, + 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, + 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, + 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, + 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, + 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, + 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, + 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, + 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_ac_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an interchange format JPEG + * stream or an abbreviated format table specification data stream. + * Specifies the huffman table used for encoding the chrominance DC coefficient differences. 
+ * The table represents Table K.4 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = { + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_dc_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the chrominance + * AC coefficients. The table represents Table K.6 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = { + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, + 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, + 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, + 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, + 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, + 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, + 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, + 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, + 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, + 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, + 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_ac_ht); + +/** + * struct jpeg_stream - JPEG byte stream + * @curr: current position in stream + * @end: end position, 
after last byte + */ +struct jpeg_stream { + u8 *curr; + u8 *end; +}; + +/* returns a value that fits into u8, or negative error */ +static int jpeg_get_byte(struct jpeg_stream *stream) +{ + if (stream->curr >= stream->end) + return -EINVAL; + + return *stream->curr++; +} + +/* returns a value that fits into u16, or negative error */ +static int jpeg_get_word_be(struct jpeg_stream *stream) +{ + u16 word; + + if (stream->curr + sizeof(__be16) > stream->end) + return -EINVAL; + + word = get_unaligned_be16(stream->curr); + stream->curr += sizeof(__be16); + + return word; +} + +static int jpeg_skip(struct jpeg_stream *stream, size_t len) +{ + if (stream->curr + len > stream->end) + return -EINVAL; + + stream->curr += len; + + return 0; +} + +static int jpeg_next_marker(struct jpeg_stream *stream) +{ + int byte; + u16 marker = 0; + + while ((byte = jpeg_get_byte(stream)) >= 0) { + marker = (marker << 8) | byte; + /* skip stuffing bytes and REServed markers */ + if (marker == TEM || (marker > 0xffbf && marker < 0xffff)) + return marker; + } + + return byte; +} + +/* this does not advance the current position in the stream */ +static int jpeg_reference_segment(struct jpeg_stream *stream, + struct v4l2_jpeg_reference *segment) +{ + u16 len; + + if (stream->curr + sizeof(__be16) > stream->end) + return -EINVAL; + + len = get_unaligned_be16(stream->curr); + if (stream->curr + len > stream->end) + return -EINVAL; + + segment->start = stream->curr; + segment->length = len; + + return 0; +} + +static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v) +{ + if (nf == 1) + return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; + + /* no chroma subsampling for 4-component images */ + if (nf == 4 && h_v != 0x11) + return -EINVAL; + + switch (h_v) { + case 0x11: + return V4L2_JPEG_CHROMA_SUBSAMPLING_444; + case 0x21: + return V4L2_JPEG_CHROMA_SUBSAMPLING_422; + case 0x22: + return V4L2_JPEG_CHROMA_SUBSAMPLING_420; + case 0x41: + return V4L2_JPEG_CHROMA_SUBSAMPLING_411; + default: + return -EINVAL; + 
} +} + +static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker, + struct v4l2_jpeg_frame_header *frame_header) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Lf = 8 + 3 * Nf, Nf >= 1 */ + if (len < 8 + 3) + return -EINVAL; + + if (frame_header) { + /* Table B.2 - Frame header parameter sizes and values */ + int p, y, x, nf; + int i; + + p = jpeg_get_byte(stream); + if (p < 0) + return p; + /* + * Baseline DCT only supports 8-bit precision. + * Extended sequential DCT also supports 12-bit precision. + */ + if (p != 8 && (p != 12 || sof_marker != SOF1)) + return -EINVAL; + + y = jpeg_get_word_be(stream); + if (y < 0) + return y; + if (y == 0) + return -EINVAL; + + x = jpeg_get_word_be(stream); + if (x < 0) + return x; + if (x == 0) + return -EINVAL; + + nf = jpeg_get_byte(stream); + if (nf < 0) + return nf; + /* + * The spec allows 1 <= Nf <= 255, but we only support up to 4 + * components. + */ + if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS) + return -EINVAL; + if (len != 8 + 3 * nf) + return -EINVAL; + + frame_header->precision = p; + frame_header->height = y; + frame_header->width = x; + frame_header->num_components = nf; + + for (i = 0; i < nf; i++) { + struct v4l2_jpeg_frame_component_spec *component; + int c, h_v, tq; + + c = jpeg_get_byte(stream); + if (c < 0) + return c; + + h_v = jpeg_get_byte(stream); + if (h_v < 0) + return h_v; + if (i == 0) { + int subs; + + subs = v4l2_jpeg_decode_subsampling(nf, h_v); + if (subs < 0) + return subs; + frame_header->subsampling = subs; + } else if (h_v != 0x11) { + /* all chroma sampling factors must be 1 */ + return -EINVAL; + } + + tq = jpeg_get_byte(stream); + if (tq < 0) + return tq; + + component = &frame_header->component[i]; + component->component_identifier = c; + component->horizontal_sampling_factor = + (h_v >> 4) & 0xf; + component->vertical_sampling_factor = h_v & 0xf; + component->quantization_table_selector = tq; + } + } else { + return jpeg_skip(stream, 
len - 2);
+	}
+
+	return 0;
+}
+
+static int jpeg_parse_scan_header(struct jpeg_stream *stream,
+				  struct v4l2_jpeg_scan_header *scan_header)
+{
+	size_t skip;
+	int len = jpeg_get_word_be(stream);
+
+	if (len < 0)
+		return len;
+	/* Ls = 6 + 2 * Ns, Ns >= 1 */
+	if (len < 6 + 2)
+		return -EINVAL;
+
+	if (scan_header) {
+		int ns;
+		int i;
+
+		ns = jpeg_get_byte(stream);
+		if (ns < 0)
+			return ns;
+		if (ns < 1 || ns > 4 || len != 6 + 2 * ns)
+			return -EINVAL;
+
+		scan_header->num_components = ns;
+
+		for (i = 0; i < ns; i++) {
+			struct v4l2_jpeg_scan_component_spec *component;
+			int cs, td_ta;
+
+			cs = jpeg_get_byte(stream);
+			if (cs < 0)
+				return cs;
+
+			td_ta = jpeg_get_byte(stream);
+			if (td_ta < 0)
+				return td_ta;
+
+			component = &scan_header->component[i];
+			component->component_selector = cs;
+			component->dc_entropy_coding_table_selector =
+				(td_ta >> 4) & 0xf;
+			component->ac_entropy_coding_table_selector =
+				td_ta & 0xf;
+		}
+
+		skip = 3; /* skip Ss, Se, Ah, and Al */
+	} else {
+		skip = len - 2;
+	}
+
+	return jpeg_skip(stream, skip);
+}
+
+/* B.2.4.1 Quantization table-specification syntax */
+static int jpeg_parse_quantization_tables(struct jpeg_stream *stream,
+					  u8 precision,
+					  struct v4l2_jpeg_reference *tables)
+{
+	int len = jpeg_get_word_be(stream);
+
+	if (len < 0)
+		return len;
+	/* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */
+	if (len < 2 + 65)
+		return -EINVAL;
+
+	len -= 2;
+	while (len >= 65) {
+		u8 pq, tq, *qk;
+		int ret;
+		int pq_tq = jpeg_get_byte(stream);
+
+		if (pq_tq < 0)
+			return pq_tq;
+
+		/* quantization table element precision */
+		pq = (pq_tq >> 4) & 0xf;
+		/*
+		 * Only 8-bit Qk values for 8-bit sample precision. Extended
+		 * sequential DCT with 12-bit sample precision also supports
+		 * 16-bit Qk values.
+ */ + if (pq != 0 && (pq != 1 || precision != 12)) + return -EINVAL; + + /* quantization table destination identifier */ + tq = pq_tq & 0xf; + if (tq > 3) + return -EINVAL; + + /* quantization table element */ + qk = stream->curr; + ret = jpeg_skip(stream, pq ? 128 : 64); + if (ret < 0) + return -EINVAL; + + if (tables) { + tables[tq].start = qk; + tables[tq].length = pq ? 128 : 64; + } + + len -= pq ? 129 : 65; + } + + return 0; +} + +/* B.2.4.2 Huffman table-specification syntax */ +static int jpeg_parse_huffman_tables(struct jpeg_stream *stream, + struct v4l2_jpeg_reference *tables) +{ + int mt; + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Table B.5 - Huffman table specification parameter sizes and values */ + if (len < 2 + 17) + return -EINVAL; + + for (len -= 2; len >= 17; len -= 17 + mt) { + u8 tc, th, *table; + int tc_th = jpeg_get_byte(stream); + int i, ret; + + if (tc_th < 0) + return tc_th; + + /* table class - 0 = DC, 1 = AC */ + tc = (tc_th >> 4) & 0xf; + if (tc > 1) + return -EINVAL; + + /* huffman table destination identifier */ + th = tc_th & 0xf; + /* only two Huffman tables for baseline DCT */ + if (th > 1) + return -EINVAL; + + /* BITS - number of Huffman codes with length i */ + table = stream->curr; + mt = 0; + for (i = 0; i < 16; i++) { + int li; + + li = jpeg_get_byte(stream); + if (li < 0) + return li; + + mt += li; + } + /* HUFFVAL - values associated with each Huffman code */ + ret = jpeg_skip(stream, mt); + if (ret < 0) + return ret; + + if (tables) { + tables[(tc << 1) | th].start = table; + tables[(tc << 1) | th].length = stream->curr - table; + } + } + + return jpeg_skip(stream, len - 2); +} + +/* B.2.4.4 Restart interval definition syntax */ +static int jpeg_parse_restart_interval(struct jpeg_stream *stream, + u16 *restart_interval) +{ + int len = jpeg_get_word_be(stream); + int ri; + + if (len < 0) + return len; + if (len != 4) + return -EINVAL; + + ri = jpeg_get_word_be(stream); + if (ri < 0) + return 
ri;
+
+	*restart_interval = ri;
+
+	return 0;
+}
+
+static int jpeg_skip_segment(struct jpeg_stream *stream)
+{
+	int len = jpeg_get_word_be(stream);
+
+	if (len < 0)
+		return len;
+	if (len < 2)
+		return -EINVAL;
+
+	return jpeg_skip(stream, len - 2);
+}
+
+/* Rec. ITU-T T.872 (06/2012) 6.5.3 */
+static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+				 enum v4l2_jpeg_app14_tf *tf)
+{
+	int ret;
+	int lp;
+	int skip;
+
+	lp = jpeg_get_word_be(stream);
+	if (lp < 0)
+		return lp;
+
+	/* Check for "Adobe\0" in Ap1..6 */
+	if (stream->curr + 6 > stream->end ||
+	    strncmp(stream->curr, "Adobe\0", 6))
+		return jpeg_skip(stream, lp - 2);
+
+	/* get to Ap12 */
+	ret = jpeg_skip(stream, 11);
+	if (ret < 0)
+		return ret;
+
+	ret = jpeg_get_byte(stream);
+	if (ret < 0)
+		return ret;
+
+	*tf = ret;
+
+	/* skip the rest of the segment, this ensures at least it is complete */
+	skip = lp - 2 - 11 - 1;
+	return jpeg_skip(stream, skip);
+}
+
+/**
+ * v4l2_jpeg_parse_header - locate marker segments and optionally parse headers
+ * @buf: address of the JPEG buffer, should start with a SOI marker
+ * @len: length of the JPEG buffer
+ * @out: returns marker segment positions and optionally parsed headers
+
+ * The out->scan pointer must be initialized to NULL or point to a valid
+ * v4l2_jpeg_scan_header structure. The out->huffman_tables and
+ * out->quantization_tables pointers must be initialized to NULL or point to a
+ * valid array of 4 v4l2_jpeg_reference structures each.
+ *
+ * Returns 0 or negative error if parsing failed.
+ */ +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out) +{ + struct jpeg_stream stream; + int marker; + int ret = 0; + + stream.curr = buf; + stream.end = stream.curr + len; + + out->num_dht = 0; + out->num_dqt = 0; + + /* the first bytes must be SOI, B.2.1 High-level syntax */ + if (jpeg_get_word_be(&stream) != SOI) + return -EINVAL; + + /* init value to signal if this marker is not present */ + out->app14_tf = V4L2_JPEG_APP14_TF_UNKNOWN; + + /* loop through marker segments */ + while ((marker = jpeg_next_marker(&stream)) >= 0) { + switch (marker) { + /* baseline DCT, extended sequential DCT */ + case SOF0 ... SOF1: + ret = jpeg_reference_segment(&stream, &out->sof); + if (ret < 0) + return ret; + ret = jpeg_parse_frame_header(&stream, marker, + &out->frame); + break; + /* progressive, lossless */ + case SOF2 ... SOF3: + /* differential coding */ + case SOF5 ... SOF7: + /* arithmetic coding */ + case SOF9 ... SOF11: + case SOF13 ... SOF15: + case DAC: + case TEM: + return -EINVAL; + + case DHT: + ret = jpeg_reference_segment(&stream, + &out->dht[out->num_dht++ % 4]); + if (ret < 0) + return ret; + if (!out->huffman_tables) { + ret = jpeg_skip_segment(&stream); + break; + } + ret = jpeg_parse_huffman_tables(&stream, + out->huffman_tables); + break; + case DQT: + ret = jpeg_reference_segment(&stream, + &out->dqt[out->num_dqt++ % 4]); + if (ret < 0) + return ret; + if (!out->quantization_tables) { + ret = jpeg_skip_segment(&stream); + break; + } + ret = jpeg_parse_quantization_tables(&stream, + out->frame.precision, + out->quantization_tables); + break; + case DRI: + ret = jpeg_parse_restart_interval(&stream, + &out->restart_interval); + break; + case APP14: + ret = jpeg_parse_app14_data(&stream, + &out->app14_tf); + break; + case SOS: + ret = jpeg_reference_segment(&stream, &out->sos); + if (ret < 0) + return ret; + ret = jpeg_parse_scan_header(&stream, out->scan); + /* + * stop parsing, the scan header marks the beginning of + * the 
entropy coded segment + */ + out->ecs_offset = stream.curr - (u8 *)buf; + return ret; + + /* markers without parameters */ + case RST0 ... RST7: /* restart */ + case SOI: /* start of image */ + case EOI: /* end of image */ + break; + + /* skip unknown or unsupported marker segments */ + default: + ret = jpeg_skip_segment(&stream); + break; + } + if (ret < 0) + return ret; + } + + return marker; +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header); + +/** + * v4l2_jpeg_parse_frame_header - parse frame header + * @buf: address of the frame header, after the SOF0 marker + * @len: length of the frame header + * @frame_header: returns the parsed frame header + * + * Returns 0 or negative error if parsing failed. + */ +int v4l2_jpeg_parse_frame_header(void *buf, size_t len, + struct v4l2_jpeg_frame_header *frame_header) +{ + struct jpeg_stream stream; + + stream.curr = buf; + stream.end = stream.curr + len; + return jpeg_parse_frame_header(&stream, SOF0, frame_header); +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_frame_header); + +/** + * v4l2_jpeg_parse_scan_header - parse scan header + * @buf: address of the scan header, after the SOS marker + * @len: length of the scan header + * @scan_header: returns the parsed scan header + * + * Returns 0 or negative error if parsing failed. 
+ */ +int v4l2_jpeg_parse_scan_header(void *buf, size_t len, + struct v4l2_jpeg_scan_header *scan_header) +{ + struct jpeg_stream stream; + + stream.curr = buf; + stream.end = stream.curr + len; + return jpeg_parse_scan_header(&stream, scan_header); +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_scan_header); + +/** + * v4l2_jpeg_parse_quantization_tables - parse quantization tables segment + * @buf: address of the quantization table segment, after the DQT marker + * @len: length of the quantization table segment + * @precision: sample precision (P) in bits per component + * @q_tables: returns four references into the buffer for the + * four possible quantization table destinations + * + * Returns 0 or negative error if parsing failed. + */ +int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision, + struct v4l2_jpeg_reference *q_tables) +{ + struct jpeg_stream stream; + + stream.curr = buf; + stream.end = stream.curr + len; + return jpeg_parse_quantization_tables(&stream, precision, q_tables); +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_quantization_tables); + +/** + * v4l2_jpeg_parse_huffman_tables - parse huffman tables segment + * @buf: address of the Huffman table segment, after the DHT marker + * @len: length of the Huffman table segment + * @huffman_tables: returns four references into the buffer for the + * four possible Huffman table destinations, in + * the order DC0, DC1, AC0, AC1 + * + * Returns 0 or negative error if parsing failed. 
+ */ +int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len, + struct v4l2_jpeg_reference *huffman_tables) +{ + struct jpeg_stream stream; + + stream.curr = buf; + stream.end = stream.curr + len; + return jpeg_parse_huffman_tables(&stream, huffman_tables); +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_huffman_tables); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-mc.c b/6.12.0/drivers/media/v4l2-core/v4l2-mc.c new file mode 100644 index 0000000..937d358 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-mc.c @@ -0,0 +1,614 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * Media Controller ancillary functions + * + * Copyright (c) 2016 Mauro Carvalho Chehab + * Copyright (C) 2016 Shuah Khan + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int v4l2_mc_create_media_graph(struct media_device *mdev) + +{ + struct media_entity *entity; + struct media_entity *if_vid = NULL, *if_aud = NULL; + struct media_entity *tuner = NULL, *decoder = NULL; + struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; + bool is_webcam = false; + u32 flags; + int ret, pad_sink, pad_source; + + if (!mdev) + return 0; + + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_IF_VID_DECODER: + if_vid = entity; + break; + case MEDIA_ENT_F_IF_AUD_DECODER: + if_aud = entity; + break; + case MEDIA_ENT_F_TUNER: + tuner = entity; + break; + case MEDIA_ENT_F_ATV_DECODER: + decoder = entity; + break; + case MEDIA_ENT_F_IO_V4L: + io_v4l = entity; + break; + case MEDIA_ENT_F_IO_VBI: + io_vbi = entity; + break; + case MEDIA_ENT_F_IO_SWRADIO: + io_swradio = entity; + break; + case MEDIA_ENT_F_CAM_SENSOR: + is_webcam = true; + break; + } + } + + /* It should have at least one I/O entity */ + if (!io_v4l && !io_vbi && !io_swradio) { + dev_warn(mdev->dev, "Didn't find any I/O entity\n"); + return -EINVAL; 
+ } + + /* + * Here, webcams are modelled on a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW are hidden. While such mapping is not enough + * for mc-centric hardware, it is enough for v4l2 interface centric + * PC-consumer's hardware. + */ + if (is_webcam) { + if (!io_v4l) { + dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n"); + return -EINVAL; + } + + media_device_for_each_entity(entity, mdev) { + if (entity->function != MEDIA_ENT_F_CAM_SENSOR) + continue; + ret = media_create_pad_link(entity, 0, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "Failed to create a sensor link\n"); + return ret; + } + } + if (!decoder) + return 0; + } + + /* The device isn't a webcam. So, it should have a decoder */ + if (!decoder) { + dev_warn(mdev->dev, "Decoder not found\n"); + return -EINVAL; + } + + /* Link the tuner and IF video output pads */ + if (tuner) { + if (if_vid) { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(if_vid, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + if_vid, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "Couldn't create tuner->PLL link)\n"); + return ret; + } + + pad_source = media_get_pad_index(if_vid, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "get decoder and/or PLL pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(if_vid, pad_source, + decoder, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link PLL to decoder\n"); + 
return ret; + } + } else { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + decoder, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + if (if_aud) { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_AUDIO); + pad_sink = media_get_pad_index(if_aud, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_AUDIO); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + if_aud, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n"); + return ret; + } + } else { + if_aud = tuner; + } + + } + + /* Create demod to V4L, VBI and SDR radio links */ + if (io_v4l) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n"); + return ret; + } + } + + if (io_swradio) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_swradio, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to SDR\n"); + return ret; + } + } + + if (io_vbi) { + pad_source = 
media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_vbi, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to VBI\n"); + return ret; + } + } + + /* Create links for the media connectors */ + flags = MEDIA_LNK_FL_ENABLED; + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_CONN_RF: + if (!tuner) + continue; + pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n"); + return -EINVAL; + } + ret = media_create_pad_link(entity, 0, tuner, + pad_sink, + flags); + break; + case MEDIA_ENT_F_CONN_SVIDEO: + case MEDIA_ENT_F_CONN_COMPOSITE: + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n"); + return -EINVAL; + } + ret = media_create_pad_link(entity, 0, decoder, + pad_sink, + flags); + break; + default: + continue; + } + if (ret) + return ret; + + flags = 0; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph); + +int v4l_enable_media_source(struct video_device *vdev) +{ + struct media_device *mdev = vdev->entity.graph_obj.mdev; + int ret = 0, err; + + if (!mdev) + return 0; + + mutex_lock(&mdev->graph_mutex); + if (!mdev->enable_source) + goto end; + err = mdev->enable_source(&vdev->entity, &vdev->pipe); + if (err) + ret = -EBUSY; +end: + mutex_unlock(&mdev->graph_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(v4l_enable_media_source); + +void v4l_disable_media_source(struct video_device *vdev) +{ + struct media_device *mdev = vdev->entity.graph_obj.mdev; + + if (mdev) { + mutex_lock(&mdev->graph_mutex); + if (mdev->disable_source) + mdev->disable_source(&vdev->entity); + 
mutex_unlock(&mdev->graph_mutex); + } +} +EXPORT_SYMBOL_GPL(v4l_disable_media_source); + +int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + struct v4l2_fh *fh = q->owner; + + if (fh && fh->vdev) + return v4l_enable_media_source(fh->vdev); + return 0; +} +EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source); + +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink, u32 flags) +{ + struct fwnode_handle *endpoint; + + if (!(sink->flags & MEDIA_PAD_FL_SINK)) + return -EINVAL; + + fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) { + struct fwnode_handle *remote_ep; + int src_idx, sink_idx, ret; + struct media_pad *src; + + src_idx = media_entity_get_fwnode_pad(&src_sd->entity, + endpoint, + MEDIA_PAD_FL_SOURCE); + if (src_idx < 0) { + dev_dbg(src_sd->dev, "no source pad found for %pfw\n", + endpoint); + continue; + } + + remote_ep = fwnode_graph_get_remote_endpoint(endpoint); + if (!remote_ep) { + dev_dbg(src_sd->dev, "no remote ep found for %pfw\n", + endpoint); + continue; + } + + /* + * ask the sink to verify it owns the remote endpoint, + * and translate to a sink pad. + */ + sink_idx = media_entity_get_fwnode_pad(sink->entity, + remote_ep, + MEDIA_PAD_FL_SINK); + fwnode_handle_put(remote_ep); + + if (sink_idx < 0 || sink_idx != sink->index) { + dev_dbg(src_sd->dev, + "sink pad index mismatch or error (is %d, expected %u)\n", + sink_idx, sink->index); + continue; + } + + /* + * the source endpoint corresponds to one of its source pads, + * the source endpoint connects to an endpoint at the sink + * entity, and the sink endpoint corresponds to the sink + * pad requested, so we have found an endpoint connection + * that works, create the media link for it. 
+ */ + + src = &src_sd->entity.pads[src_idx]; + + /* skip if link already exists */ + if (media_entity_find_link(src, sink)) { + dev_dbg(src_sd->dev, + "link %s:%d -> %s:%d already exists\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx); + continue; + } + + dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx); + + ret = media_create_pad_link(&src_sd->entity, src_idx, + sink->entity, sink_idx, flags); + if (ret) { + dev_err(src_sd->dev, + "link %s:%d -> %s:%d failed with %d\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx, ret); + + fwnode_handle_put(endpoint); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad); + +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd) +{ + unsigned int i; + + for (i = 0; i < sink_sd->entity.num_pads; i++) { + struct media_pad *pad = &sink_sd->entity.pads[i]; + int ret; + + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + continue; + + ret = v4l2_create_fwnode_links_to_pad(src_sd, pad, 0); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links); + +/* ----------------------------------------------------------------------------- + * Pipeline power management + * + * Entities must be powered up when part of a pipeline that contains at least + * one open video device node. + * + * To achieve this use the entity use_count field to track the number of users. + * For entities corresponding to video device nodes the use_count field stores + * the users count of the node. For entities corresponding to subdevs the + * use_count field stores the total number of users of all video device nodes + * in the pipeline. + * + * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and + * close() handlers of video device nodes. It increments or decrements the use + * count of all subdev entities in the pipeline. 
+ * + * To react to link management on powered pipelines, the link setup notification + * callback updates the use count of all entities in the source and sink sides + * of the link. + */ + +/* + * pipeline_pm_use_count - Count the number of users of a pipeline + * @entity: The entity + * + * Return the total number of users of all video device nodes in the pipeline. + */ +static int pipeline_pm_use_count(struct media_entity *entity, + struct media_graph *graph) +{ + int use = 0; + + media_graph_walk_start(graph, entity); + + while ((entity = media_graph_walk_next(graph))) { + if (is_media_entity_v4l2_video_device(entity)) + use += entity->use_count; + } + + return use; +} + +/* + * pipeline_pm_power_one - Apply power change to an entity + * @entity: The entity + * @change: Use count change + * + * Change the entity use count by @change. If the entity is a subdev update its + * power state by calling the core::s_power operation when the use count goes + * from 0 to != 0 or from != 0 to 0. + * + * Return 0 on success or a negative error code on failure. + */ +static int pipeline_pm_power_one(struct media_entity *entity, int change) +{ + struct v4l2_subdev *subdev; + int ret; + + subdev = is_media_entity_v4l2_subdev(entity) + ? media_entity_to_v4l2_subdev(entity) : NULL; + + if (entity->use_count == 0 && change > 0 && subdev != NULL) { + ret = v4l2_subdev_call(subdev, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + } + + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + if (entity->use_count == 0 && change < 0 && subdev != NULL) + v4l2_subdev_call(subdev, core, s_power, 0); + + return 0; +} + +/* + * pipeline_pm_power - Apply power change to all entities in a pipeline + * @entity: The entity + * @change: Use count change + * + * Walk the pipeline to update the use count and the power state of all non-node + * entities. + * + * Return 0 on success or a negative error code on failure. 
+ */ +static int pipeline_pm_power(struct media_entity *entity, int change, + struct media_graph *graph) +{ + struct media_entity *first = entity; + int ret = 0; + + if (!change) + return 0; + + media_graph_walk_start(graph, entity); + + while (!ret && (entity = media_graph_walk_next(graph))) + if (is_media_entity_v4l2_subdev(entity)) + ret = pipeline_pm_power_one(entity, change); + + if (!ret) + return ret; + + media_graph_walk_start(graph, first); + + while ((first = media_graph_walk_next(graph)) + && first != entity) + if (is_media_entity_v4l2_subdev(first)) + pipeline_pm_power_one(first, -change); + + return ret; +} + +static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use) +{ + struct media_device *mdev = entity->graph_obj.mdev; + int change = use ? 1 : -1; + int ret; + + mutex_lock(&mdev->graph_mutex); + + /* Apply use count to node. */ + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + /* Apply power change to connected non-nodes. */ + ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk); + if (ret < 0) + entity->use_count -= change; + + mutex_unlock(&mdev->graph_mutex); + + return ret; +} + +int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return v4l2_pipeline_pm_use(entity, 1); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get); + +void v4l2_pipeline_pm_put(struct media_entity *entity) +{ + /* Powering off entities shouldn't fail. 
*/ + WARN_ON(v4l2_pipeline_pm_use(entity, 0)); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put); + +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk; + struct media_entity *source = link->source->entity; + struct media_entity *sink = link->sink->entity; + int source_use; + int sink_use; + int ret = 0; + + source_use = pipeline_pm_use_count(source, graph); + sink_use = pipeline_pm_use_count(sink, graph); + + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && + !(flags & MEDIA_LNK_FL_ENABLED)) { + /* Powering off entities is assumed to never fail. */ + pipeline_pm_power(source, -sink_use, graph); + pipeline_pm_power(sink, -source_use, graph); + return 0; + } + + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && + (flags & MEDIA_LNK_FL_ENABLED)) { + + ret = pipeline_pm_power(source, sink_use, graph); + if (ret < 0) + return ret; + + ret = pipeline_pm_power(sink, source_use, graph); + if (ret < 0) + pipeline_pm_power(source, -sink_use, graph); + } + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-mem2mem.c b/6.12.0/drivers/media/v4l2-core/v4l2-mem2mem.c new file mode 100644 index 0000000..eb22d61 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -0,0 +1,1643 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Memory-to-memory device framework for Video for Linux 2 and vb2. + * + * Helper functions for devices that use vb2 buffers for both their + * source and destination. + * + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 
+ * Pawel Osciak, + * Marek Szyprowski, + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("Mem to mem device framework for vb2"); +MODULE_AUTHOR("Pawel Osciak, "); +MODULE_LICENSE("GPL"); + +static bool debug; +module_param(debug, bool, 0644); + +#define dprintk(fmt, arg...) \ + do { \ + if (debug) \ + printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ + } while (0) + + +/* Instance is already queued on the job_queue */ +#define TRANS_QUEUED (1 << 0) +/* Instance is currently running in hardware */ +#define TRANS_RUNNING (1 << 1) +/* Instance is currently aborting */ +#define TRANS_ABORT (1 << 2) + + +/* The job queue is not running new jobs */ +#define QUEUE_PAUSED (1 << 0) + + +/* Offset base for buffers on the destination queue - used to distinguish + * between source and destination buffers when mmapping - they receive the same + * offsets but for different queues */ +#define DST_QUEUE_OFF_BASE (1 << 30) + +enum v4l2_m2m_entity_type { + MEM2MEM_ENT_TYPE_SOURCE, + MEM2MEM_ENT_TYPE_SINK, + MEM2MEM_ENT_TYPE_PROC +}; + +static const char * const m2m_entity_name[] = { + "source", + "sink", + "proc" +}; + +/** + * struct v4l2_m2m_dev - per-device context + * @source: &struct media_entity pointer with the source entity + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @source_pad: &struct media_pad with the source pad. + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @sink: &struct media_entity pointer with the sink entity + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @sink_pad: &struct media_pad with the sink pad. + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @proc: &struct media_entity pointer with the M2M device itself. + * @proc_pads: &struct media_pad with the @proc pads. 
+ * Used only when the M2M device is registered via + * v4l2_m2m_unregister_media_controller(). + * @intf_devnode: &struct media_intf devnode pointer with the interface + * with controls the M2M device. + * @curr_ctx: currently running instance + * @job_queue: instances queued to run + * @job_spinlock: protects job_queue + * @job_work: worker to run queued jobs. + * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED. + * @m2m_ops: driver callbacks + */ +struct v4l2_m2m_dev { + struct v4l2_m2m_ctx *curr_ctx; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_entity *source; + struct media_pad source_pad; + struct media_entity sink; + struct media_pad sink_pad; + struct media_entity proc; + struct media_pad proc_pads[2]; + struct media_intf_devnode *intf_devnode; +#endif + + struct list_head job_queue; + spinlock_t job_spinlock; + struct work_struct job_work; + unsigned long job_queue_flags; + + const struct v4l2_m2m_ops *m2m_ops; +}; + +static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + if (V4L2_TYPE_IS_OUTPUT(type)) + return &m2m_ctx->out_q_ctx; + else + return &m2m_ctx->cap_q_ctx; +} + +struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct v4l2_m2m_queue_ctx *q_ctx; + + q_ctx = get_queue_ctx(m2m_ctx, type); + if (!q_ctx) + return NULL; + + return &q_ctx->q; +} +EXPORT_SYMBOL(v4l2_m2m_get_vq); + +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + + b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); + +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) +{ 
+ struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + + b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); + +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); + +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + b = container_of(vbuf, struct v4l2_m2m_buffer, vb); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf); + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx) + +{ + struct v4l2_m2m_buffer *b, *tmp; + struct vb2_v4l2_buffer *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { + if (b->vb.vb2_buf.index == idx) { + list_del(&b->list); + q_ctx->num_rdy--; + ret = &b->vb; + break; + } + } + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx); + +/* + * Scheduling handlers + */ + +void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev 
*m2m_dev) +{ + unsigned long flags; + void *ret = NULL; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + if (m2m_dev->curr_ctx) + ret = m2m_dev->curr_ctx->priv; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); + +/** + * v4l2_m2m_try_run() - select next job to perform and run it if possible + * @m2m_dev: per-device context + * + * Get next transaction (if present) from the waiting jobs list and run it. + * + * Note that this function can run on a given v4l2_m2m_ctx context, + * but call .device_run for another context. + */ +static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + if (NULL != m2m_dev->curr_ctx) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("Another instance is running, won't run now\n"); + return; + } + + if (list_empty(&m2m_dev->job_queue)) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("No job pending\n"); + return; + } + + if (m2m_dev->job_queue_flags & QUEUE_PAUSED) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("Running new jobs is paused\n"); + return; + } + + m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, + struct v4l2_m2m_ctx, queue); + m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx); + m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); +} + +/* + * __v4l2_m2m_try_queue() - queue a job + * @m2m_dev: m2m device + * @m2m_ctx: m2m context + * + * Check if this context is ready to queue a job. + * + * This function can run in interrupt context. 
+ */ +static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned long flags_job; + struct vb2_v4l2_buffer *dst, *src; + + dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); + + if (!m2m_ctx->out_q_ctx.q.streaming || + (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) { + if (!m2m_ctx->ignore_cap_streaming) + dprintk("Streaming needs to be on for both queues\n"); + else + dprintk("Streaming needs to be on for the OUTPUT queue\n"); + return; + } + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + + /* If the context is aborted then don't schedule it */ + if (m2m_ctx->job_flags & TRANS_ABORT) { + dprintk("Aborted context\n"); + goto job_unlock; + } + + if (m2m_ctx->job_flags & TRANS_QUEUED) { + dprintk("On job queue already\n"); + goto job_unlock; + } + + src = v4l2_m2m_next_src_buf(m2m_ctx); + dst = v4l2_m2m_next_dst_buf(m2m_ctx); + if (!src && !m2m_ctx->out_q_ctx.buffered) { + dprintk("No input buffers available\n"); + goto job_unlock; + } + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { + dprintk("No output buffers available\n"); + goto job_unlock; + } + + m2m_ctx->new_frame = true; + + if (src && dst && dst->is_held && + dst->vb2_buf.copied_timestamp && + dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { + dprintk("Timestamp mismatch, returning held capture buffer\n"); + dst->is_held = false; + v4l2_m2m_dst_buf_remove(m2m_ctx); + v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); + dst = v4l2_m2m_next_dst_buf(m2m_ctx); + + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { + dprintk("No output buffers available after returning held buffer\n"); + goto job_unlock; + } + } + + if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) + m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || + dst->vb2_buf.timestamp != src->vb2_buf.timestamp; + + if (m2m_ctx->has_stopped) { + dprintk("Device has stopped\n"); + goto job_unlock; + } + + if 
(m2m_dev->m2m_ops->job_ready + && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { + dprintk("Driver not ready\n"); + goto job_unlock; + } + + list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); + m2m_ctx->job_flags |= TRANS_QUEUED; + +job_unlock: + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); +} + +/** + * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context + * @m2m_ctx: m2m context + * + * Check if this context is ready to queue a job. If suitable, + * run the next queued job on the mem2mem device. + * + * This function shouldn't run in interrupt context. + * + * Note that v4l2_m2m_try_schedule() can schedule one job for this context, + * and then run another job for another context. + */ +void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; + + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); + v4l2_m2m_try_run(m2m_dev); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule); + +/** + * v4l2_m2m_device_run_work() - run pending jobs for the context + * @work: Work structure used for scheduling the execution of this function. 
+ */ +static void v4l2_m2m_device_run_work(struct work_struct *work) +{ + struct v4l2_m2m_dev *m2m_dev = + container_of(work, struct v4l2_m2m_dev, job_work); + + v4l2_m2m_try_run(m2m_dev); +} + +/** + * v4l2_m2m_cancel_job() - cancel pending jobs for the context + * @m2m_ctx: m2m context with jobs to be canceled + * + * In case of streamoff or release called on any context, + * 1] If the context is currently running, then abort job will be called + * 2] If the context is queued, then the context will be removed from + * the job_queue + */ +static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct v4l2_m2m_dev *m2m_dev; + unsigned long flags; + + m2m_dev = m2m_ctx->m2m_dev; + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + + m2m_ctx->job_flags |= TRANS_ABORT; + if (m2m_ctx->job_flags & TRANS_RUNNING) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + if (m2m_dev->m2m_ops->job_abort) + m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); + dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); + wait_event(m2m_ctx->finished, + !(m2m_ctx->job_flags & TRANS_RUNNING)); + } else if (m2m_ctx->job_flags & TRANS_QUEUED) { + list_del(&m2m_ctx->queue); + m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("m2m_ctx: %p had been on queue and was removed\n", + m2m_ctx); + } else { + /* Do nothing, was not on queue/running */ + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + } +} + +/* + * Schedule the next job, called from v4l2_m2m_job_finish() or + * v4l2_m2m_buf_done_and_job_finish(). + */ +static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + /* + * This instance might have more buffers ready, but since we do not + * allow more than one job on the job_queue per instance, each has + * to be scheduled separately after the previous one finishes. 
+ */ + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); + + /* + * We might be running in atomic context, + * but the job must be run in non-atomic context. + */ + schedule_work(&m2m_dev->job_work); +} + +/* + * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or + * v4l2_m2m_buf_done_and_job_finish(). + */ +static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { + dprintk("Called by an instance not currently running\n"); + return false; + } + + list_del(&m2m_dev->curr_ctx->queue); + m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); + wake_up(&m2m_dev->curr_ctx->finished); + m2m_dev->curr_ctx = NULL; + return true; +} + +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned long flags; + bool schedule_next; + + /* + * This function should not be used for drivers that support + * holding capture buffers. Those should use + * v4l2_m2m_buf_done_and_job_finish() instead. 
+ */ + WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF); + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (schedule_next) + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); +} +EXPORT_SYMBOL(v4l2_m2m_job_finish); + +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx, + enum vb2_buffer_state state) +{ + struct vb2_v4l2_buffer *src_buf, *dst_buf; + bool schedule_next = false; + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + src_buf = v4l2_m2m_src_buf_remove(m2m_ctx); + dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx); + + if (WARN_ON(!src_buf || !dst_buf)) + goto unlock; + dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + if (!dst_buf->is_held) { + v4l2_m2m_dst_buf_remove(m2m_ctx); + v4l2_m2m_buf_done(dst_buf, state); + } + /* + * If the request API is being used, returning the OUTPUT + * (src) buffer will wake-up any process waiting on the + * request file descriptor. + * + * Therefore, return the CAPTURE (dst) buffer first, + * to avoid signalling the request file descriptor + * before the CAPTURE buffer is done. 
+ */ + v4l2_m2m_buf_done(src_buf, state); + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); +unlock: + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (schedule_next) + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); +} +EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); + +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + struct v4l2_m2m_ctx *curr_ctx; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + m2m_dev->job_queue_flags |= QUEUE_PAUSED; + curr_ctx = m2m_dev->curr_ctx; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (curr_ctx) + wait_event(curr_ctx->finished, + !(curr_ctx->job_flags & TRANS_RUNNING)); +} +EXPORT_SYMBOL(v4l2_m2m_suspend); + +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + v4l2_m2m_try_run(m2m_dev); +} +EXPORT_SYMBOL(v4l2_m2m_resume); + +int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_requestbuffers *reqbufs) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); + ret = vb2_reqbufs(vq, reqbufs); + /* If count == 0, then the owner has released all buffers and he + is no longer owner of the queue. Otherwise we have an owner. */ + if (ret == 0) + vq->owner = reqbufs->count ? 
file->private_data : NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); + +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) +{ + /* Adjust MMAP memory offsets for the CAPTURE queue */ + if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { + if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + + for (i = 0; i < buf->length; ++i) + buf->m.planes[i].m.mem_offset + += DST_QUEUE_OFF_BASE; + } else { + buf->m.offset += DST_QUEUE_OFF_BASE; + } + } +} + +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); + +/* + * This will add the LAST flag and mark the buffer management + * state as stopped. + * This is called when the last capture buffer must be flagged as LAST + * in draining mode from the encoder/decoder driver buf_queue() callback + * or from v4l2_update_last_buf_state() when a capture buffer is available. 
+ */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + vbuf->flags |= V4L2_BUF_FLAG_LAST; + vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); + + v4l2_m2m_mark_stopped(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); + +/* When stop command is issued, update buffer management state */ +static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct vb2_v4l2_buffer *next_dst_buf; + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + return 0; + + m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); + m2m_ctx->is_draining = true; + + /* + * The processing of the last output buffer queued before + * the STOP command is expected to mark the buffer management + * state as stopped with v4l2_m2m_mark_stopped(). + */ + if (m2m_ctx->last_src_buf) + return 0; + + /* + * In case the output queue is empty, try to mark the last capture + * buffer as LAST. + */ + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) { + /* + * Wait for the next queued one in encoder/decoder driver + * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() + * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet + * streaming. 
+ */ + m2m_ctx->next_buf_last = true; + return 0; + } + + v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); + + return 0; +} + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder drivers start_streaming() + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + /* If start streaming again, untag the last output buffer */ + if (V4L2_TYPE_IS_OUTPUT(q->type)) + m2m_ctx->last_src_buf = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder driver stop_streaming() + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + if (V4L2_TYPE_IS_OUTPUT(q->type)) { + /* + * If in draining state, either mark next dst buffer as + * done or flag next one to be marked as done either + * in encoder/decoder driver buf_queue() callback using + * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() + * if encoder/decoder is not yet streaming + */ + if (m2m_ctx->is_draining) { + struct vb2_v4l2_buffer *next_dst_buf; + + m2m_ctx->last_src_buf = NULL; + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) + m2m_ctx->next_buf_last = true; + else + v4l2_m2m_last_buffer_done(m2m_ctx, + next_dst_buf); + } + } else { + v4l2_m2m_clear_state(m2m_ctx); + } +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); + +static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vbuf; + unsigned int i; + + if (WARN_ON(q->is_output)) + return; + if (list_empty(&q->queued_list)) + return; + + vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); + for (i = 0; i < vb->num_planes; i++) + vb2_set_plane_payload(vb, i, 0); + + /* + * Since the buffer hasn't been queued to the ready queue, + * mark is active and 
owned before marking it LAST and DONE + */ + vb->state = VB2_BUF_STATE_ACTIVE; + atomic_inc(&q->owned_by_drv_count); + + vbuf = to_vb2_v4l2_buffer(vb); + vbuf->field = V4L2_FIELD_NONE; + + v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); +} + +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + if (V4L2_TYPE_IS_CAPTURE(vq->type) && + (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { + dprintk("%s: requests cannot be used with capture buffers\n", + __func__); + return -EPERM; + } + + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + /* + * If the capture queue is streaming, but streaming hasn't started + * on the device, but was asked to stop, mark the previously queued + * buffer as DONE with LAST flag since it won't be queued on the + * device. 
+ */ + if (V4L2_TYPE_IS_CAPTURE(vq->type) && + vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && + (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) + v4l2_m2m_force_last_buf_done(m2m_ctx, vq); + else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) + v4l2_m2m_try_schedule(m2m_ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); + +int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); + +int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); + +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); + return vb2_create_bufs(vq, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); + +int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_exportbuffer *eb) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); + return vb2_expbuf(vq, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); + +int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, type); + ret = vb2_streamon(vq, type); + if 
(!ret) + v4l2_m2m_try_schedule(m2m_ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); + +int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct v4l2_m2m_dev *m2m_dev; + struct v4l2_m2m_queue_ctx *q_ctx; + unsigned long flags_job, flags; + int ret; + + /* wait until the current context is dequeued from job_queue */ + v4l2_m2m_cancel_job(m2m_ctx); + + q_ctx = get_queue_ctx(m2m_ctx, type); + ret = vb2_streamoff(&q_ctx->q, type); + if (ret) + return ret; + + m2m_dev = m2m_ctx->m2m_dev; + spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + /* We should not be scheduled anymore, since we're dropping a queue. */ + if (m2m_ctx->job_flags & TRANS_QUEUED) + list_del(&m2m_ctx->queue); + m2m_ctx->job_flags = 0; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + /* Drop queue, since streamoff returns device to the same state as after + * calling reqbufs. */ + INIT_LIST_HEAD(&q_ctx->rdy_queue); + q_ctx->num_rdy = 0; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + if (m2m_dev->curr_ctx == m2m_ctx) { + m2m_dev->curr_ctx = NULL; + wake_up(&m2m_ctx->finished); + } + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); + +static __poll_t v4l2_m2m_poll_for_data(struct file *file, + struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait) +{ + struct vb2_queue *src_q, *dst_q; + __poll_t rc = 0; + unsigned long flags; + + src_q = v4l2_m2m_get_src_vq(m2m_ctx); + dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + + /* + * There has to be at least one buffer queued on each queued_list, which + * means either in driver already or waiting for driver to claim it + * and start processing. 
+ */ + if ((!vb2_is_streaming(src_q) || src_q->error || + list_empty(&src_q->queued_list)) && + (!vb2_is_streaming(dst_q) || dst_q->error || + (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) + return EPOLLERR; + + spin_lock_irqsave(&src_q->done_lock, flags); + if (!list_empty(&src_q->done_list)) + rc |= EPOLLOUT | EPOLLWRNORM; + spin_unlock_irqrestore(&src_q->done_lock, flags); + + spin_lock_irqsave(&dst_q->done_lock, flags); + /* + * If the last buffer was dequeued from the capture queue, signal + * userspace. DQBUF(CAPTURE) will return -EPIPE. + */ + if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued) + rc |= EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&dst_q->done_lock, flags); + + return rc; +} + +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait) +{ + struct video_device *vfd = video_devdata(file); + struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx); + struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + __poll_t req_events = poll_requested_events(wait); + __poll_t rc = 0; + + /* + * poll_wait() MUST be called on the first invocation on all the + * potential queues of interest, even if we are not interested in their + * events during this first call. Failure to do so will result in + * queue's events to be ignored because the poll_table won't be capable + * of adding new wait queues thereafter. 
+ */ + poll_wait(file, &src_q->done_wq, wait); + poll_wait(file, &dst_q->done_wq, wait); + + if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)) + rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { + struct v4l2_fh *fh = file->private_data; + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + rc |= EPOLLPRI; + } + + return rc; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_poll); + +int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct vm_area_struct *vma) +{ + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + struct vb2_queue *vq; + + if (offset < DST_QUEUE_OFF_BASE) { + vq = v4l2_m2m_get_src_vq(m2m_ctx); + } else { + vq = v4l2_m2m_get_dst_vq(m2m_ctx); + vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } + + return vb2_mmap(vq, vma); +} +EXPORT_SYMBOL(v4l2_m2m_mmap); + +#ifndef CONFIG_MMU +unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct v4l2_fh *fh = file->private_data; + unsigned long offset = pgoff << PAGE_SHIFT; + struct vb2_queue *vq; + + if (offset < DST_QUEUE_OFF_BASE) { + vq = v4l2_m2m_get_src_vq(fh->m2m_ctx); + } else { + vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx); + pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } + + return vb2_get_unmapped_area(vq, addr, len, pgoff, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area); +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) +void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) +{ + media_remove_intf_links(&m2m_dev->intf_devnode->intf); + media_devnode_remove(m2m_dev->intf_devnode); + + media_entity_remove_links(m2m_dev->source); + media_entity_remove_links(&m2m_dev->sink); + media_entity_remove_links(&m2m_dev->proc); + media_device_unregister_entity(m2m_dev->source); + media_device_unregister_entity(&m2m_dev->sink); + media_device_unregister_entity(&m2m_dev->proc); + 
kfree(m2m_dev->source->name); + kfree(m2m_dev->sink.name); + kfree(m2m_dev->proc.name); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller); + +static int v4l2_m2m_register_entity(struct media_device *mdev, + struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type, + struct video_device *vdev, int function) +{ + struct media_entity *entity; + struct media_pad *pads; + char *name; + unsigned int len; + int num_pads; + int ret; + + switch (type) { + case MEM2MEM_ENT_TYPE_SOURCE: + entity = m2m_dev->source; + pads = &m2m_dev->source_pad; + pads[0].flags = MEDIA_PAD_FL_SOURCE; + num_pads = 1; + break; + case MEM2MEM_ENT_TYPE_SINK: + entity = &m2m_dev->sink; + pads = &m2m_dev->sink_pad; + pads[0].flags = MEDIA_PAD_FL_SINK; + num_pads = 1; + break; + case MEM2MEM_ENT_TYPE_PROC: + entity = &m2m_dev->proc; + pads = m2m_dev->proc_pads; + pads[0].flags = MEDIA_PAD_FL_SINK; + pads[1].flags = MEDIA_PAD_FL_SOURCE; + num_pads = 2; + break; + default: + return -EINVAL; + } + + entity->obj_type = MEDIA_ENTITY_TYPE_BASE; + if (type != MEM2MEM_ENT_TYPE_PROC) { + entity->info.dev.major = VIDEO_MAJOR; + entity->info.dev.minor = vdev->minor; + } + len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]); + name = kmalloc(len, GFP_KERNEL); + if (!name) + return -ENOMEM; + snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]); + entity->name = name; + entity->function = function; + + ret = media_entity_pads_init(entity, num_pads, pads); + if (ret) { + kfree(entity->name); + entity->name = NULL; + return ret; + } + ret = media_device_register_entity(mdev, entity); + if (ret) { + kfree(entity->name); + entity->name = NULL; + return ret; + } + + return 0; +} + +int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function) +{ + struct media_device *mdev = vdev->v4l2_dev->mdev; + struct media_link *link; + int ret; + + if (!mdev) + return 0; + + /* A memory-to-memory device consists in two + * DMA engine and one 
video processing entities. + * The DMA engine entities are linked to a V4L interface + */ + + /* Create the three entities with their pads */ + m2m_dev->source = &vdev->entity; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L); + if (ret) + return ret; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_PROC, vdev, function); + if (ret) + goto err_rel_entity0; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L); + if (ret) + goto err_rel_entity1; + + /* Connect the three entities */ + ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret) + goto err_rel_entity2; + + ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret) + goto err_rm_links0; + + /* Create video interface */ + m2m_dev->intf_devnode = media_devnode_create(mdev, + MEDIA_INTF_T_V4L_VIDEO, 0, + VIDEO_MAJOR, vdev->minor); + if (!m2m_dev->intf_devnode) { + ret = -ENOMEM; + goto err_rm_links1; + } + + /* Connect the two DMA engines to the interface */ + link = media_create_intf_link(m2m_dev->source, + &m2m_dev->intf_devnode->intf, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (!link) { + ret = -ENOMEM; + goto err_rm_devnode; + } + + link = media_create_intf_link(&m2m_dev->sink, + &m2m_dev->intf_devnode->intf, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (!link) { + ret = -ENOMEM; + goto err_rm_intf_link; + } + return 0; + +err_rm_intf_link: + media_remove_intf_links(&m2m_dev->intf_devnode->intf); +err_rm_devnode: + media_devnode_remove(m2m_dev->intf_devnode); +err_rm_links1: + media_entity_remove_links(&m2m_dev->sink); +err_rm_links0: + media_entity_remove_links(&m2m_dev->proc); + media_entity_remove_links(m2m_dev->source); +err_rel_entity2: + media_device_unregister_entity(&m2m_dev->proc); + kfree(m2m_dev->proc.name); 
+err_rel_entity1: + media_device_unregister_entity(&m2m_dev->sink); + kfree(m2m_dev->sink.name); +err_rel_entity0: + media_device_unregister_entity(m2m_dev->source); + kfree(m2m_dev->source->name); + return ret; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller); +#endif + +struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) +{ + struct v4l2_m2m_dev *m2m_dev; + + if (!m2m_ops || WARN_ON(!m2m_ops->device_run)) + return ERR_PTR(-EINVAL); + + m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); + if (!m2m_dev) + return ERR_PTR(-ENOMEM); + + m2m_dev->curr_ctx = NULL; + m2m_dev->m2m_ops = m2m_ops; + INIT_LIST_HEAD(&m2m_dev->job_queue); + spin_lock_init(&m2m_dev->job_spinlock); + INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); + + return m2m_dev; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_init); + +void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) +{ + kfree(m2m_dev); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_release); + +struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, + void *drv_priv, + int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) +{ + struct v4l2_m2m_ctx *m2m_ctx; + struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; + int ret; + + m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); + if (!m2m_ctx) + return ERR_PTR(-ENOMEM); + + m2m_ctx->priv = drv_priv; + m2m_ctx->m2m_dev = m2m_dev; + init_waitqueue_head(&m2m_ctx->finished); + + out_q_ctx = &m2m_ctx->out_q_ctx; + cap_q_ctx = &m2m_ctx->cap_q_ctx; + + INIT_LIST_HEAD(&out_q_ctx->rdy_queue); + INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); + spin_lock_init(&out_q_ctx->rdy_spinlock); + spin_lock_init(&cap_q_ctx->rdy_spinlock); + + INIT_LIST_HEAD(&m2m_ctx->queue); + + ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); + + if (ret) + goto err; + /* + * Both queues should use same the mutex to lock the m2m context. + * This lock is used in some v4l2_m2m_* helpers. 
+ */ + if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { + ret = -EINVAL; + goto err; + } + m2m_ctx->q_lock = out_q_ctx->q.lock; + + return m2m_ctx; +err: + kfree(m2m_ctx); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); + +void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) +{ + /* wait until the current context is dequeued from job_queue */ + v4l2_m2m_cancel_job(m2m_ctx); + + vb2_queue_release(&m2m_ctx->cap_q_ctx.q); + vb2_queue_release(&m2m_ctx->out_q_ctx.q); + + kfree(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); + +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b = container_of(vbuf, + struct v4l2_m2m_buffer, vb); + struct v4l2_m2m_queue_ctx *q_ctx; + unsigned long flags; + + q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); + if (!q_ctx) + return; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_add_tail(&b->list, &q_ctx->rdy_queue); + q_ctx->num_rdy++; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); + +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags) +{ + u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + + if (copy_frame_flags) + mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | + V4L2_BUF_FLAG_BFRAME; + + cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; + + if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) + cap_vb->timecode = out_vb->timecode; + cap_vb->field = out_vb->field; + cap_vb->flags &= ~mask; + cap_vb->flags |= out_vb->flags & mask; + cap_vb->vb2_buf.copied_timestamp = 1; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); + +void v4l2_m2m_request_queue(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + struct v4l2_m2m_ctx *m2m_ctx = NULL; + + /* + * Queue all objects. 
Note that buffer objects are at the end of the + * objects list, after all other object types. Once buffer objects + * are queued, the driver might delete them immediately (if the driver + * processes the buffer at once), so we have to use + * list_for_each_entry_safe() to handle the case where the object we + * queue is deleted. + */ + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { + struct v4l2_m2m_ctx *m2m_ctx_obj; + struct vb2_buffer *vb; + + if (!obj->ops->queue) + continue; + + if (vb2_request_object_is_buffer(obj)) { + /* Sanity checks */ + vb = container_of(obj, struct vb2_buffer, req_obj); + WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); + m2m_ctx_obj = container_of(vb->vb2_queue, + struct v4l2_m2m_ctx, + out_q_ctx.q); + WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); + m2m_ctx = m2m_ctx_obj; + } + + /* + * The buffer we queue here can in theory be immediately + * unbound, hence the use of list_for_each_entry_safe() + * above and why we call the queue op last. + */ + obj->ops->queue(obj); + } + + WARN_ON(!m2m_ctx); + + if (m2m_ctx) + v4l2_m2m_try_schedule(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); + +/* Videobuf2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs); + +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs); + +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *remove) +{ + struct v4l2_fh *fh = file->private_data; + struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type); + + if (!q) + return -EINVAL; + if (q->type != remove->type) + return -EINVAL; + + return 
vb2_core_remove_bufs(q, remove->index, remove->count); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs); + +int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf); + +int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf); + +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf); + +int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf); + +int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf); + +int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, + enum v4l2_buf_type type) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_streamon(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon); + +int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, + enum v4l2_buf_type type) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); + +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + ec->flags = 0; + return 0; +} 
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd); + +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + dc->flags = 0; + + if (dc->cmd == V4L2_DEC_CMD_STOP) { + dc->stop.pts = 0; + } else if (dc->cmd == V4L2_DEC_CMD_START) { + dc->start.speed = 0; + dc->start.format = V4L2_DEC_START_FMT_NONE; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); + +/* + * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START + * Should be called from the encoder driver encoder_cmd() callback + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + if (ec->cmd == V4L2_ENC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); + +/* + * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START + * Should be called from the decoder driver decoder_cmd() callback + */ +int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + if (dc->cmd == V4L2_DEC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); + +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); + +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, + 
struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); + +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_FLUSH) + return -EINVAL; + + dc->flags = 0; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd); + +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file->private_data; + struct vb2_v4l2_buffer *out_vb, *cap_vb; + struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; + unsigned long flags; + int ret; + + ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc); + if (ret < 0) + return ret; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx); + cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx); + + /* + * If there is an out buffer pending, then clear any HOLD flag. + * + * By clearing this flag we ensure that when this output + * buffer is processed any held capture buffer will be released. + */ + if (out_vb) { + out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + } else if (cap_vb && cap_vb->is_held) { + /* + * If there were no output buffers, but there is a + * capture buffer that is held, then release that + * buffer. + */ + cap_vb->is_held = false; + v4l2_m2m_dst_buf_remove(fh->m2m_ctx); + v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE); + } + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd); + +/* + * v4l2_file_operations helpers. It is assumed here same lock is used + * for the output and the capture buffer queue. 
+ */ + +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap); + +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait) +{ + struct v4l2_fh *fh = file->private_data; + struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; + __poll_t ret; + + if (m2m_ctx->q_lock) + mutex_lock(m2m_ctx->q_lock); + + ret = v4l2_m2m_poll(file, m2m_ctx, wait); + + if (m2m_ctx->q_lock) + mutex_unlock(m2m_ctx->q_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll); + diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-spi.c b/6.12.0/drivers/media/v4l2-core/v4l2-spi.c new file mode 100644 index 0000000..1baf8e6 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-spi.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * v4l2-spi - SPI helpers for Video4Linux2 + */ + +#include +#include +#include +#include + +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{ + struct spi_device *spi = v4l2_get_subdevdata(sd); + + if (spi && !spi->dev.of_node && !spi->dev.fwnode) + spi_unregister_device(spi); +} + +void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_IS_SPI; + /* the owner is the same as the spi_device's driver owner */ + sd->owner = spi->dev.driver->owner; + sd->dev = &spi->dev; + /* spi_device and v4l2_subdev point to one another */ + v4l2_set_subdevdata(sd, spi); + spi_set_drvdata(spi, sd); + /* initialize name */ + snprintf(sd->name, sizeof(sd->name), "%s %s", + spi->dev.driver->name, dev_name(&spi->dev)); +} +EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init); + +struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, + struct spi_board_info *info) +{ + struct v4l2_subdev *sd = NULL; + struct spi_device *spi = NULL; + + if (!v4l2_dev) + return NULL; 
+ if (info->modalias[0]) + request_module(info->modalias); + + spi = spi_new_device(ctlr, info); + + if (!spi || !spi->dev.driver) + goto error; + + if (!try_module_get(spi->dev.driver->owner)) + goto error; + + sd = spi_get_drvdata(spi); + + /* + * Register with the v4l2_device which increases the module's + * use count as well. + */ + if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) + sd = NULL; + + /* Decrease the module use count to match the first try_module_get. */ + module_put(spi->dev.driver->owner); + +error: + /* + * If we have a client but no subdev, then something went wrong and + * we must unregister the client. + */ + if (!sd) + spi_unregister_device(spi); + + return sd; +} +EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-subdev-priv.h b/6.12.0/drivers/media/v4l2-core/v4l2-subdev-priv.h new file mode 100644 index 0000000..52391d6 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-subdev-priv.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 sub-device pivate header. + * + * Copyright (C) 2023 Hans de Goede + */ + +#ifndef _V4L2_SUBDEV_PRIV_H_ +#define _V4L2_SUBDEV_PRIV_H_ + +int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd); +void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd); + +#endif diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c new file mode 100644 index 0000000..3a4ba08 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c @@ -0,0 +1,2569 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 sub-device + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Laurent Pinchart + * Sakari Ailus + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) +/* + * The Streams API is an experimental feature. 
To use the Streams API, set + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +static bool v4l2_subdev_enable_streams_api; +#endif + +/* + * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set + * of streams. + * + * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX + * restricts the total number of streams in a pad, although the stream ID is + * not restricted. + */ +#define V4L2_SUBDEV_MAX_STREAM_ID 63 + +#include "v4l2-subdev-priv.h" + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) +static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd) +{ + struct v4l2_subdev_state *state; + static struct lock_class_key key; + + state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key); + if (IS_ERR(state)) + return PTR_ERR(state); + + fh->state = state; + + return 0; +} + +static void subdev_fh_free(struct v4l2_subdev_fh *fh) +{ + __v4l2_subdev_state_free(fh->state); + fh->state = NULL; +} + +static int subdev_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_subdev_fh *subdev_fh; + int ret; + + subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL); + if (subdev_fh == NULL) + return -ENOMEM; + + ret = subdev_fh_init(subdev_fh, sd); + if (ret) { + kfree(subdev_fh); + return ret; + } + + v4l2_fh_init(&subdev_fh->vfh, vdev); + v4l2_fh_add(&subdev_fh->vfh); + file->private_data = &subdev_fh->vfh; + + if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) { + struct module *owner; + + owner = sd->entity.graph_obj.mdev->dev->driver->owner; + if (!try_module_get(owner)) { + ret = -EBUSY; + goto err; + } + subdev_fh->owner = owner; + } + + if (sd->internal_ops && sd->internal_ops->open) { + ret = sd->internal_ops->open(sd, subdev_fh); + if (ret < 0) + goto err; + } + + return 0; + +err: + module_put(subdev_fh->owner); + v4l2_fh_del(&subdev_fh->vfh); + v4l2_fh_exit(&subdev_fh->vfh); + subdev_fh_free(subdev_fh); + 
kfree(subdev_fh); + + return ret; +} + +static int subdev_close(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + + if (sd->internal_ops && sd->internal_ops->close) + sd->internal_ops->close(sd, subdev_fh); + module_put(subdev_fh->owner); + v4l2_fh_del(vfh); + v4l2_fh_exit(vfh); + subdev_fh_free(subdev_fh); + kfree(subdev_fh); + file->private_data = NULL; + + return 0; +} +#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ +static int subdev_open(struct file *file) +{ + return -ENODEV; +} + +static int subdev_close(struct file *file) +{ + return -ENODEV; +} +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) + led_set_brightness(sd->privacy_led, + sd->privacy_led->max_brightness); +#endif +} + +static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) + led_set_brightness(sd->privacy_led, 0); +#endif +} + +static inline int check_which(u32 which) +{ + if (which != V4L2_SUBDEV_FORMAT_TRY && + which != V4L2_SUBDEV_FORMAT_ACTIVE) + return -EINVAL; + + return 0; +} + +static inline int check_pad(struct v4l2_subdev *sd, u32 pad) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + if (sd->entity.num_pads) { + if (pad >= sd->entity.num_pads) + return -EINVAL; + return 0; + } +#endif + /* allow pad 0 on subdevices not registered as media entities */ + if (pad > 0) + return -EINVAL; + return 0; +} + +static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + u32 which, u32 pad, u32 stream) +{ + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + if (!v4l2_subdev_state_get_format(state, pad, stream)) + return -EINVAL; + return 0; 
+#else + return -EINVAL; +#endif + } + + if (stream != 0) + return -EINVAL; + + if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads)) + return -EINVAL; + + return 0; +} + +static inline int check_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + if (!format) + return -EINVAL; + + return check_which(format->which) ? : check_pad(sd, format->pad) ? : + check_state(sd, state, format->which, format->pad, format->stream); +} + +static int call_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return check_format(sd, state, format) ? : + sd->ops->pad->get_fmt(sd, state, format); +} + +static int call_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return check_format(sd, state, format) ? : + sd->ops->pad->set_fmt(sd, state, format); +} + +static int call_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (!code) + return -EINVAL; + + return check_which(code->which) ? : check_pad(sd, code->pad) ? : + check_state(sd, state, code->which, code->pad, code->stream) ? : + sd->ops->pad->enum_mbus_code(sd, state, code); +} + +static int call_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (!fse) + return -EINVAL; + + return check_which(fse->which) ? : check_pad(sd, fse->pad) ? : + check_state(sd, state, fse->which, fse->pad, fse->stream) ? : + sd->ops->pad->enum_frame_size(sd, state, fse); +} + +static int call_enum_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie) +{ + if (!fie) + return -EINVAL; + + return check_which(fie->which) ? : check_pad(sd, fie->pad) ? : + check_state(sd, state, fie->which, fie->pad, fie->stream) ? 
: + sd->ops->pad->enum_frame_interval(sd, state, fie); +} + +static inline int check_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + if (!sel) + return -EINVAL; + + return check_which(sel->which) ? : check_pad(sd, sel->pad) ? : + check_state(sd, state, sel->which, sel->pad, sel->stream); +} + +static int call_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + return check_selection(sd, state, sel) ? : + sd->ops->pad->get_selection(sd, state, sel); +} + +static int call_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + return check_selection(sd, state, sel) ? : + sd->ops->pad->set_selection(sd, state, sel); +} + +static inline int check_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + if (!fi) + return -EINVAL; + + return check_which(fi->which) ? : check_pad(sd, fi->pad) ? : + check_state(sd, state, fi->which, fi->pad, fi->stream); +} + +static int call_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + return check_frame_interval(sd, state, fi) ? : + sd->ops->pad->get_frame_interval(sd, state, fi); +} + +static int call_set_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + return check_frame_interval(sd, state, fi) ? : + sd->ops->pad->set_frame_interval(sd, state, fi); +} + +static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd) +{ + unsigned int i; + int ret; + + memset(fd, 0, sizeof(*fd)); + + ret = sd->ops->pad->get_frame_desc(sd, pad, fd); + if (ret) + return ret; + + dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad, + fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? 
"parallel" : + fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" : + "unknown"); + + for (i = 0; i < fd->num_entries; i++) { + struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i]; + char buf[20] = ""; + + if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) + WARN_ON(snprintf(buf, sizeof(buf), + ", vc %u, dt 0x%02x", + entry->bus.csi2.vc, + entry->bus.csi2.dt) >= sizeof(buf)); + + dev_dbg(sd->dev, + "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n", + entry->stream, entry->pixelcode, entry->length, + entry->flags, buf); + } + + return 0; +} + +static inline int check_edid(struct v4l2_subdev *sd, + struct v4l2_subdev_edid *edid) +{ + if (!edid) + return -EINVAL; + + if (edid->blocks && edid->edid == NULL) + return -EINVAL; + + return check_pad(sd, edid->pad); +} + +static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) +{ + return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid); +} + +static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) +{ + return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid); +} + +static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->s_dv_timings(sd, pad, timings); +} + +static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->g_dv_timings(sd, pad, timings); +} + +static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->query_dv_timings(sd, pad, timings); +} + +static int call_dv_timings_cap(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap) +{ + if (!cap) + return -EINVAL; + + return check_pad(sd, cap->pad) ? 
: + sd->ops->pad->dv_timings_cap(sd, cap); +} + +static int call_enum_dv_timings(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *dvt) +{ + if (!dvt) + return -EINVAL; + + return check_pad(sd, dvt->pad) ? : + sd->ops->pad->enum_dv_timings(sd, dvt); +} + +static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config) +{ + return check_pad(sd, pad) ? : + sd->ops->pad->get_mbus_config(sd, pad, config); +} + +static int call_s_stream(struct v4l2_subdev *sd, int enable) +{ + int ret; + + /* + * The .s_stream() operation must never be called to start or stop an + * already started or stopped subdev. Catch offenders but don't return + * an error yet to avoid regressions. + */ + if (WARN_ON(sd->s_stream_enabled == !!enable)) + return 0; + + ret = sd->ops->video->s_stream(sd, enable); + + if (!enable && ret < 0) { + dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret); + ret = 0; + } + + if (!ret) { + sd->s_stream_enabled = enable; + + if (enable) + v4l2_subdev_enable_privacy_led(sd); + else + v4l2_subdev_disable_privacy_led(sd); + } + + return ret; +} + +#ifdef CONFIG_MEDIA_CONTROLLER +/* + * Create state-management wrapper for pad ops dealing with subdev state. The + * wrapper handles the case where the caller does not provide the called + * subdev's state. This should be removed when all the callers are fixed. 
+ */ +#define DEFINE_STATE_WRAPPER(f, arg_type) \ + static int call_##f##_state(struct v4l2_subdev *sd, \ + struct v4l2_subdev_state *_state, \ + arg_type *arg) \ + { \ + struct v4l2_subdev_state *state = _state; \ + int ret; \ + if (!_state) \ + state = v4l2_subdev_lock_and_get_active_state(sd); \ + ret = call_##f(sd, state, arg); \ + if (!_state && state) \ + v4l2_subdev_unlock_state(state); \ + return ret; \ + } + +#else /* CONFIG_MEDIA_CONTROLLER */ + +#define DEFINE_STATE_WRAPPER(f, arg_type) \ + static int call_##f##_state(struct v4l2_subdev *sd, \ + struct v4l2_subdev_state *state, \ + arg_type *arg) \ + { \ + return call_##f(sd, state, arg); \ + } + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format); +DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format); +DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum); +DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum); +DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum); +DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection); +DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection); + +static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = { + .get_fmt = call_get_fmt_state, + .set_fmt = call_set_fmt_state, + .enum_mbus_code = call_enum_mbus_code_state, + .enum_frame_size = call_enum_frame_size_state, + .enum_frame_interval = call_enum_frame_interval_state, + .get_selection = call_get_selection_state, + .set_selection = call_set_selection_state, + .get_frame_interval = call_get_frame_interval, + .set_frame_interval = call_set_frame_interval, + .get_edid = call_get_edid, + .set_edid = call_set_edid, + .s_dv_timings = call_s_dv_timings, + .g_dv_timings = call_g_dv_timings, + .query_dv_timings = call_query_dv_timings, + .dv_timings_cap = call_dv_timings_cap, + .enum_dv_timings = call_enum_dv_timings, + .get_frame_desc = call_get_frame_desc, + .get_mbus_config 
= call_get_mbus_config, +}; + +static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = { + .s_stream = call_s_stream, +}; + +const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = { + .pad = &v4l2_subdev_call_pad_wrappers, + .video = &v4l2_subdev_call_video_wrappers, +}; +EXPORT_SYMBOL(v4l2_subdev_call_wrappers); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static struct v4l2_subdev_state * +subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh, + unsigned int cmd, void *arg) +{ + u32 which; + + switch (cmd) { + default: + return NULL; + case VIDIOC_SUBDEV_G_FMT: + case VIDIOC_SUBDEV_S_FMT: + which = ((struct v4l2_subdev_format *)arg)->which; + break; + case VIDIOC_SUBDEV_G_CROP: + case VIDIOC_SUBDEV_S_CROP: + which = ((struct v4l2_subdev_crop *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_MBUS_CODE: + which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: + which = ((struct v4l2_subdev_frame_size_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: + which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_G_SELECTION: + case VIDIOC_SUBDEV_S_SELECTION: + which = ((struct v4l2_subdev_selection *)arg)->which; + break; + case VIDIOC_SUBDEV_G_FRAME_INTERVAL: + case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!(subdev_fh->client_caps & + V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH)) + fi->which = V4L2_SUBDEV_FORMAT_ACTIVE; + + which = fi->which; + break; + } + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + which = ((struct v4l2_subdev_routing *)arg)->which; + break; + } + + return which == V4L2_SUBDEV_FORMAT_TRY ? 
+ subdev_fh->state : + v4l2_subdev_get_unlocked_active_state(sd); +} + +static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, + struct v4l2_subdev_state *state) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); + bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS; + bool client_supports_streams = subdev_fh->client_caps & + V4L2_SUBDEV_CLIENT_CAP_STREAMS; + int rval; + + /* + * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS. + * Remove this when the API is no longer experimental. + */ + if (!v4l2_subdev_enable_streams_api) + streams_subdev = false; + + switch (cmd) { + case VIDIOC_SUBDEV_QUERYCAP: { + struct v4l2_subdev_capability *cap = arg; + + memset(cap->reserved, 0, sizeof(cap->reserved)); + cap->version = LINUX_VERSION_CODE; + cap->capabilities = + (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) | + (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0); + + return 0; + } + + case VIDIOC_QUERYCTRL: + /* + * TODO: this really should be folded into v4l2_queryctrl (this + * currently returns -EINVAL for NULL control handlers). + * However, v4l2_queryctrl() is still called directly by + * drivers as well and until that has been addressed I believe + * it is safer to do the check here. The same is true for the + * other control ioctls below. 
+ */ + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_queryctrl(vfh->ctrl_handler, arg); + + case VIDIOC_QUERY_EXT_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg); + + case VIDIOC_QUERYMENU: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_querymenu(vfh->ctrl_handler, arg); + + case VIDIOC_G_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_g_ctrl(vfh->ctrl_handler, arg); + + case VIDIOC_S_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg); + + case VIDIOC_G_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_S_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_TRY_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_DQEVENT: + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return -ENOIOCTLCMD; + + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); + + case VIDIOC_SUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg); + + case VIDIOC_UNSUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg); + +#ifdef CONFIG_VIDEO_ADV_DEBUG + case VIDIOC_DBG_G_REGISTER: + { + struct v4l2_dbg_register *p = arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return v4l2_subdev_call(sd, core, g_register, p); + } + case VIDIOC_DBG_S_REGISTER: + { + struct v4l2_dbg_register *p = arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return v4l2_subdev_call(sd, core, s_register, p); + } + case VIDIOC_DBG_G_CHIP_INFO: + { + struct v4l2_dbg_chip_info *p = arg; + + if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr) + return -EINVAL; + if (sd->ops->core && 
sd->ops->core->s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (sd->ops->core && sd->ops->core->g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, sd->name, sizeof(p->name)); + return 0; + } +#endif + + case VIDIOC_LOG_STATUS: { + int ret; + + pr_info("%s: ================= START STATUS =================\n", + sd->name); + ret = v4l2_subdev_call(sd, core, log_status); + pr_info("%s: ================== END STATUS ==================\n", + sd->name); + return ret; + } + + case VIDIOC_SUBDEV_G_FMT: { + struct v4l2_subdev_format *format = arg; + + if (!client_supports_streams) + format->stream = 0; + + memset(format->reserved, 0, sizeof(format->reserved)); + memset(format->format.reserved, 0, sizeof(format->format.reserved)); + return v4l2_subdev_call(sd, pad, get_fmt, state, format); + } + + case VIDIOC_SUBDEV_S_FMT: { + struct v4l2_subdev_format *format = arg; + + if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + format->stream = 0; + + memset(format->reserved, 0, sizeof(format->reserved)); + memset(format->format.reserved, 0, sizeof(format->format.reserved)); + return v4l2_subdev_call(sd, pad, set_fmt, state, format); + } + + case VIDIOC_SUBDEV_G_CROP: { + struct v4l2_subdev_crop *crop = arg; + struct v4l2_subdev_selection sel; + + if (!client_supports_streams) + crop->stream = 0; + + memset(crop->reserved, 0, sizeof(crop->reserved)); + memset(&sel, 0, sizeof(sel)); + sel.which = crop->which; + sel.pad = crop->pad; + sel.stream = crop->stream; + sel.target = V4L2_SEL_TGT_CROP; + + rval = v4l2_subdev_call( + sd, pad, get_selection, state, &sel); + + crop->rect = sel.r; + + return rval; + } + + case VIDIOC_SUBDEV_S_CROP: { + struct v4l2_subdev_crop *crop = arg; + struct v4l2_subdev_selection sel; + + if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + crop->stream = 0; + + memset(crop->reserved, 0, sizeof(crop->reserved)); + 
memset(&sel, 0, sizeof(sel)); + sel.which = crop->which; + sel.pad = crop->pad; + sel.stream = crop->stream; + sel.target = V4L2_SEL_TGT_CROP; + sel.r = crop->rect; + + rval = v4l2_subdev_call( + sd, pad, set_selection, state, &sel); + + crop->rect = sel.r; + + return rval; + } + + case VIDIOC_SUBDEV_ENUM_MBUS_CODE: { + struct v4l2_subdev_mbus_code_enum *code = arg; + + if (!client_supports_streams) + code->stream = 0; + + memset(code->reserved, 0, sizeof(code->reserved)); + return v4l2_subdev_call(sd, pad, enum_mbus_code, state, + code); + } + + case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: { + struct v4l2_subdev_frame_size_enum *fse = arg; + + if (!client_supports_streams) + fse->stream = 0; + + memset(fse->reserved, 0, sizeof(fse->reserved)); + return v4l2_subdev_call(sd, pad, enum_frame_size, state, + fse); + } + + case VIDIOC_SUBDEV_G_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!client_supports_streams) + fi->stream = 0; + + memset(fi->reserved, 0, sizeof(fi->reserved)); + return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi); + } + + case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!client_supports_streams) + fi->stream = 0; + + if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + memset(fi->reserved, 0, sizeof(fi->reserved)); + return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi); + } + + case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval_enum *fie = arg; + + if (!client_supports_streams) + fie->stream = 0; + + memset(fie->reserved, 0, sizeof(fie->reserved)); + return v4l2_subdev_call(sd, pad, enum_frame_interval, state, + fie); + } + + case VIDIOC_SUBDEV_G_SELECTION: { + struct v4l2_subdev_selection *sel = arg; + + if (!client_supports_streams) + sel->stream = 0; + + memset(sel->reserved, 0, sizeof(sel->reserved)); + return v4l2_subdev_call( + sd, pad, get_selection, state, sel); + } + + case VIDIOC_SUBDEV_S_SELECTION: { + 
struct v4l2_subdev_selection *sel = arg; + + if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + sel->stream = 0; + + memset(sel->reserved, 0, sizeof(sel->reserved)); + return v4l2_subdev_call( + sd, pad, set_selection, state, sel); + } + + case VIDIOC_G_EDID: { + struct v4l2_subdev_edid *edid = arg; + + return v4l2_subdev_call(sd, pad, get_edid, edid); + } + + case VIDIOC_S_EDID: { + struct v4l2_subdev_edid *edid = arg; + + return v4l2_subdev_call(sd, pad, set_edid, edid); + } + + case VIDIOC_SUBDEV_DV_TIMINGS_CAP: { + struct v4l2_dv_timings_cap *cap = arg; + + return v4l2_subdev_call(sd, pad, dv_timings_cap, cap); + } + + case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: { + struct v4l2_enum_dv_timings *dvt = arg; + + return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt); + } + + case VIDIOC_SUBDEV_QUERY_DV_TIMINGS: + return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_G_DV_TIMINGS: + return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_S_DV_TIMINGS: + if (ro_subdev) + return -EPERM; + + return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_G_STD: + return v4l2_subdev_call(sd, video, g_std, arg); + + case VIDIOC_SUBDEV_S_STD: { + v4l2_std_id *std = arg; + + if (ro_subdev) + return -EPERM; + + return v4l2_subdev_call(sd, video, s_std, *std); + } + + case VIDIOC_SUBDEV_ENUMSTD: { + struct v4l2_standard *p = arg; + v4l2_std_id id; + + if (v4l2_subdev_call(sd, video, g_tvnorms, &id)) + return -EINVAL; + + return v4l_video_std_enumstd(p, id); + } + + case VIDIOC_SUBDEV_QUERYSTD: + return v4l2_subdev_call(sd, video, querystd, arg); + + case VIDIOC_SUBDEV_G_ROUTING: { + struct v4l2_subdev_routing *routing = arg; + struct v4l2_subdev_krouting *krouting; + + if (!v4l2_subdev_enable_streams_api) + return -ENOIOCTLCMD; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return -ENOIOCTLCMD; + + memset(routing->reserved, 0, sizeof(routing->reserved)); + + 
krouting = &state->routing; + + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + krouting->routes, + min(krouting->num_routes, routing->len_routes) * + sizeof(*krouting->routes)); + routing->num_routes = krouting->num_routes; + + return 0; + } + + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *routing = arg; + struct v4l2_subdev_route *routes = + (struct v4l2_subdev_route *)(uintptr_t)routing->routes; + struct v4l2_subdev_krouting krouting = {}; + unsigned int i; + + if (!v4l2_subdev_enable_streams_api) + return -ENOIOCTLCMD; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return -ENOIOCTLCMD; + + if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (routing->num_routes > routing->len_routes) + return -EINVAL; + + memset(routing->reserved, 0, sizeof(routing->reserved)); + + for (i = 0; i < routing->num_routes; ++i) { + const struct v4l2_subdev_route *route = &routes[i]; + const struct media_pad *pads = sd->entity.pads; + + if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID || + route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID) + return -EINVAL; + + if (route->sink_pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(pads[route->sink_pad].flags & + MEDIA_PAD_FL_SINK)) + return -EINVAL; + + if (route->source_pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(pads[route->source_pad].flags & + MEDIA_PAD_FL_SOURCE)) + return -EINVAL; + } + + /* + * If the driver doesn't support setting routing, just return + * the routing table. 
+ */ + if (!v4l2_subdev_has_op(sd, pad, set_routing)) { + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + state->routing.routes, + min(state->routing.num_routes, routing->len_routes) * + sizeof(*state->routing.routes)); + routing->num_routes = state->routing.num_routes; + + return 0; + } + + krouting.num_routes = routing->num_routes; + krouting.len_routes = routing->len_routes; + krouting.routes = routes; + + rval = v4l2_subdev_call(sd, pad, set_routing, state, + routing->which, &krouting); + if (rval < 0) + return rval; + + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + state->routing.routes, + min(state->routing.num_routes, routing->len_routes) * + sizeof(*state->routing.routes)); + routing->num_routes = state->routing.num_routes; + + return 0; + } + + case VIDIOC_SUBDEV_G_CLIENT_CAP: { + struct v4l2_subdev_client_capability *client_cap = arg; + + client_cap->capabilities = subdev_fh->client_caps; + + return 0; + } + + case VIDIOC_SUBDEV_S_CLIENT_CAP: { + struct v4l2_subdev_client_capability *client_cap = arg; + + /* + * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not + * enabled. Remove this when streams API is no longer + * experimental. 
+ */ + if (!v4l2_subdev_enable_streams_api) + client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS; + + /* Filter out unsupported capabilities */ + client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS | + V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH); + + subdev_fh->client_caps = client_cap->capabilities; + + return 0; + } + + default: + return v4l2_subdev_call(sd, core, ioctl, cmd, arg); + } + + return 0; +} + +static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct mutex *lock = vdev->lock; + long ret = -ENODEV; + + if (lock && mutex_lock_interruptible(lock)) + return -ERESTARTSYS; + + if (video_is_registered(vdev)) { + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + struct v4l2_subdev_state *state; + + state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg); + + if (state) + v4l2_subdev_lock_state(state); + + ret = subdev_do_ioctl(file, cmd, arg, state); + + if (state) + v4l2_subdev_unlock_state(state); + } + + if (lock) + mutex_unlock(lock); + return ret; +} + +static long subdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock); +} + +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + + return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg); +} +#endif + +#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ +static long subdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return -ENODEV; +} + +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return -ENODEV; +} +#endif +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static 
__poll_t subdev_poll(struct file *file, poll_table *wait) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *fh = file->private_data; + + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return EPOLLERR; + + poll_wait(file, &fh->wait, wait); + + if (v4l2_event_pending(fh)) + return EPOLLPRI; + + return 0; +} + +const struct v4l2_file_operations v4l2_subdev_fops = { + .owner = THIS_MODULE, + .open = subdev_open, + .unlocked_ioctl = subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = subdev_compat_ioctl32, +#endif + .release = subdev_close, + .poll = subdev_poll, +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint) +{ + struct fwnode_handle *fwnode; + struct v4l2_subdev *sd; + + if (!is_media_entity_v4l2_subdev(entity)) + return -EINVAL; + + sd = media_entity_to_v4l2_subdev(entity); + + fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode); + fwnode_handle_put(fwnode); + + if (device_match_fwnode(sd->dev, fwnode)) + return endpoint->port; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1); + +int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + bool pass = true; + + /* The width, height and code must match. 
*/ + if (source_fmt->format.width != sink_fmt->format.width) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: width does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.width, sink_fmt->format.width); + pass = false; + } + + if (source_fmt->format.height != sink_fmt->format.height) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: height does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.height, sink_fmt->format.height); + pass = false; + } + + if (source_fmt->format.code != sink_fmt->format.code) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n", + __func__, + source_fmt->format.code, sink_fmt->format.code); + pass = false; + } + + /* The field order must match, or the sink field order must be NONE + * to support interlaced hardware connected to bridges that support + * progressive formats only. + */ + if (source_fmt->format.field != sink_fmt->format.field && + sink_fmt->format.field != V4L2_FIELD_NONE) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: field does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.field, sink_fmt->format.field); + pass = false; + } + + if (pass) + return 0; + + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__, + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + return -EPIPE; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); + +static int +v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream, + struct v4l2_subdev_format *fmt, + bool states_locked) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev *sd; + int ret; + + sd = media_entity_to_v4l2_subdev(pad->entity); + + fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt->pad = pad->index; + fmt->stream = stream; + + if (states_locked) + state = v4l2_subdev_get_locked_active_state(sd); + else + state = 
v4l2_subdev_lock_and_get_active_state(sd); + + ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt); + + if (!states_locked && state) + v4l2_subdev_unlock_state(state); + + return ret; +} + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static void __v4l2_link_validate_get_streams(struct media_pad *pad, + u64 *streams_mask, + bool states_locked) +{ + struct v4l2_subdev_route *route; + struct v4l2_subdev_state *state; + struct v4l2_subdev *subdev; + + subdev = media_entity_to_v4l2_subdev(pad->entity); + + *streams_mask = 0; + + if (states_locked) + state = v4l2_subdev_get_locked_active_state(subdev); + else + state = v4l2_subdev_lock_and_get_active_state(subdev); + + if (WARN_ON(!state)) + return; + + for_each_active_route(&state->routing, route) { + u32 route_pad; + u32 route_stream; + + if (pad->flags & MEDIA_PAD_FL_SOURCE) { + route_pad = route->source_pad; + route_stream = route->source_stream; + } else { + route_pad = route->sink_pad; + route_stream = route->sink_stream; + } + + if (route_pad != pad->index) + continue; + + *streams_mask |= BIT_ULL(route_stream); + } + + if (!states_locked) + v4l2_subdev_unlock_state(state); +} + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static void v4l2_link_validate_get_streams(struct media_pad *pad, + u64 *streams_mask, + bool states_locked) +{ + struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity); + + if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) { + /* Non-streams subdevs have an implicit stream 0 */ + *streams_mask = BIT_ULL(0); + return; + } + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + __v4l2_link_validate_get_streams(pad, streams_mask, states_locked); +#else + /* This shouldn't happen */ + *streams_mask = 0; +#endif +} + +static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked) +{ + struct v4l2_subdev *sink_subdev = + media_entity_to_v4l2_subdev(link->sink->entity); + struct device *dev = sink_subdev->entity.graph_obj.mdev->dev; + u64 source_streams_mask; + u64 
sink_streams_mask; + u64 dangling_sink_streams; + u32 stream; + int ret; + + dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n", + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked); + v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked); + + /* + * It is ok to have more source streams than sink streams as extra + * source streams can just be ignored by the receiver, but having extra + * sink streams is an error as streams must have a source. + */ + dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) & + sink_streams_mask; + if (dangling_sink_streams) { + dev_err(dev, "Dangling sink streams: mask %#llx\n", + dangling_sink_streams); + return -EINVAL; + } + + /* Validate source and sink stream formats */ + + for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) { + struct v4l2_subdev_format sink_fmt, source_fmt; + + if (!(sink_streams_mask & BIT_ULL(stream))) + continue; + + dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n", + link->source->entity->name, link->source->index, stream, + link->sink->entity->name, link->sink->index, stream); + + ret = v4l2_subdev_link_validate_get_format(link->source, stream, + &source_fmt, states_locked); + if (ret < 0) { + dev_dbg(dev, + "Failed to get format for \"%s\":%u:%u (but that's ok)\n", + link->source->entity->name, link->source->index, + stream); + continue; + } + + ret = v4l2_subdev_link_validate_get_format(link->sink, stream, + &sink_fmt, states_locked); + if (ret < 0) { + dev_dbg(dev, + "Failed to get format for \"%s\":%u:%u (but that's ok)\n", + link->sink->entity->name, link->sink->index, + stream); + continue; + } + + /* TODO: add stream number to link_validate() */ + ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link, + &source_fmt, &sink_fmt); + if (!ret) + continue; + + if (ret != -ENOIOCTLCMD) + return ret; 
+ + ret = v4l2_subdev_link_validate_default(sink_subdev, link, + &source_fmt, &sink_fmt); + + if (ret) + return ret; + } + + return 0; +} + +int v4l2_subdev_link_validate(struct media_link *link) +{ + struct v4l2_subdev *source_sd, *sink_sd; + struct v4l2_subdev_state *source_state, *sink_state; + bool states_locked; + int ret; + + /* + * Links are validated in the context of the sink entity. Usage of this + * helper on a sink that is not a subdev is a clear driver bug. + */ + if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) + return -EINVAL; + + /* + * If the source is a video device, delegate link validation to it. This + * allows usage of this helper for subdev connected to a video output + * device, provided that the driver implement the video output device's + * .link_validate() operation. + */ + if (is_media_entity_v4l2_video_device(link->source->entity)) { + struct media_entity *source = link->source->entity; + + if (!source->ops || !source->ops->link_validate) { + /* + * Many existing drivers do not implement the required + * .link_validate() operation for their video devices. + * Print a warning to get the drivers fixed, and return + * 0 to avoid breaking userspace. This should + * eventually be turned into a WARN_ON() when all + * drivers will have been fixed. + */ + pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n", + source->name); + return 0; + } + + /* + * Avoid infinite loops in case a video device incorrectly uses + * this helper function as its .link_validate() handler. + */ + if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) + return -EINVAL; + + return source->ops->link_validate(link); + } + + /* + * If the source is still not a subdev, usage of this helper is a clear + * driver bug. 
+ */ + if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) + return -EINVAL; + + sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); + source_sd = media_entity_to_v4l2_subdev(link->source->entity); + + sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd); + source_state = v4l2_subdev_get_unlocked_active_state(source_sd); + + states_locked = sink_state && source_state; + + if (states_locked) + v4l2_subdev_lock_states(sink_state, source_state); + + ret = v4l2_subdev_link_validate_locked(link, states_locked); + + if (states_locked) + v4l2_subdev_unlock_states(sink_state, source_state); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); + +bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct v4l2_subdev_krouting *routing; + struct v4l2_subdev_state *state; + unsigned int i; + + state = v4l2_subdev_lock_and_get_active_state(sd); + + routing = &state->routing; + + for (i = 0; i < routing->num_routes; ++i) { + struct v4l2_subdev_route *route = &routing->routes[i]; + + if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + if ((route->sink_pad == pad0 && route->source_pad == pad1) || + (route->source_pad == pad0 && route->sink_pad == pad1)) { + v4l2_subdev_unlock_state(state); + return true; + } + } + + v4l2_subdev_unlock_state(state); + + return false; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep); + +struct v4l2_subdev_state * +__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name, + struct lock_class_key *lock_key) +{ + struct v4l2_subdev_state *state; + int ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + __mutex_init(&state->_lock, lock_name, lock_key); + if (sd->state_lock) + state->lock = sd->state_lock; + else + state->lock = &state->_lock; + + state->sd = sd; + + /* Drivers that support streams do not need the 
legacy pad config */ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { + state->pads = kvcalloc(sd->entity.num_pads, + sizeof(*state->pads), GFP_KERNEL); + if (!state->pads) { + ret = -ENOMEM; + goto err; + } + } + + if (sd->internal_ops && sd->internal_ops->init_state) { + /* + * There can be no race at this point, but we lock the state + * anyway to satisfy lockdep checks. + */ + v4l2_subdev_lock_state(state); + ret = sd->internal_ops->init_state(sd, state); + v4l2_subdev_unlock_state(state); + + if (ret) + goto err; + } + + return state; + +err: + if (state && state->pads) + kvfree(state->pads); + + kfree(state); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc); + +void __v4l2_subdev_state_free(struct v4l2_subdev_state *state) +{ + if (!state) + return; + + mutex_destroy(&state->_lock); + + kfree(state->routing.routes); + kvfree(state->stream_configs.configs); + kvfree(state->pads); + kfree(state); +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free); + +int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, + struct lock_class_key *key) +{ + struct v4l2_subdev_state *state; + struct device *dev = sd->dev; + bool has_disable_streams; + bool has_enable_streams; + bool has_s_stream; + + /* Check that the subdevice implements the required features */ + + has_s_stream = v4l2_subdev_has_op(sd, video, s_stream); + has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams); + has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams); + + if (has_enable_streams != has_disable_streams) { + dev_err(dev, + "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n", + sd->name); + return -EINVAL; + } + + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { + if (has_s_stream && !has_enable_streams) { + dev_err(dev, + "subdev '%s' must implement .enable/disable_streams()\n", + sd->name); + + return -EINVAL; + } + } + + state = __v4l2_subdev_state_alloc(sd, name, key); + if 
(IS_ERR(state)) + return PTR_ERR(state); + + sd->active_state = state; + + return 0; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize); + +void v4l2_subdev_cleanup(struct v4l2_subdev *sd) +{ + struct v4l2_async_subdev_endpoint *ase, *ase_tmp; + + __v4l2_subdev_state_free(sd->active_state); + sd->active_state = NULL; + + /* Uninitialised sub-device, bail out here. */ + if (!sd->async_subdev_endpoint_list.next) + return; + + list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list, + async_subdev_endpoint_entry) { + list_del(&ase->async_subdev_endpoint_entry); + + kfree(ase); + } +} +EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup); + +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].format; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format); + +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].crop; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream 
== stream) + return &stream_configs->configs[i].crop; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop); + +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].compose; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].compose; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose); + +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON(!state)) + return NULL; + + lockdep_assert_held(state->lock); + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].interval; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].interval; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static int +v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs, + const struct v4l2_subdev_krouting *routing) +{ + struct v4l2_subdev_stream_configs new_configs = { 0 }; + struct v4l2_subdev_route *route; + u32 idx; + + /* Count number of formats needed */ + 
for_each_active_route(routing, route) { + /* + * Each route needs a format on both ends of the route. + */ + new_configs.num_configs += 2; + } + + if (new_configs.num_configs) { + new_configs.configs = kvcalloc(new_configs.num_configs, + sizeof(*new_configs.configs), + GFP_KERNEL); + + if (!new_configs.configs) + return -ENOMEM; + } + + /* + * Fill in the 'pad' and stream' value for each item in the array from + * the routing table + */ + idx = 0; + + for_each_active_route(routing, route) { + new_configs.configs[idx].pad = route->sink_pad; + new_configs.configs[idx].stream = route->sink_stream; + + idx++; + + new_configs.configs[idx].pad = route->source_pad; + new_configs.configs[idx].stream = route->source_stream; + + idx++; + } + + kvfree(stream_configs->configs); + *stream_configs = new_configs; + + return 0; +} + +int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); + if (!fmt) + return -EINVAL; + + format->format = *fmt; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt); + +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + struct v4l2_fract *interval; + + interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream); + if (!interval) + return -EINVAL; + + fi->interval = *interval; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval); + +int v4l2_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing) +{ + struct v4l2_subdev_krouting *dst = &state->routing; + const struct v4l2_subdev_krouting *src = routing; + struct v4l2_subdev_krouting new_routing = { 0 }; + size_t bytes; + int r; + + if (unlikely(check_mul_overflow((size_t)src->num_routes, + sizeof(*src->routes), &bytes))) + return -EOVERFLOW; + + 
lockdep_assert_held(state->lock); + + if (src->num_routes > 0) { + new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL); + if (!new_routing.routes) + return -ENOMEM; + } + + new_routing.num_routes = src->num_routes; + + r = v4l2_subdev_init_stream_configs(&state->stream_configs, + &new_routing); + if (r) { + kfree(new_routing.routes); + return r; + } + + kfree(dst->routes); + *dst = new_routing; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing); + +struct v4l2_subdev_route * +__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, + struct v4l2_subdev_route *route) +{ + if (route) + ++route; + else + route = &routing->routes[0]; + + for (; route < routing->routes + routing->num_routes; ++route) { + if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + return route; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route); + +int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing, + const struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + int ret; + + ret = v4l2_subdev_set_routing(sd, state, routing); + if (ret) + return ret; + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) + stream_configs->configs[i].fmt = *fmt; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt); + +int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, + u32 pad, u32 stream, u32 *other_pad, + u32 *other_stream) +{ + unsigned int i; + + for (i = 0; i < routing->num_routes; ++i) { + struct v4l2_subdev_route *route = &routing->routes[i]; + + if (route->source_pad == pad && + route->source_stream == stream) { + if (other_pad) + *other_pad = route->sink_pad; + if (other_stream) + *other_stream = route->sink_stream; + return 0; + } + + if (route->sink_pad == pad && route->sink_stream == stream) 
{ + if (other_pad) + *other_pad = route->source_pad; + if (other_stream) + *other_stream = route->source_stream; + return 0; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end); + +struct v4l2_mbus_framefmt * +v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, + u32 pad, u32 stream) +{ + u32 other_pad, other_stream; + int ret; + + ret = v4l2_subdev_routing_find_opposite_end(&state->routing, + pad, stream, + &other_pad, &other_stream); + if (ret) + return NULL; + + return v4l2_subdev_state_get_format(state, other_pad, other_stream); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format); + +u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, + u32 pad0, u32 pad1, u64 *streams) +{ + const struct v4l2_subdev_krouting *routing = &state->routing; + struct v4l2_subdev_route *route; + u64 streams0 = 0; + u64 streams1 = 0; + + for_each_active_route(routing, route) { + if (route->sink_pad == pad0 && route->source_pad == pad1 && + (*streams & BIT_ULL(route->sink_stream))) { + streams0 |= BIT_ULL(route->sink_stream); + streams1 |= BIT_ULL(route->source_stream); + } + if (route->source_pad == pad0 && route->sink_pad == pad1 && + (*streams & BIT_ULL(route->source_stream))) { + streams0 |= BIT_ULL(route->source_stream); + streams1 |= BIT_ULL(route->sink_stream); + } + } + + *streams = streams0; + return streams1; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams); + +int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, + const struct v4l2_subdev_krouting *routing, + enum v4l2_subdev_routing_restriction disallow) +{ + u32 *remote_pads = NULL; + unsigned int i, j; + int ret = -EINVAL; + + if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX | + V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) { + remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), + GFP_KERNEL); + if (!remote_pads) + return -ENOMEM; + + for (i = 0; i < sd->entity.num_pads; ++i) + remote_pads[i] = U32_MAX; + } 
+ + for (i = 0; i < routing->num_routes; ++i) { + const struct v4l2_subdev_route *route = &routing->routes[i]; + + /* Validate the sink and source pad numbers. */ + if (route->sink_pad >= sd->entity.num_pads || + !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { + dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", + i, route->sink_pad); + goto out; + } + + if (route->source_pad >= sd->entity.num_pads || + !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { + dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", + i, route->source_pad); + goto out; + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a + * sink pad must be routed to a single source pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { + if (remote_pads[route->sink_pad] != U32_MAX && + remote_pads[route->sink_pad] != route->source_pad) { + dev_dbg(sd->dev, + "route %u attempts to mix %s streams\n", + i, "sink"); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a + * source pad must originate from a single sink pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) { + if (remote_pads[route->source_pad] != U32_MAX && + remote_pads[route->source_pad] != route->sink_pad) { + dev_dbg(sd->dev, + "route %u attempts to mix %s streams\n", + i, "source"); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink + * side can not do stream multiplexing, i.e. there can be only + * a single stream in a sink pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) { + if (remote_pads[route->sink_pad] != U32_MAX) { + dev_dbg(sd->dev, + "route %u attempts to multiplex on %s pad %u\n", + i, "sink", route->sink_pad); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the + * source side can not do stream multiplexing, i.e. there can + * be only a single stream in a source pad. 
+ */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) { + if (remote_pads[route->source_pad] != U32_MAX) { + dev_dbg(sd->dev, + "route %u attempts to multiplex on %s pad %u\n", + i, "source", route->source_pad); + goto out; + } + } + + if (remote_pads) { + remote_pads[route->sink_pad] = route->source_pad; + remote_pads[route->source_pad] = route->sink_pad; + } + + for (j = i + 1; j < routing->num_routes; ++j) { + const struct v4l2_subdev_route *r = &routing->routes[j]; + + /* + * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can + * originate from the same (sink) stream. + */ + if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) && + route->sink_pad == r->sink_pad && + route->sink_stream == r->sink_stream) { + dev_dbg(sd->dev, + "routes %u and %u originate from same sink (%u/%u)\n", + i, j, route->sink_pad, + route->sink_stream); + goto out; + } + + /* + * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end + * at the same (source) stream. + */ + if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) && + route->source_pad == r->source_pad && + route->source_stream == r->source_stream) { + dev_dbg(sd->dev, + "routes %u and %u end at same source (%u/%u)\n", + i, j, route->source_pad, + route->source_stream); + goto out; + } + } + } + + ret = 0; + +out: + kfree(remote_pads); + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate); + +static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask, + u64 *found_streams, + u64 *enabled_streams) +{ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { + *found_streams = BIT_ULL(0); + *enabled_streams = + (sd->enabled_pads & BIT_ULL(pad)) ? 
BIT_ULL(0) : 0; + return; + } + + *found_streams = 0; + *enabled_streams = 0; + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + const struct v4l2_subdev_stream_config *cfg = + &state->stream_configs.configs[i]; + + if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream))) + continue; + + *found_streams |= BIT_ULL(cfg->stream); + if (cfg->enabled) + *enabled_streams |= BIT_ULL(cfg->stream); + } +} + +static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask, + bool enabled) +{ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { + if (enabled) + sd->enabled_pads |= BIT_ULL(pad); + else + sd->enabled_pads &= ~BIT_ULL(pad); + return; + } + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + struct v4l2_subdev_stream_config *cfg = + &state->stream_configs.configs[i]; + + if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream))) + cfg->enabled = enabled; + } +} + +int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask) +{ + struct device *dev = sd->entity.graph_obj.mdev->dev; + struct v4l2_subdev_state *state; + bool already_streaming; + u64 enabled_streams; + u64 found_streams; + bool use_s_stream; + int ret; + + /* A few basic sanity checks first. */ + if (pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + + /* + * We use a 64-bit bitmask for tracking enabled pads, so only subdevices + * with 64 pads or less can be supported. + */ + if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) + return -EOPNOTSUPP; + + if (!streams_mask) + return 0; + + /* Fallback on .s_stream() if .enable_streams() isn't available. 
*/ + use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams); + + if (!use_s_stream) + state = v4l2_subdev_lock_and_get_active_state(sd); + else + state = NULL; + + /* + * Verify that the requested streams exist and that they are not + * already enabled. + */ + + v4l2_subdev_collect_streams(sd, state, pad, streams_mask, + &found_streams, &enabled_streams); + + if (found_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", + streams_mask & ~found_streams, sd->entity.name, pad); + ret = -EINVAL; + goto done; + } + + if (enabled_streams) { + dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n", + enabled_streams, sd->entity.name, pad); + ret = -EALREADY; + goto done; + } + + dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask); + + already_streaming = v4l2_subdev_is_streaming(sd); + + if (!use_s_stream) { + /* Call the .enable_streams() operation. */ + ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad, + streams_mask); + } else { + /* Start streaming when the first pad is enabled. */ + if (!already_streaming) + ret = v4l2_subdev_call(sd, video, s_stream, 1); + else + ret = 0; + } + + if (ret) { + dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad, + streams_mask, ret); + goto done; + } + + /* Mark the streams as enabled. */ + v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true); + + /* + * TODO: When all the drivers have been changed to use + * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(), + * instead of calling .s_stream() operation directly, we can remove + * the privacy LED handling from call_s_stream() and do it here + * for all cases. 
+ */ + if (!use_s_stream && !already_streaming) + v4l2_subdev_enable_privacy_led(sd); + +done: + if (!use_s_stream) + v4l2_subdev_unlock_state(state); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams); + +int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask) +{ + struct device *dev = sd->entity.graph_obj.mdev->dev; + struct v4l2_subdev_state *state; + u64 enabled_streams; + u64 found_streams; + bool use_s_stream; + int ret; + + /* A few basic sanity checks first. */ + if (pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + + /* + * We use a 64-bit bitmask for tracking enabled pads, so only subdevices + * with 64 pads or less can be supported. + */ + if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) + return -EOPNOTSUPP; + + if (!streams_mask) + return 0; + + /* Fallback on .s_stream() if .disable_streams() isn't available. */ + use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams); + + if (!use_s_stream) + state = v4l2_subdev_lock_and_get_active_state(sd); + else + state = NULL; + + /* + * Verify that the requested streams exist and that they are not + * already disabled. + */ + + v4l2_subdev_collect_streams(sd, state, pad, streams_mask, + &found_streams, &enabled_streams); + + if (found_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", + streams_mask & ~found_streams, sd->entity.name, pad); + ret = -EINVAL; + goto done; + } + + if (enabled_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n", + streams_mask & ~enabled_streams, sd->entity.name, pad); + ret = -EALREADY; + goto done; + } + + dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask); + + if (!use_s_stream) { + /* Call the .disable_streams() operation. */ + ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad, + streams_mask); + } else { + /* Stop streaming when the last streams are disabled. 
*/ + + if (!(sd->enabled_pads & ~BIT_ULL(pad))) + ret = v4l2_subdev_call(sd, video, s_stream, 0); + else + ret = 0; + } + + if (ret) { + dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad, + streams_mask, ret); + goto done; + } + + v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false); + +done: + if (!use_s_stream) { + if (!v4l2_subdev_is_streaming(sd)) + v4l2_subdev_disable_privacy_led(sd); + + v4l2_subdev_unlock_state(state); + } + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams); + +int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev_route *route; + struct media_pad *pad; + u64 source_mask = 0; + int pad_index = -1; + + /* + * Find the source pad. This helper is meant for subdevs that have a + * single source pad, so failures shouldn't happen, but catch them + * loudly nonetheless as they indicate a driver bug. + */ + media_entity_for_each_pad(&sd->entity, pad) { + if (pad->flags & MEDIA_PAD_FL_SOURCE) { + pad_index = pad->index; + break; + } + } + + if (WARN_ON(pad_index == -1)) + return -EINVAL; + + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { + /* + * As there's a single source pad, just collect all the source + * streams. + */ + state = v4l2_subdev_lock_and_get_active_state(sd); + + for_each_active_route(&state->routing, route) + source_mask |= BIT_ULL(route->source_stream); + + v4l2_subdev_unlock_state(state); + } else { + /* + * For non-streams subdevices, there's a single implicit stream + * per pad. 
+ */ + source_mask = BIT_ULL(0); + } + + if (enable) + return v4l2_subdev_enable_streams(sd, pad_index, source_mask); + else + return v4l2_subdev_disable_streams(sd, pad_index, source_mask); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper); + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) +{ + INIT_LIST_HEAD(&sd->list); + BUG_ON(!ops); + sd->ops = ops; + sd->v4l2_dev = NULL; + sd->flags = 0; + sd->name[0] = '\0'; + sd->grp_id = 0; + sd->dev_priv = NULL; + sd->host_priv = NULL; + sd->privacy_led = NULL; + INIT_LIST_HEAD(&sd->async_subdev_endpoint_list); +#if defined(CONFIG_MEDIA_CONTROLLER) + sd->entity.name = sd->name; + sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; + sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; +#endif +} +EXPORT_SYMBOL(v4l2_subdev_init); + +void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev) +{ + v4l2_event_queue(sd->devnode, ev); + v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event); + +bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd) +{ + struct v4l2_subdev_state *state; + + if (!v4l2_subdev_has_op(sd, pad, enable_streams)) + return sd->s_stream_enabled; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return !!sd->enabled_pads; + + state = v4l2_subdev_get_locked_active_state(sd); + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + const struct v4l2_subdev_stream_config *cfg; + + cfg = &state->stream_configs.configs[i]; + + if (cfg->enabled) + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); + +int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + sd->privacy_led = led_get(sd->dev, "privacy-led"); + if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) + return dev_err_probe(sd->dev, 
PTR_ERR(sd->privacy_led), + "getting privacy LED\n"); + + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + mutex_lock(&sd->privacy_led->led_access); + led_sysfs_disable(sd->privacy_led); + led_trigger_remove(sd->privacy_led); + led_set_brightness(sd->privacy_led, 0); + mutex_unlock(&sd->privacy_led->led_access); + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led); + +void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + mutex_lock(&sd->privacy_led->led_access); + led_sysfs_enable(sd->privacy_led); + mutex_unlock(&sd->privacy_led->led_access); + led_put(sd->privacy_led); + } +#endif +} +EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-trace.c b/6.12.0/drivers/media/v4l2-core/v4l2-trace.c new file mode 100644 index 0000000..95f3b02 --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf); diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-vp9.c b/6.12.0/drivers/media/v4l2-core/v4l2-vp9.c new file mode 100644 index 0000000..859589f --- /dev/null +++ b/6.12.0/drivers/media/v4l2-core/v4l2-vp9.c @@ -0,0 +1,1850 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * V4L2 VP9 helpers. + * + * Copyright (C) 2021 Collabora, Ltd. 
+ * + * Author: Andrzej Pietrasiewicz + */ + +#include + +#include + +const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = { + { + /* above = dc */ + { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */ + { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */ + { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */ + { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */ + { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/ + { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/ + { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/ + { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/ + { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */ + { 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */ + }, { /* above = v */ + { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */ + { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */ + { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */ + { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */ + { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/ + { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/ + { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/ + { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/ + { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */ + { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */ + }, { /* above = h */ + { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */ + { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */ + { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */ + { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */ + { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/ + { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/ + { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/ + { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/ + { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */ + { 38, 72, 19, 168, 203, 212, 50, 50, 
107 }, /*left = tm */ + }, { /* above = d45 */ + { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */ + { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */ + { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */ + { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */ + { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/ + { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/ + { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/ + { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/ + { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */ + { 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */ + }, { /* above = d135 */ + { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */ + { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */ + { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */ + { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */ + { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/ + { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/ + { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/ + { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/ + { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */ + { 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */ + }, { /* above = d117 */ + { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */ + { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */ + { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */ + { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */ + { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/ + { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/ + { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/ + { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/ + { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */ + { 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */ + }, { /* above = d153 */ + { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc 
*/ + { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */ + { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */ + { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */ + { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/ + { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/ + { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/ + { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/ + { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */ + { 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */ + }, { /* above = d207 */ + { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */ + { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */ + { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */ + { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */ + { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/ + { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/ + { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/ + { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/ + { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */ + { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */ + }, { /* above = d63 */ + { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */ + { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */ + { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */ + { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */ + { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/ + { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/ + { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/ + { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/ + { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */ + { 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */ + }, { /* above = tm */ + { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */ + { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */ + { 39, 38, 21, 184, 227, 206, 42, 32, 
64 }, /*left = h */ + { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */ + { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/ + { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/ + { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/ + { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/ + { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */ + { 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */ + } +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob); + +const u8 v4l2_vp9_kf_partition_probs[16][3] = { + /* 8x8 -> 4x4 */ + { 158, 97, 94 }, /* a/l both not split */ + { 93, 24, 99 }, /* a split, l not split */ + { 85, 119, 44 }, /* l split, a not split */ + { 62, 59, 67 }, /* a/l both split */ + /* 16x16 -> 8x8 */ + { 149, 53, 53 }, /* a/l both not split */ + { 94, 20, 48 }, /* a split, l not split */ + { 83, 53, 24 }, /* l split, a not split */ + { 52, 18, 18 }, /* a/l both split */ + /* 32x32 -> 16x16 */ + { 150, 40, 39 }, /* a/l both not split */ + { 78, 12, 26 }, /* a split, l not split */ + { 67, 33, 11 }, /* l split, a not split */ + { 24, 7, 5 }, /* a/l both split */ + /* 64x64 -> 32x32 */ + { 174, 35, 49 }, /* a/l both not split */ + { 68, 11, 27 }, /* a split, l not split */ + { 57, 15, 9 }, /* l split, a not split */ + { 12, 3, 3 }, /* a/l both split */ +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs); + +const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = { + { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */ + { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */ + { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */ + { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */ + { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */ + { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */ + { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */ + { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */ + { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */ + { 102, 19, 66, 162, 182, 
122, 35, 59, 128 } /* y = tm */ +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob); + +const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = { + .tx8 = { + { 100 }, + { 66 }, + }, + .tx16 = { + { 20, 152 }, + { 15, 101 }, + }, + .tx32 = { + { 3, 136, 37 }, + { 5, 52, 13 }, + }, + .coef = { + { /* tx = 4x4 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 195, 29, 183 }, + { 84, 49, 136 }, + { 8, 42, 71 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 31, 107, 169 }, + { 35, 99, 159 }, + { 17, 82, 140 }, + { 8, 66, 114 }, + { 2, 44, 76 }, + { 1, 19, 32 }, + }, + { /* Coeff Band 2 */ + { 40, 132, 201 }, + { 29, 114, 187 }, + { 13, 91, 157 }, + { 7, 75, 127 }, + { 3, 58, 95 }, + { 1, 28, 47 }, + }, + { /* Coeff Band 3 */ + { 69, 142, 221 }, + { 42, 122, 201 }, + { 15, 91, 159 }, + { 6, 67, 121 }, + { 1, 42, 77 }, + { 1, 17, 31 }, + }, + { /* Coeff Band 4 */ + { 102, 148, 228 }, + { 67, 117, 204 }, + { 17, 82, 154 }, + { 6, 59, 114 }, + { 2, 39, 75 }, + { 1, 15, 29 }, + }, + { /* Coeff Band 5 */ + { 156, 57, 233 }, + { 119, 57, 212 }, + { 58, 48, 163 }, + { 29, 40, 124 }, + { 12, 30, 81 }, + { 3, 12, 31 } + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 191, 107, 226 }, + { 124, 117, 204 }, + { 25, 99, 155 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 29, 148, 210 }, + { 37, 126, 194 }, + { 8, 93, 157 }, + { 2, 68, 118 }, + { 1, 39, 69 }, + { 1, 17, 33 }, + }, + { /* Coeff Band 2 */ + { 41, 151, 213 }, + { 27, 123, 193 }, + { 3, 82, 144 }, + { 1, 58, 105 }, + { 1, 32, 60 }, + { 1, 13, 26 }, + }, + { /* Coeff Band 3 */ + { 59, 159, 220 }, + { 23, 126, 198 }, + { 4, 88, 151 }, + { 1, 66, 114 }, + { 1, 38, 71 }, + { 1, 18, 34 }, + }, + { /* Coeff Band 4 */ + { 114, 136, 232 }, + { 51, 114, 207 }, + { 11, 83, 155 }, + { 3, 56, 105 }, + { 1, 33, 65 }, + { 1, 17, 34 }, + }, + { /* Coeff Band 5 */ + { 149, 65, 234 }, + { 121, 57, 215 }, + { 61, 49, 166 }, + { 28, 36, 114 }, + { 12, 
25, 76 }, + { 3, 16, 42 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 214, 49, 220 }, + { 132, 63, 188 }, + { 42, 65, 137 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 137, 221 }, + { 104, 131, 216 }, + { 49, 111, 192 }, + { 21, 87, 155 }, + { 2, 49, 87 }, + { 1, 16, 28 }, + }, + { /* Coeff Band 2 */ + { 89, 163, 230 }, + { 90, 137, 220 }, + { 29, 100, 183 }, + { 10, 70, 135 }, + { 2, 42, 81 }, + { 1, 17, 33 }, + }, + { /* Coeff Band 3 */ + { 108, 167, 237 }, + { 55, 133, 222 }, + { 15, 97, 179 }, + { 4, 72, 135 }, + { 1, 45, 85 }, + { 1, 19, 38 }, + }, + { /* Coeff Band 4 */ + { 124, 146, 240 }, + { 66, 124, 224 }, + { 17, 88, 175 }, + { 4, 58, 122 }, + { 1, 36, 75 }, + { 1, 18, 37 }, + }, + { /* Coeff Band 5 */ + { 141, 79, 241 }, + { 126, 70, 227 }, + { 66, 58, 182 }, + { 30, 44, 136 }, + { 12, 34, 96 }, + { 2, 20, 47 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 229, 99, 249 }, + { 143, 111, 235 }, + { 46, 109, 192 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 82, 158, 236 }, + { 94, 146, 224 }, + { 25, 117, 191 }, + { 9, 87, 149 }, + { 3, 56, 99 }, + { 1, 33, 57 }, + }, + { /* Coeff Band 2 */ + { 83, 167, 237 }, + { 68, 145, 222 }, + { 10, 103, 177 }, + { 2, 72, 131 }, + { 1, 41, 79 }, + { 1, 20, 39 }, + }, + { /* Coeff Band 3 */ + { 99, 167, 239 }, + { 47, 141, 224 }, + { 10, 104, 178 }, + { 2, 73, 133 }, + { 1, 44, 85 }, + { 1, 22, 47 }, + }, + { /* Coeff Band 4 */ + { 127, 145, 243 }, + { 71, 129, 228 }, + { 17, 93, 177 }, + { 3, 61, 124 }, + { 1, 41, 84 }, + { 1, 21, 52 }, + }, + { /* Coeff Band 5 */ + { 157, 78, 244 }, + { 140, 72, 231 }, + { 69, 58, 184 }, + { 31, 44, 137 }, + { 14, 38, 105 }, + { 8, 23, 61 }, + }, + }, + }, + }, + { /* tx = 8x8 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 125, 34, 187 }, + { 52, 41, 133 }, + { 6, 31, 56 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ 
+ { 37, 109, 153 }, + { 51, 102, 147 }, + { 23, 87, 128 }, + { 8, 67, 101 }, + { 1, 41, 63 }, + { 1, 19, 29 }, + }, + { /* Coeff Band 2 */ + { 31, 154, 185 }, + { 17, 127, 175 }, + { 6, 96, 145 }, + { 2, 73, 114 }, + { 1, 51, 82 }, + { 1, 28, 45 }, + }, + { /* Coeff Band 3 */ + { 23, 163, 200 }, + { 10, 131, 185 }, + { 2, 93, 148 }, + { 1, 67, 111 }, + { 1, 41, 69 }, + { 1, 14, 24 }, + }, + { /* Coeff Band 4 */ + { 29, 176, 217 }, + { 12, 145, 201 }, + { 3, 101, 156 }, + { 1, 69, 111 }, + { 1, 39, 63 }, + { 1, 14, 23 }, + }, + { /* Coeff Band 5 */ + { 57, 192, 233 }, + { 25, 154, 215 }, + { 6, 109, 167 }, + { 3, 78, 118 }, + { 1, 48, 69 }, + { 1, 21, 29 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 202, 105, 245 }, + { 108, 106, 216 }, + { 18, 90, 144 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 33, 172, 219 }, + { 64, 149, 206 }, + { 14, 117, 177 }, + { 5, 90, 141 }, + { 2, 61, 95 }, + { 1, 37, 57 }, + }, + { /* Coeff Band 2 */ + { 33, 179, 220 }, + { 11, 140, 198 }, + { 1, 89, 148 }, + { 1, 60, 104 }, + { 1, 33, 57 }, + { 1, 12, 21 }, + }, + { /* Coeff Band 3 */ + { 30, 181, 221 }, + { 8, 141, 198 }, + { 1, 87, 145 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 20 }, + }, + { /* Coeff Band 4 */ + { 32, 186, 224 }, + { 7, 142, 198 }, + { 1, 86, 143 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 22 }, + }, + { /* Coeff Band 5 */ + { 57, 192, 227 }, + { 20, 143, 204 }, + { 3, 96, 154 }, + { 1, 68, 112 }, + { 1, 42, 69 }, + { 1, 19, 32 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 212, 35, 215 }, + { 113, 47, 169 }, + { 29, 48, 105 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 74, 129, 203 }, + { 106, 120, 203 }, + { 49, 107, 178 }, + { 19, 84, 144 }, + { 4, 50, 84 }, + { 1, 15, 25 }, + }, + { /* Coeff Band 2 */ + { 71, 172, 217 }, + { 44, 141, 209 }, + { 15, 102, 173 }, + { 6, 76, 133 }, + { 2, 51, 89 }, + { 1, 24, 42 }, + }, + { /* Coeff Band 3 
*/ + { 64, 185, 231 }, + { 31, 148, 216 }, + { 8, 103, 175 }, + { 3, 74, 131 }, + { 1, 46, 81 }, + { 1, 18, 30 }, + }, + { /* Coeff Band 4 */ + { 65, 196, 235 }, + { 25, 157, 221 }, + { 5, 105, 174 }, + { 1, 67, 120 }, + { 1, 38, 69 }, + { 1, 15, 30 }, + }, + { /* Coeff Band 5 */ + { 65, 204, 238 }, + { 30, 156, 224 }, + { 7, 107, 177 }, + { 2, 70, 124 }, + { 1, 42, 73 }, + { 1, 18, 34 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 225, 86, 251 }, + { 144, 104, 235 }, + { 42, 99, 181 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 175, 239 }, + { 112, 165, 229 }, + { 29, 136, 200 }, + { 12, 103, 162 }, + { 6, 77, 123 }, + { 2, 53, 84 }, + }, + { /* Coeff Band 2 */ + { 75, 183, 239 }, + { 30, 155, 221 }, + { 3, 106, 171 }, + { 1, 74, 128 }, + { 1, 44, 76 }, + { 1, 17, 28 }, + }, + { /* Coeff Band 3 */ + { 73, 185, 240 }, + { 27, 159, 222 }, + { 2, 107, 172 }, + { 1, 75, 127 }, + { 1, 42, 73 }, + { 1, 17, 29 }, + }, + { /* Coeff Band 4 */ + { 62, 190, 238 }, + { 21, 159, 222 }, + { 2, 107, 172 }, + { 1, 72, 122 }, + { 1, 40, 71 }, + { 1, 18, 32 }, + }, + { /* Coeff Band 5 */ + { 61, 199, 240 }, + { 27, 161, 226 }, + { 4, 113, 180 }, + { 1, 76, 129 }, + { 1, 46, 80 }, + { 1, 23, 41 }, + }, + }, + }, + }, + { /* tx = 16x16 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 7, 27, 153 }, + { 5, 30, 95 }, + { 1, 16, 30 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 50, 75, 127 }, + { 57, 75, 124 }, + { 27, 67, 108 }, + { 10, 54, 86 }, + { 1, 33, 52 }, + { 1, 12, 18 }, + }, + { /* Coeff Band 2 */ + { 43, 125, 151 }, + { 26, 108, 148 }, + { 7, 83, 122 }, + { 2, 59, 89 }, + { 1, 38, 60 }, + { 1, 17, 27 }, + }, + { /* Coeff Band 3 */ + { 23, 144, 163 }, + { 13, 112, 154 }, + { 2, 75, 117 }, + { 1, 50, 81 }, + { 1, 31, 51 }, + { 1, 14, 23 }, + }, + { /* Coeff Band 4 */ + { 18, 162, 185 }, + { 6, 123, 171 }, + { 1, 78, 125 }, + { 1, 51, 86 }, + { 1, 31, 54 }, + { 1, 14, 23 }, + }, + 
{ /* Coeff Band 5 */ + { 15, 199, 227 }, + { 3, 150, 204 }, + { 1, 91, 146 }, + { 1, 55, 95 }, + { 1, 30, 53 }, + { 1, 11, 20 }, + } + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 19, 55, 240 }, + { 19, 59, 196 }, + { 3, 52, 105 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 41, 166, 207 }, + { 104, 153, 199 }, + { 31, 123, 181 }, + { 14, 101, 152 }, + { 5, 72, 106 }, + { 1, 36, 52 }, + }, + { /* Coeff Band 2 */ + { 35, 176, 211 }, + { 12, 131, 190 }, + { 2, 88, 144 }, + { 1, 60, 101 }, + { 1, 36, 60 }, + { 1, 16, 28 }, + }, + { /* Coeff Band 3 */ + { 28, 183, 213 }, + { 8, 134, 191 }, + { 1, 86, 142 }, + { 1, 56, 96 }, + { 1, 30, 53 }, + { 1, 12, 20 }, + }, + { /* Coeff Band 4 */ + { 20, 190, 215 }, + { 4, 135, 192 }, + { 1, 84, 139 }, + { 1, 53, 91 }, + { 1, 28, 49 }, + { 1, 11, 20 }, + }, + { /* Coeff Band 5 */ + { 13, 196, 216 }, + { 2, 137, 192 }, + { 1, 86, 143 }, + { 1, 57, 99 }, + { 1, 32, 56 }, + { 1, 13, 24 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 211, 29, 217 }, + { 96, 47, 156 }, + { 22, 43, 87 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 78, 120, 193 }, + { 111, 116, 186 }, + { 46, 102, 164 }, + { 15, 80, 128 }, + { 2, 49, 76 }, + { 1, 18, 28 }, + }, + { /* Coeff Band 2 */ + { 71, 161, 203 }, + { 42, 132, 192 }, + { 10, 98, 150 }, + { 3, 69, 109 }, + { 1, 44, 70 }, + { 1, 18, 29 }, + }, + { /* Coeff Band 3 */ + { 57, 186, 211 }, + { 30, 140, 196 }, + { 4, 93, 146 }, + { 1, 62, 102 }, + { 1, 38, 65 }, + { 1, 16, 27 }, + }, + { /* Coeff Band 4 */ + { 47, 199, 217 }, + { 14, 145, 196 }, + { 1, 88, 142 }, + { 1, 57, 98 }, + { 1, 36, 62 }, + { 1, 15, 26 }, + }, + { /* Coeff Band 5 */ + { 26, 219, 229 }, + { 5, 155, 207 }, + { 1, 94, 151 }, + { 1, 60, 104 }, + { 1, 36, 62 }, + { 1, 16, 28 }, + } + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 233, 29, 248 }, + { 146, 47, 220 }, + { 43, 52, 140 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + 
{ /* Coeff Band 1 */ + { 100, 163, 232 }, + { 179, 161, 222 }, + { 63, 142, 204 }, + { 37, 113, 174 }, + { 26, 89, 137 }, + { 18, 68, 97 }, + }, + { /* Coeff Band 2 */ + { 85, 181, 230 }, + { 32, 146, 209 }, + { 7, 100, 164 }, + { 3, 71, 121 }, + { 1, 45, 77 }, + { 1, 18, 30 }, + }, + { /* Coeff Band 3 */ + { 65, 187, 230 }, + { 20, 148, 207 }, + { 2, 97, 159 }, + { 1, 68, 116 }, + { 1, 40, 70 }, + { 1, 14, 29 }, + }, + { /* Coeff Band 4 */ + { 40, 194, 227 }, + { 8, 147, 204 }, + { 1, 94, 155 }, + { 1, 65, 112 }, + { 1, 39, 66 }, + { 1, 14, 26 }, + }, + { /* Coeff Band 5 */ + { 16, 208, 228 }, + { 3, 151, 207 }, + { 1, 98, 160 }, + { 1, 67, 117 }, + { 1, 41, 74 }, + { 1, 17, 31 }, + }, + }, + }, + }, + { /* tx = 32x32 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 17, 38, 140 }, + { 7, 34, 80 }, + { 1, 17, 29 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 37, 75, 128 }, + { 41, 76, 128 }, + { 26, 66, 116 }, + { 12, 52, 94 }, + { 2, 32, 55 }, + { 1, 10, 16 }, + }, + { /* Coeff Band 2 */ + { 50, 127, 154 }, + { 37, 109, 152 }, + { 16, 82, 121 }, + { 5, 59, 85 }, + { 1, 35, 54 }, + { 1, 13, 20 }, + }, + { /* Coeff Band 3 */ + { 40, 142, 167 }, + { 17, 110, 157 }, + { 2, 71, 112 }, + { 1, 44, 72 }, + { 1, 27, 45 }, + { 1, 11, 17 }, + }, + { /* Coeff Band 4 */ + { 30, 175, 188 }, + { 9, 124, 169 }, + { 1, 74, 116 }, + { 1, 48, 78 }, + { 1, 30, 49 }, + { 1, 11, 18 }, + }, + { /* Coeff Band 5 */ + { 10, 222, 223 }, + { 2, 150, 194 }, + { 1, 83, 128 }, + { 1, 48, 79 }, + { 1, 27, 45 }, + { 1, 11, 17 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 36, 41, 235 }, + { 29, 36, 193 }, + { 10, 27, 111 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 165, 222 }, + { 177, 162, 215 }, + { 110, 135, 195 }, + { 57, 113, 168 }, + { 23, 83, 120 }, + { 10, 49, 61 }, + }, + { /* Coeff Band 2 */ + { 85, 190, 223 }, + { 36, 139, 200 }, + { 5, 90, 146 }, + { 1, 60, 103 }, + { 1, 38, 65 }, + { 
1, 18, 30 }, + }, + { /* Coeff Band 3 */ + { 72, 202, 223 }, + { 23, 141, 199 }, + { 2, 86, 140 }, + { 1, 56, 97 }, + { 1, 36, 61 }, + { 1, 16, 27 }, + }, + { /* Coeff Band 4 */ + { 55, 218, 225 }, + { 13, 145, 200 }, + { 1, 86, 141 }, + { 1, 57, 99 }, + { 1, 35, 61 }, + { 1, 13, 22 }, + }, + { /* Coeff Band 5 */ + { 15, 235, 212 }, + { 1, 132, 184 }, + { 1, 84, 139 }, + { 1, 57, 97 }, + { 1, 34, 56 }, + { 1, 14, 23 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 181, 21, 201 }, + { 61, 37, 123 }, + { 10, 38, 71 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 47, 106, 172 }, + { 95, 104, 173 }, + { 42, 93, 159 }, + { 18, 77, 131 }, + { 4, 50, 81 }, + { 1, 17, 23 }, + }, + { /* Coeff Band 2 */ + { 62, 147, 199 }, + { 44, 130, 189 }, + { 28, 102, 154 }, + { 18, 75, 115 }, + { 2, 44, 65 }, + { 1, 12, 19 }, + }, + { /* Coeff Band 3 */ + { 55, 153, 210 }, + { 24, 130, 194 }, + { 3, 93, 146 }, + { 1, 61, 97 }, + { 1, 31, 50 }, + { 1, 10, 16 }, + }, + { /* Coeff Band 4 */ + { 49, 186, 223 }, + { 17, 148, 204 }, + { 1, 96, 142 }, + { 1, 53, 83 }, + { 1, 26, 44 }, + { 1, 11, 17 }, + }, + { /* Coeff Band 5 */ + { 13, 217, 212 }, + { 2, 136, 180 }, + { 1, 78, 124 }, + { 1, 50, 83 }, + { 1, 29, 49 }, + { 1, 14, 23 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 197, 13, 247 }, + { 82, 17, 222 }, + { 25, 17, 162 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 126, 186, 247 }, + { 234, 191, 243 }, + { 176, 177, 234 }, + { 104, 158, 220 }, + { 66, 128, 186 }, + { 55, 90, 137 }, + }, + { /* Coeff Band 2 */ + { 111, 197, 242 }, + { 46, 158, 219 }, + { 9, 104, 171 }, + { 2, 65, 125 }, + { 1, 44, 80 }, + { 1, 17, 91 }, + }, + { /* Coeff Band 3 */ + { 104, 208, 245 }, + { 39, 168, 224 }, + { 3, 109, 162 }, + { 1, 79, 124 }, + { 1, 50, 102 }, + { 1, 43, 102 }, + }, + { /* Coeff Band 4 */ + { 84, 220, 246 }, + { 31, 177, 231 }, + { 2, 115, 180 }, + { 1, 79, 134 }, + { 1, 55, 77 }, 
+ { 1, 60, 79 }, + }, + { /* Coeff Band 5 */ + { 43, 243, 240 }, + { 8, 180, 217 }, + { 1, 115, 166 }, + { 1, 84, 121 }, + { 1, 51, 67 }, + { 1, 16, 6 }, + }, + }, + }, + }, + }, + + .skip = { 192, 128, 64 }, + .inter_mode = { + { 2, 173, 34 }, + { 7, 145, 85 }, + { 7, 166, 63 }, + { 7, 94, 66 }, + { 8, 64, 46 }, + { 17, 81, 31 }, + { 25, 29, 30 }, + }, + .interp_filter = { + { 235, 162 }, + { 36, 255 }, + { 34, 3 }, + { 149, 144 }, + }, + .is_inter = { 9, 102, 187, 225 }, + .comp_mode = { 239, 183, 119, 96, 41 }, + .single_ref = { + { 33, 16 }, + { 77, 74 }, + { 142, 142 }, + { 172, 170 }, + { 238, 247 }, + }, + .comp_ref = { 50, 126, 123, 221, 226 }, + .y_mode = { + { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, + { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, + { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, + { 221, 135, 38, 194, 248, 121, 96, 85, 29 }, + }, + .uv_mode = { + { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */, + { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */, + { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */, + { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */, + { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */, + { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */, + { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */, + { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */, + { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */, + { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */ + }, + .partition = { + /* 8x8 -> 4x4 */ + { 199, 122, 141 } /* a/l both not split */, + { 147, 63, 159 } /* a split, l not split */, + { 148, 133, 118 } /* l split, a not split */, + { 121, 104, 114 } /* a/l both split */, + /* 16x16 -> 8x8 */ + { 174, 73, 87 } /* a/l both not split */, + { 92, 41, 83 } /* a split, l not split */, + { 82, 99, 50 } /* l split, a not split */, + { 53, 39, 39 } /* a/l both split */, + /* 32x32 -> 16x16 */ + { 177, 58, 59 } /* a/l both not split */, + { 68, 26, 63 } /* a split, l not split */, + 
{ 52, 79, 25 } /* l split, a not split */, + { 17, 14, 12 } /* a/l both split */, + /* 64x64 -> 32x32 */ + { 222, 34, 30 } /* a/l both not split */, + { 72, 16, 44 } /* a split, l not split */, + { 58, 32, 12 } /* l split, a not split */, + { 10, 7, 6 } /* a/l both split */, + }, + + .mv = { + .joint = { 32, 64, 96 }, + .sign = { 128, 128 }, + .classes = { + { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, + { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 }, + }, + .class0_bit = { 216, 208 }, + .bits = { + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, + }, + .class0_fr = { + { + { 128, 128, 64 }, + { 96, 112, 64 }, + }, + { + { 128, 128, 64 }, + { 96, 112, 64 }, + }, + }, + .fr = { + { 64, 96, 64 }, + { 64, 96, 64 }, + }, + .class0_hp = { 160, 160 }, + .hp = { 128, 128 }, + }, +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs); + +static u32 fastdiv(u32 dividend, u16 divisor) +{ +#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d))) +#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \ + DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \ + DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \ + DIV_INV(d8), DIV_INV(d9) + + static const u32 inv[] = { + DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5), + DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9), + DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19), + DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29), + DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39), + DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49), + DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59), + DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69), + DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79), + DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89), + DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99), + DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109), + DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119), + DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 
129), + DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139), + DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149), + DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159), + DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169), + DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179), + DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189), + DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199), + DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209), + DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219), + DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229), + DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239), + DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249), + DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253), + DIV_INV(254), DIV_INV(255), DIV_INV(256), + }; + + if (divisor == 0) + return 0; + else if (divisor == 1) + return dividend; + + if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv))) + return dividend; + + return ((u64)dividend * inv[divisor - 2]) >> 32; +} + +/* 6.3.6 inv_recenter_nonneg(v, m) */ +static int inv_recenter_nonneg(int v, int m) +{ + if (v > 2 * m) + return v; + + if (v & 1) + return m - ((v + 1) >> 1); + + return m + (v >> 1); +} + +/* + * part of 6.3.5 inv_remap_prob(deltaProb, prob) + * delta = inv_map_table[deltaProb] done by userspace + */ +static int update_prob(int delta, int prob) +{ + if (!delta) + return prob; + + return prob <= 128 ? 
+ 1 + inv_recenter_nonneg(delta, prob - 1) : + 255 - inv_recenter_nonneg(delta, 255 - prob); +} + +/* Counterpart to 6.3.2 tx_mode_probs() */ +static void update_tx_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { + u8 *p8x8 = probs->tx8[i]; + u8 *p16x16 = probs->tx16[i]; + u8 *p32x32 = probs->tx32[i]; + const u8 *d8x8 = deltas->tx8[i]; + const u8 *d16x16 = deltas->tx16[i]; + const u8 *d32x32 = deltas->tx32[i]; + + p8x8[0] = update_prob(d8x8[0], p8x8[0]); + p16x16[0] = update_prob(d16x16[0], p16x16[0]); + p16x16[1] = update_prob(d16x16[1], p16x16[1]); + p32x32[0] = update_prob(d32x32[0], p32x32[0]); + p32x32[1] = update_prob(d32x32[1], p32x32[1]); + p32x32[2] = update_prob(d32x32[2], p32x32[2]); + } +} + +#define BAND_6(band) ((band) == 0 ? 3 : 6) + +static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3]) +{ + int l, m, n; + + for (l = 0; l < 6; l++) + for (m = 0; m < BAND_6(l); m++) { + u8 *p = probs[l][m]; + const u8 *d = deltas[l][m]; + + for (n = 0; n < 3; n++) + p[n] = update_prob(d[n], p[n]); + } +} + +/* Counterpart to 6.3.7 read_coef_probs() */ +static void update_coef_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + int i, j, k; + + for (i = 0; i < ARRAY_SIZE(probs->coef); i++) { + for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) + for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) + update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]); + + if (deltas->tx_mode == i) + break; + } +} + +/* Counterpart to 6.3.8 read_skip_prob() */ +static void update_skip_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->skip); i++) + probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]); +} + +/* Counterpart to 6.3.9 read_inter_mode_probs() */ 
+static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) { + u8 *p = probs->inter_mode[i]; + const u8 *d = deltas->inter_mode[i]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + p[2] = update_prob(d[2], p[2]); + } +} + +/* Counterpart to 6.3.10 read_interp_filter_probs() */ +static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) { + u8 *p = probs->interp_filter[i]; + const u8 *d = deltas->interp_filter[i]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + } +} + +/* Counterpart to 6.3.11 read_is_inter_probs() */ +static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) + probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]); +} + +/* 6.3.12 frame_reference_mode() done entirely in userspace */ + +/* Counterpart to 6.3.13 frame_reference_mode_probs() */ +static void +update_frame_reference_mode_probs(unsigned int reference_mode, + struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT) + for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) + probs->comp_mode[i] = update_prob(deltas->comp_mode[i], + probs->comp_mode[i]); + + if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE) + for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) { + u8 *p = probs->single_ref[i]; + const u8 *d = deltas->single_ref[i]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + } + + if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE) + for (i = 0; i < 
ARRAY_SIZE(probs->comp_ref); i++) + probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]); +} + +/* Counterpart to 6.3.14 read_y_mode_probs() */ +static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) + for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j) + probs->y_mode[i][j] = + update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]); +} + +/* Counterpart to 6.3.15 read_partition_probs() */ +static void update_partition_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i, j; + + for (i = 0; i < 4; i++) + for (j = 0; j < 4; j++) { + u8 *p = probs->partition[i * 4 + j]; + const u8 *d = deltas->partition[i * 4 + j]; + + p[0] = update_prob(d[0], p[0]); + p[1] = update_prob(d[1], p[1]); + p[2] = update_prob(d[2], p[2]); + } +} + +static inline int update_mv_prob(int delta, int prob) +{ + if (!delta) + return prob; + + return delta; +} + +/* Counterpart to 6.3.16 mv_probs() */ +static void update_mv_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + u8 *p = probs->mv.joint; + const u8 *d = deltas->mv.joint; + unsigned int i, j; + + p[0] = update_mv_prob(d[0], p[0]); + p[1] = update_mv_prob(d[1], p[1]); + p[2] = update_mv_prob(d[2], p[2]); + + for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { + p = probs->mv.sign; + d = deltas->mv.sign; + p[i] = update_mv_prob(d[i], p[i]); + + p = probs->mv.classes[i]; + d = deltas->mv.classes[i]; + for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++) + p[j] = update_mv_prob(d[j], p[j]); + + p = probs->mv.class0_bit; + d = deltas->mv.class0_bit; + p[i] = update_mv_prob(d[i], p[i]); + + p = probs->mv.bits[i]; + d = deltas->mv.bits[i]; + for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) + p[j] = update_mv_prob(d[j], p[j]); + + for 
(j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) { + p = probs->mv.class0_fr[i][j]; + d = deltas->mv.class0_fr[i][j]; + + p[0] = update_mv_prob(d[0], p[0]); + p[1] = update_mv_prob(d[1], p[1]); + p[2] = update_mv_prob(d[2], p[2]); + } + + p = probs->mv.fr[i]; + d = deltas->mv.fr[i]; + for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++) + p[j] = update_mv_prob(d[j], p[j]); + + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) { + p = probs->mv.class0_hp; + d = deltas->mv.class0_hp; + p[i] = update_mv_prob(d[i], p[i]); + + p = probs->mv.hp; + d = deltas->mv.hp; + p[i] = update_mv_prob(d[i], p[i]); + } + } +} + +/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */ +void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT) + update_tx_probs(probs, deltas); + + update_coef_probs(probs, deltas, dec_params); + + update_skip_probs(probs, deltas); + + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || + dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) + return; + + update_inter_mode_probs(probs, deltas); + + if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE) + update_interp_filter_probs(probs, deltas); + + update_is_inter_probs(probs, deltas); + + update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas); + + update_y_mode_probs(probs, deltas); + + update_partition_probs(probs, deltas); + + update_mv_probs(probs, deltas, dec_params); +} +EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs); + +u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params, + struct v4l2_vp9_frame_context *frame_context) +{ + int i; + + u8 fctx_idx = dec_params->frame_context_idx; + + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || + dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY || + dec_params->flags & 
V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) { + /* + * setup_past_independence() + * We do nothing here. Instead of storing default probs in some intermediate + * location and then copying from that location to appropriate contexts + * in save_probs() below, we skip that step and save default probs directly + * to appropriate contexts. + */ + if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || + dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT || + dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL) + for (i = 0; i < 4; ++i) + /* save_probs(i) */ + memcpy(&frame_context[i], &v4l2_vp9_default_probs, + sizeof(v4l2_vp9_default_probs)); + else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC) + /* save_probs(fctx_idx) */ + memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs, + sizeof(v4l2_vp9_default_probs)); + fctx_idx = 0; + } + + return fctx_idx; +} +EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx); + +/* 8.4.1 Merge prob process */ +static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor) +{ + u32 den, prob, count, factor; + + den = ct0 + ct1; + if (!den) { + /* + * prob = 128, count = 0, update_factor = 0 + * Round2's argument: pre_prob * 256 + * (pre_prob * 256 + 128) >> 8 == pre_prob + */ + return pre_prob; + } + + prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255); + count = min_t(u32, den, count_sat); + factor = fastdiv(max_update_factor * count, count_sat); + + /* + * Round2(pre_prob * (256 - factor) + prob * factor, 8) + * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8) + * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8) + */ + return pre_prob + (((prob - pre_prob) * factor + 128) >> 8); +} + +static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1) +{ + return merge_prob(pre_prob, ct0, ct1, 20, 128); +} + +/* 8.4.2 Merge probs process */ +/* + * merge_probs() is a recursive function in the spec. We avoid recursion in the kernel. 
+ * That said, the "tree" parameter of merge_probs() controls how deep the recursion goes. + * It turns out that in all cases the recursive calls boil down to a short-ish series + * of merge_prob() invocations (note no "s"). + * + * Variant A + * --------- + * merge_probs(small_token_tree, 2): + * merge_prob(p[1], c[0], c[1] + c[2]) + * merge_prob(p[2], c[1], c[2]) + * + * Variant B + * --------- + * merge_probs(binary_tree, 0) or + * merge_probs(tx_size_8_tree, 0): + * merge_prob(p[0], c[0], c[1]) + * + * Variant C + * --------- + * merge_probs(inter_mode_tree, 0): + * merge_prob(p[0], c[2], c[1] + c[0] + c[3]) + * merge_prob(p[1], c[0], c[1] + c[3]) + * merge_prob(p[2], c[1], c[3]) + * + * Variant D + * --------- + * merge_probs(intra_mode_tree, 0): + * merge_prob(p[0], c[0], c[1] + ... + c[9]) + * merge_prob(p[1], c[9], c[1] + ... + c[8]) + * merge_prob(p[2], c[1], c[2] + ... + c[8]) + * merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7]) + * merge_prob(p[4], c[2], c[4] + c[5]) + * merge_prob(p[5], c[4], c[5]) + * merge_prob(p[6], c[3], c[8] + c[6] + c[7]) + * merge_prob(p[7], c[8], c[6] + c[7]) + * merge_prob(p[8], c[6], c[7]) + * + * Variant E + * --------- + * merge_probs(partition_tree, 0) or + * merge_probs(tx_size_32_tree, 0) or + * merge_probs(mv_joint_tree, 0) or + * merge_probs(mv_fr_tree, 0): + * merge_prob(p[0], c[0], c[1] + c[2] + c[3]) + * merge_prob(p[1], c[1], c[2] + c[3]) + * merge_prob(p[2], c[2], c[3]) + * + * Variant F + * --------- + * merge_probs(interp_filter_tree, 0) or + * merge_probs(tx_size_16_tree, 0): + * merge_prob(p[0], c[0], c[1] + c[2]) + * merge_prob(p[1], c[1], c[2]) + * + * Variant G + * --------- + * merge_probs(mv_class_tree, 0): + * merge_prob(p[0], c[0], c[1] + ... + c[10]) + * merge_prob(p[1], c[1], c[2] + ... + c[10]) + * merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10]) + * merge_prob(p[3], c[2], c[3]) + * merge_prob(p[4], c[4] + c[5], c[6] + ... 
+ c[10]) + * merge_prob(p[5], c[4], c[5]) + * merge_prob(p[6], c[6], c[7] + ... + c[10]) + * merge_prob(p[7], c[7] + c[8], c[9] + c[10]) + * merge_prob(p[8], c[7], c[8]) + * merge_prob(p[9], c[9], c[10]) + */ + +static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor) +{ + p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor); + p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor); +} + +static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor) +{ + p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor); +} + +static inline void merge_probs_variant_c(u8 *p, const u32 *c) +{ + p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]); + p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]); + p[2] = noncoef_merge_prob(p[2], c[1], c[3]); +} + +static void merge_probs_variant_d(u8 *p, const u32 *c) +{ + u32 sum = 0, s2; + + sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9]; + + p[0] = noncoef_merge_prob(p[0], c[0], sum); + sum -= c[9]; + p[1] = noncoef_merge_prob(p[1], c[9], sum); + sum -= c[1]; + p[2] = noncoef_merge_prob(p[2], c[1], sum); + s2 = c[2] + c[4] + c[5]; + sum -= s2; + p[3] = noncoef_merge_prob(p[3], s2, sum); + s2 -= c[2]; + p[4] = noncoef_merge_prob(p[4], c[2], s2); + p[5] = noncoef_merge_prob(p[5], c[4], c[5]); + sum -= c[3]; + p[6] = noncoef_merge_prob(p[6], c[3], sum); + sum -= c[8]; + p[7] = noncoef_merge_prob(p[7], c[8], sum); + p[8] = noncoef_merge_prob(p[8], c[6], c[7]); +} + +static inline void merge_probs_variant_e(u8 *p, const u32 *c) +{ + p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]); + p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]); + p[2] = noncoef_merge_prob(p[2], c[2], c[3]); +} + +static inline void merge_probs_variant_f(u8 *p, const u32 *c) +{ + p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]); + p[1] = noncoef_merge_prob(p[1], c[1], c[2]); +} + +static void merge_probs_variant_g(u8 *p, const u32 *c) 
+{ + u32 sum; + + sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10]; + p[0] = noncoef_merge_prob(p[0], c[0], sum); + sum -= c[1]; + p[1] = noncoef_merge_prob(p[1], c[1], sum); + sum -= c[2] + c[3]; + p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum); + p[3] = noncoef_merge_prob(p[3], c[2], c[3]); + sum -= c[4] + c[5]; + p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum); + p[5] = noncoef_merge_prob(p[5], c[4], c[5]); + sum -= c[6]; + p[6] = noncoef_merge_prob(p[6], c[6], sum); + p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]); + p[8] = noncoef_merge_prob(p[8], c[7], c[8]); + p[9] = noncoef_merge_prob(p[9], c[9], c[10]); +} + +/* 8.4.3 Coefficient probability adaptation process */ +static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor) +{ + merge_probs_variant_a(p, c, 24, update_factor); +} + +static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor) +{ + merge_probs_variant_b(p, c, 24, update_factor); +} + +static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k, + struct v4l2_vp9_frame_context *probs, + const struct v4l2_vp9_frame_symbol_counts *counts, + u32 uf) +{ + s32 l, m; + + for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) { + for (m = 0; m < BAND_6(l); m++) { + u8 *p = probs->coef[i][j][k][l][m]; + const u32 counts_more_coefs[2] = { + *counts->eob[i][j][k][l][m][1], + *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1], + }; + + adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf); + adapt_probs_variant_b_coef(p, counts_more_coefs, uf); + } + } +} + +static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_vp9_frame_symbol_counts *counts, + unsigned int uf) +{ + unsigned int i, j, k; + + for (i = 0; i < ARRAY_SIZE(probs->coef); i++) + for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) + for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) + _adapt_coeff(i, j, k, probs, counts, uf); +} + 
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + bool use_128, + bool frame_is_intra) +{ + if (frame_is_intra) { + _adapt_coef_probs(probs, counts, 112); + } else { + if (use_128) + _adapt_coef_probs(probs, counts, 128); + else + _adapt_coef_probs(probs, counts, 112); + } +} +EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs); + +/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */ +static inline void adapt_probs_variant_b(u8 *p, const u32 *c) +{ + merge_probs_variant_b(p, c, 20, 128); +} + +static inline void adapt_probs_variant_c(u8 *p, const u32 *c) +{ + merge_probs_variant_c(p, c); +} + +static inline void adapt_probs_variant_d(u8 *p, const u32 *c) +{ + merge_probs_variant_d(p, c); +} + +static inline void adapt_probs_variant_e(u8 *p, const u32 *c) +{ + merge_probs_variant_e(p, c); +} + +static inline void adapt_probs_variant_f(u8 *p, const u32 *c) +{ + merge_probs_variant_f(p, c); +} + +static inline void adapt_probs_variant_g(u8 *p, const u32 *c) +{ + merge_probs_variant_g(p, c); +} + +/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */ +static inline u8 adapt_prob(u8 prob, const u32 counts[2]) +{ + return noncoef_merge_prob(prob, counts[0], counts[1]); +} + +/* 8.4.4 Non coefficient probability adaptation process */ +void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + u8 reference_mode, u8 interpolation_filter, u8 tx_mode, + u32 flags) +{ + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) + probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) + probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++) + probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]); + + if (reference_mode != 
V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE) + for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) + for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++) + probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j], + (*counts->single_ref)[i][j]); + + for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) + adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) + adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++) + adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->partition); i++) + adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->skip); i++) + probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]); + + if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE) + for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) + adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]); + + if (tx_mode == V4L2_VP9_TX_MODE_SELECT) + for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { + adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]); + adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]); + adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]); + } + + adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint); + + for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { + probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]); + + adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]); + + probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]); + + for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) + probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j], + (*counts->bits)[i][j]); + + for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) + adapt_probs_variant_e(probs->mv.class0_fr[i][j], + (*counts->class0_fp)[i][j]); + + 
adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]); + + if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV)) + continue; + + probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i], + (*counts->class0_hp)[i]); + + probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]); + } +} +EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs); + +bool +v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled, + unsigned int feature, + unsigned int segid) +{ + u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature); + + return !!(feature_enabled[segid] & mask); +} +EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V4L2 VP9 Helpers"); +MODULE_AUTHOR("Andrzej Pietrasiewicz "); diff --git a/6.12.0/include-overrides/media/cec-notifier.h b/6.12.0/include-overrides/media/cec-notifier.h new file mode 100644 index 0000000..b1c8397 --- /dev/null +++ b/6.12.0/include-overrides/media/cec-notifier.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-notifier.h - notify CEC drivers of physical address changes + * + * Copyright 2016 Russell King. + * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef LINUX_CEC_NOTIFIER_H +#define LINUX_CEC_NOTIFIER_H + +#include +#include + +struct device; +struct edid; +struct cec_adapter; +struct cec_notifier; + +#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) + +/** + * cec_notifier_conn_register - find or create a new cec_notifier for the given + * HDMI device and connector tuple. + * @hdmi_dev: HDMI device that sends the events. + * @port_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @conn_info: the connector info from which the event occurs (may be NULL) + * + * If a notifier for device @dev and connector @port_name already exists, then + * increase the refcount and return that notifier. 
+ * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info); + +/** + * cec_notifier_conn_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + */ +void cec_notifier_conn_unregister(struct cec_notifier *n); + +/** + * cec_notifier_cec_adap_register - find or create a new cec_notifier for the + * given device. + * @hdmi_dev: HDMI device that sends the events. + * @port_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @adap: the cec adapter that registered this notifier. + * + * If a notifier for device @dev and connector @port_name already exists, then + * increase the refcount and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap); + +/** + * cec_notifier_cec_adap_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + * @adap: the cec adapter that registered this notifier. + */ +void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap); + +/** + * cec_notifier_set_phys_addr - set a new physical address. + * @n: the CEC notifier + * @pa: the CEC physical address + * + * Set a new CEC physical address. + * Does nothing if @n == NULL. 
+ */ +void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa); + +/** + * cec_notifier_set_phys_addr_from_edid - set parse the PA from the EDID. + * @n: the CEC notifier + * @edid: the struct edid pointer + * + * Parses the EDID to obtain the new CEC physical address and set it. + * Does nothing if @n == NULL. + */ +void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid); + +/** + * cec_notifier_parse_hdmi_phandle - find the hdmi device from "hdmi-phandle" + * @dev: the device with the "hdmi-phandle" device tree property + * + * Returns the device pointer referenced by the "hdmi-phandle" property. + * Note that the refcount of the returned device is not incremented. + * This device pointer is only used as a key value in the notifier + * list, but it is never accessed by the CEC driver. + */ +struct device *cec_notifier_parse_hdmi_phandle(struct device *dev); + +#else + +static inline struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info) +{ + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; +} + +static inline void cec_notifier_conn_unregister(struct cec_notifier *n) +{ +} + +static inline struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap) +{ + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; +} + +static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap) +{ +} + +static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) +{ +} + +static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid) +{ +} + +static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +#endif + +/** + * 
cec_notifier_phys_addr_invalidate() - set the physical address to INVALID + * + * @n: the CEC notifier + * + * This is a simple helper function to invalidate the physical + * address. Does nothing if @n == NULL. + */ +static inline void cec_notifier_phys_addr_invalidate(struct cec_notifier *n) +{ + cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID); +} + +#endif diff --git a/6.12.0/include-overrides/media/cec-pin.h b/6.12.0/include-overrides/media/cec-pin.h new file mode 100644 index 0000000..483bc47 --- /dev/null +++ b/6.12.0/include-overrides/media/cec-pin.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-pin.h - low-level CEC pin control + * + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef LINUX_CEC_PIN_H +#define LINUX_CEC_PIN_H + +#include +#include + +/** + * struct cec_pin_ops - low-level CEC pin operations + * @read: read the CEC pin. Returns > 0 if high, 0 if low, or an error + * if negative. + * @low: drive the CEC pin low. + * @high: stop driving the CEC pin. The pull-up will drive the pin + * high, unless someone else is driving the pin low. + * @enable_irq: optional, enable the interrupt to detect pin voltage changes. + * @disable_irq: optional, disable the interrupt. + * @free: optional. Free any allocated resources. Called when the + * adapter is deleted. + * @status: optional, log status information. + * @read_hpd: optional. Read the HPD pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @read_5v: optional. Read the 5V pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @received: optional. High-level CEC message callback. Allows the driver + * to process CEC messages. + * + * These operations (except for the @received op) are used by the + * cec pin framework to manipulate the CEC pin. 
+ */ +struct cec_pin_ops { + int (*read)(struct cec_adapter *adap); + void (*low)(struct cec_adapter *adap); + void (*high)(struct cec_adapter *adap); + bool (*enable_irq)(struct cec_adapter *adap); + void (*disable_irq)(struct cec_adapter *adap); + void (*free)(struct cec_adapter *adap); + void (*status)(struct cec_adapter *adap, struct seq_file *file); + int (*read_hpd)(struct cec_adapter *adap); + int (*read_5v)(struct cec_adapter *adap); + + /* High-level CEC message callback */ + int (*received)(struct cec_adapter *adap, struct cec_msg *msg); +}; + +/** + * cec_pin_changed() - update pin state from interrupt + * + * @adap: pointer to the cec adapter + * @value: when true the pin is high, otherwise it is low + * + * If changes of the CEC voltage are detected via an interrupt, then + * cec_pin_changed is called from the interrupt with the new value. + */ +void cec_pin_changed(struct cec_adapter *adap, bool value); + +/** + * cec_pin_allocate_adapter() - allocate a pin-based cec adapter + * + * @pin_ops: low-level pin operations + * @priv: will be stored in adap->priv and can be used by the adapter ops. + * Use cec_get_drvdata(adap) to get the priv pointer. + * @name: the name of the CEC adapter. Note: this name will be copied. + * @caps: capabilities of the CEC adapter. This will be ORed with + * CEC_CAP_MONITOR_ALL and CEC_CAP_MONITOR_PIN. + * + * Allocate a cec adapter using the cec pin framework. + * + * Return: a pointer to the cec adapter or an error pointer + */ +struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops, + void *priv, const char *name, u32 caps); + +#endif diff --git a/6.12.0/include-overrides/media/cec.h b/6.12.0/include-overrides/media/cec.h new file mode 100644 index 0000000..16b412b --- /dev/null +++ b/6.12.0/include-overrides/media/cec.h @@ -0,0 +1,598 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec - HDMI Consumer Electronics Control support header + * + * Copyright 2016 Cisco Systems, Inc. 
and/or its affiliates. All rights reserved. + */ + +#ifndef _MEDIA_CEC_H +#define _MEDIA_CEC_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \ + CEC_CAP_PASSTHROUGH | CEC_CAP_RC) + +/** + * struct cec_devnode - cec device node + * @dev: cec device + * @cdev: cec character device + * @minor: device node minor number + * @lock: lock to serialize open/release and registration + * @registered: the device was correctly registered + * @unregistered: the device was unregistered + * @lock_fhs: lock to control access to @fhs + * @fhs: the list of open filehandles (cec_fh) + * + * This structure represents a cec-related device node. + * + * To add or remove filehandles from @fhs the @lock must be taken first, + * followed by @lock_fhs. It is safe to access @fhs if either lock is held. + * + * The @parent is a physical device. It must be set by core or device drivers + * before registering the node. 
+ */ +struct cec_devnode { + /* sysfs */ + struct device dev; + struct cdev cdev; + + /* device info */ + int minor; + /* serialize open/release and registration */ + struct mutex lock; + bool registered; + bool unregistered; + /* protect access to fhs */ + struct mutex lock_fhs; + struct list_head fhs; +}; + +struct cec_adapter; +struct cec_data; +struct cec_pin; +struct cec_notifier; + +struct cec_data { + struct list_head list; + struct list_head xfer_list; + struct cec_adapter *adap; + struct cec_msg msg; + u8 match_len; + u8 match_reply[5]; + struct cec_fh *fh; + struct delayed_work work; + struct completion c; + u8 attempts; + bool blocking; + bool completed; +}; + +struct cec_msg_entry { + struct list_head list; + struct cec_msg msg; +}; + +struct cec_event_entry { + struct list_head list; + struct cec_event ev; +}; + +#define CEC_NUM_CORE_EVENTS 2 +#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH + +struct cec_fh { + struct list_head list; + struct list_head xfer_list; + struct cec_adapter *adap; + u8 mode_initiator; + u8 mode_follower; + + /* Events */ + wait_queue_head_t wait; + struct mutex lock; + struct list_head events[CEC_NUM_EVENTS]; /* queued events */ + u16 queued_events[CEC_NUM_EVENTS]; + unsigned int total_queued_events; + struct cec_event_entry core_events[CEC_NUM_CORE_EVENTS]; + struct list_head msgs; /* queued messages */ + unsigned int queued_msgs; +}; + +#define CEC_SIGNAL_FREE_TIME_RETRY 3 +#define CEC_SIGNAL_FREE_TIME_NEW_INITIATOR 5 +#define CEC_SIGNAL_FREE_TIME_NEXT_XFER 7 + +/* The nominal data bit period is 2.4 ms */ +#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400) + +struct cec_adap_ops { + /* Low-level callbacks, called with adap->lock held */ + int (*adap_enable)(struct cec_adapter *adap, bool enable); + int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable); + int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable); + int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr); + void 
(*adap_unconfigured)(struct cec_adapter *adap); + int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg); + void (*adap_nb_transmit_canceled)(struct cec_adapter *adap, + const struct cec_msg *msg); + void (*adap_status)(struct cec_adapter *adap, struct seq_file *file); + void (*adap_free)(struct cec_adapter *adap); + + /* Error injection callbacks, called without adap->lock held */ + int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf); + bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line); + + /* High-level CEC message callback, called without adap->lock held */ + void (*configured)(struct cec_adapter *adap); + int (*received)(struct cec_adapter *adap, struct cec_msg *msg); +}; + +/* + * The minimum message length you can receive (excepting poll messages) is 2. + * With a transfer rate of at most 36 bytes per second this makes 18 messages + * per second worst case. + * + * We queue at most 3 seconds worth of received messages. The CEC specification + * requires that messages are replied to within a second, so 3 seconds should + * give more than enough margin. Since most messages are actually more than 2 + * bytes, this is in practice a lot more than 3 seconds. + */ +#define CEC_MAX_MSG_RX_QUEUE_SZ (18 * 3) + +/* + * The transmit queue is limited to 1 second worth of messages (worst case). + * Messages can be transmitted by userspace and kernel space. But for both it + * makes no sense to have a lot of messages queued up. One second seems + * reasonable. 
+ */ +#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) + +/** + * struct cec_adapter - cec adapter structure + * @owner: module owner + * @name: name of the CEC adapter + * @devnode: device node for the /dev/cecX device + * @lock: mutex controlling access to this structure + * @rc: remote control device + * @transmit_queue: queue of pending transmits + * @transmit_queue_sz: number of pending transmits + * @wait_queue: queue of transmits waiting for a reply + * @transmitting: CEC messages currently being transmitted + * @transmit_in_progress: true if a transmit is in progress + * @transmit_in_progress_aborted: true if a transmit is in progress is to be + * aborted. This happens if the logical address is + * invalidated while the transmit is ongoing. In that + * case the transmit will finish, but will not retransmit + * and be marked as ABORTED. + * @xfer_timeout_ms: the transfer timeout in ms. + * If 0, then timeout after 2100 ms. + * @kthread_config: kthread used to configure a CEC adapter + * @config_completion: used to signal completion of the config kthread + * @kthread: main CEC processing thread + * @kthread_waitq: main CEC processing wait_queue + * @ops: cec adapter ops + * @priv: cec driver's private data + * @capabilities: cec adapter capabilities + * @available_log_addrs: maximum number of available logical addresses + * @phys_addr: the current physical address + * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high + * in order to transmit or receive CEC messages. This is usually a HW + * limitation. + * @is_enabled: the CEC adapter is enabled + * @is_claiming_log_addrs: true if cec_claim_log_addrs() is running + * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) + * @must_reconfigure: while configuring, the PA changed, so reclaim LAs + * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) + * @cec_pin_is_high: if true then the CEC pin is high. Only used with the + * CEC pin framework. 
+ * @adap_controls_phys_addr: if true, then the CEC adapter controls the + * physical address, i.e. the CEC hardware can detect HPD changes and + * read the EDID and is not dependent on an external HDMI driver. + * Drivers that need this can set this field to true after the + * cec_allocate_adapter() call. + * @last_initiator: the initiator of the last transmitted message. + * @monitor_all_cnt: number of filehandles monitoring all msgs + * @monitor_pin_cnt: number of filehandles monitoring pin changes + * @follower_cnt: number of filehandles in follower mode + * @cec_follower: filehandle of the exclusive follower + * @cec_initiator: filehandle of the exclusive initiator + * @passthrough: if true, then the exclusive follower is in + * passthrough mode. + * @log_addrs: current logical addresses + * @conn_info: current connector info + * @tx_timeout_cnt: count the number of Timed Out transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_cnt: count the number of Low Drive transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_error_cnt: count the number of Error transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_arb_lost_cnt: count the number of Arb Lost transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the + * adapter was enabled. Used to avoid flooding the kernel + * log if this happens a lot. + * @tx_error_log_cnt: number of logged Error transmits since the adapter was + * enabled. Used to avoid flooding the kernel log if this + * happens a lot. + * @notifier: CEC notifier + * @pin: CEC pin status struct + * @cec_dir: debugfs cec directory + * @sequence: transmit sequence counter + * @input_phys: remote control input_phys name + * + * This structure represents a cec adapter. 
+ */ +struct cec_adapter { + struct module *owner; + char name[32]; + struct cec_devnode devnode; + struct mutex lock; + struct rc_dev *rc; + + struct list_head transmit_queue; + unsigned int transmit_queue_sz; + struct list_head wait_queue; + struct cec_data *transmitting; + bool transmit_in_progress; + bool transmit_in_progress_aborted; + unsigned int xfer_timeout_ms; + + struct task_struct *kthread_config; + struct completion config_completion; + + struct task_struct *kthread; + wait_queue_head_t kthread_waitq; + + const struct cec_adap_ops *ops; + void *priv; + u32 capabilities; + u8 available_log_addrs; + + u16 phys_addr; + bool needs_hpd; + bool is_enabled; + bool is_claiming_log_addrs; + bool is_configuring; + bool must_reconfigure; + bool is_configured; + bool cec_pin_is_high; + bool adap_controls_phys_addr; + u8 last_initiator; + u32 monitor_all_cnt; + u32 monitor_pin_cnt; + u32 follower_cnt; + struct cec_fh *cec_follower; + struct cec_fh *cec_initiator; + bool passthrough; + struct cec_log_addrs log_addrs; + struct cec_connector_info conn_info; + + u32 tx_timeout_cnt; + u32 tx_low_drive_cnt; + u32 tx_error_cnt; + u32 tx_arb_lost_cnt; + u32 tx_low_drive_log_cnt; + u32 tx_error_log_cnt; + +#ifdef CONFIG_CEC_NOTIFIER + struct cec_notifier *notifier; +#endif +#ifdef CONFIG_CEC_PIN + struct cec_pin *pin; +#endif + + struct dentry *cec_dir; + + u32 sequence; + + char input_phys[40]; +}; + +static inline int cec_get_device(struct cec_adapter *adap) +{ + struct cec_devnode *devnode = &adap->devnode; + + /* + * Check if the cec device is available. This needs to be done with + * the devnode->lock held to prevent an open/unregister race: + * without the lock, the device could be unregistered and freed between + * the devnode->registered check and get_device() calls, leading to + * a crash. + */ + mutex_lock(&devnode->lock); + /* + * return ENODEV if the cec device has been removed + * already or if it is not registered anymore. 
+ */ + if (!devnode->registered) { + mutex_unlock(&devnode->lock); + return -ENODEV; + } + /* and increase the device refcount */ + get_device(&devnode->dev); + mutex_unlock(&devnode->lock); + return 0; +} + +static inline void cec_put_device(struct cec_adapter *adap) +{ + put_device(&adap->devnode.dev); +} + +static inline void *cec_get_drvdata(const struct cec_adapter *adap) +{ + return adap->priv; +} + +static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr) +{ + return adap->log_addrs.log_addr_mask & (1 << log_addr); +} + +static inline bool cec_is_sink(const struct cec_adapter *adap) +{ + return adap->phys_addr == 0; +} + +/** + * cec_is_registered() - is the CEC adapter registered? + * + * @adap: the CEC adapter, may be NULL. + * + * Return: true if the adapter is registered, false otherwise. + */ +static inline bool cec_is_registered(const struct cec_adapter *adap) +{ + return adap && adap->devnode.registered; +} + +#define cec_phys_addr_exp(pa) \ + ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf + +struct edid; +struct drm_connector; + +#if IS_REACHABLE(CONFIG_CEC_CORE) +struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, + void *priv, const char *name, u32 caps, u8 available_las); +int cec_register_adapter(struct cec_adapter *adap, struct device *parent); +void cec_unregister_adapter(struct cec_adapter *adap); +void cec_delete_adapter(struct cec_adapter *adap); + +int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs, + bool block); +void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, + bool block); +void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid); +void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info); +int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg, + bool block); + +/* Called by the adapter */ +void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, + u8 
arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, + u8 error_cnt, ktime_t ts); + +static inline void cec_transmit_done(struct cec_adapter *adap, u8 status, + u8 arb_lost_cnt, u8 nack_cnt, + u8 low_drive_cnt, u8 error_cnt) +{ + cec_transmit_done_ts(adap, status, arb_lost_cnt, nack_cnt, + low_drive_cnt, error_cnt, ktime_get()); +} +/* + * Simplified version of cec_transmit_done for hardware that doesn't retry + * failed transmits. So this is always just one attempt in which case + * the status is sufficient. + */ +void cec_transmit_attempt_done_ts(struct cec_adapter *adap, + u8 status, ktime_t ts); + +static inline void cec_transmit_attempt_done(struct cec_adapter *adap, + u8 status) +{ + cec_transmit_attempt_done_ts(adap, status, ktime_get()); +} + +void cec_received_msg_ts(struct cec_adapter *adap, + struct cec_msg *msg, ktime_t ts); + +static inline void cec_received_msg(struct cec_adapter *adap, + struct cec_msg *msg) +{ + cec_received_msg_ts(adap, msg, ktime_get()); +} + +/** + * cec_queue_pin_cec_event() - queue a CEC pin event with a given timestamp. + * + * @adap: pointer to the cec adapter + * @is_high: when true the CEC pin is high, otherwise it is low + * @dropped_events: when true some events were dropped + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, + bool dropped_events, ktime_t ts); + +/** + * cec_queue_pin_hpd_event() - queue a pin event with a given timestamp. + * + * @adap: pointer to the cec adapter + * @is_high: when true the HPD pin is high, otherwise it is low + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts); + +/** + * cec_queue_pin_5v_event() - queue a pin event with a given timestamp. 
+ * + * @adap: pointer to the cec adapter + * @is_high: when true the 5V pin is high, otherwise it is low + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts); + +/** + * cec_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. + */ +u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset); + +void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector); + +#else + +static inline int cec_register_adapter(struct cec_adapter *adap, + struct device *parent) +{ + return 0; +} + +static inline void cec_unregister_adapter(struct cec_adapter *adap) +{ +} + +static inline void cec_delete_adapter(struct cec_adapter *adap) +{ +} + +static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, + bool block) +{ +} + +static inline void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid) +{ +} + +static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + if (offset) + *offset = 0; + return CEC_PHYS_ADDR_INVALID; +} + +static inline void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info) +{ +} + +static inline void +cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector) +{ + memset(conn_info, 0, sizeof(*conn_info)); +} + +#endif + +/** + * cec_phys_addr_invalidate() - set the physical address to INVALID + * + * @adap: the CEC adapter + * + * This is a simple helper function to invalidate the physical + * address. 
+ */ +static inline void cec_phys_addr_invalidate(struct cec_adapter *adap) +{ + cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); +} + +/** + * cec_get_edid_spa_location() - find location of the Source Physical Address + * + * @edid: the EDID + * @size: the size of the EDID + * + * This EDID is expected to be a CEA-861 compliant, which means that there are + * at least two blocks and one or more of the extensions blocks are CEA-861 + * blocks. + * + * The returned location is guaranteed to be <= size-2. + * + * This is an inline function since it is used by both CEC and V4L2. + * Ideally this would go in a module shared by both, but it is overkill to do + * that for just a single function. + */ +static inline unsigned int cec_get_edid_spa_location(const u8 *edid, + unsigned int size) +{ + unsigned int blocks = size / 128; + unsigned int block; + u8 d; + + /* Sanity check: at least 2 blocks and a multiple of the block size */ + if (blocks < 2 || size % 128) + return 0; + + /* + * If there are fewer extension blocks than the size, then update + * 'blocks'. It is allowed to have more extension blocks than the size, + * since some hardware can only read e.g. 256 bytes of the EDID, even + * though more blocks are present. The first CEA-861 extension block + * should normally be in block 1 anyway. 
+ */ + if (edid[0x7e] + 1 < blocks) + blocks = edid[0x7e] + 1; + + for (block = 1; block < blocks; block++) { + unsigned int offset = block * 128; + + /* Skip any non-CEA-861 extension blocks */ + if (edid[offset] != 0x02 || edid[offset + 1] != 0x03) + continue; + + /* search Vendor Specific Data Block (tag 3) */ + d = edid[offset + 2] & 0x7f; + /* Check if there are Data Blocks */ + if (d <= 4) + continue; + if (d > 4) { + unsigned int i = offset + 4; + unsigned int end = offset + d; + + /* Note: 'end' is always < 'size' */ + do { + u8 tag = edid[i] >> 5; + u8 len = edid[i] & 0x1f; + + if (tag == 3 && len >= 5 && i + len <= end && + edid[i + 1] == 0x03 && + edid[i + 2] == 0x0c && + edid[i + 3] == 0x00) + return i + 4; + i += len + 1; + } while (i < end); + } + } + return 0; +} + +#endif /* _MEDIA_CEC_H */ diff --git a/6.12.0/include-overrides/media/demux.h b/6.12.0/include-overrides/media/demux.h new file mode 100644 index 0000000..bf00a5a --- /dev/null +++ b/6.12.0/include-overrides/media/demux.h @@ -0,0 +1,600 @@ +/* + * demux.h + * + * The Kernel Digital TV Demux kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * demux layer. + * + * Copyright (c) 2002 Convergence GmbH + * + * based on code: + * Copyright (c) 2000 Nokia Research Center + * Tampere, FINLAND + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __DEMUX_H +#define __DEMUX_H + +#include +#include +#include +#include +#include + +/* + * Common definitions + */ + +/* + * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter. + */ + +#ifndef DMX_MAX_FILTER_SIZE +#define DMX_MAX_FILTER_SIZE 18 +#endif + +/* + * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed + * filter. + */ + +#ifndef DMX_MAX_SECTION_SIZE +#define DMX_MAX_SECTION_SIZE 4096 +#endif +#ifndef DMX_MAX_SECFEED_SIZE +#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188) +#endif + +/* + * TS packet reception + */ + +/** + * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\) + * + * @TS_PACKET: Send TS packets (188 bytes) to callback (default). + * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload + * (<=184 bytes per packet) to callback + * @TS_DECODER: Send stream to built-in decoder (if present). + * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux + * device, not to the dvr device + */ +enum ts_filter_type { + TS_PACKET = 1, + TS_PAYLOAD_ONLY = 2, + TS_DECODER = 4, + TS_DEMUX = 8, +}; + +/** + * struct dmx_ts_feed - Structure that contains a TS feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @set: sets the TS filter + * @start_filtering: starts TS filtering + * @stop_filtering: stops TS filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. 
+ */ +struct dmx_ts_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + int (*set)(struct dmx_ts_feed *feed, + u16 pid, + int type, + enum dmx_ts_pes pes_type, + ktime_t timeout); + int (*start_filtering)(struct dmx_ts_feed *feed); + int (*stop_filtering)(struct dmx_ts_feed *feed); +}; + +/* + * Section reception + */ + +/** + * struct dmx_section_filter - Structure that describes a section filter + * + * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header + * that will be matched by the section filter + * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits + * specified by @filter_value that will be used on the filter + * match logic. + * @filter_mode: Contains a 16 bytes (128 bits) filter mode. + * @parent: Back-pointer to struct dmx_section_feed. + * @priv: Pointer to private data of the API client. + * + * + * The @filter_mask controls which bits of @filter_value are compared with + * the section headers/payload. On a binary value of 1 in filter_mask, the + * corresponding bits are compared. The filter only accepts sections that are + * equal to filter_value in all the tested bit positions. + */ +struct dmx_section_filter { + u8 filter_value[DMX_MAX_FILTER_SIZE]; + u8 filter_mask[DMX_MAX_FILTER_SIZE]; + u8 filter_mode[DMX_MAX_FILTER_SIZE]; + struct dmx_section_feed *parent; + + void *priv; +}; + +/** + * struct dmx_section_feed - Structure that contains a section feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @check_crc: If non-zero, check the CRC values of filtered sections. + * @set: sets the section filter + * @allocate_filter: This function is used to allocate a section filter on + * the demux. It should only be called when no filtering + * is in progress on this section feed. If a filter cannot + * be allocated, the function fails with -ENOSPC. 
+ * @release_filter: This function releases all the resources of a + * previously allocated section filter. The function + * should not be called while filtering is in progress + * on this section feed. After calling this function, + * the caller should not try to dereference the filter + * pointer. + * @start_filtering: starts section filtering + * @stop_filtering: stops section filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. + */ +struct dmx_section_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + + int check_crc; + + /* private: Used internally at dvb_demux.c */ + u32 crc_val; + + u8 *secbuf; + u8 secbuf_base[DMX_MAX_SECFEED_SIZE]; + u16 secbufp, seclen, tsfeedp; + + /* public: */ + int (*set)(struct dmx_section_feed *feed, + u16 pid, + int check_crc); + int (*allocate_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter **filter); + int (*release_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter *filter); + int (*start_filtering)(struct dmx_section_feed *feed); + int (*stop_filtering)(struct dmx_section_feed *feed); +}; + +/** + * typedef dmx_ts_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered TS packets. + * @buffer1_length: Length of the TS data in buffer1. + * @buffer2: Pointer to the tail of the filtered TS packets, or NULL. + * @buffer2_length: Length of the TS data in buffer2. + * @source: Indicates which TS feed is the source of the callback. + * @buffer_flags: Address where buffer flags are stored. Those are + * used to report discontinuity users via DVB + * memory mapped API, as defined by + * &enum dmx_buffer_flags. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. 
The function is only called when filtering + * on a TS feed has been enabled using the start_filtering\(\) function at + * the &dmx_demux. + * Any TS packets that match the filter settings are copied to a circular + * buffer. The filtered TS packets are delivered to the client using this + * callback function. + * It is expected that the @buffer1 and @buffer2 callback parameters point to + * addresses within the circular buffer, but other implementations are also + * possible. Note that the called party should not try to free the memory + * the @buffer1 and @buffer2 parameters point to. + * + * When this function is called, the @buffer1 parameter typically points to + * the start of the first undelivered TS packet within a circular buffer. + * The @buffer2 buffer parameter is normally NULL, except when the received + * TS packets have crossed the last address of the circular buffer and + * "wrapped" to the beginning of the buffer. In the latter case the @buffer1 + * parameter would contain an address within the circular buffer, while the + * @buffer2 parameter would contain the first address of the circular buffer. + * The number of bytes delivered with this function (i.e. @buffer1_length + + * @buffer2_length) is usually equal to the value of callback_length parameter + * given in the set() function, with one exception: if a timeout occurs before + * receiving callback_length bytes of TS data, any undelivered packets are + * immediately delivered to the client by calling this function. The timeout + * duration is controlled by the set() function in the TS Feed API. + * + * If a TS packet is received with errors that could not be fixed by the + * TS-level forward error correction (FEC), the Transport_error_indicator + * flag of the TS packet header should be set. The TS packet should not be + * discarded, as the error can possibly be corrected by a higher layer + * protocol. 
If the called party is slow in processing the callback, it + * is possible that the circular buffer eventually fills up. If this happens, + * the demux driver should discard any TS packets received while the buffer + * is full and return -EOVERFLOW. + * + * The type of data returned to the callback can be selected by the + * &dmx_ts_feed.@set function. The type parameter decides if the raw + * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY) + * should be returned. If additionally the TS_DECODER bit is set the stream + * will also be sent to the hardware MPEG decoder. + * + * Return: + * + * - 0, on success; + * + * - -EOVERFLOW, on buffer overflow. + */ +typedef int (*dmx_ts_cb)(const u8 *buffer1, + size_t buffer1_length, + const u8 *buffer2, + size_t buffer2_length, + struct dmx_ts_feed *source, + u32 *buffer_flags); + +/** + * typedef dmx_section_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered section, e.g. + * within the circular buffer of the demux driver. + * @buffer1_len: Length of the filtered section data in @buffer1, + * including headers and CRC. + * @buffer2: Pointer to the tail of the filtered section data, + * or NULL. Useful to handle the wrapping of a + * circular buffer. + * @buffer2_len: Length of the filtered section data in @buffer2, + * including headers and CRC. + * @source: Indicates which section feed is the source of the + * callback. + * @buffer_flags: Address where buffer flags are stored. Those are + * used to report discontinuity users via DVB + * memory mapped API, as defined by + * &enum dmx_buffer_flags. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. The function is only called when + * filtering of sections has been enabled using the function + * &dmx_ts_feed.@start_filtering. 
When the demux driver has received a + * complete section that matches at least one section filter, the client + * is notified via this callback function. Normally this function is called + * for each received section; however, it is also possible to deliver + * multiple sections with one callback, for example when the system load + * is high. If an error occurs while receiving a section, this + * function should be called with the corresponding error type set in the + * success field, whether or not there is data to deliver. The Section Feed + * implementation should maintain a circular buffer for received sections. + * However, this is not necessary if the Section Feed API is implemented as + * a client of the TS Feed API, because the TS Feed implementation then + * buffers the received data. The size of the circular buffer can be + * configured using the &dmx_ts_feed.@set function in the Section Feed API. + * If there is no room in the circular buffer when a new section is received, + * the section must be discarded. If this happens, the value of the success + * parameter should be DMX_OVERRUN_ERROR on the next callback. + */ +typedef int (*dmx_section_cb)(const u8 *buffer1, + size_t buffer1_len, + const u8 *buffer2, + size_t buffer2_len, + struct dmx_section_filter *source, + u32 *buffer_flags); + +/* + * DVB Front-End + */ + +/** + * enum dmx_frontend_source - Used to identify the type of frontend + * + * @DMX_MEMORY_FE: The source of the demux is memory. It means that + * the MPEG-TS to be filtered comes from userspace, + * via write() syscall. + * + * @DMX_FRONTEND_0: The source of the demux is a frontend connected + * to the demux. + */ +enum dmx_frontend_source { + DMX_MEMORY_FE, + DMX_FRONTEND_0, +}; + +/** + * struct dmx_frontend - Structure that lists the frontends associated with + * a demux + * + * @connectivity_list: List of front-ends that can be connected to a + * particular demux; + * @source: Type of the frontend. 
+ * + * FIXME: this structure should likely be replaced soon by some + * media-controller based logic. + */ +struct dmx_frontend { + struct list_head connectivity_list; + enum dmx_frontend_source source; +}; + +/* + * MPEG-2 TS Demux + */ + +/** + * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap + * + * @DMX_TS_FILTERING: set if TS filtering is supported; + * @DMX_SECTION_FILTERING: set if section filtering is supported; + * @DMX_MEMORY_BASED_FILTERING: set if write() available. + * + * Those flags are OR'ed in the &dmx_demux.capabilities field + */ +enum dmx_demux_caps { + DMX_TS_FILTERING = 1, + DMX_SECTION_FILTERING = 4, + DMX_MEMORY_BASED_FILTERING = 8, +}; + +/* + * Demux resource type identifier. + */ + +/** + * DMX_FE_ENTRY - Casts elements in the list of registered + * front-ends from the generic type struct list_head + * to the type * struct dmx_frontend + * + * @list: list of struct dmx_frontend + */ +#define DMX_FE_ENTRY(list) \ + list_entry(list, struct dmx_frontend, connectivity_list) + +/** + * struct dmx_demux - Structure that contains the demux capabilities and + * callbacks. + * + * @capabilities: Bitfield of capability flags. + * + * @frontend: Front-end connected to the demux + * + * @priv: Pointer to private data of the API client + * + * @open: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EUSERS, if maximum usage count was reached; + * -EINVAL, on bad parameter. 
+ * + * @close: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if demux was not in use (e. g. no users); + * -EINVAL, on bad parameter. + * + * @write: This function provides the demux driver with a memory buffer + * containing TS packets. Instead of receiving TS packets from the DVB + * front-end, the demux driver software will read packets from memory. + * Any clients of this demux with active TS, PES or Section filters will + * receive filtered data via the Demux callback API (see 0). The function + * returns when all the data in the buffer has been consumed by the demux. + * Demux hardware typically cannot read TS from memory. If this is the + * case, memory-based filtering has to be implemented entirely in software. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @buf function parameter contains a pointer to the TS data in + * kernel-space memory. + * The @count function parameter contains the length of the TS data. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EINTR, if a signal handling is pending; + * -ENODEV, if demux was removed; + * -EINVAL, on bad parameter. + * + * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS + * packets carrying a certain PID. The TS feed normally corresponds to a + * hardware PID filter on the demux chip. + * The @demux function parameter contains a pointer to the demux API and + * instance data. 
+ * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed. + * Any filtering in progress on the TS feed should be stopped before + * calling this function. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource + * for filtering and receiving sections. On platforms with hardware + * support for section filtering, a section feed is directly mapped to + * the demux HW. On other platforms, TS packets are first PID filtered in + * hardware and a hardware section filter then emulated in software. The + * caller obtains an API pointer of type dmx_section_feed_t as an out + * parameter. Using this API the caller can set filtering parameters and + * start receiving sections. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_section_feed: Releases the resources allocated with + * @allocate_section_feed, including allocated filters. Any filtering in + * progress on the section feed should be stopped before calling this + * function. 
+ * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @add_frontend: Registers a connectivity between a demux and a front-end, + * i.e., indicates that the demux can be connected via a call to + * @connect_frontend to use the given front-end as a TS source. The + * client of this function has to allocate dynamic or static memory for + * the frontend structure and initialize its fields before calling this + * function. This function is normally called during the driver + * initialization. The caller must not free the memory of the frontend + * struct before successfully calling @remove_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @remove_frontend: Indicates that the given front-end, registered by a call + * to @add_frontend, can no longer be connected as a TS source by this + * demux. The function should be called when a front-end driver or a demux + * driver is removed from the system. If the front-end is in use, the + * function fails with the return value of -EBUSY. After successfully + * calling this function, the caller can free the memory of the frontend + * struct if it was dynamically allocated before the @add_frontend + * operation. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if the front-end was not found, + * -EINVAL, on bad parameter. + * + * @get_frontends: Provides the APIs of the front-ends that have been + * registered for this demux. 
Any of the front-ends obtained with this + * call can be used as a parameter for @connect_frontend. The include + * file demux.h contains the macro DMX_FE_ENTRY() for converting an + * element of the generic type struct &list_head * to the type + * struct &dmx_frontend *. The caller must not free the memory of any of + * the elements obtained via this function call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns a struct list_head pointer to the list of front-end + * interfaces, or NULL in the case of an empty list. + * + * @connect_frontend: Connects the TS output of the front-end to the input of + * the demux. A demux can only be connected to a front-end registered to + * the demux with the function @add_frontend. It may or may not be + * possible to connect multiple demuxes to the same front-end, depending + * on the capabilities of the HW platform. When not used, the front-end + * should be released by calling @disconnect_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @disconnect_frontend: Disconnects the demux and a front-end previously + * connected by a @connect_frontend call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0, + * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @pids function parameter contains an array with five u16 elements + * where the PIDs will be stored. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. 
+ */ +struct dmx_demux { + enum dmx_demux_caps capabilities; + struct dmx_frontend *frontend; + void *priv; + int (*open)(struct dmx_demux *demux); + int (*close)(struct dmx_demux *demux); + int (*write)(struct dmx_demux *demux, const char __user *buf, + size_t count); + int (*allocate_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed **feed, + dmx_ts_cb callback); + int (*release_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed *feed); + int (*allocate_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed **feed, + dmx_section_cb callback); + int (*release_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed *feed); + int (*add_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*remove_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + struct list_head *(*get_frontends)(struct dmx_demux *demux); + int (*connect_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*disconnect_frontend)(struct dmx_demux *demux); + + int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids); + + /* private: */ + + /* + * Only used at av7110, to read some data from firmware. + * As this was never documented, we have no clue about what's + * there, and its usage on other drivers aren't encouraged. 
+ */ + int (*get_stc)(struct dmx_demux *demux, unsigned int num, + u64 *stc, unsigned int *base); +}; + +#endif /* #ifndef __DEMUX_H */ diff --git a/6.12.0/include-overrides/media/dmxdev.h b/6.12.0/include-overrides/media/dmxdev.h new file mode 100644 index 0000000..63219a6 --- /dev/null +++ b/6.12.0/include-overrides/media/dmxdev.h @@ -0,0 +1,213 @@ +/* + * dmxdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DMXDEV_H_ +#define _DMXDEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/** + * enum dmxdev_type - type of demux filter type. + * + * @DMXDEV_TYPE_NONE: no filter set. + * @DMXDEV_TYPE_SEC: section filter. + * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter. + */ +enum dmxdev_type { + DMXDEV_TYPE_NONE, + DMXDEV_TYPE_SEC, + DMXDEV_TYPE_PES, +}; + +/** + * enum dmxdev_state - state machine for the dmxdev. + * + * @DMXDEV_STATE_FREE: indicates that the filter is freed. + * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMXDEV_STATE_SET: indicates that the filter parameters are set. + * @DMXDEV_STATE_GO: indicates that the filter is running. + * @DMXDEV_STATE_DONE: indicates that a packet was already filtered + * and the filter is now disabled. + * Set only if %DMX_ONESHOT. See + * &dmx_sct_filter_params. 
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition. + */ +enum dmxdev_state { + DMXDEV_STATE_FREE, + DMXDEV_STATE_ALLOCATED, + DMXDEV_STATE_SET, + DMXDEV_STATE_GO, + DMXDEV_STATE_DONE, + DMXDEV_STATE_TIMEDOUT +}; + +/** + * struct dmxdev_feed - digital TV dmxdev feed + * + * @pid: Program ID to be filtered + * @ts: pointer to &struct dmx_ts_feed + * @next: &struct list_head pointing to the next feed. + */ + +struct dmxdev_feed { + u16 pid; + struct dmx_ts_feed *ts; + struct list_head next; +}; + +/** + * struct dmxdev_filter - digital TV dmxdev filter + * + * @filter: a union describing a dmxdev filter. + * Currently used only for section filters. + * @filter.sec: a &struct dmx_section_filter pointer. + * For section filter only. + * @feed: a union describing a dmxdev feed. + * Depending on the filter type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct list_head list. + * For TS and PES feeds. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @params: a union describing dmxdev filter parameters. + * Depending on the filter type, it can be either + * @params.sec or @params.pes. + * @params.sec: a &struct dmx_sct_filter_params embedded struct. + * For section filter only. + * @params.pes: a &struct dmx_pes_filter_params embedded struct. + * For PES filter only. + * @type: type of the dmxdev filter, as defined by &enum dmxdev_type. + * @state: state of the dmxdev filter, as defined by &enum dmxdev_state. + * @dev: pointer to &struct dmxdev. + * @buffer: an embedded &struct dvb_ringbuffer buffer. + * @vb2_ctx: control struct for VB2 handler + * @mutex: protects the access to &struct dmxdev_filter. + * @timer: &struct timer_list embedded timer, used to check for + * feed timeouts. + * Only for section filter. + * @todo: index for the @secheader. + * Only for section filter. + * @secheader: buffer cache to parse the section header. + * Only for section filter. 
+ */ +struct dmxdev_filter { + union { + struct dmx_section_filter *sec; + } filter; + + union { + /* list of TS and PES feeds (struct dmxdev_feed) */ + struct list_head ts; + struct dmx_section_feed *sec; + } feed; + + union { + struct dmx_sct_filter_params sec; + struct dmx_pes_filter_params pes; + } params; + + enum dmxdev_type type; + enum dmxdev_state state; + struct dmxdev *dev; + struct dvb_ringbuffer buffer; + struct dvb_vb2_ctx vb2_ctx; + + struct mutex mutex; + + /* only for sections */ + struct timer_list timer; + int todo; + u8 secheader[3]; +}; + +/** + * struct dmxdev - Describes a digital TV demux device. + * + * @dvbdev: pointer to &struct dvb_device associated with + * the demux device node. + * @dvr_dvbdev: pointer to &struct dvb_device associated with + * the dvr device node. + * @filter: pointer to &struct dmxdev_filter. + * @demux: pointer to &struct dmx_demux. + * @filternum: number of filters. + * @capabilities: demux capabilities as defined by &enum dmx_demux_caps. + * @may_do_mmap: flag used to indicate if the device may do mmap. + * @exit: flag to indicate that the demux is being released. + * @dvr_orig_fe: pointer to &struct dmx_frontend. + * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output. + * @dvr_vb2_ctx: control struct for VB2 handler + * @mutex: protects the usage of this structure. + * @lock: protects access to &dmxdev->filter->data. + */ +struct dmxdev { + struct dvb_device *dvbdev; + struct dvb_device *dvr_dvbdev; + + struct dmxdev_filter *filter; + struct dmx_demux *demux; + + int filternum; + int capabilities; + + unsigned int may_do_mmap:1; + unsigned int exit:1; +#define DMXDEV_CAP_DUPLEX 1 + struct dmx_frontend *dvr_orig_fe; + + struct dvb_ringbuffer dvr_buffer; +#define DVR_BUFFER_SIZE (10*188*1024) + + struct dvb_vb2_ctx dvr_vb2_ctx; + + struct mutex mutex; + spinlock_t lock; +}; + +/** + * dvb_dmxdev_init - initializes a digital TV demux and registers both demux + * and DVR devices. 
+ * + * @dmxdev: pointer to &struct dmxdev. + * @adap: pointer to &struct dvb_adapter. + */ +int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap); + +/** + * dvb_dmxdev_release - releases a digital TV demux and unregisters it. + * + * @dmxdev: pointer to &struct dmxdev. + */ +void dvb_dmxdev_release(struct dmxdev *dmxdev); + +#endif /* _DMXDEV_H_ */ diff --git a/6.12.0/include-overrides/media/dvb-usb-ids.h b/6.12.0/include-overrides/media/dvb-usb-ids.h new file mode 100644 index 0000000..1b7d10f --- /dev/null +++ b/6.12.0/include-overrides/media/dvb-usb-ids.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* dvb-usb-ids.h is part of the DVB USB library. + * + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see + * dvb-usb-init.c for copyright information. + * + * a header file containing define's for the USB device supported by the + * various drivers. + */ +#ifndef _DVB_USB_IDS_H_ +#define _DVB_USB_IDS_H_ + +#include + +#define DVB_USB_DEV(pid, vid) \ + [vid] = { USB_DEVICE(USB_VID_ ## pid, USB_PID_ ## vid) } + +#define DVB_USB_DEV_VER(pid, vid, lo, hi) \ + [vid] = { USB_DEVICE_VER(USB_VID_ ## pid, USB_PID_ ## vid, lo, hi) } + +/* Vendor IDs */ + +#define USB_VID_774 0x7a69 +#define USB_VID_ADSTECH 0x06e1 +#define USB_VID_AFATECH 0x15a4 +#define USB_VID_ALCOR_MICRO 0x058f +#define USB_VID_ALINK 0x05e3 +#define USB_VID_AME 0x06be +#define USB_VID_AMT 0x1c73 +#define USB_VID_ANCHOR 0x0547 +#define USB_VID_ANSONIC 0x10b9 +#define USB_VID_ANUBIS_ELECTRONIC 0x10fd +#define USB_VID_ASUS 0x0b05 +#define USB_VID_AVERMEDIA 0x07ca +#define USB_VID_AZUREWAVE 0x13d3 +#define USB_VID_COMPRO 0x185b +#define USB_VID_COMPRO_UNK 0x145f +#define USB_VID_CONEXANT 0x0572 +#define USB_VID_CYPRESS 0x04b4 +#define USB_VID_DEXATEK 0x1d19 +#define USB_VID_DIBCOM 0x10b8 +#define USB_VID_DPOSH 0x1498 +#define USB_VID_DVICO 0x0fe9 +#define USB_VID_E3C 0x18b4 +#define USB_VID_ELGATO 0x0fd9 +#define USB_VID_EMPIA 0xeb1a +#define 
USB_VID_EVOLUTEPC 0x1e59 +#define USB_VID_GENPIX 0x09c0 +#define USB_VID_GIGABYTE 0x1044 +#define USB_VID_GOTVIEW 0x1fe1 +#define USB_VID_GRANDTEC 0x5032 +#define USB_VID_GTEK 0x1f4d +#define USB_VID_HAMA 0x147f +#define USB_VID_HANFTEK 0x15f4 +#define USB_VID_HAUPPAUGE 0x2040 +#define USB_VID_HUMAX_COEX 0x10b9 +#define USB_VID_HYPER_PALTEK 0x1025 +#define USB_VID_INTEL 0x8086 +#define USB_VID_ITETECH 0x048d +#define USB_VID_KWORLD 0xeb2a +#define USB_VID_KWORLD_2 0x1b80 +#define USB_VID_KYE 0x0458 +#define USB_VID_LEADTEK 0x0413 +#define USB_VID_LITEON 0x04ca +#define USB_VID_MEDION 0x1660 +#define USB_VID_MICROSOFT 0x045e +#define USB_VID_MIGLIA 0x18f3 +#define USB_VID_MSI 0x0db0 +#define USB_VID_MSI_2 0x1462 +#define USB_VID_OPERA1 0x695c +#define USB_VID_PCTV 0x2013 +#define USB_VID_PINNACLE 0x2304 +#define USB_VID_PIXELVIEW 0x1554 +#define USB_VID_PROF_1 0x3011 +#define USB_VID_PROF_2 0x3034 +#define USB_VID_REALTEK 0x0bda +#define USB_VID_SONY 0x1415 +#define USB_VID_TECHNISAT 0x14f7 +#define USB_VID_TECHNOTREND 0x0b48 +#define USB_VID_TELESTAR 0x10b9 +#define USB_VID_TERRATEC 0x0ccd +#define USB_VID_TERRATEC_2 0x153b +#define USB_VID_TEVII 0x9022 +#define USB_VID_TWINHAN 0x1822 +#define USB_VID_ULTIMA_ELECTRONIC 0x05d8 +#define USB_VID_UNIWILL 0x1584 +#define USB_VID_VISIONPLUS 0x13d3 +#define USB_VID_WIDEVIEW 0x14aa +#define USB_VID_XTENSIONS 0x1ae7 +#define USB_VID_YUAN 0x1164 +#define USB_VID_ZYDAS 0x0ace + +/* Product IDs */ + +#define USB_PID_ADSTECH_USB2_COLD 0xa333 +#define USB_PID_ADSTECH_USB2_WARM 0xa334 +#define USB_PID_AFATECH_AF9005 0x9020 +#define USB_PID_AFATECH_AF9015_9015 0x9015 +#define USB_PID_AFATECH_AF9015_9016 0x9016 +#define USB_PID_AFATECH_AF9035_1000 0x1000 +#define USB_PID_AFATECH_AF9035_1001 0x1001 +#define USB_PID_AFATECH_AF9035_1002 0x1002 +#define USB_PID_AFATECH_AF9035_1003 0x1003 +#define USB_PID_AFATECH_AF9035_9035 0x9035 +#define USB_PID_ALINK_DTU 0xf170 +#define USB_PID_AME_DTV5100 0xa232 +#define 
USB_PID_ANCHOR_NEBULA_DIGITV 0x0201 +#define USB_PID_ANSONIC_DVBT_USB 0x6000 +#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514 +#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513 +#define USB_PID_ANUBIS_MSI_DIGI_VOX_MINI_II 0x1513 +#define USB_PID_ANYSEE 0x861f +#define USB_PID_ASUS_U3000 0x171f +#define USB_PID_ASUS_U3000H 0x1736 +#define USB_PID_ASUS_U3100 0x173f +#define USB_PID_ASUS_U3100MINI_PLUS 0x1779 +#define USB_PID_AVERMEDIA_1867 0x1867 +#define USB_PID_AVERMEDIA_A309 0xa309 +#define USB_PID_AVERMEDIA_A310 0xa310 +#define USB_PID_AVERMEDIA_A805 0xa805 +#define USB_PID_AVERMEDIA_A815M 0x815a +#define USB_PID_AVERMEDIA_A835 0xa835 +#define USB_PID_AVERMEDIA_A835B_1835 0x1835 +#define USB_PID_AVERMEDIA_A835B_2835 0x2835 +#define USB_PID_AVERMEDIA_A835B_3835 0x3835 +#define USB_PID_AVERMEDIA_A835B_4835 0x4835 +#define USB_PID_AVERMEDIA_A850 0x850a +#define USB_PID_AVERMEDIA_A850T 0x850b +#define USB_PID_AVERMEDIA_A867 0xa867 +#define USB_PID_AVERMEDIA_B835 0xb835 +#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800 +#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801 +#define USB_PID_AVERMEDIA_EXPRESS 0xb568 +#define USB_PID_AVERMEDIA_H335 0x0335 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039 +#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228 +#define USB_PID_AVERMEDIA_TD110 0xa110 +#define USB_PID_AVERMEDIA_TD310 0x1871 +#define USB_PID_AVERMEDIA_TWINSTAR 0x0825 +#define USB_PID_AVERMEDIA_VOLAR 0xa807 +#define USB_PID_AVERMEDIA_VOLAR_2 0xb808 +#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868 +#define USB_PID_AVERMEDIA_VOLAR_X 0xa815 +#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150 +#define USB_PID_AZUREWAVE_6007 0x0ccd +#define USB_PID_AZUREWAVE_AD_TU700 0x3237 +#define USB_PID_AZUREWAVE_AZ6027 0x3275 +#define USB_PID_AZUREWAVE_TWINHAN_VP7049 0x3219 +#define USB_PID_COMPRO_DVBU2000_COLD 0xd000 +#define 
USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c +#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d +#define USB_PID_COMPRO_DVBU2000_WARM 0xd001 +#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78 +#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80 +#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397 +#define USB_PID_CONEXANT_D680_DMB 0x86d6 +#define USB_PID_CPYTO_REDI_PC50A 0xa803 +#define USB_PID_CTVDIGDUAL_V2 0xe410 +#define USB_PID_CYPRESS_DW2101 0x2101 +#define USB_PID_CYPRESS_DW2102 0x2102 +#define USB_PID_CYPRESS_DW2104 0x2104 +#define USB_PID_CYPRESS_DW3101 0x3101 +#define USB_PID_CYPRESS_OPERA1_COLD 0x2830 +#define USB_PID_DELOCK_USB2_DVBT 0xb803 +#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 +#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064 +#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065 +#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8 +#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9 +#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6 +#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7 +#define USB_PID_DIBCOM_NIM7090 0x1bb2 +#define USB_PID_DIBCOM_NIM8096MD 0x1fa8 +#define USB_PID_DIBCOM_NIM9090M 0x2383 +#define USB_PID_DIBCOM_NIM9090MD 0x2384 +#define USB_PID_DIBCOM_STK7070P 0x1ebc +#define USB_PID_DIBCOM_STK7070PD 0x1ebe +#define USB_PID_DIBCOM_STK7700D 0x1ef0 +#define USB_PID_DIBCOM_STK7700P 0x1e14 +#define USB_PID_DIBCOM_STK7700P_PC 0x1e78 +#define USB_PID_DIBCOM_STK7700_U7000 0x7001 +#define USB_PID_DIBCOM_STK7770P 0x1e80 +#define USB_PID_DIBCOM_STK807XP 0x1f90 +#define USB_PID_DIBCOM_STK807XPVR 0x1f98 +#define USB_PID_DIBCOM_STK8096GP 0x1fa0 +#define USB_PID_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4 +#define USB_PID_DIBCOM_TFE7790P 0x1e6e +#define USB_PID_DIBCOM_TFE8096P 0x1f9C +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54 +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55 +#define USB_PID_DPOSH_M9206_COLD 0x9206 +#define USB_PID_DPOSH_M9206_WARM 0xa090 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51 +#define 
USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58 +#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71 +#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500 +#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501 +#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01 +#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10 +#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11 +#define USB_PID_E3C_EC168 0x1689 +#define USB_PID_E3C_EC168_2 0xfffa +#define USB_PID_E3C_EC168_3 0xfffb +#define USB_PID_E3C_EC168_4 0x1001 +#define USB_PID_E3C_EC168_5 0x1002 +#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 +#define USB_PID_ELGATO_EYETV_DTT 0x0021 +#define USB_PID_ELGATO_EYETV_DTT_2 0x003f +#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020 +#define USB_PID_ELGATO_EYETV_SAT 0x002a +#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025 +#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036 +#define USB_PID_EMPIA_DIGIVOX_MINI_SL_COLD 0xe360 +#define USB_PID_EMPIA_DIGIVOX_MINI_SL_WARM 0xe361 +#define USB_PID_EMPIA_VSTREAM_COLD 0x17de +#define USB_PID_EMPIA_VSTREAM_WARM 0x17df +#define USB_PID_EVOLUTEPC_TVWAY_PLUS 0x0002 +#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115 +#define USB_PID_FREECOM_DVBT 0x0160 +#define USB_PID_FREECOM_DVBT_2 0x0161 +#define USB_PID_FRIIO_WHITE 0x0001 +#define USB_PID_GENIATECH_SU3000 0x3000 +#define USB_PID_GENIATECH_T220 0xd220 +#define USB_PID_GENIATECH_X3M_SPC1400HD 0x3100 +#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012 +#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200 +#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201 +#define USB_PID_GENPIX_8PSK_REV_2 0x0202 +#define USB_PID_GENPIX_SKYWALKER_1 0x0203 +#define USB_PID_GENPIX_SKYWALKER_2 0x0206 +#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204 +#define USB_PID_GIGABYTE_U7000 
0x7001 +#define USB_PID_GIGABYTE_U8000 0x7002 +#define USB_PID_GOTVIEW_SAT_HD 0x5456 +#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6 +#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7 +#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 +#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 +#define USB_PID_GRANDTEC_MOD3000_COLD 0x0bb8 +#define USB_PID_GRANDTEC_MOD3000_WARM 0x0bb9 +#define USB_PID_HAMA_DVBT_HYBRID 0x2758 +#define USB_PID_HANFTEK_UMT_010_COLD 0x0001 +#define USB_PID_HANFTEK_UMT_010_WARM 0x0015 +#define USB_PID_HAUPPAUGE_MAX_S2 0xd900 +#define USB_PID_HAUPPAUGE_MYTV_T 0x7080 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200 +#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941 +#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950 +#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070 +#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200 +#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210 +#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_COLD 0x9300 +#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_WARM 0x9301 +#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_COLD 0x5000 +#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_WARM 0x5001 +#define USB_PID_INTEL_CE9500 0x9500 +#define USB_PID_ITETECH_IT9135 0x9135 +#define USB_PID_ITETECH_IT9135_9005 0x9005 +#define USB_PID_ITETECH_IT9135_9006 0x9006 +#define USB_PID_ITETECH_IT9303 0x9306 +#define USB_PID_KWORLD_395U 0xe396 +#define USB_PID_KWORLD_395U_2 0xe39b +#define USB_PID_KWORLD_395U_3 0xe395 +#define USB_PID_KWORLD_395U_4 0xe39a +#define USB_PID_KWORLD_399U 0xe399 +#define USB_PID_KWORLD_399U_2 0xe400 +#define USB_PID_KWORLD_MC810 0xc810 +#define USB_PID_KWORLD_PC160_2T 0xc160 +#define USB_PID_KWORLD_PC160_T 0xc161 +#define USB_PID_KWORLD_UB383_T 0xe383 +#define USB_PID_KWORLD_UB499_2T_T09 0xe409 +#define USB_PID_KWORLD_VSTREAM_COLD 0x17de +#define USB_PID_KYE_DVB_T_COLD 0x701e 
+#define USB_PID_KYE_DVB_T_WARM 0x701f +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_COLD 0x6025 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_H 0x60f6 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P 0x6f00 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_WARM 0x6026 +#define USB_PID_LITEON_DVB_T_COLD 0xf000 +#define USB_PID_LITEON_DVB_T_WARM 0xf001 +#define USB_PID_MEDION_CREATIX_CTX1921 0x1921 +#define USB_PID_MEDION_MD95700 0x0932 +#define USB_PID_MICROSOFT_XBOX_ONE_TUNER 0x02d5 +#define USB_PID_MIGLIA_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_MSI_DIGIVOX_DUO 0x8801 +#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807 +#define USB_PID_MSI_MEGASKY580 0x5580 +#define USB_PID_MSI_MEGASKY580_55801 0x5581 +#define USB_PID_MYGICA_D689 0xd811 +#define USB_PID_MYGICA_T230 0xc688 +#define USB_PID_MYGICA_T230A 0x689a +#define USB_PID_MYGICA_T230C 0xc689 +#define USB_PID_MYGICA_T230C2 0xc68a +#define USB_PID_MYGICA_T230C2_LITE 0xc69a +#define USB_PID_MYGICA_T230C_LITE 0xc699 +#define USB_PID_NOXON_DAB_STICK 0x00b3 +#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0 +#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4 +#define USB_PID_OPERA1_WARM 0x3829 +#define USB_PID_PCTV_2002E 0x025c +#define USB_PID_PCTV_2002E_SE 0x025d +#define USB_PID_PCTV_200E 0x020e +#define USB_PID_PCTV_78E 0x025a +#define USB_PID_PCTV_79E 0x0262 +#define USB_PID_PCTV_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_PCTV_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PCTV_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e +#define USB_PID_PINNACLE_PCTV2000E 0x022c +#define USB_PID_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PINNACLE_PCTV340E 0x023d +#define USB_PID_PINNACLE_PCTV340E_SE 0x023e +#define USB_PID_PINNACLE_PCTV71E 0x022b +#define USB_PID_PINNACLE_PCTV72E 0x0236 +#define USB_PID_PINNACLE_PCTV73A 0x0243 +#define USB_PID_PINNACLE_PCTV73E 0x0237 +#define USB_PID_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_PCTV74E 0x0246 +#define 
USB_PID_PINNACLE_PCTV801E 0x023a +#define USB_PID_PINNACLE_PCTV801E_SE 0x023b +#define USB_PID_PINNACLE_PCTV_400E 0x020f +#define USB_PID_PINNACLE_PCTV_450E 0x0222 +#define USB_PID_PINNACLE_PCTV_452E 0x021f +#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229 +#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228 +#define USB_PID_PIXELVIEW_SBTVD 0x5010 +#define USB_PID_PROF_1100 0xb012 +#define USB_PID_PROF_7500 0x7500 +#define USB_PID_PROLECTRIX_DV107669 0xd803 +#define USB_PID_REALTEK_RTL2831U 0x2831 +#define USB_PID_REALTEK_RTL2832U 0x2832 +#define USB_PID_SIGMATEK_DVB_110 0x6610 +#define USB_PID_SONY_PLAYTV 0x0003 +#define USB_PID_SVEON_STV20 0xe39d +#define USB_PID_SVEON_STV20_RTL2832U 0xd39d +#define USB_PID_SVEON_STV21 0xd3b0 +#define USB_PID_SVEON_STV22 0xe401 +#define USB_PID_SVEON_STV22_IT9137 0xe411 +#define USB_PID_SVEON_STV27 0xd3af +#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004 +#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003 +#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500 +#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001 +#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015 +#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d +#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006 +#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009 +#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007 +#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a +#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011 +#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017 +#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014 +#define USB_PID_TELESTAR_STARSTICK_2 0x8000 +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081 +#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060 +#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058 +#define USB_PID_TERRATEC_CINERGY_S 0x0064 +#define USB_PID_TERRATEC_CINERGY_S2_1 
0x1181 +#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182 +#define USB_PID_TERRATEC_CINERGY_S2_BOX 0x0105 +#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8 +#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0 +#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102 +#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105 +#define USB_PID_TERRATEC_CINERGY_T2 0x0038 +#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2 +#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062 +#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093 +#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9 +#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099 +#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069 +#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078 +#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab +#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4 +#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac +#define USB_PID_TERRATEC_H7 0x10b4 +#define USB_PID_TERRATEC_H7_2 0x10a3 +#define USB_PID_TERRATEC_H7_3 0x10a5 +#define USB_PID_TERRATEC_T1 0x10ae +#define USB_PID_TERRATEC_T3 0x10a0 +#define USB_PID_TERRATEC_T5 0x10a1 +#define USB_PID_TEVII_S421 0xd421 +#define USB_PID_TEVII_S480_1 0xd481 +#define USB_PID_TEVII_S480_2 0xd482 +#define USB_PID_TEVII_S482_1 0xd483 +#define USB_PID_TEVII_S482_2 0xd484 +#define USB_PID_TEVII_S630 0xd630 +#define USB_PID_TEVII_S632 0xd632 +#define USB_PID_TEVII_S650 0xd650 +#define USB_PID_TEVII_S660 0xd660 +#define USB_PID_TEVII_S662 0xd662 +#define USB_PID_TINYTWIN 0x3226 +#define USB_PID_TINYTWIN_2 0xe402 +#define USB_PID_TINYTWIN_3 0x9016 +#define USB_PID_TREKSTOR_DVBT 0x901b +#define USB_PID_TREKSTOR_TERRES_2_0 0xC803 +#define USB_PID_TURBOX_DTT_2000 0xd3a4 +#define USB_PID_TWINHAN_VP7021_WARM 0x3208 +#define USB_PID_TWINHAN_VP7041_COLD 0x3201 +#define USB_PID_TWINHAN_VP7041_WARM 0x3202 +#define USB_PID_ULTIMA_ARTEC_T14BR 0x810f +#define USB_PID_ULTIMA_ARTEC_T14_COLD 0x810b +#define 
USB_PID_ULTIMA_ARTEC_T14_WARM 0x810c +#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107 +#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108 +#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235 +#define USB_PID_ULTIMA_TVBOX_COLD 0x8105 +#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002 +#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a +#define USB_PID_ULTIMA_TVBOX_WARM 0x8106 +#define USB_PID_UNIWILL_STK7700P 0x6003 +#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e +#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f +#define USB_PID_VISIONPLUS_PINNACLE_PCTV310E 0x3211 +#define USB_PID_VISIONPLUS_TINYUSB2_COLD 0x3223 +#define USB_PID_VISIONPLUS_TINYUSB2_WARM 0x3224 +#define USB_PID_VISIONPLUS_VP7020_COLD 0x3203 +#define USB_PID_VISIONPLUS_VP7020_WARM 0x3204 +#define USB_PID_VISIONPLUS_VP7021_COLD 0x3207 +#define USB_PID_VISIONPLUS_VP7041_COLD 0x3201 +#define USB_PID_VISIONPLUS_VP7041_WARM 0x3202 +#define USB_PID_VISIONPLUS_VP7045_COLD 0x3205 +#define USB_PID_VISIONPLUS_VP7045_WARM 0x3206 +#define USB_PID_WIDEVIEW_DTT200U_COLD 0x0201 +#define USB_PID_WIDEVIEW_DTT200U_WARM 0x0301 +#define USB_PID_WIDEVIEW_DVBT_USB_COLD 0x0001 +#define USB_PID_WIDEVIEW_DVBT_USB_WARM 0x0002 +#define USB_PID_WIDEVIEW_WT220U_COLD 0x0222 +#define USB_PID_WIDEVIEW_WT220U_FC_COLD 0x0225 +#define USB_PID_WIDEVIEW_WT220U_FC_WARM 0x0226 +#define USB_PID_WIDEVIEW_WT220U_WARM 0x0221 +#define USB_PID_WIDEVIEW_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_WIDEVIEW_WT220U_ZL0353_COLD 0x022a +#define USB_PID_WIDEVIEW_WT220U_ZL0353_WARM 0x022b +#define USB_PID_WINFAST_DTV2000DS 0x6a04 +#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12 +#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029 +#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f +#define USB_PID_WINTV_SOLOHD 0x0264 +#define USB_PID_WINTV_SOLOHD_2 0x8268 +#define USB_PID_XTENSIONS_XD_380 0x0381 +#define USB_PID_YUAN_EC372S 0x1edc +#define USB_PID_YUAN_MC770 0x0871 +#define 
USB_PID_YUAN_PD378S 0x2edc +#define USB_PID_YUAN_STK7700D 0x1efc +#define USB_PID_YUAN_STK7700D_2 0x1e8c +#define USB_PID_YUAN_STK7700PH 0x1f08 + +#endif diff --git a/6.12.0/include-overrides/media/dvb_ca_en50221.h b/6.12.0/include-overrides/media/dvb_ca_en50221.h new file mode 100644 index 0000000..a1c014b --- /dev/null +++ b/6.12.0/include-overrides/media/dvb_ca_en50221.h @@ -0,0 +1,142 @@ +/* + * dvb_ca.h: generic DVB functions for EN50221 CA interfaces + * + * Copyright (C) 2004 Andrew de Quincey + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DVB_CA_EN50221_H_ +#define _DVB_CA_EN50221_H_ + +#include +#include + +#include + +#define DVB_CA_EN50221_POLL_CAM_PRESENT 1 +#define DVB_CA_EN50221_POLL_CAM_CHANGED 2 +#define DVB_CA_EN50221_POLL_CAM_READY 4 + +#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1 +#define DVB_CA_EN50221_FLAG_IRQ_FR 2 +#define DVB_CA_EN50221_FLAG_IRQ_DA 4 + +#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0 +#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1 + +/** + * struct dvb_ca_en50221- Structure describing a CA interface + * + * @owner: the module owning this structure + * @read_attribute_mem: function for reading attribute memory on the CAM + * @write_attribute_mem: function for writing attribute memory on the CAM + * @read_cam_control: function for reading the control interface on the CAM + * @write_cam_control: function for reading the control interface on the CAM + * @read_data: function for reading data (block mode) + * @write_data: function for writing data (block mode) + * @slot_reset: function to reset the CAM slot + * @slot_shutdown: function to shutdown a CAM slot + * @slot_ts_enable: function to enable the Transport Stream on a CAM slot + * @poll_slot_status: function to poll slot status. Only necessary if + * DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set. + * @data: private data, used by caller. + * @private: Opaque data used by the dvb_ca core. Do not modify! + * + * NOTE: the read_*, write_* and poll_slot_status functions will be + * called for different slots concurrently and need to use locks where + * and if appropriate. There will be no concurrent access to one slot. 
+ */ +struct dvb_ca_en50221 { + struct module *owner; + + int (*read_attribute_mem)(struct dvb_ca_en50221 *ca, + int slot, int address); + int (*write_attribute_mem)(struct dvb_ca_en50221 *ca, + int slot, int address, u8 value); + + int (*read_cam_control)(struct dvb_ca_en50221 *ca, + int slot, u8 address); + int (*write_cam_control)(struct dvb_ca_en50221 *ca, + int slot, u8 address, u8 value); + + int (*read_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*write_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + + int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); + int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); + int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); + + int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open); + + void *data; + + void *private; +}; + +/* + * Functions for reporting IRQ events + */ + +/** + * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + * @change_type: One of the DVB_CA_CAMCHANGE_* values + */ +void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, + int change_type); + +/** + * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot); + +/** + * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot); + +/* + * Initialisation/shutdown functions + */ + +/** + * dvb_ca_en50221_init - Initialise a new DVB CA device. + * + * @dvb_adapter: DVB adapter to attach the new CA device to. + * @ca: The dvb_ca instance. + * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*). + * @slot_count: Number of slots supported. 
+ * + * @return 0 on success, nonzero on failure + */ +int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221 *ca, int flags, + int slot_count); + +/** + * dvb_ca_en50221_release - Release a DVB CA device. + * + * @ca: The associated dvb_ca instance. + */ +void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca); + +#endif diff --git a/6.12.0/include-overrides/media/dvb_demux.h b/6.12.0/include-overrides/media/dvb_demux.h new file mode 100644 index 0000000..3b6aeca --- /dev/null +++ b/6.12.0/include-overrides/media/dvb_demux.h @@ -0,0 +1,354 @@ +/* + * dvb_demux.h: DVB kernel demux API + * + * Copyright (C) 2000-2001 Marcus Metzler & Ralph Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_DEMUX_H_ +#define _DVB_DEMUX_H_ + +#include +#include +#include +#include + +#include + +/** + * enum dvb_dmx_filter_type - type of demux feed. + * + * @DMX_TYPE_TS: feed is in TS mode. + * @DMX_TYPE_SEC: feed is in Section mode. + */ +enum dvb_dmx_filter_type { + DMX_TYPE_TS, + DMX_TYPE_SEC, +}; + +/** + * enum dvb_dmx_state - state machine for a demux filter. + * + * @DMX_STATE_FREE: indicates that the filter is freed. + * @DMX_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMX_STATE_READY: indicates that the filter is ready + * to be used. + * @DMX_STATE_GO: indicates that the filter is running. 
+ */ +enum dvb_dmx_state { + DMX_STATE_FREE, + DMX_STATE_ALLOCATED, + DMX_STATE_READY, + DMX_STATE_GO, +}; + +#define DVB_DEMUX_MASK_MAX 18 + +#define MAX_PID 0x1fff + +#define SPEED_PKTS_INTERVAL 50000 + +/** + * struct dvb_demux_filter - Describes a DVB demux section filter. + * + * @filter: Section filter as defined by &struct dmx_section_filter. + * @maskandmode: logical ``and`` bit mask. + * @maskandnotmode: logical ``and not`` bit mask. + * @doneq: flag that indicates when a filter is ready. + * @next: pointer to the next section filter. + * @feed: &struct dvb_demux_feed pointer. + * @index: index of the used demux filter. + * @state: state of the filter as described by &enum dvb_dmx_state. + * @type: type of the filter as described + * by &enum dvb_dmx_filter_type. + */ + +struct dvb_demux_filter { + struct dmx_section_filter filter; + u8 maskandmode[DMX_MAX_FILTER_SIZE]; + u8 maskandnotmode[DMX_MAX_FILTER_SIZE]; + bool doneq; + + struct dvb_demux_filter *next; + struct dvb_demux_feed *feed; + int index; + enum dvb_dmx_state state; + enum dvb_dmx_filter_type type; + + /* private: used only by av7110 */ + u16 hw_handle; +}; + +/** + * struct dvb_demux_feed - describes a DVB field + * + * @feed: a union describing a digital TV feed. + * Depending on the feed type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct dmx_ts_feed pointer. + * For TS feed only. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @cb: a union describing digital TV callbacks. + * Depending on the feed type, it can be either + * @cb.ts or @cb.sec. + * @cb.ts: a dmx_ts_cb() calback function pointer. + * For TS feed only. + * @cb.sec: a dmx_section_cb() callback function pointer. + * For section feed only. + * @demux: pointer to &struct dvb_demux. + * @priv: private data that can optionally be used by a DVB driver. + * @type: type of the filter, as defined by &enum dvb_dmx_filter_type. 
+ * @state: state of the filter as defined by &enum dvb_dmx_state. + * @pid: PID to be filtered. + * @timeout: feed timeout. + * @filter: pointer to &struct dvb_demux_filter. + * @buffer_flags: Buffer flags used to report discontinuity users via DVB + * memory mapped API, as defined by &enum dmx_buffer_flags. + * @ts_type: type of TS, as defined by &enum ts_filter_type. + * @pes_type: type of PES, as defined by &enum dmx_ts_pes. + * @cc: MPEG-TS packet continuity counter + * @pusi_seen: if true, indicates that a discontinuity was detected. + * it is used to prevent feeding of garbage from previous section. + * @peslen: length of the PES (Packet Elementary Stream). + * @list_head: head for the list of digital TV demux feeds. + * @index: a unique index for each feed. Can be used as hardware + * pid filter index. + */ +struct dvb_demux_feed { + union { + struct dmx_ts_feed ts; + struct dmx_section_feed sec; + } feed; + + union { + dmx_ts_cb ts; + dmx_section_cb sec; + } cb; + + struct dvb_demux *demux; + void *priv; + enum dvb_dmx_filter_type type; + enum dvb_dmx_state state; + u16 pid; + + ktime_t timeout; + struct dvb_demux_filter *filter; + + u32 buffer_flags; + + enum ts_filter_type ts_type; + enum dmx_ts_pes pes_type; + + int cc; + bool pusi_seen; + + u16 peslen; + + struct list_head list_head; + unsigned int index; +}; + +/** + * struct dvb_demux - represents a digital TV demux + * @dmx: embedded &struct dmx_demux with demux capabilities + * and callbacks. + * @priv: private data that can optionally be used by + * a DVB driver. + * @filternum: maximum amount of DVB filters. + * @feednum: maximum amount of DVB feeds. + * @start_feed: callback routine to be called in order to start + * a DVB feed. + * @stop_feed: callback routine to be called in order to stop + * a DVB feed. + * @write_to_decoder: callback routine to be called if the feed is TS and + * it is routed to an A/V decoder, when a new TS packet + * is received. + * Used only on av7110-av.c. 
+ * @check_crc32: callback routine to check CRC. If not initialized, + * dvb_demux will use an internal one. + * @memcopy: callback routine to memcopy received data. + * If not initialized, dvb_demux will default to memcpy(). + * @users: counter for the number of demux opened file descriptors. + * Currently, it is limited to 10 users. + * @filter: pointer to &struct dvb_demux_filter. + * @feed: pointer to &struct dvb_demux_feed. + * @frontend_list: &struct list_head with frontends used by the demux. + * @pesfilter: array of &struct dvb_demux_feed with the PES types + * that will be filtered. + * @pids: list of filtered program IDs. + * @feed_list: &struct list_head with feeds. + * @tsbuf: temporary buffer used internally to store TS packets. + * @tsbufp: temporary buffer index used internally. + * @mutex: pointer to &struct mutex used to protect feed set + * logic. + * @lock: pointer to &spinlock_t, used to protect buffer handling. + * @cnt_storage: buffer used for TS/TEI continuity check. + * @speed_last_time: &ktime_t used for TS speed check. + * @speed_pkts_cnt: packets count used for TS speed check. 
+ */ +struct dvb_demux { + struct dmx_demux dmx; + void *priv; + int filternum; + int feednum; + int (*start_feed)(struct dvb_demux_feed *feed); + int (*stop_feed)(struct dvb_demux_feed *feed); + int (*write_to_decoder)(struct dvb_demux_feed *feed, + const u8 *buf, size_t len); + u32 (*check_crc32)(struct dvb_demux_feed *feed, + const u8 *buf, size_t len); + void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst, + const u8 *src, size_t len); + + int users; +#define MAX_DVB_DEMUX_USERS 10 + struct dvb_demux_filter *filter; + struct dvb_demux_feed *feed; + + struct list_head frontend_list; + + struct dvb_demux_feed *pesfilter[DMX_PES_OTHER]; + u16 pids[DMX_PES_OTHER]; + +#define DMX_MAX_PID 0x2000 + struct list_head feed_list; + u8 tsbuf[204]; + int tsbufp; + + struct mutex mutex; + spinlock_t lock; + + uint8_t *cnt_storage; /* for TS continuity check */ + + ktime_t speed_last_time; /* for TS speed check */ + uint32_t speed_pkts_cnt; /* for TS speed check */ + + /* private: used only on av7110 */ + int playing; + int recording; +}; + +/** + * dvb_dmx_init - initialize a digital TV demux struct. + * + * @demux: &struct dvb_demux to be initialized. + * + * Before being able to register a digital TV demux struct, drivers + * should call this routine. On its typical usage, some fields should + * be initialized at the driver before calling it. + * + * A typical usecase is:: + * + * dvb->demux.dmx.capabilities = + * DMX_TS_FILTERING | DMX_SECTION_FILTERING | + * DMX_MEMORY_BASED_FILTERING; + * dvb->demux.priv = dvb; + * dvb->demux.filternum = 256; + * dvb->demux.feednum = 256; + * dvb->demux.start_feed = driver_start_feed; + * dvb->demux.stop_feed = driver_stop_feed; + * ret = dvb_dmx_init(&dvb->demux); + * if (ret < 0) + * return ret; + */ +int dvb_dmx_init(struct dvb_demux *demux); + +/** + * dvb_dmx_release - releases a digital TV demux internal buffers. + * + * @demux: &struct dvb_demux to be released. + * + * The DVB core internally allocates data at @demux. 
This routine + * releases those data. Please notice that the struct itself is not + * released, as it can be embedded on other structs. + */ +void dvb_dmx_release(struct dvb_demux *demux); + +/** + * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 188 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 188. + * + * The routine will discard a DVB packet that doesn't start with 0x47. + * + * Use this routine if the DVB demux fills MPEG-TS buffers that are + * already aligned. + * + * NOTE: The @buf size should have size equal to ``count * 188``. + */ +void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, + size_t count); + +/** + * dvb_dmx_swfilter - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 188 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 188. + * + * If a DVB packet doesn't start with 0x47, it will seek for the first + * byte that starts with 0x47. + * + * Use this routine if the DVB demux fills buffers that may not start with + * a packet start mark (0x47). + * + * NOTE: The @buf size should have size equal to ``count * 188``. + */ +void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count); + +/** + * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 204 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 204. + * + * If a DVB packet doesn't start with 0x47, it will seek for the first + * byte that starts with 0x47. + * + * Use this routine if the DVB demux fills buffers that may not start with + * a packet start mark (0x47). + * + * NOTE: The @buf size should have size equal to ``count * 204``.
+ */ +void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, + size_t count); + +/** + * dvb_dmx_swfilter_raw - make the raw data available to userspace without + * filtering + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data + * @count: number of packets to be passed. The actual size of each packet + * depends on the &dvb_demux->feed->cb.ts logic. + * + * Use it if the driver needs to deliver the raw payload to userspace without + * passing through the kernel demux. That is meant to support some + * delivery systems that aren't based on MPEG-TS. + * + * This function relies on &dvb_demux->feed->cb.ts to actually handle the + * buffer. + */ +void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, + size_t count); + +#endif /* _DVB_DEMUX_H_ */ diff --git a/6.12.0/include-overrides/media/dvb_frontend.h b/6.12.0/include-overrides/media/dvb_frontend.h new file mode 100644 index 0000000..e7c4487 --- /dev/null +++ b/6.12.0/include-overrides/media/dvb_frontend.h @@ -0,0 +1,834 @@ +/* + * dvb_frontend.h + * + * The Digital TV Frontend kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * frontend layer. + * + * Copyright (C) 2001 convergence integrated media GmbH + * Copyright (C) 2004 convergence GmbH + * + * Written by Ralph Metzler + * Overhauled by Holger Waechtler + * Kernel I2C stuff by Michael Hunold + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVB_FRONTEND_H_ +#define _DVB_FRONTEND_H_ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/ioctl.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/bitops.h> + +#include <linux/dvb/frontend.h> + +#include <media/dvbdev.h> + +/* + * Maximum number of Delivery systems per frontend. It + * should be smaller or equal to 32 + */ +#define MAX_DELSYS 8 + +/* Helper definitions to be used at frontend drivers */ +#define kHz 1000UL +#define MHz 1000000UL + +/** + * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning + * + * @min_delay_ms: minimum delay for tuning, in ms + * @step_size: step size between two consecutive frequencies + * @max_drift: maximum drift + * + * NOTE: step_size is in Hz, for terrestrial/cable or kHz for satellite + */ +struct dvb_frontend_tune_settings { + int min_delay_ms; + int step_size; + int max_drift; +}; + +struct dvb_frontend; + +/** + * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths + * + * @name: name of the Frontend + * @frequency_min_hz: minimal frequency supported in Hz + * @frequency_max_hz: maximum frequency supported in Hz + * @frequency_step_hz: frequency step in Hz + * @bandwidth_min: minimal frontend bandwidth supported + * @bandwidth_max: maximum frontend bandwidth supported + * @bandwidth_step: frontend bandwidth step + */ +struct dvb_tuner_info { + char name[128]; + + u32 frequency_min_hz; + u32 frequency_max_hz; + u32 frequency_step_hz; + + u32 bandwidth_min; + u32 bandwidth_max; + u32 bandwidth_step; +}; + +/** + * struct analog_parameters - Parameters to tune into an analog/radio channel + * + * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz step, + * for TV, or 62.5 Hz for radio) + * @mode: Tuner mode, as defined on enum v4l2_tuner_type + * @audmode: Audio mode as defined for
the rxsubchans field at videodev2.h, + * e. g. V4L2_TUNER_MODE_* + * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_* + * + * Hybrid tuners should be supported by both V4L2 and DVB APIs. This + * struct contains the data that are used by the V4L2 side. To avoid + * dependencies from V4L2 headers, all enums here are declared as integers. + */ +struct analog_parameters { + unsigned int frequency; + unsigned int mode; + unsigned int audmode; + u64 std; +}; + +/** + * enum dvbfe_algo - defines the algorithm used to tune into a channel + * + * @DVBFE_ALGO_HW: Hardware Algorithm - + * Devices that support this algorithm do everything in hardware + * and no software support is needed to handle them. + * Requesting these devices to LOCK is the only thing required, + * device is supposed to do everything in the hardware. + * + * @DVBFE_ALGO_SW: Software Algorithm - + * These are dumb devices, that require software to do everything + * + * @DVBFE_ALGO_CUSTOM: Customizable Algorithm - + * Devices having this algorithm can be customized to have specific + * algorithms in the frontend driver, rather than simply doing a + * software zig-zag. In this case the zigzag may be hardware assisted + * or it may be completely done in hardware. In all cases, usage of + * this algorithm, in conjunction with the search and track + * callbacks, utilizes the driver specific algorithm.
+ * + * @DVBFE_ALGO_RECOVERY: Recovery Algorithm - + * These devices have AUTO recovery capabilities from LOCK failure + */ +enum dvbfe_algo { + DVBFE_ALGO_HW = BIT(0), + DVBFE_ALGO_SW = BIT(1), + DVBFE_ALGO_CUSTOM = BIT(2), + DVBFE_ALGO_RECOVERY = BIT(31), +}; + +/** + * enum dvbfe_search - search callback possible return status + * + * @DVBFE_ALGO_SEARCH_SUCCESS: + * The frontend search algorithm completed and returned successfully + * + * @DVBFE_ALGO_SEARCH_ASLEEP: + * The frontend search algorithm is sleeping + * + * @DVBFE_ALGO_SEARCH_FAILED: + * The frontend search for a signal failed + * + * @DVBFE_ALGO_SEARCH_INVALID: + * The frontend search algorithm was probably supplied with invalid + * parameters and the search is an invalid one + * + * @DVBFE_ALGO_SEARCH_ERROR: + * The frontend search algorithm failed due to some error + * + * @DVBFE_ALGO_SEARCH_AGAIN: + * The frontend search algorithm was requested to search again + */ +enum dvbfe_search { + DVBFE_ALGO_SEARCH_SUCCESS = BIT(0), + DVBFE_ALGO_SEARCH_ASLEEP = BIT(1), + DVBFE_ALGO_SEARCH_FAILED = BIT(2), + DVBFE_ALGO_SEARCH_INVALID = BIT(3), + DVBFE_ALGO_SEARCH_AGAIN = BIT(4), + DVBFE_ALGO_SEARCH_ERROR = BIT(31), +}; + +/** + * struct dvb_tuner_ops - Tuner information and callbacks + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @set_params: callback function used to inform the tuner to tune + * into a digital TV channel. The properties to be used + * are stored at &struct dvb_frontend.dtv_property_cache. 
+ * The tuner demod can change the parameters to reflect + * the changes needed for the channel to be tuned, and + * update statistics. This is the recommended way to set + * the tuner parameters and should be used on newer + * drivers. + * @set_analog_params: callback function used to tune into an analog TV + * channel on hybrid tuners. It passes @analog_parameters + * to the driver. + * @set_config: callback function used to send some tuner-specific + * parameters. + * @get_frequency: get the actual tuned frequency + * @get_bandwidth: get the bandwidth used by the low pass filters + * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband, + * should return 0. + * @get_status: returns the frontend lock status + * @get_rf_strength: returns the RF signal strength. Used mostly to support + * analog TV and radio. Digital TV should report, instead, + * via DVBv5 API (&struct dvb_frontend.dtv_property_cache). + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @calc_regs: callback function used to pass register data settings + * for simple tuners. Shouldn't be used on newer drivers. + * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers. + * @set_bandwidth: Set a new bandwidth. Shouldn't be used on newer drivers. + * + * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for + * terrestrial/cable or kHz for satellite.
+ * + */ +struct dvb_tuner_ops { + + struct dvb_tuner_info info; + + void (*release)(struct dvb_frontend *fe); + int (*init)(struct dvb_frontend *fe); + int (*sleep)(struct dvb_frontend *fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + /* This is the recommended way to set the tuner */ + int (*set_params)(struct dvb_frontend *fe); + int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p); + + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); + + int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency); + int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth); + int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency); + +#define TUNER_STATUS_LOCKED 1 +#define TUNER_STATUS_STEREO 2 + int (*get_status)(struct dvb_frontend *fe, u32 *status); + int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + + /* + * This is support for demods like the mt352 - fills out the supplied + * buffer with what to write. + * + * Don't use on newer drivers. + */ + int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len); + + /* + * These are provided separately from set_params in order to + * facilitate silicon tuners which require sophisticated tuning loops, + * controlling each parameter separately. + * + * Don't use on newer drivers. 
+ */ + int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); + int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); +}; + +/** + * struct analog_demod_info - Information struct for analog TV part of the demod + * + * @name: Name of the analog TV demodulator + */ +struct analog_demod_info { + char *name; +}; + +/** + * struct analog_demod_ops - Demodulation information and callbacks for + * analog TV and radio + * + * @info: pointer to struct analog_demod_info + * @set_params: callback function used to inform the demod to set the + * demodulator parameters needed to decode an analog or + * radio channel. The properties are passed via + * &struct analog_params. + * @has_signal: returns 0xffff if has signal, or 0 if it doesn't. + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @tuner_status: callback function that returns tuner status bits, e. g. + * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO. + * @standby: set the tuner to standby mode. + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @set_config: callback function used to send some tuner-specific + * parameters. 
+ */ +struct analog_demod_ops { + + struct analog_demod_info info; + + void (*set_params)(struct dvb_frontend *fe, + struct analog_parameters *params); + int (*has_signal)(struct dvb_frontend *fe, u16 *signal); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + void (*tuner_status)(struct dvb_frontend *fe); + void (*standby)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend *fe); + int (*i2c_gate_ctrl)(struct dvb_frontend *fe, int enable); + + /** This is to allow setting tuner-specific configuration */ + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); +}; + +struct dtv_frontend_properties; + +/** + * struct dvb_frontend_internal_info - Frontend properties and capabilities + * + * @name: Name of the frontend + * @frequency_min_hz: Minimal frequency supported by the frontend. + * @frequency_max_hz: Maximal frequency supported by the frontend. + * @frequency_stepsize_hz: All frequencies are multiple of this value. + * @frequency_tolerance_hz: Frequency tolerance. + * @symbol_rate_min: Minimal symbol rate, in bauds + * (for Cable/Satellite systems). + * @symbol_rate_max: Maximal symbol rate, in bauds + * (for Cable/Satellite systems). + * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm + * (for Cable/Satellite systems). + * @caps: Capabilities supported by the frontend, + * as specified in &enum fe_caps. + */ +struct dvb_frontend_internal_info { + char name[128]; + u32 frequency_min_hz; + u32 frequency_max_hz; + u32 frequency_stepsize_hz; + u32 frequency_tolerance_hz; + u32 symbol_rate_min; + u32 symbol_rate_max; + u32 symbol_rate_tolerance; + enum fe_caps caps; +}; + +/** + * struct dvb_frontend_ops - Demodulation information and callbacks for + * digital TV + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @delsys: Delivery systems supported by the frontend + * @detach: callback function called when frontend is detached.
+ * drivers should clean up, but not yet free the &struct + * dvb_frontend allocation. + * @release: callback function called when frontend is ready to be + * freed. + * drivers should free any allocated memory. + * @release_sec: callback function requesting that the Satellite Equipment + * Control (SEC) driver to release and free any memory + * allocated by the driver. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @write: callback function used by some demod legacy drivers to + * allow other drivers to write data into their registers. + * Should not be used on new drivers. + * @tune: callback function used by demod drivers that use + * @DVBFE_ALGO_HW to tune into a frequency. + * @get_frontend_algo: returns the desired hardware algorithm. + * @set_frontend: callback function used to inform the demod to set the + * parameters for demodulating a digital TV channel. + * The properties to be used are stored at &struct + * dvb_frontend.dtv_property_cache. The demod can change + * the parameters to reflect the changes needed for the + * channel to be decoded, and update statistics. + * @get_tune_settings: callback function + * @get_frontend: callback function used to inform the parameters + * actually in use. The properties to be used are stored at + * &struct dvb_frontend.dtv_property_cache and update + * statistics. Please notice that it should not return + * an error code if the statistics are not available + * because the demod is not locked. + * @read_status: returns the locking status of the frontend. + * @read_ber: legacy callback function to return the bit error rate. + * Newer drivers should provide such info via DVBv5 API, + * e. g.
@set_frontend;/@get_frontend, implementing this + * callback only if DVBv3 API compatibility is wanted. + * @read_signal_strength: legacy callback function to return the signal + * strength. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @read_snr: legacy callback function to return the Signal/Noise + * rate. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @read_ucblocks: legacy callback function to return the Uncorrected Error + * Blocks. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @diseqc_reset_overload: callback function to implement the + * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite) + * @diseqc_send_master_cmd: callback function to implement the + * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite). + * @diseqc_recv_slave_reply: callback function to implement the + * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite) + * @diseqc_send_burst: callback function to implement the + * FE_DISEQC_SEND_BURST() ioctl (only Satellite). + * @set_tone: callback function to implement the + * FE_SET_TONE() ioctl (only Satellite). + * @set_voltage: callback function to implement the + * FE_SET_VOLTAGE() ioctl (only Satellite). + * @enable_high_lnb_voltage: callback function to implement the + * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite). + * @dishnetwork_send_legacy_command: callback function to implement the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite). + * Drivers should not use this, except when the DVB + * core emulation fails to provide proper support (e.g. 
+ * if @set_voltage takes more than 8ms to work), and + * when backward compatibility with this legacy API is + * required. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @ts_bus_ctrl: callback function used to take control of the TS bus. + * @set_lna: callback function to power on/off/auto the LNA. + * @search: callback function used on some custom algo search algos. + * @tuner_ops: pointer to &struct dvb_tuner_ops + * @analog_ops: pointer to &struct analog_demod_ops + */ +struct dvb_frontend_ops { + struct dvb_frontend_internal_info info; + + u8 delsys[MAX_DELSYS]; + + void (*detach)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend* fe); + void (*release_sec)(struct dvb_frontend* fe); + + int (*init)(struct dvb_frontend* fe); + int (*sleep)(struct dvb_frontend* fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + int (*write)(struct dvb_frontend* fe, const u8 buf[], int len); + + /* if this is set, it overrides the default swzigzag */ + int (*tune)(struct dvb_frontend* fe, + bool re_tune, + unsigned int mode_flags, + unsigned int *delay, + enum fe_status *status); + + /* get frontend tuning algorithm from the module */ + enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe); + + /* these two are only used for the swzigzag code */ + int (*set_frontend)(struct dvb_frontend *fe); + int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings); + + int (*get_frontend)(struct dvb_frontend *fe, + struct dtv_frontend_properties *props); + + int (*read_status)(struct dvb_frontend *fe, enum fe_status *status); + int (*read_ber)(struct dvb_frontend* fe, u32* ber); + int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength); + int (*read_snr)(struct dvb_frontend* fe, u16* snr); + int (*read_ucblocks)(struct dvb_frontend* fe, u32* ucblocks); + + int (*diseqc_reset_overload)(struct dvb_frontend* fe); + int 
(*diseqc_send_master_cmd)(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd); + int (*diseqc_recv_slave_reply)(struct dvb_frontend* fe, struct dvb_diseqc_slave_reply* reply); + int (*diseqc_send_burst)(struct dvb_frontend *fe, + enum fe_sec_mini_cmd minicmd); + int (*set_tone)(struct dvb_frontend *fe, enum fe_sec_tone_mode tone); + int (*set_voltage)(struct dvb_frontend *fe, + enum fe_sec_voltage voltage); + int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg); + int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd); + int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable); + int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); + int (*set_lna)(struct dvb_frontend *); + + /* + * These callbacks are for devices that implement their own + * tuning algorithms, rather than a simple swzigzag + */ + enum dvbfe_search (*search)(struct dvb_frontend *fe); + + struct dvb_tuner_ops tuner_ops; + struct analog_demod_ops analog_ops; +}; + +#ifdef __DVB_CORE__ +#define MAX_EVENT 8 + +/* Used only internally at dvb_frontend.c */ +struct dvb_fe_events { + struct dvb_frontend_event events[MAX_EVENT]; + int eventw; + int eventr; + int overflow; + wait_queue_head_t wait_queue; + struct mutex mtx; +}; +#endif + +/** + * struct dtv_frontend_properties - contains a list of properties that are + * specific to a digital TV standard. + * + * @frequency: frequency in Hz for terrestrial/cable or in kHz for + * Satellite + * @modulation: Frontend modulation type + * @voltage: SEC voltage (only Satellite) + * @sectone: SEC tone mode (only Satellite) + * @inversion: Spectral inversion + * @fec_inner: Forward error correction inner Code Rate + * @transmission_mode: Transmission Mode + * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace + * wants to autodetect. 
+ * @guard_interval: Guard Interval + * @hierarchy: Hierarchy + * @symbol_rate: Symbol Rate + * @code_rate_HP: high priority stream code rate + * @code_rate_LP: low priority stream code rate + * @pilot: Enable/disable/autodetect pilot tones + * @rolloff: Rolloff factor (alpha) + * @delivery_system: FE delivery system (e. g. digital TV standard) + * @interleaving: interleaving + * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard) + * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard) + * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard) + * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard) + * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard) + * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard) + * @layer: ISDB per-layer data (only ISDB standard) + * @layer.segment_count: Segment Count; + * @layer.fec: per layer code rate; + * @layer.modulation: per layer modulation; + * @layer.interleaving: per layer interleaving. + * @stream_id: If different than zero, enable substream filtering, if + * hardware supports (DVB-S2 and DVB-T2). + * @scrambling_sequence_index: Carries the index of the DVB-S2 physical layer + * scrambling sequence. 
+ * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel) + * signaling data (only ATSC-M/H) + * @atscmh_parade_id: Parade identification number (only ATSC-M/H) + * @atscmh_nog: Number of MH groups per MH subframe for a designated + * parade (only ATSC-M/H) + * @atscmh_tnog: Total number of MH groups including all MH groups + * belonging to all MH parades in one MH subframe + * (only ATSC-M/H) + * @atscmh_sgn: Start group number (only ATSC-M/H) + * @atscmh_prc: Parade repetition cycle (only ATSC-M/H) + * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H) + * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H) + * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H) + * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H) + * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC) + * Block Mode (only ATSC-M/H) + * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H) + * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H) + * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H) + * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H) + * @lna: Power ON/OFF/AUTO the Low-noise Amplifier (LNA) + * @strength: DVBv5 API statistics: Signal Strength + * @cnr: DVBv5 API statistics: Signal to Noise ratio of the + * (main) carrier + * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count + * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count + * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count + * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count + * @block_error: DVBv5 API statistics: block error count + * @block_count: DVBv5 API statistics: block count + * + * NOTE: derived statistics like Uncorrected Error blocks (UCE) are + * calculated on userspace. + * + * Only a subset of the properties are needed for a given delivery system.
+ * For more info, consult the media_api.html with the documentation of the + * Userspace API. + */ +struct dtv_frontend_properties { + u32 frequency; + enum fe_modulation modulation; + + enum fe_sec_voltage voltage; + enum fe_sec_tone_mode sectone; + enum fe_spectral_inversion inversion; + enum fe_code_rate fec_inner; + enum fe_transmit_mode transmission_mode; + u32 bandwidth_hz; /* 0 = AUTO */ + enum fe_guard_interval guard_interval; + enum fe_hierarchy hierarchy; + u32 symbol_rate; + enum fe_code_rate code_rate_HP; + enum fe_code_rate code_rate_LP; + + enum fe_pilot pilot; + enum fe_rolloff rolloff; + + enum fe_delivery_system delivery_system; + + enum fe_interleaving interleaving; + + /* ISDB-T specifics */ + u8 isdbt_partial_reception; + u8 isdbt_sb_mode; + u8 isdbt_sb_subchannel; + u32 isdbt_sb_segment_idx; + u32 isdbt_sb_segment_count; + u8 isdbt_layer_enabled; + struct { + u8 segment_count; + enum fe_code_rate fec; + enum fe_modulation modulation; + u8 interleaving; + } layer[3]; + + /* Multistream specifics */ + u32 stream_id; + + /* Physical Layer Scrambling specifics */ + u32 scrambling_sequence_index; + + /* ATSC-MH specifics */ + u8 atscmh_fic_ver; + u8 atscmh_parade_id; + u8 atscmh_nog; + u8 atscmh_tnog; + u8 atscmh_sgn; + u8 atscmh_prc; + + u8 atscmh_rs_frame_mode; + u8 atscmh_rs_frame_ensemble; + u8 atscmh_rs_code_mode_pri; + u8 atscmh_rs_code_mode_sec; + u8 atscmh_sccc_block_mode; + u8 atscmh_sccc_code_mode_a; + u8 atscmh_sccc_code_mode_b; + u8 atscmh_sccc_code_mode_c; + u8 atscmh_sccc_code_mode_d; + + u32 lna; + + /* statistics data */ + struct dtv_fe_stats strength; + struct dtv_fe_stats cnr; + struct dtv_fe_stats pre_bit_error; + struct dtv_fe_stats pre_bit_count; + struct dtv_fe_stats post_bit_error; + struct dtv_fe_stats post_bit_count; + struct dtv_fe_stats block_error; + struct dtv_fe_stats block_count; +}; + +#define DVB_FE_NO_EXIT 0 +#define DVB_FE_NORMAL_EXIT 1 +#define DVB_FE_DEVICE_REMOVED 2 +#define DVB_FE_DEVICE_RESUME 3 + +/** + * 
struct dvb_frontend - Frontend structure to be used on drivers. + * + * @refcount: refcount to keep track of &struct dvb_frontend + * references + * @ops: embedded &struct dvb_frontend_ops + * @dvb: pointer to &struct dvb_adapter + * @demodulator_priv: demod private data + * @tuner_priv: tuner private data + * @frontend_priv: frontend private data + * @sec_priv: SEC private data + * @analog_demod_priv: Analog demod private data + * @dtv_property_cache: embedded &struct dtv_frontend_properties + * @callback: callback function used on some drivers to call + * either the tuner or the demodulator. + * @id: Frontend ID + * @exit: Used to inform the DVB core that the frontend + * thread should exit (usually, means that the hardware + * got disconnected. + */ + +struct dvb_frontend { + struct kref refcount; + struct dvb_frontend_ops ops; + struct dvb_adapter *dvb; + void *demodulator_priv; + void *tuner_priv; + void *frontend_priv; + void *sec_priv; + void *analog_demod_priv; + struct dtv_frontend_properties dtv_property_cache; +#define DVB_FRONTEND_COMPONENT_TUNER 0 +#define DVB_FRONTEND_COMPONENT_DEMOD 1 + int (*callback)(void *adapter_priv, int component, int cmd, int arg); + int id; + unsigned int exit; +}; + +/** + * dvb_register_frontend() - Registers a DVB frontend at the adapter + * + * @dvb: pointer to &struct dvb_adapter + * @fe: pointer to &struct dvb_frontend + * + * Allocate and initialize the private data needed by the frontend core to + * manage the frontend and calls dvb_register_device() to register a new + * frontend. It also cleans the property cache that stores the frontend + * parameters and selects the first available delivery system. 
+ */ +int dvb_register_frontend(struct dvb_adapter *dvb, + struct dvb_frontend *fe); + +/** + * dvb_unregister_frontend() - Unregisters a DVB frontend + * + * @fe: pointer to &struct dvb_frontend + * + * Stops the frontend kthread, calls dvb_unregister_device() and frees the + * private frontend data allocated by dvb_register_frontend(). + * + * NOTE: This function doesn't frees the memory allocated by the demod, + * by the SEC driver and by the tuner. In order to free it, an explicit call to + * dvb_frontend_detach() is needed, after calling this function. + */ +int dvb_unregister_frontend(struct dvb_frontend *fe); + +/** + * dvb_frontend_detach() - Detaches and frees frontend specific data + * + * @fe: pointer to &struct dvb_frontend + * + * This function should be called after dvb_unregister_frontend(). It + * calls the SEC, tuner and demod release functions: + * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release, + * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release. + * + * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases + * the module reference count, needed to allow userspace to remove the + * previously used DVB frontend modules. + */ +void dvb_frontend_detach(struct dvb_frontend *fe); + +/** + * dvb_frontend_suspend() - Suspends a Digital TV frontend + * + * @fe: pointer to &struct dvb_frontend + * + * This function prepares a Digital TV frontend to suspend. + * + * In order to prepare the tuner to suspend, if + * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise, + * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available. + * + * It will also call &dvb_frontend_ops.suspend\(\) to put the demod to suspend, + * if available. Otherwise it will call &dvb_frontend_ops.sleep\(\). + * + * The drivers should also call dvb_frontend_suspend\(\) as part of their + * handler for the &device_driver.suspend\(\). 
+ */ +int dvb_frontend_suspend(struct dvb_frontend *fe); + +/** + * dvb_frontend_resume() - Resumes a Digital TV frontend + * + * @fe: pointer to &struct dvb_frontend + * + * This function resumes the usual operation of the tuner after resume. + * + * In order to resume the frontend, it calls the demod + * &dvb_frontend_ops.resume\(\) if available. Otherwise it calls demod + * &dvb_frontend_ops.init\(\). + * + * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, It, it calls it. + * Otherwise,t will call &dvb_frontend_ops.tuner_ops.init\(\), if available. + * + * Once tuner and demods are resumed, it will enforce that the SEC voltage and + * tone are restored to their previous values and wake up the frontend's + * kthread in order to retune the frontend. + * + * The drivers should also call dvb_frontend_resume() as part of their + * handler for the &device_driver.resume\(\). + */ +int dvb_frontend_resume(struct dvb_frontend *fe); + +/** + * dvb_frontend_reinitialise() - forces a reinitialisation at the frontend + * + * @fe: pointer to &struct dvb_frontend + * + * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\), + * and resets SEC tone and voltage (for Satellite systems). + * + * NOTE: Currently, this function is used only by one driver (budget-av). + * It seems to be due to address some special issue with that specific + * frontend. + */ +void dvb_frontend_reinitialise(struct dvb_frontend *fe); + +/** + * dvb_frontend_sleep_until() - Sleep for the amount of time given by + * add_usec parameter + * + * @waketime: pointer to &struct ktime_t + * @add_usec: time to sleep, in microseconds + * + * This function is used to measure the time required for the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise + * as possible, as it affects the detection of the dish tone command at the + * satellite subsystem. 
+ * + * Its used internally by the DVB frontend core, in order to emulate + * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\) + * callback. + * + * NOTE: it should not be used at the drivers, as the emulation for the + * legacy callback is provided by the Kernel. The only situation where this + * should be at the drivers is when there are some bugs at the hardware that + * would prevent the core emulation to work. On such cases, the driver would + * be writing a &dvb_frontend_ops.dishnetwork_send_legacy_command\(\) and + * calling this function directly. + */ +void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec); + +#endif diff --git a/6.12.0/include-overrides/media/dvb_net.h b/6.12.0/include-overrides/media/dvb_net.h new file mode 100644 index 0000000..4a921ea --- /dev/null +++ b/6.12.0/include-overrides/media/dvb_net.h @@ -0,0 +1,95 @@ +/* + * dvb_net.h + * + * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_NET_H_ +#define _DVB_NET_H_ + +#include + +#include + +struct net_device; + +#define DVB_NET_DEVICES_MAX 10 + +#ifdef CONFIG_DVB_NET + +/** + * struct dvb_net - describes a DVB network interface + * + * @dvbdev: pointer to &struct dvb_device. + * @device: array of pointers to &struct net_device. + * @state: array of integers to each net device. A value + * different than zero means that the interface is + * in usage. 
+ * @exit: flag to indicate when the device is being removed. + * @demux: pointer to &struct dmx_demux. + * @ioctl_mutex: protect access to this struct. + * @remove_mutex: mutex that avoids a race condition between a callback + * called when the hardware is disconnected and the + * file_operations of dvb_net. + * + * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network + * devices. + */ + +struct dvb_net { + struct dvb_device *dvbdev; + struct net_device *device[DVB_NET_DEVICES_MAX]; + int state[DVB_NET_DEVICES_MAX]; + unsigned int exit:1; + struct dmx_demux *demux; + struct mutex ioctl_mutex; + struct mutex remove_mutex; +}; + +/** + * dvb_net_init - nitializes a digital TV network device and registers it. + * + * @adap: pointer to &struct dvb_adapter. + * @dvbnet: pointer to &struct dvb_net. + * @dmxdemux: pointer to &struct dmx_demux. + */ +int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet, + struct dmx_demux *dmxdemux); + +/** + * dvb_net_release - releases a digital TV network device and unregisters it. + * + * @dvbnet: pointer to &struct dvb_net. 
+ */ +void dvb_net_release(struct dvb_net *dvbnet); + +#else + +struct dvb_net { + struct dvb_device *dvbdev; +}; + +static inline void dvb_net_release(struct dvb_net *dvbnet) +{ +} + +static inline int dvb_net_init(struct dvb_adapter *adap, + struct dvb_net *dvbnet, struct dmx_demux *dmx) +{ + return 0; +} + +#endif /* ifdef CONFIG_DVB_NET */ + +#endif diff --git a/6.12.0/include-overrides/media/dvb_ringbuffer.h b/6.12.0/include-overrides/media/dvb_ringbuffer.h new file mode 100644 index 0000000..029c8b6 --- /dev/null +++ b/6.12.0/include-overrides/media/dvb_ringbuffer.h @@ -0,0 +1,280 @@ +/* + * + * dvb_ringbuffer.h: ring buffer implementation for the dvb driver + * + * Copyright (C) 2003 Oliver Endriss + * Copyright (C) 2004 Andrew de Quincey + * + * based on code originally found in av7110.c & dvb_ci.c: + * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + */ + +#ifndef _DVB_RINGBUFFER_H_ +#define _DVB_RINGBUFFER_H_ + +#include +#include + +/** + * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework + * + * @data: Area were the ringbuffer data is written + * @size: size of the ringbuffer + * @pread: next position to read + * @pwrite: next position to write + * @error: used by ringbuffer clients to indicate that an error happened. 
+ * @queue: Wait queue used by ringbuffer clients to indicate when buffer + * was filled + * @lock: Spinlock used to protect the ringbuffer + */ +struct dvb_ringbuffer { + u8 *data; + ssize_t size; + ssize_t pread; + ssize_t pwrite; + int error; + + wait_queue_head_t queue; + spinlock_t lock; +}; + +#define DVB_RINGBUFFER_PKTHDRSIZE 3 + +/** + * dvb_ringbuffer_init - initialize ring buffer, lock and queue + * + * @rbuf: pointer to struct dvb_ringbuffer + * @data: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + */ +extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, + size_t len); + +/** + * dvb_ringbuffer_empty - test whether buffer is empty + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_free - returns the number of free bytes in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of free bytes in the buffer + */ +extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of bytes waiting in the buffer + */ +extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_reset - resets the ringbuffer to initial state + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Resets the read and write pointers to zero and flush the buffer. 
+ * + * This counts as a read and write operation + */ +extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf); + +/* + * read routines & macros + */ + +/** + * dvb_ringbuffer_flush - flush buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock + * and wake-up waiting task(s) + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf); + +/** + * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @offs: offset inside the ringbuffer + */ +#define DVB_RINGBUFFER_PEEK(rbuf, offs) \ + ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size]) + +/** + * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes + * + * @rbuf: pointer to struct dvb_ringbuffer + * @num: number of bytes to advance + */ +#define DVB_RINGBUFFER_SKIP(rbuf, num) {\ + (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\ +} + +/** + * dvb_ringbuffer_read_user - Reads a buffer into a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_to_user(). 
+ * + * Return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, + u8 __user *buf, size_t len); + +/** + * dvb_ringbuffer_read - Reads a buffer into a pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the Kernel space + * + * Return: number of bytes transferred or -EFAULT + */ +extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, + u8 *buf, size_t len); + +/* + * write routines & macros + */ + +/** + * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @byte: byte to write + */ +#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \ + { (rbuf)->data[(rbuf)->pwrite] = (byte); \ + (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; } + +/** + * dvb_ringbuffer_write - Writes a buffer into the ringbuffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be read + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the Kernel space + * + * return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be read + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_from_user(). + * + * Return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, + const u8 __user *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer. 
+ * + * @rbuf: Ringbuffer to write to. + * @buf: Buffer to write. + * @len: Length of buffer (currently limited to 65535 bytes max). + * + * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL. + */ +extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + * + * .. note:: + * + * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer + * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a + * packet as no longer required. + */ +extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, + size_t idx, + int offset, u8 __user *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer. + * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer + * in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + */ +extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, + int offset, u8 *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer. + * + * @rbuf: Ring buffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + */ +extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx); + +/** + * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer. + * + * @rbuf: Ringbuffer concerned. 
+ * @idx: Previous packet index, or -1 to return the first packet index. + * @pktlen: On success, will be updated to contain the length of the packet + * in bytes. + * returns Packet index (if >=0), or -1 if no packets available. + */ +extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, + size_t idx, size_t *pktlen); + +#endif /* _DVB_RINGBUFFER_H_ */ diff --git a/6.12.0/include-overrides/media/dvb_vb2.h b/6.12.0/include-overrides/media/dvb_vb2.h new file mode 100644 index 0000000..8cb8845 --- /dev/null +++ b/6.12.0/include-overrides/media/dvb_vb2.h @@ -0,0 +1,280 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * dvb-vb2.h - DVB driver helper framework for streaming I/O + * + * Copyright (C) 2015 Samsung Electronics + * + * Author: jh1009.sung@samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _DVB_VB2_H +#define _DVB_VB2_H + +#include +#include +#include +#include +#include +#include + +/** + * enum dvb_buf_type - types of Digital TV memory-mapped buffers + * + * @DVB_BUF_TYPE_CAPTURE: buffer is filled by the Kernel, + * with a received Digital TV stream + */ +enum dvb_buf_type { + DVB_BUF_TYPE_CAPTURE = 1, +}; + +/** + * enum dvb_vb2_states - states to control VB2 state machine + * @DVB_VB2_STATE_NONE: + * VB2 engine not initialized yet, init failed or VB2 was released. + * @DVB_VB2_STATE_INIT: + * VB2 engine initialized. + * @DVB_VB2_STATE_REQBUFS: + * Buffers were requested + * @DVB_VB2_STATE_STREAMON: + * VB2 is streaming. Callers should not check it directly. Instead, + * they should use dvb_vb2_is_streaming(). + * + * Note: + * + * Callers should not touch at the state machine directly. This + * is handled inside dvb_vb2.c. 
+ */ +enum dvb_vb2_states { + DVB_VB2_STATE_NONE = 0x0, + DVB_VB2_STATE_INIT = 0x1, + DVB_VB2_STATE_REQBUFS = 0x2, + DVB_VB2_STATE_STREAMON = 0x4, +}; + +#define DVB_VB2_NAME_MAX (20) + +/** + * struct dvb_buffer - video buffer information for v4l2. + * + * @vb: embedded struct &vb2_buffer. + * @list: list of &struct dvb_buffer. + */ +struct dvb_buffer { + struct vb2_buffer vb; + struct list_head list; +}; + +/** + * struct dvb_vb2_ctx - control struct for VB2 handler + * @vb_q: pointer to &struct vb2_queue with videobuf2 queue. + * @mutex: mutex to serialize vb2 operations. Used by + * vb2 core %wait_prepare and %wait_finish operations. + * @slock: spin lock used to protect buffer filling at dvb_vb2.c. + * @dvb_q: List of buffers that are not filled yet. + * @buf: Pointer to the buffer that are currently being filled. + * @offset: index to the next position at the @buf to be filled. + * @remain: How many bytes are left to be filled at @buf. + * @state: bitmask of buffer states as defined by &enum dvb_vb2_states. + * @buf_siz: size of each VB2 buffer. + * @buf_cnt: number of VB2 buffers. + * @nonblocking: + * If different than zero, device is operating on non-blocking + * mode. + * @flags: buffer flags as defined by &enum dmx_buffer_flags. + * Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field. + * @count: monotonic counter for filled buffers. Helps to identify + * data stream loses. Filled only at &DMX_DQBUF. &DMX_QBUF should + * zero this field. + * + * @name: name of the device type. Currently, it can either be + * "dvr" or "demux_filter". 
+ */ +struct dvb_vb2_ctx { + struct vb2_queue vb_q; + struct mutex mutex; + spinlock_t slock; + struct list_head dvb_q; + struct dvb_buffer *buf; + int offset; + int remain; + int state; + int buf_siz; + int buf_cnt; + int nonblocking; + + enum dmx_buffer_flags flags; + u32 count; + + char name[DVB_VB2_NAME_MAX + 1]; +}; + +#ifndef CONFIG_DVB_MMAP +static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, + const char *name, int non_blocking) +{ + return 0; +}; +static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx) +{ + return 0; +}; +#define dvb_vb2_is_streaming(ctx) (0) +#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0) + +static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, + struct file *file, + poll_table *wait) +{ + return 0; +} +#else +/** + * dvb_vb2_init - initializes VB2 handler + * + * @ctx: control struct for VB2 handler + * @name: name for the VB2 handler + * @non_blocking: + * if not zero, it means that the device is at non-blocking mode + */ +int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking); + +/** + * dvb_vb2_release - Releases the VB2 handler allocated resources and + * put @ctx at DVB_VB2_STATE_NONE state. + * @ctx: control struct for VB2 handler + */ +int dvb_vb2_release(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_is_streaming - checks if the VB2 handler is streaming + * @ctx: control struct for VB2 handler + * + * Return: 0 if not streaming, 1 otherwise. + */ +int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_fill_buffer - fills a VB2 buffer + * @ctx: control struct for VB2 handler + * @src: place where the data is stored + * @len: number of bytes to be copied from @src + * @buffer_flags: + * pointer to buffer flags as defined by &enum dmx_buffer_flags. + * can be NULL. 
+ */ +int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, + const unsigned char *src, int len, + enum dmx_buffer_flags *buffer_flags); + +/** + * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @file: &struct file argument passed to the poll + * file operation handler. + * @wait: &poll_table wait argument passed to the poll + * file operation handler. + * + * Implements poll syscall() logic. + */ +__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file, + poll_table *wait); +#endif + +/** + * dvb_vb2_stream_on() - Wrapper to vb2_core_streamon() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * + * Starts dvb streaming + */ +int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx); +/** + * dvb_vb2_stream_off() - Wrapper to vb2_core_streamoff() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * + * Stops dvb streaming + */ +int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_reqbufs() - Wrapper to vb2_core_reqbufs() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @req: &struct dmx_requestbuffers passed from userspace in + * order to handle &DMX_REQBUFS. + * + * Initiate streaming by requesting a number of buffers. Also used to + * free previously requested buffers, is ``req->count`` is zero. + */ +int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req); + +/** + * dvb_vb2_querybuf() - Wrapper to vb2_core_querybuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QUERYBUF. + * + * + */ +int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_expbuf() - Wrapper to vb2_core_expbuf() for Digital TV + * buffer handling. 
+ * + * @ctx: control struct for VB2 handler + * @exp: &struct dmx_exportbuffer passed from userspace in + * order to handle &DMX_EXPBUF. + * + * Export a buffer as a file descriptor. + */ +int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp); + +/** + * dvb_vb2_qbuf() - Wrapper to vb2_core_qbuf() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QBUF. + * + * Queue a Digital TV buffer as requested by userspace + */ +int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_dqbuf() - Wrapper to vb2_core_dqbuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_DQBUF. + * + * Dequeue a Digital TV buffer to the userspace + */ +int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_mmap() - Wrapper to vb2_mmap() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * map Digital TV video buffers into application address space. + */ +int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma); + +#endif /* _DVB_VB2_H */ diff --git a/6.12.0/include-overrides/media/dvbdev.h b/6.12.0/include-overrides/media/dvbdev.h new file mode 100644 index 0000000..e5a00d1 --- /dev/null +++ b/6.12.0/include-overrides/media/dvbdev.h @@ -0,0 +1,493 @@ +/* + * dvbdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVBDEV_H_ +#define _DVBDEV_H_ + +#include +#include +#include +#include +#include + +#define DVB_MAJOR 212 + +#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0 + #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS +#else + #define DVB_MAX_ADAPTERS 16 +#endif + +#define DVB_UNSET (-1) + +/* List of DVB device types */ + +/** + * enum dvb_device_type - type of the Digital TV device + * + * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI) + * @DVB_DEVICE_FRONTEND: Digital TV frontend. + * @DVB_DEVICE_DEMUX: Digital TV demux. + * @DVB_DEVICE_DVR: Digital TV digital video record (DVR). + * @DVB_DEVICE_CA: Digital TV Conditional Access (CA). + * @DVB_DEVICE_NET: Digital TV network. + * + * @DVB_DEVICE_VIDEO: Digital TV video decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_AUDIO: Digital TV audio decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD). + * Deprecated. Used only on av7110. + */ +enum dvb_device_type { + DVB_DEVICE_SEC, + DVB_DEVICE_FRONTEND, + DVB_DEVICE_DEMUX, + DVB_DEVICE_DVR, + DVB_DEVICE_CA, + DVB_DEVICE_NET, + + DVB_DEVICE_VIDEO, + DVB_DEVICE_AUDIO, + DVB_DEVICE_OSD, +}; + +#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \ + static short adapter_nr[] = \ + {[0 ... 
(DVB_MAX_ADAPTERS - 1)] = DVB_UNSET }; \ + module_param_array(adapter_nr, short, NULL, 0444); \ + MODULE_PARM_DESC(adapter_nr, "DVB adapter numbers") + +struct dvb_frontend; + +/** + * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API + * + * @num: Number of the adapter + * @list_head: List with the DVB adapters + * @device_list: List with the DVB devices + * @name: Name of the adapter + * @proposed_mac: proposed MAC address for the adapter + * @priv: private data + * @device: pointer to struct device + * @module: pointer to struct module + * @mfe_shared: indicates mutually exclusive frontends. + * 1 = legacy exclusion behavior: blocking any open() call + * 2 = enhanced exclusion behavior, emulating the standard + * behavior of busy frontends: allowing read-only sharing + * and otherwise returning immediately with -EBUSY when any + * of the frontends is already opened with write access. + * @mfe_dvbdev: Frontend device in use, in the case of MFE + * @mfe_lock: Lock to prevent using the other frontends when MFE is + * used. + * @mdev_lock: Protect access to the mdev pointer. + * @mdev: pointer to struct media_device, used when the media + * controller is used. + * @conn: RF connector. Used only if the device has no separate + * tuner. 
+ * @conn_pads: pointer to struct media_pad associated with @conn; + */ +struct dvb_adapter { + int num; + struct list_head list_head; + struct list_head device_list; + const char *name; + u8 proposed_mac [6]; + void* priv; + + struct device *device; + + struct module *module; + + int mfe_shared; /* indicates mutually exclusive frontends */ + struct dvb_device *mfe_dvbdev; /* frontend device in use */ + struct mutex mfe_lock; /* access lock for thread creation */ + +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + struct mutex mdev_lock; + struct media_device *mdev; + struct media_entity *conn; + struct media_pad *conn_pads; +#endif +}; + +/** + * struct dvb_device - represents a DVB device node + * + * @list_head: List head with all DVB devices + * @ref: reference count for this device + * @fops: pointer to struct file_operations + * @adapter: pointer to the adapter that holds this device node + * @type: type of the device, as defined by &enum dvb_device_type. + * @minor: devnode minor number. Major number is always DVB_MAJOR. + * @id: device ID number, inside the adapter + * @readers: Initialized by the caller. Each call to open() in Read Only mode + * decreases this counter by one. + * @writers: Initialized by the caller. Each call to open() in Read/Write + * mode decreases this counter by one. + * @users: Initialized by the caller. Each call to open() in any mode + * decreases this counter by one. + * @wait_queue: wait queue, used to wait for certain events inside one of + * the DVB API callers + * @kernel_ioctl: callback function used to handle ioctl calls from userspace. + * @name: Name to be used for the device at the Media Controller + * @entity: pointer to struct media_entity associated with the device node + * @pads: pointer to struct media_pad associated with @entity; + * @priv: private data + * @intf_devnode: Pointer to media_intf_devnode. 
Used by the dvbdev core to + * store the MC device node interface + * @tsout_num_entities: Number of Transport Stream output entities + * @tsout_entity: array with MC entities associated to each TS output node + * @tsout_pads: array with the source pads for each @tsout_entity + * + * This structure is used by the DVB core (frontend, CA, net, demux) in + * order to create the device nodes. Usually, driver should not initialize + * this struct diretly. + */ +struct dvb_device { + struct list_head list_head; + struct kref ref; + const struct file_operations *fops; + struct dvb_adapter *adapter; + enum dvb_device_type type; + int minor; + u32 id; + + /* in theory, 'users' can vanish now, + but I don't want to change too much now... */ + int readers; + int writers; + int users; + + wait_queue_head_t wait_queue; + /* don't really need those !? -- FIXME: use video_usercopy */ + int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg); + + /* Needed for media controller register/unregister */ +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + const char *name; + + /* Allocated and filled inside dvbdev.c */ + struct media_intf_devnode *intf_devnode; + + unsigned tsout_num_entities; + struct media_entity *entity, *tsout_entity; + struct media_pad *pads, *tsout_pads; +#endif + + void *priv; +}; + +/** + * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list + * + * @fops: Dynamically allocated fops for ->owner registration + * @type: type of dvb_device + * @template: dvb_device used for registration + * @list_head: list_head for dvbdevfops_list + */ +struct dvbdevfops_node { + struct file_operations *fops; + enum dvb_device_type type; + const struct dvb_device *template; + struct list_head list_head; +}; + +/** + * dvb_device_get - Increase dvb_device reference + * + * @dvbdev: pointer to struct dvb_device + */ +struct dvb_device *dvb_device_get(struct dvb_device *dvbdev); + +/** + * dvb_device_put - Decrease dvb_device reference + * + * @dvbdev: pointer to 
struct dvb_device + */ +void dvb_device_put(struct dvb_device *dvbdev); + +/** + * dvb_register_adapter - Registers a new DVB adapter + * + * @adap: pointer to struct dvb_adapter + * @name: Adapter's name + * @module: initialized with THIS_MODULE at the caller + * @device: pointer to struct device that corresponds to the device driver + * @adapter_nums: Array with a list of the numbers for @dvb_register_adapter; + * to select among them. Typically, initialized with: + * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums) + */ +int dvb_register_adapter(struct dvb_adapter *adap, const char *name, + struct module *module, struct device *device, + short *adapter_nums); + +/** + * dvb_unregister_adapter - Unregisters a DVB adapter + * + * @adap: pointer to struct dvb_adapter + */ +int dvb_unregister_adapter(struct dvb_adapter *adap); + +/** + * dvb_register_device - Registers a new DVB device + * + * @adap: pointer to struct dvb_adapter + * @pdvbdev: pointer to the place where the new struct dvb_device will be + * stored + * @template: Template used to create &pdvbdev; + * @priv: private data + * @type: type of the device, as defined by &enum dvb_device_type. + * @demux_sink_pads: Number of demux outputs, to be used to create the TS + * outputs via the Media Controller. + */ +int dvb_register_device(struct dvb_adapter *adap, + struct dvb_device **pdvbdev, + const struct dvb_device *template, + void *priv, + enum dvb_device_type type, + int demux_sink_pads); + +/** + * dvb_remove_device - Remove a registered DVB device + * + * @dvbdev: pointer to struct dvb_device + * + * This does not free memory. 
dvb_free_device() will do that when + * reference counter is empty + */ +void dvb_remove_device(struct dvb_device *dvbdev); + + +/** + * dvb_unregister_device - Unregisters a DVB device + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_unregister_device(struct dvb_device *dvbdev); + +#ifdef CONFIG_MEDIA_CONTROLLER_DVB +/** + * dvb_create_media_graph - Creates media graph for the Digital TV part of the + * device. + * + * @adap: pointer to &struct dvb_adapter + * @create_rf_connector: if true, it creates the RF connector too + * + * This function checks all DVB-related functions at the media controller + * entities and creates the needed links for the media graph. It is + * capable of working with multiple tuners or multiple frontends, but it + * won't create links if the device has multiple tuners and multiple frontends + * or if the device has multiple muxes. In such case, the caller driver should + * manually create the remaining links. + */ +__must_check int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector); + +/** + * dvb_register_media_controller - registers a media controller at DVB adapter + * + * @adap: pointer to &struct dvb_adapter + * @mdev: pointer to &struct media_device + */ +static inline void dvb_register_media_controller(struct dvb_adapter *adap, + struct media_device *mdev) +{ + adap->mdev = mdev; +} + +/** + * dvb_get_media_controller - gets the associated media controller + * + * @adap: pointer to &struct dvb_adapter + */ +static inline struct media_device * +dvb_get_media_controller(struct dvb_adapter *adap) +{ + return adap->mdev; +} +#else +static inline +int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector) +{ + return 0; +}; +#define dvb_register_media_controller(a, b) {} +#define dvb_get_media_controller(a) NULL +#endif + +/** + * dvb_generic_open - Digital TV open function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. 
+ * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and increment negative use count. + */ +int dvb_generic_open(struct inode *inode, struct file *file); + +/** + * dvb_generic_release - Digital TV close function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. + * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and decrement negative use count. + */ +int dvb_generic_release(struct inode *inode, struct file *file); + +/** + * dvb_generic_ioctl - Digital TV close function, used by DVB devices + * + * @file: pointer to &struct file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Checks if a DVB devnode and struct dvbdev.kernel_ioctl is still valid. + * If so, calls dvb_usercopy(). + */ +long dvb_generic_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +/** + * dvb_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will actually handle the ioctl + * + * Ancillary function that uses ioctl direction and size to copy from + * userspace. Then, it calls @func, and, if needed, data is copied back + * to userspace. + */ +int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, unsigned int cmd, void *arg)); + +#if IS_ENABLED(CONFIG_I2C) + +struct i2c_adapter; +struct i2c_client; +/** + * dvb_module_probe - helper routine to probe an I2C module + * + * @module_name: + * Name of the I2C module to be probed + * @name: + * Optional name for the I2C module. Used for debug purposes. + * If %NULL, defaults to @module_name. + * @adap: + * pointer to &struct i2c_adapter that describes the I2C adapter where + * the module will be bound. + * @addr: + * I2C address of the adapter, in 7-bit notation. 
+ * @platform_data: + * Platform data to be passed to the I2C module probed. + * + * This function binds an I2C device into the DVB core. Should be used by + * all drivers that use I2C bus to control the hardware. A module bound + * with dvb_module_probe() should use dvb_module_release() to unbind. + * + * Return: + * On success, return an &struct i2c_client, pointing to the bound + * I2C device. %NULL otherwise. + * + * .. note:: + * + * In the past, DVB modules (mainly, frontends) were bound via dvb_attach() + * macro, with does an ugly hack, using I2C low level functions. Such + * usage is deprecated and will be removed soon. Instead, use this routine. + */ +struct i2c_client *dvb_module_probe(const char *module_name, + const char *name, + struct i2c_adapter *adap, + unsigned char addr, + void *platform_data); + +/** + * dvb_module_release - releases an I2C device allocated with + * dvb_module_probe(). + * + * @client: pointer to &struct i2c_client with the I2C client to be released. + * can be %NULL. + * + * This function should be used to free all resources reserved by + * dvb_module_probe() and unbinding the I2C hardware. + */ +void dvb_module_release(struct i2c_client *client); + +#endif /* CONFIG_I2C */ + +/* Legacy generic DVB attach function. */ +#ifdef CONFIG_MEDIA_ATTACH + +/** + * dvb_attach - attaches a DVB frontend into the DVB core. + * + * @FUNCTION: function on a frontend module to be called. + * @ARGS: @FUNCTION arguments. + * + * This ancillary function loads a frontend module in runtime and runs + * the @FUNCTION function there, with @ARGS. + * As it increments symbol usage cont, at unregister, dvb_detach() + * should be called. + * + * .. note:: + * + * In the past, DVB modules (mainly, frontends) were bound via dvb_attach() + * macro, with does an ugly hack, using I2C low level functions. Such + * usage is deprecated and will be removed soon. Instead, you should use + * dvb_module_probe(). + */ +#define dvb_attach(FUNCTION, ARGS...) 
({ \ + void *__r = NULL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (void *) __a(ARGS); \ + if (__r == NULL) \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "DVB: Unable to find symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +/** + * dvb_detach - detaches a DVB frontend loaded via dvb_attach() + * + * @FUNC: attach function + * + * Decrements usage count for a function previously called via dvb_attach(). + */ + +#define dvb_detach(FUNC) symbol_put_addr(FUNC) + +#else +#define dvb_attach(FUNCTION, ARGS...) ({ \ + FUNCTION(ARGS); \ +}) + +#define dvb_detach(FUNC) {} + +#endif /* CONFIG_MEDIA_ATTACH */ + +#endif /* #ifndef _DVBDEV_H_ */ diff --git a/6.12.0/include-overrides/media/frame_vector.h b/6.12.0/include-overrides/media/frame_vector.h new file mode 100644 index 0000000..541c71a --- /dev/null +++ b/6.12.0/include-overrides/media/frame_vector.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _MEDIA_FRAME_VECTOR_H +#define _MEDIA_FRAME_VECTOR_H + +/* Container for pinned pfns / pages in frame_vector.c */ +struct frame_vector { + unsigned int nr_allocated; /* Number of frames we have space for */ + unsigned int nr_frames; /* Number of frames stored in ptrs array */ + bool got_ref; /* Did we pin pages by getting page ref? */ + bool is_pfns; /* Does array contain pages or pfns? */ + void *ptrs[]; /* Array of pinned pfns / pages. 
Use + * pfns_vector_pages() or pfns_vector_pfns() + * for access */ +}; + +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + bool write, struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} + +#endif /* _MEDIA_FRAME_VECTOR_H */ diff --git a/6.12.0/include-overrides/media/imx.h b/6.12.0/include-overrides/media/imx.h new file mode 100644 index 0000000..5f14acd --- /dev/null +++ b/6.12.0/include-overrides/media/imx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014-2017 Mentor Graphics Inc. 
+ */ + +#ifndef __MEDIA_IMX_H__ +#define __MEDIA_IMX_H__ + +#include + +#endif diff --git a/6.12.0/include-overrides/media/ipu-bridge.h b/6.12.0/include-overrides/media/ipu-bridge.h new file mode 100644 index 0000000..16fac76 --- /dev/null +++ b/6.12.0/include-overrides/media/ipu-bridge.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Author: Dan Scally */ +#ifndef __IPU_BRIDGE_H +#define __IPU_BRIDGE_H + +#include +#include +#include +#include + +#define IPU_HID "INT343E" +#define IPU_MAX_LANES 4 +#define IPU_MAX_PORTS 4 +#define MAX_NUM_LINK_FREQS 3 + +/* Values are educated guesses as we don't have a spec */ +#define IPU_SENSOR_ROTATION_NORMAL 0 +#define IPU_SENSOR_ROTATION_INVERTED 1 + +#define IPU_SENSOR_CONFIG(_HID, _NR, ...) \ + (const struct ipu_sensor_config) { \ + .hid = _HID, \ + .nr_link_freqs = _NR, \ + .link_freqs = { __VA_ARGS__ } \ + } + +#define NODE_SENSOR(_HID, _PROPS) \ + (const struct software_node) { \ + .name = _HID, \ + .properties = _PROPS, \ + } + +#define NODE_PORT(_PORT, _SENSOR_NODE) \ + (const struct software_node) { \ + .name = _PORT, \ + .parent = _SENSOR_NODE, \ + } + +#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \ + (const struct software_node) { \ + .name = _EP, \ + .parent = _PORT, \ + .properties = _PROPS, \ + } + +#define NODE_VCM(_TYPE) \ + (const struct software_node) { \ + .name = _TYPE, \ + } + +enum ipu_sensor_swnodes { + SWNODE_SENSOR_HID, + SWNODE_SENSOR_PORT, + SWNODE_SENSOR_ENDPOINT, + SWNODE_IPU_PORT, + SWNODE_IPU_ENDPOINT, + /* below are optional / maybe empty */ + SWNODE_IVSC_HID, + SWNODE_IVSC_SENSOR_PORT, + SWNODE_IVSC_SENSOR_ENDPOINT, + SWNODE_IVSC_IPU_PORT, + SWNODE_IVSC_IPU_ENDPOINT, + SWNODE_VCM, + SWNODE_COUNT +}; + +/* Data representation as it is in ACPI SSDB buffer */ +struct ipu_sensor_ssdb { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 
sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 reserved1[3]; + u8 mclkport; + u8 reserved2[13]; +} __packed; + +struct ipu_property_names { + char clock_frequency[16]; + char rotation[9]; + char orientation[12]; + char bus_type[9]; + char data_lanes[11]; + char remote_endpoint[16]; + char link_frequencies[17]; +}; + +struct ipu_node_names { + char port[7]; + char ivsc_sensor_port[7]; + char ivsc_ipu_port[7]; + char endpoint[11]; + char remote_port[9]; + char vcm[16]; +}; + +struct ipu_sensor_config { + const char *hid; + const u8 nr_link_freqs; + const u64 link_freqs[MAX_NUM_LINK_FREQS]; +}; + +struct ipu_sensor { + /* append ssdb.link(u8) in "-%u" format as suffix of HID */ + char name[ACPI_ID_LEN + 4]; + struct acpi_device *adev; + + struct device *csi_dev; + struct acpi_device *ivsc_adev; + char ivsc_name[ACPI_ID_LEN + 4]; + + /* SWNODE_COUNT + 1 for terminating NULL */ + const struct software_node *group[SWNODE_COUNT + 1]; + struct software_node swnodes[SWNODE_COUNT]; + struct ipu_node_names node_names; + + u8 link; + u8 lanes; + u32 mclkspeed; + u32 rotation; + enum v4l2_fwnode_orientation orientation; + const char *vcm_type; + + struct ipu_property_names prop_names; + struct property_entry ep_properties[5]; + struct property_entry dev_properties[5]; + struct property_entry ipu_properties[3]; + struct property_entry ivsc_properties[1]; + struct property_entry ivsc_sensor_ep_properties[4]; + struct property_entry ivsc_ipu_ep_properties[4]; + + struct software_node_ref_args local_ref[1]; + struct software_node_ref_args remote_ref[1]; + struct software_node_ref_args vcm_ref[1]; + struct software_node_ref_args ivsc_sensor_ref[1]; + struct software_node_ref_args ivsc_ipu_ref[1]; +}; + +typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev, + struct ipu_sensor *sensor); + +struct ipu_bridge { + 
struct device *dev; + ipu_parse_sensor_fwnode_t parse_sensor_fwnode; + char ipu_node_name[ACPI_ID_LEN]; + struct software_node ipu_hid_node; + u32 data_lanes[4]; + unsigned int n_sensors; + struct ipu_sensor sensors[IPU_MAX_PORTS]; +}; + +#if IS_ENABLED(CONFIG_IPU_BRIDGE) +int ipu_bridge_init(struct device *dev, + ipu_parse_sensor_fwnode_t parse_sensor_fwnode); +int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor); +int ipu_bridge_instantiate_vcm(struct device *sensor); +#else +/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */ +#define ipu_bridge_init(dev, parse_sensor_fwnode) (0) +static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; } +#endif + +#endif diff --git a/6.12.0/include-overrides/media/ipu6-pci-table.h b/6.12.0/include-overrides/media/ipu6-pci-table.h new file mode 100644 index 0000000..0899d9d --- /dev/null +++ b/6.12.0/include-overrides/media/ipu6-pci-table.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Intel Corporation + */ + +#ifndef __IPU6_PCI_TBL_H__ +#define __IPU6_PCI_TBL_H__ + +#include + +#define PCI_DEVICE_ID_INTEL_IPU6 0x9a19 +#define PCI_DEVICE_ID_INTEL_IPU6SE 0x4e19 +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLP 0x465d +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLN 0x462e +#define PCI_DEVICE_ID_INTEL_IPU6EP_RPLP 0xa75d +#define PCI_DEVICE_ID_INTEL_IPU6EP_MTL 0x7d19 + +static const struct pci_device_id ipu6_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6SE) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLN) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_RPLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_MTL) }, + { } +}; + +#endif /* __IPU6_PCI_TBL_H__ */ diff --git a/6.12.0/include-overrides/media/jpeg.h b/6.12.0/include-overrides/media/jpeg.h new file mode 100644 index 0000000..a01e142 
--- /dev/null +++ b/6.12.0/include-overrides/media/jpeg.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _MEDIA_JPEG_H_ +#define _MEDIA_JPEG_H_ + +/* JPEG markers */ +#define JPEG_MARKER_TEM 0x01 +#define JPEG_MARKER_SOF0 0xc0 +#define JPEG_MARKER_DHT 0xc4 +#define JPEG_MARKER_RST 0xd0 +#define JPEG_MARKER_SOI 0xd8 +#define JPEG_MARKER_EOI 0xd9 +#define JPEG_MARKER_SOS 0xda +#define JPEG_MARKER_DQT 0xdb +#define JPEG_MARKER_DRI 0xdd +#define JPEG_MARKER_DHP 0xde +#define JPEG_MARKER_APP0 0xe0 +#define JPEG_MARKER_COM 0xfe + +#endif /* _MEDIA_JPEG_H_ */ diff --git a/6.12.0/include-overrides/media/media-dev-allocator.h b/6.12.0/include-overrides/media/media-dev-allocator.h new file mode 100644 index 0000000..2ab54d4 --- /dev/null +++ b/6.12.0/include-overrides/media/media-dev-allocator.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * media-dev-allocator.h - Media Controller Device Allocator API + * + * Copyright (c) 2019 Shuah Khan + * + * Credits: Suggested by Laurent Pinchart + */ + +/* + * This file adds a global ref-counted Media Controller Device Instance API. + * A system wide global media device list is managed and each media device + * includes a kref count. The last put on the media device releases the media + * device instance. + */ + +#ifndef _MEDIA_DEV_ALLOCATOR_H +#define _MEDIA_DEV_ALLOCATOR_H + +struct usb_device; + +#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB) +/** + * media_device_usb_allocate() - Allocate and return struct &media device + * + * @udev: struct &usb_device pointer + * @module_name: should be filled with %KBUILD_MODNAME + * @owner: struct module pointer %THIS_MODULE for the driver. + * %THIS_MODULE is null for a built-in driver. + * It is safe even when %THIS_MODULE is null. + * + * This interface should be called to allocate a Media Device when multiple + * drivers share usb_device and the media device. 
This interface allocates + * &media_device structure and calls media_device_usb_init() to initialize + * it. + * + */ +struct media_device *media_device_usb_allocate(struct usb_device *udev, + const char *module_name, + struct module *owner); +/** + * media_device_delete() - Release media device. Calls kref_put(). + * + * @mdev: struct &media_device pointer + * @module_name: should be filled with %KBUILD_MODNAME + * @owner: struct module pointer %THIS_MODULE for the driver. + * %THIS_MODULE is null for a built-in driver. + * It is safe even when %THIS_MODULE is null. + * + * This interface should be called to put Media Device Instance kref. + */ +void media_device_delete(struct media_device *mdev, const char *module_name, + struct module *owner); +#else +static inline struct media_device *media_device_usb_allocate( + struct usb_device *udev, const char *module_name, + struct module *owner) + { return NULL; } +static inline void media_device_delete( + struct media_device *mdev, const char *module_name, + struct module *owner) { } +#endif /* CONFIG_MEDIA_CONTROLLER && CONFIG_USB */ +#endif /* _MEDIA_DEV_ALLOCATOR_H */ diff --git a/6.12.0/include-overrides/media/media-device.h b/6.12.0/include-overrides/media/media-device.h new file mode 100644 index 0000000..53d2a16 --- /dev/null +++ b/6.12.0/include-overrides/media/media-device.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media device + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + */ + +#ifndef _MEDIA_DEVICE_H +#define _MEDIA_DEVICE_H + +#include +#include +#include +#include + +#include +#include + +struct ida; +struct media_device; + +/** + * struct media_entity_notify - Media Entity Notify + * + * @list: List head + * @notify_data: Input data to invoke the callback + * @notify: Callback function pointer + * + * Drivers may register a callback to take action when new entities get + * registered with the media device. 
This handler is intended for creating + * links between existing entities and should not create entities and register + * them. + */ +struct media_entity_notify { + struct list_head list; + void *notify_data; + void (*notify)(struct media_entity *entity, void *notify_data); +}; + +/** + * struct media_device_ops - Media device operations + * @link_notify: Link state change notification callback. This callback is + * called with the graph_mutex held. + * @req_alloc: Allocate a request. Set this if you need to allocate a struct + * larger then struct media_request. @req_alloc and @req_free must + * either both be set or both be NULL. + * @req_free: Free a request. Set this if @req_alloc was set as well, leave + * to NULL otherwise. + * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex + * lock is held when this op is called. + * @req_queue: Queue a validated request, cannot fail. If something goes + * wrong when queueing this request then it should be marked + * as such internally in the driver and any related buffers + * must eventually return to vb2 with state VB2_BUF_STATE_ERROR. + * The req_queue_mutex lock is held when this op is called. + * It is important that vb2 buffer objects are queued last after + * all other object types are queued: queueing a buffer kickstarts + * the request processing, so all other objects related to the + * request (and thus the buffer) must be available to the driver. + * And once a buffer is queued, then the driver can complete + * or delete objects from the request before req_queue exits. 
+ */ +struct media_device_ops { + int (*link_notify)(struct media_link *link, u32 flags, + unsigned int notification); + struct media_request *(*req_alloc)(struct media_device *mdev); + void (*req_free)(struct media_request *req); + int (*req_validate)(struct media_request *req); + void (*req_queue)(struct media_request *req); +}; + +/** + * struct media_device - Media device + * @dev: Parent device + * @devnode: Media device node + * @driver_name: Optional device driver name. If not set, calls to + * %MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``. + * This is needed for USB drivers for example, as otherwise + * they'll all appear as if the driver name was "usb". + * @model: Device model name + * @serial: Device serial number (optional) + * @bus_info: Unique and stable device location identifier + * @hw_revision: Hardware device revision + * @topology_version: Monotonic counter for storing the version of the graph + * topology. Should be incremented each time the topology changes. + * @id: Unique ID used on the last registered graph object + * @entity_internal_idx: Unique internal entity ID used by the graph traversal + * algorithms + * @entity_internal_idx_max: Allocated internal entity indices + * @entities: List of registered entities + * @interfaces: List of registered interfaces + * @pads: List of registered pads + * @links: List of registered links + * @entity_notify: List of registered entity_notify callbacks + * @graph_mutex: Protects access to struct media_device data + * @pm_count_walk: Graph walk for power state walk. Access serialised using + * graph_mutex. + * + * @source_priv: Driver Private data for enable/disable source handlers + * @enable_source: Enable Source Handler function pointer + * @disable_source: Disable Source Handler function pointer + * + * @ops: Operation handler callbacks + * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t. + * other operations that stop or start streaming. 
+ * @request_id: Used to generate unique request IDs + * + * This structure represents an abstract high-level media device. It allows easy + * access to entities and provides basic media device-level support. The + * structure can be allocated directly or embedded in a larger structure. + * + * The parent @dev is a physical device. It must be set before registering the + * media device. + * + * @model is a descriptive model name exported through sysfs. It doesn't have to + * be unique. + * + * @enable_source is a handler to find source entity for the + * sink entity and activate the link between them if source + * entity is free. Drivers should call this handler before + * accessing the source. + * + * @disable_source is a handler to find source entity for the + * sink entity and deactivate the link between them. Drivers + * should call this handler to release the source. + * + * Use-case: find tuner entity connected to the decoder + * entity and check if it is available, and activate the + * link between them from @enable_source and deactivate + * from @disable_source. + * + * .. note:: + * + * Bridge driver is expected to implement and set the + * handler when &media_device is registered or when + * bridge driver finds the media_device during probe. + * Bridge driver sets source_priv with information + * necessary to run @enable_source and @disable_source handlers. + * Callers should hold graph_mutex to access and call @enable_source + * and @disable_source handlers. + */ +struct media_device { + /* dev->driver_data points to this struct. 
*/ + struct device *dev; + struct media_devnode *devnode; + + char model[32]; + char driver_name[32]; + char serial[40]; + char bus_info[32]; + u32 hw_revision; + + u64 topology_version; + + u32 id; + struct ida entity_internal_idx; + int entity_internal_idx_max; + + struct list_head entities; + struct list_head interfaces; + struct list_head pads; + struct list_head links; + + /* notify callback list invoked when a new entity is registered */ + struct list_head entity_notify; + + /* Serializes graph operations. */ + struct mutex graph_mutex; + struct media_graph pm_count_walk; + + void *source_priv; + int (*enable_source)(struct media_entity *entity, + struct media_pipeline *pipe); + void (*disable_source)(struct media_entity *entity); + + const struct media_device_ops *ops; + + struct mutex req_queue_mutex; + atomic_t request_id; +}; + +/* We don't need to include usb.h here */ +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/* Supported link_notify @notification values. */ +#define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 +#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 + +/** + * media_device_init() - Initializes a media device element + * + * @mdev: pointer to struct &media_device + * + * This function initializes the media device prior to its registration. + * The media device initialization and registration is split in two functions + * to avoid race conditions and make the media device available to user-space + * before the media graph has been completed. + * + * So drivers need to first initialize the media device, register any entity + * within the media device, create pad to pad links and then finally register + * the media device by calling media_device_register() as a final step. + * + * The caller is responsible for initializing the media device before + * registration. 
The following fields must be set: + * + * - dev must point to the parent device + * - model must be filled with the device model name + * + * The bus_info field is set by media_device_init() for PCI and platform devices + * if the field begins with '\0'. + */ +void media_device_init(struct media_device *mdev); + +/** + * media_device_cleanup() - Cleanups a media device element + * + * @mdev: pointer to struct &media_device + * + * This function that will destroy the graph_mutex that is + * initialized in media_device_init(). + */ +void media_device_cleanup(struct media_device *mdev); + +/** + * __media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * @owner: should be filled with %THIS_MODULE + * + * Users, should, instead, call the media_device_register() macro. + * + * The caller is responsible for initializing the &media_device structure + * before registration. The following fields of &media_device must be set: + * + * - &media_device.model must be filled with the device model name as a + * NUL-terminated UTF-8 string. The device/model revision must not be + * stored in this field. + * + * The following fields are optional: + * + * - &media_device.serial is a unique serial number stored as a + * NUL-terminated ASCII string. The field is big enough to store a GUID + * in text form. If the hardware doesn't provide a unique serial number + * this field must be left empty. + * + * - &media_device.bus_info represents the location of the device in the + * system as a NUL-terminated ASCII string. For PCI/PCIe devices + * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by + * the value of pci_name(). For USB devices,the usb_make_path() function + * must be used. This field is used by applications to distinguish between + * otherwise identical devices that don't provide a serial number. + * + * - &media_device.hw_revision is the hardware device revision in a + * driver-specific format. 
When possible the revision should be formatted + * with the KERNEL_VERSION() macro. + * + * .. note:: + * + * #) Upon successful registration a character device named media[0-9]+ is created. The device major and minor numbers are dynamic. The model name is exported as a sysfs attribute. + * + * #) Unregistering a media device that hasn't been registered is **NOT** safe. + * + * Return: returns zero on success or a negative error code. + */ +int __must_check __media_device_register(struct media_device *mdev, + struct module *owner); + + +/** + * media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * + * This macro calls __media_device_register() passing %THIS_MODULE as + * the __media_device_register() second argument (**owner**). + */ +#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) + +/** + * media_device_unregister() - Unregisters a media device element + * + * @mdev: pointer to struct &media_device + * + * It is safe to call this function on an unregistered (but initialised) + * media device. + */ +void media_device_unregister(struct media_device *mdev); + +/** + * media_device_register_entity() - registers a media entity inside a + * previously registered media device. + * + * @mdev: pointer to struct &media_device + * @entity: pointer to struct &media_entity to be registered + * + * Entities are identified by a unique positive integer ID. The media + * controller framework will such ID automatically. IDs are not guaranteed + * to be contiguous, and the ID number can change on newer Kernel versions. + * So, neither the driver nor userspace should hardcode ID numbers to refer + * to the entities, but, instead, use the framework to find the ID, when + * needed. + * + * The media_entity name, type and flags fields should be initialized before + * calling media_device_register_entity(). 
Entities embedded in higher-level + * standard structures can have some of those fields set by the higher-level + * framework. + * + * If the device has pads, media_entity_pads_init() should be called before + * this function. Otherwise, the &media_entity.pad and &media_entity.num_pads + * should be zeroed before calling this function. + * + * Entities have flags that describe the entity capabilities and state: + * + * %MEDIA_ENT_FL_DEFAULT + * indicates the default entity for a given type. + * This can be used to report the default audio and video devices or the + * default camera sensor. + * + * .. note:: + * + * Drivers should set the entity function before calling this function. + * Please notice that the values %MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN and + * %MEDIA_ENT_F_UNKNOWN should not be used by the drivers. + */ +int __must_check media_device_register_entity(struct media_device *mdev, + struct media_entity *entity); + +/** + * media_device_unregister_entity() - unregisters a media entity. + * + * @entity: pointer to struct &media_entity to be unregistered + * + * All links associated with the entity and all PADs are automatically + * unregistered from the media_device when this function is called. + * + * Unregistering an entity will not change the IDs of the other entities and + * the previoully used ID will never be reused for a newly registered entities. + * + * When a media device is unregistered, all its entities are unregistered + * automatically. No manual entities unregistration is then required. + * + * .. note:: + * + * The media_entity instance itself must be freed explicitly by + * the driver if required. + */ +void media_device_unregister_entity(struct media_entity *entity); + +/** + * media_device_register_entity_notify() - Registers a media entity_notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + * .. 
note::
+ *
+ * When a new entity is registered, all the registered
+ * media_entity_notify callbacks are invoked.
+ */
+
+void media_device_register_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr);
+
+/**
+ * media_device_unregister_entity_notify() - Unregister a media entity notify
+ * callback
+ *
+ * @mdev: The media device
+ * @nptr: The media_entity_notify
+ *
+ */
+void media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr);
+
+/* Iterate over all entities. */
+#define media_device_for_each_entity(entity, mdev) \
+ list_for_each_entry(entity, &(mdev)->entities, graph_obj.list)
+
+/* Iterate over all interfaces. */
+#define media_device_for_each_intf(intf, mdev) \
+ list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list)
+
+/* Iterate over all pads. */
+#define media_device_for_each_pad(pad, mdev) \
+ list_for_each_entry(pad, &(mdev)->pads, graph_obj.list)
+
+/* Iterate over all links. */
+#define media_device_for_each_link(link, mdev) \
+ list_for_each_entry(link, &(mdev)->links, graph_obj.list)
+
+/**
+ * media_device_pci_init() - create and initialize a
+ * struct &media_device from a PCI device.
+ *
+ * @mdev: pointer to struct &media_device
+ * @pci_dev: pointer to struct pci_dev
+ * @name: media device name. If %NULL, the routine will use the default
+ * name for the pci device, given by pci_name() macro.
+ */
+void media_device_pci_init(struct media_device *mdev,
+ struct pci_dev *pci_dev,
+ const char *name);
+/**
+ * __media_device_usb_init() - create and initialize a
+ * struct &media_device from a USB device.
+ *
+ * @mdev: pointer to struct &media_device
+ * @udev: pointer to struct usb_device
+ * @board_name: media device name. If %NULL, the routine will use the usb
+ * product name, if available.
+ * @driver_name: name of the driver. if %NULL, the routine will use the name
+ * given by ``udev->dev->driver->name``, which is usually the wrong
+ * thing to do. 
+ * + * .. note:: + * + * It is better to call media_device_usb_init() instead, as + * such macro fills driver_name with %KBUILD_MODNAME. + */ +void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + const char *board_name, + const char *driver_name); + +#else +static inline void media_device_init(struct media_device *mdev) +{ +} +static inline int media_device_register(struct media_device *mdev) +{ + return 0; +} +static inline void media_device_unregister(struct media_device *mdev) +{ +} +static inline void media_device_cleanup(struct media_device *mdev) +{ +} +static inline int media_device_register_entity(struct media_device *mdev, + struct media_entity *entity) +{ + return 0; +} +static inline void media_device_unregister_entity(struct media_entity *entity) +{ +} +static inline void media_device_register_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} +static inline void media_device_unregister_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} + +static inline void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + char *name) +{ +} + +static inline void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + char *board_name, + char *driver_name) +{ +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/** + * media_device_usb_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @udev: pointer to struct usb_device + * @name: media device name. If %NULL, the routine will use the usb + * product name, if available. + * + * This macro calls media_device_usb_init() passing the + * media_device_usb_init() **driver_name** parameter filled with + * %KBUILD_MODNAME. 
+ */ +#define media_device_usb_init(mdev, udev, name) \ + __media_device_usb_init(mdev, udev, name, KBUILD_MODNAME) + +/** + * media_set_bus_info() - Set bus_info field + * + * @bus_info: Variable where to write the bus info (char array) + * @bus_info_size: Length of the bus_info + * @dev: Related struct device + * + * Sets bus information based on &dev. This is currently done for PCI and + * platform devices. dev is required to be non-NULL for this to happen. + * + * This function is not meant to be called from drivers. + */ +static inline void +media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev) +{ + if (!dev) + strscpy(bus_info, "no bus info", bus_info_size); + else if (dev_is_platform(dev)) + snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev)); + else if (dev_is_pci(dev)) + snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev)); +} + +#endif diff --git a/6.12.0/include-overrides/media/media-devnode.h b/6.12.0/include-overrides/media/media-devnode.h new file mode 100644 index 0000000..d27c1c6 --- /dev/null +++ b/6.12.0/include-overrides/media/media-devnode.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media device node + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + * + * -- + * + * Common functions for media-related drivers to register and unregister media + * device nodes. + */ + +#ifndef _MEDIA_DEVNODE_H +#define _MEDIA_DEVNODE_H + +#include +#include +#include +#include + +struct media_device; + +/* + * Flag to mark the media_devnode struct as registered. Drivers must not touch + * this flag directly, it will be set and cleared by media_devnode_register and + * media_devnode_unregister. 
+ */ +#define MEDIA_FLAG_REGISTERED 0 + +/** + * struct media_file_operations - Media device file operations + * + * @owner: should be filled with %THIS_MODULE + * @read: pointer to the function that implements read() syscall + * @write: pointer to the function that implements write() syscall + * @poll: pointer to the function that implements poll() syscall + * @ioctl: pointer to the function that implements ioctl() syscall + * @compat_ioctl: pointer to the function that will handle 32 bits userspace + * calls to the ioctl() syscall on a Kernel compiled with 64 bits. + * @open: pointer to the function that implements open() syscall + * @release: pointer to the function that will release the resources allocated + * by the @open function. + */ +struct media_file_operations { + struct module *owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*open) (struct file *); + int (*release) (struct file *); +}; + +/** + * struct media_devnode - Media device node + * @media_dev: pointer to struct &media_device + * @fops: pointer to struct &media_file_operations with media device ops + * @dev: pointer to struct &device containing the media controller device + * @cdev: struct cdev pointer character device + * @parent: parent device + * @minor: device node minor number + * @flags: flags, combination of the ``MEDIA_FLAG_*`` constants + * @release: release callback called at the end of ``media_devnode_release()`` + * routine at media-device.c. + * + * This structure represents a media-related device node. + * + * The @parent is a physical device. It must be set by core or device drivers + * before registering the node. 
+ */ +struct media_devnode { + struct media_device *media_dev; + + /* device ops */ + const struct media_file_operations *fops; + + /* sysfs */ + struct device dev; /* media device */ + struct cdev cdev; /* character device */ + struct device *parent; /* device parent */ + + /* device info */ + int minor; + unsigned long flags; /* Use bitops to access flags */ + + /* callbacks */ + void (*release)(struct media_devnode *devnode); +}; + +/* dev to media_devnode */ +#define to_media_devnode(cd) container_of(cd, struct media_devnode, dev) + +/** + * media_devnode_register - register a media device node + * + * @mdev: struct media_device we want to register a device node + * @devnode: media device node structure we want to register + * @owner: should be filled with %THIS_MODULE + * + * The registration code assigns minor numbers and registers the new device node + * with the kernel. An error is returned if no free minor number can be found, + * or if the registration of the device node fails. + * + * Zero is returned on success. + * + * Note that if the media_devnode_register call fails, the release() callback of + * the media_devnode structure is *not* called, so the caller is responsible for + * freeing any data. + */ +int __must_check media_devnode_register(struct media_device *mdev, + struct media_devnode *devnode, + struct module *owner); + +/** + * media_devnode_unregister_prepare - clear the media device node register bit + * @devnode: the device node to prepare for unregister + * + * This clears the passed device register bit. Future open calls will be met + * with errors. Should be called before media_devnode_unregister() to avoid + * races with unregister and device file open calls. + * + * This function can safely be called if the device node has never been + * registered or has already been unregistered. 
+ */ +void media_devnode_unregister_prepare(struct media_devnode *devnode); + +/** + * media_devnode_unregister - unregister a media device node + * @devnode: the device node to unregister + * + * This unregisters the passed device. Future open calls will be met with + * errors. + * + * Should be called after media_devnode_unregister_prepare() + */ +void media_devnode_unregister(struct media_devnode *devnode); + +/** + * media_devnode_data - returns a pointer to the &media_devnode + * + * @filp: pointer to struct &file + */ +static inline struct media_devnode *media_devnode_data(struct file *filp) +{ + return filp->private_data; +} + +/** + * media_devnode_is_registered - returns true if &media_devnode is registered; + * false otherwise. + * + * @devnode: pointer to struct &media_devnode. + * + * Note: If mdev is NULL, it also returns false. + */ +static inline int media_devnode_is_registered(struct media_devnode *devnode) +{ + if (!devnode) + return false; + + return test_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); +} + +#endif /* _MEDIA_DEVNODE_H */ diff --git a/6.12.0/include-overrides/media/media-entity.h b/6.12.0/include-overrides/media/media-entity.h new file mode 100644 index 0000000..0393b23 --- /dev/null +++ b/6.12.0/include-overrides/media/media-entity.h @@ -0,0 +1,1450 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media entity + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + */ + +#ifndef _MEDIA_ENTITY_H +#define _MEDIA_ENTITY_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Enums used internally at the media controller to represent graphs */ + +/** + * enum media_gobj_type - type of a graph object + * + * @MEDIA_GRAPH_ENTITY: Identify a media entity + * @MEDIA_GRAPH_PAD: Identify a media pad + * @MEDIA_GRAPH_LINK: Identify a media link + * @MEDIA_GRAPH_INTF_DEVNODE: Identify a media Kernel API interface via + * a device node + */ +enum media_gobj_type { 
+ MEDIA_GRAPH_ENTITY, + MEDIA_GRAPH_PAD, + MEDIA_GRAPH_LINK, + MEDIA_GRAPH_INTF_DEVNODE, +}; + +#define MEDIA_BITS_PER_TYPE 8 +#define MEDIA_BITS_PER_ID (32 - MEDIA_BITS_PER_TYPE) +#define MEDIA_ID_MASK GENMASK_ULL(MEDIA_BITS_PER_ID - 1, 0) + +/* Structs to represent the objects that belong to a media graph */ + +/** + * struct media_gobj - Define a graph object. + * + * @mdev: Pointer to the struct &media_device that owns the object + * @id: Non-zero object ID identifier. The ID should be unique + * inside a media_device, as it is composed by + * %MEDIA_BITS_PER_TYPE to store the type plus + * %MEDIA_BITS_PER_ID to store the ID + * @list: List entry stored in one of the per-type mdev object lists + * + * All objects on the media graph should have this struct embedded + */ +struct media_gobj { + struct media_device *mdev; + u32 id; + struct list_head list; +}; + +#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 + +/** + * struct media_entity_enum - An enumeration of media entities. + * + * @bmap: Bit map in which each bit represents one entity at struct + * media_entity->internal_idx. + * @idx_max: Number of bits in bmap + */ +struct media_entity_enum { + unsigned long *bmap; + int idx_max; +}; + +/** + * struct media_graph - Media graph traversal state + * + * @stack: Graph traversal stack; the stack contains information + * on the path the media entities to be walked and the + * links through which they were reached. + * @stack.entity: pointer to &struct media_entity at the graph. + * @stack.link: pointer to &struct list_head. 
+ * @ent_enum: Visited entities + * @top: The top of the stack + */ +struct media_graph { + struct { + struct media_entity *entity; + struct list_head *link; + } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; + + struct media_entity_enum ent_enum; + int top; +}; + +/** + * struct media_pipeline - Media pipeline related information + * + * @allocated: Media pipeline allocated and freed by the framework + * @mdev: The media device the pipeline is part of + * @pads: List of media_pipeline_pad + * @start_count: Media pipeline start - stop count + */ +struct media_pipeline { + bool allocated; + struct media_device *mdev; + struct list_head pads; + int start_count; +}; + +/** + * struct media_pipeline_pad - A pad part of a media pipeline + * + * @list: Entry in the media_pad pads list + * @pipe: The media_pipeline that the pad is part of + * @pad: The media pad + * + * This structure associate a pad with a media pipeline. Instances of + * media_pipeline_pad are created by media_pipeline_start() when it builds the + * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop() + * removes the entries from the list and deletes them. + */ +struct media_pipeline_pad { + struct list_head list; + struct media_pipeline *pipe; + struct media_pad *pad; +}; + +/** + * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad + * + * @cursor: The current element + */ +struct media_pipeline_pad_iter { + struct list_head *cursor; +}; + +/** + * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity + * + * @ent_enum: The entity enumeration tracker + * @cursor: The current element + */ +struct media_pipeline_entity_iter { + struct media_entity_enum ent_enum; + struct list_head *cursor; +}; + +/** + * struct media_link - A link object part of a media graph. + * + * @graph_obj: Embedded structure containing the media object common data + * @list: Linked list associated with an entity or an interface that + * owns the link. 
+ * @gobj0: Part of a union. Used to get the pointer for the first + * graph_object of the link. + * @source: Part of a union. Used only if the first object (gobj0) is + * a pad. In that case, it represents the source pad. + * @intf: Part of a union. Used only if the first object (gobj0) is + * an interface. + * @gobj1: Part of a union. Used to get the pointer for the second + * graph_object of the link. + * @sink: Part of a union. Used only if the second object (gobj1) is + * a pad. In that case, it represents the sink pad. + * @entity: Part of a union. Used only if the second object (gobj1) is + * an entity. + * @reverse: Pointer to the link for the reverse direction of a pad to pad + * link. + * @flags: Link flags, as defined in uapi/media.h (MEDIA_LNK_FL_*) + * @is_backlink: Indicate if the link is a backlink. + */ +struct media_link { + struct media_gobj graph_obj; + struct list_head list; + union { + struct media_gobj *gobj0; + struct media_pad *source; + struct media_interface *intf; + }; + union { + struct media_gobj *gobj1; + struct media_pad *sink; + struct media_entity *entity; + }; + struct media_link *reverse; + unsigned long flags; + bool is_backlink; +}; + +/** + * enum media_pad_signal_type - type of the signal inside a media pad + * + * @PAD_SIGNAL_DEFAULT: + * Default signal. Use this when all inputs or all outputs are + * uniquely identified by the pad number. + * @PAD_SIGNAL_ANALOG: + * The pad contains an analog signal. It can be Radio Frequency, + * Intermediate Frequency, a baseband signal or sub-carriers. + * Tuner inputs, IF-PLL demodulators, composite and s-video signals + * should use it. + * @PAD_SIGNAL_DV: + * Contains a digital video signal, with can be a bitstream of samples + * taken from an analog TV video source. On such case, it usually + * contains the VBI data on it. + * @PAD_SIGNAL_AUDIO: + * Contains an Intermediate Frequency analog signal from an audio + * sub-carrier or an audio bitstream. 
IF signals are provided by tuners + * and consumed by audio AM/FM decoders. Bitstream audio is provided by + * an audio decoder. + */ +enum media_pad_signal_type { + PAD_SIGNAL_DEFAULT = 0, + PAD_SIGNAL_ANALOG, + PAD_SIGNAL_DV, + PAD_SIGNAL_AUDIO, +}; + +/** + * struct media_pad - A media pad graph object. + * + * @graph_obj: Embedded structure containing the media object common data + * @entity: Entity this pad belongs to + * @index: Pad index in the entity pads array, numbered from 0 to n + * @num_links: Number of links connected to this pad + * @sig_type: Type of the signal inside a media pad + * @flags: Pad flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_PAD_FL_*``) + * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to + * access this field. + */ +struct media_pad { + struct media_gobj graph_obj; /* must be first field in struct */ + struct media_entity *entity; + u16 index; + u16 num_links; + enum media_pad_signal_type sig_type; + unsigned long flags; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + struct media_pipeline *pipe; +}; + +/** + * struct media_entity_operations - Media entity operations + * @get_fwnode_pad: Return the pad number based on a fwnode endpoint or + * a negative value on error. This operation can be used + * to map a fwnode to a media pad number. Optional. + * @link_setup: Notify the entity of link changes. The operation can + * return an error, in which case link setup will be + * cancelled. Optional. + * @link_validate: Return whether a link is valid from the entity point of + * view. The media_pipeline_start() function + * validates all links by calling this operation. Optional. + * @has_pad_interdep: Return whether two pads of the entity are + * interdependent. 
If two pads are interdependent they are
+ * part of the same pipeline and enabling one of the pads
+ * means that the other pad will become "locked" and
+ * doesn't allow configuration changes. pad0 and pad1 are
+ * guaranteed to not both be sinks or sources. Never call
+ * the .has_pad_interdep() operation directly, always use
+ * media_entity_has_pad_interdep().
+ * Optional: If the operation isn't implemented all pads
+ * will be considered as interdependent.
+ *
+ * .. note::
+ *
+ * These callbacks are called with struct &media_device.graph_mutex
+ * mutex held.
+ */
+struct media_entity_operations {
+ int (*get_fwnode_pad)(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint);
+ int (*link_setup)(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags);
+ int (*link_validate)(struct media_link *link);
+ bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
+};
+
+/**
+ * enum media_entity_type - Media entity type
+ *
+ * @MEDIA_ENTITY_TYPE_BASE:
+ * The entity isn't embedded in another subsystem structure.
+ * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE:
+ * The entity is embedded in a struct video_device instance.
+ * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV:
+ * The entity is embedded in a struct v4l2_subdev instance.
+ *
+ * Media entity objects are often not instantiated directly, but the media
+ * entity structure is inherited by (through embedding) other subsystem-specific
+ * structures. The media entity type identifies the type of the subclass
+ * structure that implements a media entity instance.
+ *
+ * This allows runtime type identification of media entities and safe casting to
+ * the correct object type. For instance, a media entity structure instance
+ * embedded in a v4l2_subdev structure instance will have the type
+ * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev
+ * structure using the container_of() macro. 
+ */ +enum media_entity_type { + MEDIA_ENTITY_TYPE_BASE, + MEDIA_ENTITY_TYPE_VIDEO_DEVICE, + MEDIA_ENTITY_TYPE_V4L2_SUBDEV, +}; + +/** + * struct media_entity - A media entity graph object. + * + * @graph_obj: Embedded structure containing the media object common data. + * @name: Entity name. + * @obj_type: Type of the object that implements the media_entity. + * @function: Entity main function, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_ENT_F_*``) + * @flags: Entity flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_ENT_FL_*``) + * @num_pads: Number of sink and source pads. + * @num_links: Total number of links, forward and back, enabled and disabled. + * @num_backlinks: Number of backlinks + * @internal_idx: An unique internal entity specific number. The numbers are + * re-used if entities are unregistered or registered again. + * @pads: Pads array with the size defined by @num_pads. + * @links: List of data links. + * @ops: Entity operations. + * @use_count: Use count for the entity. + * @info: Union with devnode information. Kept just for backward + * compatibility. + * @info.dev: Contains device major and minor info. + * @info.dev.major: device node major, if the device is a devnode. + * @info.dev.minor: device node minor, if the device is a devnode. + * + * .. note:: + * + * The @use_count reference count must never be negative, but is a signed + * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect + * reference count bugs that would make it negative. 
+ */ +struct media_entity { + struct media_gobj graph_obj; /* must be first field in struct */ + const char *name; + enum media_entity_type obj_type; + u32 function; + unsigned long flags; + + u16 num_pads; + u16 num_links; + u16 num_backlinks; + int internal_idx; + + struct media_pad *pads; + struct list_head links; + + const struct media_entity_operations *ops; + + int use_count; + + union { + struct { + u32 major; + u32 minor; + } dev; + } info; +}; + +/** + * media_entity_for_each_pad - Iterate on all pads in an entity + * @entity: The entity the pads belong to + * @iter: The iterator pad + * + * Iterate on all pads in a media entity. + */ +#define media_entity_for_each_pad(entity, iter) \ + for (iter = (entity)->pads; \ + iter < &(entity)->pads[(entity)->num_pads]; \ + ++iter) + +/** + * struct media_interface - A media interface graph object. + * + * @graph_obj: embedded graph object + * @links: List of links pointing to graph entities + * @type: Type of the interface as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_INTF_T_*``) + * @flags: Interface flags as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_INTF_FL_*``) + * + * .. note:: + * + * Currently, no flags for &media_interface is defined. + */ +struct media_interface { + struct media_gobj graph_obj; + struct list_head links; + u32 type; + u32 flags; +}; + +/** + * struct media_intf_devnode - A media interface via a device node. 
+ * + * @intf: embedded interface object + * @major: Major number of a device node + * @minor: Minor number of a device node + */ +struct media_intf_devnode { + struct media_interface intf; + + /* Should match the fields at media_v2_intf_devnode */ + u32 major; + u32 minor; +}; + +/** + * media_entity_id() - return the media entity graph object id + * + * @entity: pointer to &media_entity + */ +static inline u32 media_entity_id(struct media_entity *entity) +{ + return entity->graph_obj.id; +} + +/** + * media_type() - return the media object type + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +static inline enum media_gobj_type media_type(struct media_gobj *gobj) +{ + return gobj->id >> MEDIA_BITS_PER_ID; +} + +/** + * media_id() - return the media object ID + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +static inline u32 media_id(struct media_gobj *gobj) +{ + return gobj->id & MEDIA_ID_MASK; +} + +/** + * media_gobj_gen_id() - encapsulates type and ID on at the object ID + * + * @type: object type as define at enum &media_gobj_type. + * @local_id: next ID, from struct &media_device.id. + */ +static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id) +{ + u32 id; + + id = type << MEDIA_BITS_PER_ID; + id |= local_id & MEDIA_ID_MASK; + + return id; +} + +/** + * is_media_entity_v4l2_video_device() - Check if the entity is a video_device + * @entity: pointer to entity + * + * Return: %true if the entity is an instance of a video_device object and can + * safely be cast to a struct video_device using the container_of() macro, or + * %false otherwise. 
+ */ +static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity) +{ + return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE; +} + +/** + * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev + * @entity: pointer to entity + * + * Return: %true if the entity is an instance of a &v4l2_subdev object and can + * safely be cast to a struct &v4l2_subdev using the container_of() macro, or + * %false otherwise. + */ +static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) +{ + return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV; +} + +/** + * media_entity_enum_init - Initialise an entity enumeration + * + * @ent_enum: Entity enumeration to be initialised + * @mdev: The related media device + * + * Return: zero on success or a negative error code. + */ +__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev); + +/** + * media_entity_enum_cleanup - Release resources of an entity enumeration + * + * @ent_enum: Entity enumeration to be released + */ +void media_entity_enum_cleanup(struct media_entity_enum *ent_enum); + +/** + * media_entity_enum_zero - Clear the entire enum + * + * @ent_enum: Entity enumeration to be cleared + */ +static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) +{ + bitmap_zero(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_set - Mark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be marked + */ +static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_clear - Unmark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be unmarked + */ +static inline void media_entity_enum_clear(struct 
media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __clear_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test - Test whether the entity is marked + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns %true if the entity was marked. + */ +static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return test_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test_and_set - Test whether the entity is marked, + * and mark it + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns %true if the entity was marked, and mark it before doing so. + */ +static inline bool +media_entity_enum_test_and_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return __test_and_set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_empty - Test whether the entire enum is empty + * + * @ent_enum: Entity enumeration + * + * Return: %true if the entity was empty. + */ +static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum) +{ + return bitmap_empty(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_intersects - Test whether two enums intersect + * + * @ent_enum1: First entity enumeration + * @ent_enum2: Second entity enumeration + * + * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect, + * otherwise %false. 
+ */ +static inline bool media_entity_enum_intersects( + struct media_entity_enum *ent_enum1, + struct media_entity_enum *ent_enum2) +{ + WARN_ON(ent_enum1->idx_max != ent_enum2->idx_max); + + return bitmap_intersects(ent_enum1->bmap, ent_enum2->bmap, + min(ent_enum1->idx_max, ent_enum2->idx_max)); +} + +/** + * gobj_to_entity - returns the struct &media_entity pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_entity(gobj) \ + container_of(gobj, struct media_entity, graph_obj) + +/** + * gobj_to_pad - returns the struct &media_pad pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_pad(gobj) \ + container_of(gobj, struct media_pad, graph_obj) + +/** + * gobj_to_link - returns the struct &media_link pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_link(gobj) \ + container_of(gobj, struct media_link, graph_obj) + +/** + * gobj_to_intf - returns the struct &media_interface pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_intf(gobj) \ + container_of(gobj, struct media_interface, graph_obj) + +/** + * intf_to_devnode - returns the struct media_intf_devnode pointer from the + * @intf contained on it. + * + * @intf: Pointer to struct &media_intf_devnode + */ +#define intf_to_devnode(intf) \ + container_of(intf, struct media_intf_devnode, intf) + +/** + * media_gobj_create - Initialize a graph object + * + * @mdev: Pointer to the &media_device that contains the object + * @type: Type of the object + * @gobj: Pointer to the struct &media_gobj graph object + * + * This routine initializes the embedded struct &media_gobj inside a + * media graph object. It is called automatically if ``media_*_create`` + * function calls are used. 
However, if the object (entity, link, pad, + * interface) is embedded on some other object, this function should be + * called before registering the object at the media controller. + */ +void media_gobj_create(struct media_device *mdev, + enum media_gobj_type type, + struct media_gobj *gobj); + +/** + * media_gobj_destroy - Stop using a graph object on a media device + * + * @gobj: Pointer to the struct &media_gobj graph object + * + * This should be called by all routines like media_device_unregister() + * that remove/destroy media graph objects. + */ +void media_gobj_destroy(struct media_gobj *gobj); + +/** + * media_entity_pads_init() - Initialize the entity pads + * + * @entity: entity where the pads belong + * @num_pads: total number of sink and source pads + * @pads: Array of @num_pads pads. + * + * The pads array is managed by the entity driver and passed to + * media_entity_pads_init() where its pointer will be stored in the + * &media_entity structure. + * + * If no pads are needed, drivers could either directly fill + * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call + * this function that will do the same. + * + * As the number of pads is known in advance, the pads array is not allocated + * dynamically but is managed by the entity driver. Most drivers will embed the + * pads array in a driver-specific structure, avoiding dynamic allocation. + * + * Drivers must set the direction of every pad in the pads array before calling + * media_entity_pads_init(). The function will initialize the other pads fields. + */ +int media_entity_pads_init(struct media_entity *entity, u16 num_pads, + struct media_pad *pads); + +/** + * media_entity_cleanup() - free resources associated with an entity + * + * @entity: entity where the pads belong + * + * This function must be called during the cleanup phase after unregistering + * the entity (currently, it does nothing). 
+ * + * Calling media_entity_cleanup() on a media_entity whose memory has been + * zeroed but that has not been initialized with media_entity_pad_init() is + * valid and is a no-op. + */ +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) +static inline void media_entity_cleanup(struct media_entity *entity) {} +#else +#define media_entity_cleanup(entity) do { } while (false) +#endif + +/** + * media_get_pad_index() - retrieves a pad index from an entity + * + * @entity: entity where the pads belong + * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types + * @sig_type: type of signal of the pad to be search + * + * This helper function finds the first pad index inside an entity that + * satisfies both @is_sink and @sig_type conditions. + * + * Return: + * + * On success, return the pad number. If the pad was not found or the media + * entity is a NULL pointer, return -EINVAL. + */ +int media_get_pad_index(struct media_entity *entity, u32 pad_type, + enum media_pad_signal_type sig_type); + +/** + * media_create_pad_link() - creates a link between two entities. + * + * @source: pointer to &media_entity of the source pad. + * @source_pad: number of the source pad in the pads array + * @sink: pointer to &media_entity of the sink pad. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_LNK_FL_*``) + * + * Valid values for flags: + * + * %MEDIA_LNK_FL_ENABLED + * Indicates that the link is enabled and can be used to transfer media data. + * When two or more links target a sink pad, only one of them can be + * enabled at a time. + * + * %MEDIA_LNK_FL_IMMUTABLE + * Indicates that the link enabled state can't be modified at runtime. If + * %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be + * set, since an immutable link is always enabled. + * + * .. 
note:: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for both ends. + */ +__must_check int media_create_pad_link(struct media_entity *source, + u16 source_pad, struct media_entity *sink, + u16 sink_pad, u32 flags); + +/** + * media_create_pad_links() - creates a link between two entities. + * + * @mdev: Pointer to the media_device that contains the object + * @source_function: Function of the source entities. Used only if @source is + * NULL. + * @source: pointer to &media_entity of the source pad. If NULL, it will use + * all entities that matches the @sink_function. + * @source_pad: number of the source pad in the pads array + * @sink_function: Function of the sink entities. Used only if @sink is NULL. + * @sink: pointer to &media_entity of the sink pad. If NULL, it will use + * all entities that matches the @sink_function. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in include/uapi/linux/media.h. + * @allow_both_undefined: if %true, then both @source and @sink can be NULL. + * In such case, it will create a crossbar between all entities that + * matches @source_function to all entities that matches @sink_function. + * If %false, it will return 0 and won't create any link if both @source + * and @sink are NULL. + * + * Valid values for flags: + * + * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be + * used to transfer media data. If multiple links are created and this + * flag is passed as an argument, only the first created link will have + * this flag. + * + * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't + * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then + * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is + * always enabled. 
+ * + * It is common for some devices to have multiple source and/or sink entities + * of the same type that should be linked. While media_create_pad_link() + * creates link by link, this function is meant to allow 1:n, n:1 and even + * cross-bar (n:n) links. + * + * .. note:: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for the + * entities to be linked. + */ +int media_create_pad_links(const struct media_device *mdev, + const u32 source_function, + struct media_entity *source, + const u16 source_pad, + const u32 sink_function, + struct media_entity *sink, + const u16 sink_pad, + u32 flags, + const bool allow_both_undefined); + +void __media_entity_remove_links(struct media_entity *entity); + +/** + * media_entity_remove_links() - remove all links associated with an entity + * + * @entity: pointer to &media_entity + * + * .. note:: + * + * This is called automatically when an entity is unregistered via + * media_device_register_entity(). + */ +void media_entity_remove_links(struct media_entity *entity); + +/** + * __media_entity_setup_link - Configure a media link without locking + * @link: The link being configured + * @flags: Link configuration flags + * + * The bulk of link setup is handled by the two entities connected through the + * link. This function notifies both entities of the link configuration change. + * + * If the link is immutable or if the current and new configuration are + * identical, return immediately. + * + * The user is expected to hold link->source->parent->mutex. If not, + * media_entity_setup_link() should be used instead. 
+ */ +int __media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_setup_link() - changes the link flags properties in runtime + * + * @link: pointer to &media_link + * @flags: the requested new link flags + * + * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag + * to enable/disable a link. Links marked with the + * %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled. + * + * When a link is enabled or disabled, the media framework calls the + * link_setup operation for the two entities at the source and sink of the + * link, in that order. If the second link_setup call fails, another + * link_setup call is made on the first entity to restore the original link + * flags. + * + * Media device drivers can be notified of link setup operations by setting the + * &media_device.link_notify pointer to a callback function. If provided, the + * notification callback will be called before enabling and after disabling + * links. + * + * Entity drivers must implement the link_setup operation if any of their links + * is non-immutable. The operation must either configure the hardware or store + * the configuration information to be applied later. + * + * Link configuration must not have any side effect on other links. If an + * enabled link at a sink pad prevents another link at the same pad from + * being enabled, the link_setup operation must return %-EBUSY and can't + * implicitly disable the first enabled link. + * + * .. note:: + * + * The valid values of the flags for the link is the same as described + * on media_create_pad_link(), for pad to pad links or the same as described + * on media_create_intf_link(), for interface to entity links. + */ +int media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_find_link - Find a link between two pads + * @source: Source pad + * @sink: Sink pad + * + * Return: returns a pointer to the link between the two entities. 
If no + * such link exists, return %NULL. + */ +struct media_link *media_entity_find_link(struct media_pad *source, + struct media_pad *sink); + +/** + * media_pad_remote_pad_first - Find the first pad at the remote end of a link + * @pad: Pad at the local end of the link + * + * Search for a remote pad connected to the given pad by iterating over all + * links originating or terminating at that pad until an enabled link is found. + * + * Return: returns a pointer to the pad at the remote end of the first found + * enabled link, or %NULL if no enabled link has been found. + */ +struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad); + +/** + * media_pad_remote_pad_unique - Find a remote pad connected to a pad + * @pad: The pad + * + * Search for and return a remote pad connected to @pad through an enabled + * link. If multiple (or no) remote pads are found, an error is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time on a given pad. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad); + +/** + * media_entity_remote_pad_unique - Find a remote pad connected to an entity + * @entity: The entity + * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE) + * + * Search for and return a remote pad of @type connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source or sink at a time. 
+ * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad * +media_entity_remote_pad_unique(const struct media_entity *entity, + unsigned int type); + +/** + * media_entity_remote_source_pad_unique - Find a remote source pad connected to + * an entity + * @entity: The entity + * + * Search for and return a remote source pad connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +static inline struct media_pad * +media_entity_remote_source_pad_unique(const struct media_entity *entity) +{ + return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE); +} + +/** + * media_pad_is_streaming - Test if a pad is part of a streaming pipeline + * @pad: The pad + * + * Return: True if the pad is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. + */ +static inline bool media_pad_is_streaming(const struct media_pad *pad) +{ + return pad->pipe; +} + +/** + * media_entity_is_streaming - Test if an entity is part of a streaming pipeline + * @entity: The entity + * + * Return: True if the entity is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. 
+ */ +static inline bool media_entity_is_streaming(const struct media_entity *entity) +{ + struct media_pad *pad; + + media_entity_for_each_pad(entity, pad) { + if (media_pad_is_streaming(pad)) + return true; + } + + return false; +} + +/** + * media_entity_pipeline - Get the media pipeline an entity is part of + * @entity: The entity + * + * DEPRECATED: use media_pad_pipeline() instead. + * + * This function returns the media pipeline that an entity has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * In general, entities can be part of multiple pipelines, when carrying + * multiple streams (either on different pads, or on the same pad using + * multiplexed streams). This function is to be used only for entities that + * do not support multiple pipelines. + * + * Return: The media_pipeline the entity is part of, or NULL if the entity is + * not part of any pipeline. + */ +struct media_pipeline *media_entity_pipeline(struct media_entity *entity); + +/** + * media_pad_pipeline - Get the media pipeline a pad is part of + * @pad: The pad + * + * This function returns the media pipeline that a pad has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * Return: The media_pipeline the pad is part of, or NULL if the pad is + * not part of any pipeline. + */ +struct media_pipeline *media_pad_pipeline(struct media_pad *pad); + +/** + * media_entity_get_fwnode_pad - Get pad number from fwnode + * + * @entity: The entity + * @fwnode: Pointer to the fwnode_handle which should be used to find the pad + * @direction_flags: Expected direction of the pad, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_PAD_FL_*``) + * + * This function can be used to resolve the media pad number from + * a fwnode. 
This is useful for devices which use more complex + * mappings of media pads. + * + * If the entity does not implement the get_fwnode_pad() operation + * then this function searches the entity for the first pad that + * matches the @direction_flags. + * + * Return: returns the pad number on success or a negative error code. + */ +int media_entity_get_fwnode_pad(struct media_entity *entity, + const struct fwnode_handle *fwnode, + unsigned long direction_flags); + +/** + * media_graph_walk_init - Allocate resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + * @mdev: Pointer to the &media_device that contains the object + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * The caller is required to hold the media_device graph_mutex during the graph + * walk until the graph state is released. + * + * Returns zero on success or a negative error code otherwise. + */ +__must_check int media_graph_walk_init( + struct media_graph *graph, struct media_device *mdev); + +/** + * media_graph_walk_cleanup - Release resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + */ +void media_graph_walk_cleanup(struct media_graph *graph); + +/** + * media_graph_walk_start - Start walking the media graph at a + * given entity + * + * @graph: Media graph structure that will be used to walk the graph + * @entity: Starting entity + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * Before using this function, media_graph_walk_init() must be + * used to allocate resources used for walking the graph. This + * function initializes the graph traversal structure to walk the + * entities graph starting at the given entity. The traversal + * structure must not be modified by the caller during graph + * traversal. 
After the graph walk, the resources must be released + * using media_graph_walk_cleanup(). + */ +void media_graph_walk_start(struct media_graph *graph, + struct media_entity *entity); + +/** + * media_graph_walk_next - Get the next entity in the graph + * @graph: Media graph structure + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * Perform a depth-first traversal of the given media entities graph. + * + * The graph structure must have been previously initialized with a call to + * media_graph_walk_start(). + * + * Return: returns the next entity in the graph or %NULL if the whole graph + * have been traversed. + */ +struct media_entity *media_graph_walk_next(struct media_graph *graph); + +/** + * media_pipeline_start - Mark a pipeline as streaming + * @pad: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. + * + * Mark all pads connected to a given pad through enabled links, either + * directly or indirectly, as streaming. The given pipeline object is assigned + * to every pad in the pipeline and stored in the media_pad pipe field. + * + * Calls to this function can be nested, in which case the same number of + * media_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * media_pipeline_start(). + */ +__must_check int media_pipeline_start(struct media_pad *pad, + struct media_pipeline *pipe); +/** + * __media_pipeline_start - Mark a pipeline as streaming + * + * @pad: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. 
+ * + * ..note:: This is the non-locking version of media_pipeline_start() + */ +__must_check int __media_pipeline_start(struct media_pad *pad, + struct media_pipeline *pipe); + +/** + * media_pipeline_stop - Mark a pipeline as not streaming + * @pad: Starting pad + * + * Mark all pads connected to a given pad through enabled links, either + * directly or indirectly, as not streaming. The media_pad pipe field is + * reset to %NULL. + * + * If multiple calls to media_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + */ +void media_pipeline_stop(struct media_pad *pad); + +/** + * __media_pipeline_stop - Mark a pipeline as not streaming + * + * @pad: Starting pad + * + * .. note:: This is the non-locking version of media_pipeline_stop() + */ +void __media_pipeline_stop(struct media_pad *pad); + +struct media_pad * +__media_pipeline_pad_iter_next(struct media_pipeline *pipe, + struct media_pipeline_pad_iter *iter, + struct media_pad *pad); + +/** + * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline + * @pipe: The pipeline + * @iter: The iterator (struct media_pipeline_pad_iter) + * @pad: The iterator pad + * + * Iterate on all pads in a media pipeline. This is only valid after the + * pipeline has been built with media_pipeline_start() and before it gets + * destroyed with media_pipeline_stop(). + */ +#define media_pipeline_for_each_pad(pipe, iter, pad) \ + for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \ + pad != NULL; \ + pad = __media_pipeline_pad_iter_next((pipe), iter, pad)) + +/** + * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator + * @pipe: The pipeline + * @iter: The iterator + * + * This function must be called to initialize the iterator before using it in a + * media_pipeline_for_each_entity() loop. 
The iterator must be destroyed by a + * call to media_pipeline_entity_iter_cleanup after the loop (including in code + * paths that break from the loop). + * + * The same iterator can be used in multiple consecutive loops without being + * destroyed and reinitialized. + * + * Return: 0 on success or a negative error code otherwise. + */ +int media_pipeline_entity_iter_init(struct media_pipeline *pipe, + struct media_pipeline_entity_iter *iter); + +/** + * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator + * @iter: The iterator + * + * This function must be called to destroy iterators initialized with + * media_pipeline_entity_iter_init(). + */ +void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter); + +struct media_entity * +__media_pipeline_entity_iter_next(struct media_pipeline *pipe, + struct media_pipeline_entity_iter *iter, + struct media_entity *entity); + +/** + * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline + * @pipe: The pipeline + * @iter: The iterator (struct media_pipeline_entity_iter) + * @entity: The iterator entity + * + * Iterate on all entities in a media pipeline. This is only valid after the + * pipeline has been built with media_pipeline_start() and before it gets + * destroyed with media_pipeline_stop(). The iterator must be initialized with + * media_pipeline_entity_iter_init() before iteration, and destroyed with + * media_pipeline_entity_iter_cleanup() after (including in code paths that + * break from the loop). 
+ */ +#define media_pipeline_for_each_entity(pipe, iter, entity) \ + for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \ + entity != NULL; \ + entity = __media_pipeline_entity_iter_next((pipe), iter, entity)) + +/** + * media_pipeline_alloc_start - Mark a pipeline as streaming + * @pad: Starting pad + * + * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead + * of working on a given pipeline the function will use an existing pipeline if + * the pad is already part of a pipeline, or allocate a new pipeline. + * + * Calls to media_pipeline_alloc_start() must be matched with + * media_pipeline_stop(). + */ +__must_check int media_pipeline_alloc_start(struct media_pad *pad); + +/** + * media_devnode_create() - creates and initializes a device node interface + * + * @mdev: pointer to struct &media_device + * @type: type of the interface, as given by + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_INTF_T_*``) macros. + * @flags: Interface flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_INTF_FL_*``) + * @major: Device node major number. + * @minor: Device node minor number. + * + * Return: if succeeded, returns a pointer to the newly allocated + * &media_intf_devnode pointer. + * + * .. note:: + * + * Currently, no flags for &media_interface is defined. + */ +struct media_intf_devnode * +__must_check media_devnode_create(struct media_device *mdev, + u32 type, u32 flags, + u32 major, u32 minor); +/** + * media_devnode_remove() - removes a device node interface + * + * @devnode: pointer to &media_intf_devnode to be freed. + * + * When a device node interface is removed, all links to it are automatically + * removed. 
+ */ +void media_devnode_remove(struct media_intf_devnode *devnode); + +/** + * media_create_intf_link() - creates a link between an entity and an interface + * + * @entity: pointer to %media_entity + * @intf: pointer to %media_interface + * @flags: Link flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_LNK_FL_*``) + * + * + * Valid values for flags: + * + * %MEDIA_LNK_FL_ENABLED + * Indicates that the interface is connected to the entity hardware. + * That's the default value for interfaces. An interface may be disabled if + * the hardware is busy due to the usage of some other interface that it is + * currently controlling the hardware. + * + * A typical example is an hybrid TV device that handle only one type of + * stream on a given time. So, when the digital TV is streaming, + * the V4L2 interfaces won't be enabled, as such device is not able to + * also stream analog TV or radio. + * + * .. note:: + * + * Before calling this function, media_devnode_create() should be called for + * the interface and media_device_register_entity() should be called for the + * interface that will be part of the link. + */ +struct media_link * +__must_check media_create_intf_link(struct media_entity *entity, + struct media_interface *intf, + u32 flags); +/** + * __media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * .. note:: This is an unlocked version of media_remove_intf_link() + */ +void __media_remove_intf_link(struct media_link *link); + +/** + * media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * .. note:: Prefer to use this one, instead of __media_remove_intf_link() + */ +void media_remove_intf_link(struct media_link *link); + +/** + * __media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * .. note:: This is an unlocked version of media_remove_intf_links(). 
+ */ +void __media_remove_intf_links(struct media_interface *intf); + +/** + * media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * .. note:: + * + * #) This is called automatically when an entity is unregistered via + * media_device_register_entity() and by media_devnode_remove(). + * + * #) Prefer to use this one, instead of __media_remove_intf_links(). + */ +void media_remove_intf_links(struct media_interface *intf); + +/** + * media_entity_call - Calls a struct media_entity_operations operation on + * an entity + * + * @entity: entity where the @operation will be called + * @operation: type of the operation. Should be the name of a member of + * struct &media_entity_operations. + * + * This helper function will check if @operation is not %NULL. On such case, + * it will issue a call to @operation\(@entity, @args\). + */ + +#define media_entity_call(entity, operation, args...) \ + (((entity)->ops && (entity)->ops->operation) ? \ + (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD) + +/** + * media_create_ancillary_link() - create an ancillary link between two + * instances of &media_entity + * + * @primary: pointer to the primary &media_entity + * @ancillary: pointer to the ancillary &media_entity + * + * Create an ancillary link between two entities, indicating that they + * represent two connected pieces of hardware that form a single logical unit. + * A typical example is a camera lens controller being linked to the sensor that + * it is supporting. + * + * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for + * the new link. 
+ */ +struct media_link * +media_create_ancillary_link(struct media_entity *primary, + struct media_entity *ancillary); + +/** + * __media_entity_next_link() - Iterate through a &media_entity's links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags + * + * Return the next link against an entity matching a specific link type. This + * allows iteration through an entity's links whilst guaranteeing all of the + * returned links are of the given type. + */ +struct media_link *__media_entity_next_link(struct media_entity *entity, + struct media_link *link, + unsigned long link_type); + +/** + * for_each_media_entity_data_link() - Iterate through an entity's data links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * + * Iterate over a &media_entity's data links + */ +#define for_each_media_entity_data_link(entity, link) \ + for (link = __media_entity_next_link(entity, NULL, \ + MEDIA_LNK_FL_DATA_LINK); \ + link; \ + link = __media_entity_next_link(entity, link, \ + MEDIA_LNK_FL_DATA_LINK)) + +#endif diff --git a/6.12.0/include-overrides/media/media-request.h b/6.12.0/include-overrides/media/media-request.h new file mode 100644 index 0000000..3cd25a2 --- /dev/null +++ b/6.12.0/include-overrides/media/media-request.h @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Media device request objects + * + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ * Copyright (C) 2018 Intel Corporation + * + * Author: Hans Verkuil + * Author: Sakari Ailus + */ + +#ifndef MEDIA_REQUEST_H +#define MEDIA_REQUEST_H + +#include +#include +#include +#include + +#include + +/** + * enum media_request_state - media request state + * + * @MEDIA_REQUEST_STATE_IDLE: Idle + * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes + * allowed + * @MEDIA_REQUEST_STATE_QUEUED: Queued + * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done + * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited + * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e. + * request objects are being added, + * modified or removed + * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used + * internally for sanity check purposes + */ +enum media_request_state { + MEDIA_REQUEST_STATE_IDLE, + MEDIA_REQUEST_STATE_VALIDATING, + MEDIA_REQUEST_STATE_QUEUED, + MEDIA_REQUEST_STATE_COMPLETE, + MEDIA_REQUEST_STATE_CLEANING, + MEDIA_REQUEST_STATE_UPDATING, + NR_OF_MEDIA_REQUEST_STATE, +}; + +struct media_request_object; + +/** + * struct media_request - Media device request + * @mdev: Media device this request belongs to + * @kref: Reference count + * @debug_str: Prefix for debug messages (process name:fd) + * @state: The state of the request + * @updating_count: count the number of request updates that are in progress + * @access_count: count the number of request accesses that are in progress + * @objects: List of @struct media_request_object request objects + * @num_incomplete_objects: The number of incomplete objects in the request + * @poll_wait: Wait queue for poll + * @lock: Serializes access to this struct + */ +struct media_request { + struct media_device *mdev; + struct kref kref; + char debug_str[TASK_COMM_LEN + 11]; + enum media_request_state state; + unsigned int updating_count; + unsigned int access_count; + struct list_head objects; + unsigned int num_incomplete_objects; + 
wait_queue_head_t poll_wait; + spinlock_t lock; +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * media_request_lock_for_access - Lock the request to access its objects + * + * @req: The media request + * + * Use before accessing a completed request. A reference to the request must + * be held during the access. This usually takes place automatically through + * a file handle. Use @media_request_unlock_for_access when done. + */ +static inline int __must_check +media_request_lock_for_access(struct media_request *req) +{ + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { + req->access_count++; + ret = 0; + } + spin_unlock_irqrestore(&req->lock, flags); + + return ret; +} + +/** + * media_request_unlock_for_access - Unlock a request previously locked for + * access + * + * @req: The media request + * + * Unlock a request that has previously been locked using + * @media_request_lock_for_access. + */ +static inline void media_request_unlock_for_access(struct media_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + if (!WARN_ON(!req->access_count)) + req->access_count--; + spin_unlock_irqrestore(&req->lock, flags); +} + +/** + * media_request_lock_for_update - Lock the request for updating its objects + * + * @req: The media request + * + * Use before updating a request, i.e. adding, modifying or removing a request + * object in it. A reference to the request must be held during the update. This + * usually takes place automatically through a file handle. Use + * @media_request_unlock_for_update when done. 
+ */ +static inline int __must_check +media_request_lock_for_update(struct media_request *req) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_IDLE || + req->state == MEDIA_REQUEST_STATE_UPDATING) { + req->state = MEDIA_REQUEST_STATE_UPDATING; + req->updating_count++; + } else { + ret = -EBUSY; + } + spin_unlock_irqrestore(&req->lock, flags); + + return ret; +} + +/** + * media_request_unlock_for_update - Unlock a request previously locked for + * update + * + * @req: The media request + * + * Unlock a request that has previously been locked using + * @media_request_lock_for_update. + */ +static inline void media_request_unlock_for_update(struct media_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + WARN_ON(req->updating_count <= 0); + if (!--req->updating_count) + req->state = MEDIA_REQUEST_STATE_IDLE; + spin_unlock_irqrestore(&req->lock, flags); +} + +/** + * media_request_get - Get the media request + * + * @req: The media request + * + * Get the media request. + */ +static inline void media_request_get(struct media_request *req) +{ + kref_get(&req->kref); +} + +/** + * media_request_put - Put the media request + * + * @req: The media request + * + * Put the media request. The media request will be released + * when the refcount reaches 0. + */ +void media_request_put(struct media_request *req); + +/** + * media_request_get_by_fd - Get a media request by fd + * + * @mdev: Media device this request belongs to + * @request_fd: The file descriptor of the request + * + * Get the request represented by @request_fd that is owned + * by the media device. + * + * Return a -EBADR error pointer if requests are not supported + * by this driver. Return -EINVAL if the request was not found. + * Return the pointer to the request if found: the caller will + * have to call @media_request_put when it finished using the + * request. 
+ */ +struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd); + +/** + * media_request_alloc - Allocate the media request + * + * @mdev: Media device this request belongs to + * @alloc_fd: Store the request's file descriptor in this int + * + * Allocated the media request and put the fd in @alloc_fd. + */ +int media_request_alloc(struct media_device *mdev, + int *alloc_fd); + +#else + +static inline void media_request_get(struct media_request *req) +{ +} + +static inline void media_request_put(struct media_request *req) +{ +} + +static inline struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd) +{ + return ERR_PTR(-EBADR); +} + +#endif + +/** + * struct media_request_object_ops - Media request object operations + * @prepare: Validate and prepare the request object, optional. + * @unprepare: Unprepare the request object, optional. + * @queue: Queue the request object, optional. + * @unbind: Unbind the request object, optional. + * @release: Release the request object, required. + */ +struct media_request_object_ops { + int (*prepare)(struct media_request_object *object); + void (*unprepare)(struct media_request_object *object); + void (*queue)(struct media_request_object *object); + void (*unbind)(struct media_request_object *object); + void (*release)(struct media_request_object *object); +}; + +/** + * struct media_request_object - An opaque object that belongs to a media + * request + * + * @ops: object's operations + * @priv: object's priv pointer + * @req: the request this object belongs to (can be NULL) + * @list: List entry of the object for @struct media_request + * @kref: Reference count of the object, acquire before releasing req->lock + * @completed: If true, then this object was completed. + * + * An object related to the request. This struct is always embedded in + * another struct that contains the actual data for this request object. 
+ */ +struct media_request_object { + const struct media_request_object_ops *ops; + void *priv; + struct media_request *req; + struct list_head list; + struct kref kref; + bool completed; +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * media_request_object_get - Get a media request object + * + * @obj: The object + * + * Get a media request object. + */ +static inline void media_request_object_get(struct media_request_object *obj) +{ + kref_get(&obj->kref); +} + +/** + * media_request_object_put - Put a media request object + * + * @obj: The object + * + * Put a media request object. Once all references are gone, the + * object's memory is released. + */ +void media_request_object_put(struct media_request_object *obj); + +/** + * media_request_object_find - Find an object in a request + * + * @req: The media request + * @ops: Find an object with this ops value + * @priv: Find an object with this priv value + * + * Both @ops and @priv must be non-NULL. + * + * Returns the object pointer or NULL if not found. The caller must + * call media_request_object_put() once it finished using the object. + * + * Since this function needs to walk the list of objects it takes + * the @req->lock spin lock to make this safe. + */ +struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv); + +/** + * media_request_object_init - Initialise a media request object + * + * @obj: The object + * + * Initialise a media request object. The object will be released using the + * release callback of the ops once it has no references (this function + * initialises references to one). 
+ */ +void media_request_object_init(struct media_request_object *obj); + +/** + * media_request_object_bind - Bind a media request object to a request + * + * @req: The media request + * @ops: The object ops for this object + * @priv: A driver-specific priv pointer associated with this object + * @is_buffer: Set to true if the object a buffer object. + * @obj: The object + * + * Bind this object to the request and set the ops and priv values of + * the object so it can be found later with media_request_object_find(). + * + * Every bound object must be unbound or completed by the kernel at some + * point in time, otherwise the request will never complete. When the + * request is released all completed objects will be unbound by the + * request core code. + * + * Buffer objects will be added to the end of the request's object + * list, non-buffer objects will be added to the front of the list. + * This ensures that all buffer objects are at the end of the list + * and that all non-buffer objects that they depend on are processed + * first. + */ +int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj); + +/** + * media_request_object_unbind - Unbind a media request object + * + * @obj: The object + * + * Unbind the media request object from the request. + */ +void media_request_object_unbind(struct media_request_object *obj); + +/** + * media_request_object_complete - Mark the media request object as complete + * + * @obj: The object + * + * Mark the media request object as complete. Only bound objects can + * be completed. 
+ */ +void media_request_object_complete(struct media_request_object *obj); + +#else + +static inline int __must_check +media_request_lock_for_access(struct media_request *req) +{ + return -EINVAL; +} + +static inline void media_request_unlock_for_access(struct media_request *req) +{ +} + +static inline int __must_check +media_request_lock_for_update(struct media_request *req) +{ + return -EINVAL; +} + +static inline void media_request_unlock_for_update(struct media_request *req) +{ +} + +static inline void media_request_object_get(struct media_request_object *obj) +{ +} + +static inline void media_request_object_put(struct media_request_object *obj) +{ +} + +static inline struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv) +{ + return NULL; +} + +static inline void media_request_object_init(struct media_request_object *obj) +{ + obj->ops = NULL; + obj->req = NULL; +} + +static inline int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj) +{ + return 0; +} + +static inline void media_request_object_unbind(struct media_request_object *obj) +{ +} + +static inline void media_request_object_complete(struct media_request_object *obj) +{ +} + +#endif + +#endif diff --git a/6.12.0/include-overrides/media/mipi-csi2.h b/6.12.0/include-overrides/media/mipi-csi2.h new file mode 100644 index 0000000..40fc026 --- /dev/null +++ b/6.12.0/include-overrides/media/mipi-csi2.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MIPI CSI-2 Data Types + * + * Copyright (C) 2022 Laurent Pinchart + */ + +#ifndef _MEDIA_MIPI_CSI2_H +#define _MEDIA_MIPI_CSI2_H + +/* Short packet data types */ +#define MIPI_CSI2_DT_FS 0x00 +#define MIPI_CSI2_DT_FE 0x01 +#define MIPI_CSI2_DT_LS 0x02 +#define MIPI_CSI2_DT_LE 0x03 +#define MIPI_CSI2_DT_GENERIC_SHORT(n) (0x08 + (n)) /* 0..7 
*/ + +/* Long packet data types */ +#define MIPI_CSI2_DT_NULL 0x10 +#define MIPI_CSI2_DT_BLANKING 0x11 +#define MIPI_CSI2_DT_EMBEDDED_8B 0x12 +#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */ +#define MIPI_CSI2_DT_YUV420_8B 0x18 +#define MIPI_CSI2_DT_YUV420_10B 0x19 +#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a +#define MIPI_CSI2_DT_YUV420_8B_CS 0x1c +#define MIPI_CSI2_DT_YUV420_10B_CS 0x1d +#define MIPI_CSI2_DT_YUV422_8B 0x1e +#define MIPI_CSI2_DT_YUV422_10B 0x1f +#define MIPI_CSI2_DT_RGB444 0x20 +#define MIPI_CSI2_DT_RGB555 0x21 +#define MIPI_CSI2_DT_RGB565 0x22 +#define MIPI_CSI2_DT_RGB666 0x23 +#define MIPI_CSI2_DT_RGB888 0x24 +#define MIPI_CSI2_DT_RAW28 0x26 +#define MIPI_CSI2_DT_RAW24 0x27 +#define MIPI_CSI2_DT_RAW6 0x28 +#define MIPI_CSI2_DT_RAW7 0x29 +#define MIPI_CSI2_DT_RAW8 0x2a +#define MIPI_CSI2_DT_RAW10 0x2b +#define MIPI_CSI2_DT_RAW12 0x2c +#define MIPI_CSI2_DT_RAW14 0x2d +#define MIPI_CSI2_DT_RAW16 0x2e +#define MIPI_CSI2_DT_RAW20 0x2f +#define MIPI_CSI2_DT_USER_DEFINED(n) (0x30 + (n)) /* 0..7 */ + +#endif /* _MEDIA_MIPI_CSI2_H */ diff --git a/6.12.0/include-overrides/media/rc-core.h b/6.12.0/include-overrides/media/rc-core.h new file mode 100644 index 0000000..d095908 --- /dev/null +++ b/6.12.0/include-overrides/media/rc-core.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Remote Controller core header + * + * Copyright (C) 2009-2010 by Mauro Carvalho Chehab + */ + +#ifndef _RC_CORE +#define _RC_CORE + +#include +#include +#include +#include +#include +#include + +/** + * enum rc_driver_type - type of the RC driver. + * + * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode. + * @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences. + * It needs a Infra-Red pulse/space decoder + * @RC_DRIVER_IR_RAW_TX: Device transmitter only, + * driver requires pulse/space data sequence. 
+ */ +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW, + RC_DRIVER_IR_RAW_TX, +}; + +/** + * struct rc_scancode_filter - Filter scan codes. + * @data: Scancode data to match. + * @mask: Mask of bits of scancode to compare. + */ +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +/** + * enum rc_filter_type - Filter type constants. + * @RC_FILTER_NORMAL: Filter for normal operation. + * @RC_FILTER_WAKEUP: Filter for waking from suspend. + * @RC_FILTER_MAX: Number of filter types. + */ +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP, + + RC_FILTER_MAX +}; + +/** + * struct lirc_fh - represents an open lirc file + * @list: list of open file handles + * @rc: rcdev for this lirc chardev + * @carrier_low: when setting the carrier range, first the low end must be + * set with an ioctl and then the high end with another ioctl + * @rawir: queue for incoming raw IR + * @scancodes: queue for incoming decoded scancodes + * @wait_poll: poll struct for lirc device + * @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or + * LIRC_MODE_PULSE + * @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or + * LIRC_MODE_MODE2 + */ +struct lirc_fh { + struct list_head list; + struct rc_dev *rc; + int carrier_low; + DECLARE_KFIFO_PTR(rawir, unsigned int); + DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode); + wait_queue_head_t wait_poll; + u8 send_mode; + u8 rec_mode; +}; + +/** + * struct rc_dev - represents a remote control device + * @dev: driver model's view of this device + * @managed_alloc: devm_rc_allocate_device was used to create rc_dev + * @sysfs_groups: sysfs attribute groups + * @device_name: name of the rc child device + * @input_phys: physical path to the input child device + * @input_id: id of the input child device (struct input_id) + * @driver_name: name of the hardware driver which registered this device + * @map_name: name of the default keymap + * @rc_map: current scan/key table + * @lock: used to ensure 
we've filled in all protocol details before + * anyone can call show_protocols or store_protocols + * @minor: unique minor remote control device number + * @raw: additional data for raw pulse/space devices + * @input_dev: the input child device used to communicate events to userspace + * @driver_type: specifies if protocol decoding is done in hardware or software + * @idle: used to keep track of RX state + * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed + * wakeup protocols is the set of all raw encoders + * @allowed_protocols: bitmask with the supported RC_PROTO_BIT_* protocols + * @enabled_protocols: bitmask with the enabled RC_PROTO_BIT_* protocols + * @allowed_wakeup_protocols: bitmask with the supported RC_PROTO_BIT_* wakeup + * protocols + * @wakeup_protocol: the enabled RC_PROTO_* wakeup protocol or + * RC_PROTO_UNKNOWN if disabled. + * @scancode_filter: scancode filter + * @scancode_wakeup_filter: scancode wakeup filters + * @scancode_mask: some hardware decoders are not capable of providing the full + * scancode to the application. As this is a hardware limit, we can't do + * anything with it. Yet, as the same keycode table can be used with other + * devices, a mask is provided to allow its usage. Drivers should generally + * leave this field in blank + * @users: number of current users of the device + * @priv: driver-specific data + * @keylock: protects the remaining members of the struct + * @keypressed: whether a key is currently pressed + * @keyup_jiffies: time (in jiffies) when the current keypress should be released + * @timer_keyup: timer for releasing a keypress + * @timer_repeat: timer for autorepeat events. This is needed for CEC, which + * has non-standard repeats. 
+ * @last_keycode: keycode of last keypress + * @last_protocol: protocol of last keypress + * @last_scancode: scancode of last keypress + * @last_toggle: toggle value of last command + * @timeout: optional time after which device stops sending data + * @min_timeout: minimum timeout supported by device + * @max_timeout: maximum timeout supported by device + * @rx_resolution : resolution (in us) of input sampler + * @lirc_dev: lirc device + * @lirc_cdev: lirc char cdev + * @gap_start: start time for gap after timeout if non-zero + * @lirc_fh_lock: protects lirc_fh list + * @lirc_fh: list of open files + * @registered: set to true by rc_register_device(), false by + * rc_unregister_device + * @change_protocol: allow changing the protocol used on hardware decoders + * @open: callback to allow drivers to enable polling/irq when IR input device + * is opened. + * @close: callback to allow drivers to disable polling/irq when IR input device + * is opened. + * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs) + * @s_tx_carrier: set transmit carrier frequency + * @s_tx_duty_cycle: set transmit duty cycle (0% - 100%) + * @s_rx_carrier_range: inform driver about carrier it is expected to handle + * @tx_ir: transmit IR + * @s_idle: enable/disable hardware idle mode, upon which, + * device doesn't interrupt host until it sees IR pulses + * @s_wideband_receiver: enable wide band receiver used for learning + * @s_carrier_report: enable carrier reports + * @s_filter: set the scancode filter + * @s_wakeup_filter: set the wakeup scancode filter. If the mask is zero + * then wakeup should be disabled. wakeup_protocol will be set to + * a valid protocol if mask is nonzero. 
+ * @s_timeout: set hardware timeout in us + */ +struct rc_dev { + struct device dev; + bool managed_alloc; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + unsigned int minor; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + bool idle; + bool encode_wakeup; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + u32 users; + void *priv; + spinlock_t keylock; + bool keypressed; + unsigned long keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 last_keycode; + enum rc_proto last_protocol; + u64 last_scancode; + u8 last_toggle; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; +#ifdef CONFIG_LIRC + struct device lirc_dev; + struct cdev lirc_cdev; + ktime_t gap_start; + spinlock_t lirc_fh_lock; + struct list_head lirc_fh; +#endif + bool registered; + int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); + int (*open)(struct rc_dev *dev); + void (*close)(struct rc_dev *dev); + int (*s_tx_mask)(struct rc_dev *dev, u32 mask); + int (*s_tx_carrier)(struct rc_dev *dev, u32 carrier); + int (*s_tx_duty_cycle)(struct rc_dev *dev, u32 duty_cycle); + int (*s_rx_carrier_range)(struct rc_dev *dev, u32 min, u32 max); + int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n); + void (*s_idle)(struct rc_dev *dev, bool enable); + int (*s_wideband_receiver)(struct rc_dev *dev, int enable); + int (*s_carrier_report) (struct rc_dev *dev, int enable); + int (*s_filter)(struct rc_dev *dev, + struct rc_scancode_filter *filter); + int (*s_wakeup_filter)(struct rc_dev *dev, + struct rc_scancode_filter *filter); + int 
(*s_timeout)(struct rc_dev *dev, + unsigned int timeout); +}; + +#define to_rc_dev(d) container_of(d, struct rc_dev, dev) + +/* + * From rc-main.c + * Those functions can be used on any type of Remote Controller. They + * basically creates an input_dev and properly reports the device as a + * Remote Controller, at sys/class/rc. + */ + +/** + * rc_allocate_device - Allocates a RC device + * + * @rc_driver_type: specifies the type of the RC output to be allocated + * returns a pointer to struct rc_dev. + */ +struct rc_dev *rc_allocate_device(enum rc_driver_type); + +/** + * devm_rc_allocate_device - Managed RC device allocation + * + * @dev: pointer to struct device + * @rc_driver_type: specifies the type of the RC output to be allocated + * returns a pointer to struct rc_dev. + */ +struct rc_dev *devm_rc_allocate_device(struct device *dev, enum rc_driver_type); + +/** + * rc_free_device - Frees a RC device + * + * @dev: pointer to struct rc_dev. + */ +void rc_free_device(struct rc_dev *dev); + +/** + * rc_register_device - Registers a RC device + * + * @dev: pointer to struct rc_dev. + */ +int rc_register_device(struct rc_dev *dev); + +/** + * devm_rc_register_device - Manageded registering of a RC device + * + * @parent: pointer to struct device. + * @dev: pointer to struct rc_dev. + */ +int devm_rc_register_device(struct device *parent, struct rc_dev *dev); + +/** + * rc_unregister_device - Unregisters a RC device + * + * @dev: pointer to struct rc_dev. + */ +void rc_unregister_device(struct rc_dev *dev); + +void rc_repeat(struct rc_dev *dev); +void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, + u8 toggle); +void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, + u64 scancode, u8 toggle); +void rc_keyup(struct rc_dev *dev); +u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode); + +/* + * From rc-raw.c + * The Raw interface is specific to InfraRed. It may be a good idea to + * split it later into a separate header. 
+ */ +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + + unsigned pulse:1; + unsigned overflow:1; + unsigned timeout:1; + unsigned carrier_report:1; +}; + +#define US_TO_NS(usec) ((usec) * 1000) +#define MS_TO_US(msec) ((msec) * 1000) +#define IR_MAX_DURATION MS_TO_US(500) +#define IR_DEFAULT_TIMEOUT MS_TO_US(125) +#define IR_MAX_TIMEOUT LIRC_VALUE_MASK + +void ir_raw_event_handle(struct rc_dev *dev); +int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); +int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse); +int ir_raw_event_store_with_filter(struct rc_dev *dev, + struct ir_raw_event *ev); +int ir_raw_event_store_with_timeout(struct rc_dev *dev, + struct ir_raw_event *ev); +void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); +int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, + struct ir_raw_event *events, unsigned int max); +int ir_raw_encode_carrier(enum rc_proto protocol); + +static inline void ir_raw_event_overflow(struct rc_dev *dev) +{ + ir_raw_event_store(dev, &((struct ir_raw_event) { .overflow = true })); + dev->idle = true; + ir_raw_event_handle(dev); +} + +/* extract mask bits out of data and pack them into the result */ +static inline u32 ir_extract_bits(u32 data, u32 mask) +{ + u32 vbit = 1, value = 0; + + do { + if (mask & 1) { + if (data & 1) + value |= vbit; + vbit <<= 1; + } + data >>= 1; + } while (mask >>= 1); + + return value; +} + +/* Get NEC scancode and protocol type from address and command bytes */ +static inline u32 ir_nec_bytes_to_scancode(u8 address, u8 not_address, + u8 command, u8 not_command, + enum rc_proto *protocol) +{ + u32 scancode; + + if ((command ^ not_command) != 0xff) { + /* NEC transport, but modified protocol, used by at + * least Apple and TiVo remotes + */ + scancode = not_address << 24 | + address << 16 | + not_command << 8 | + command; + *protocol = RC_PROTO_NEC32; + } else if ((address ^ not_address) != 0xff) { + /* Extended NEC */ + 
scancode = address << 16 | + not_address << 8 | + command; + *protocol = RC_PROTO_NECX; + } else { + /* Normal NEC */ + scancode = address << 8 | command; + *protocol = RC_PROTO_NEC; + } + + return scancode; +} + +#endif /* _RC_CORE */ diff --git a/6.12.0/include-overrides/media/rc-map.h b/6.12.0/include-overrides/media/rc-map.h new file mode 100644 index 0000000..4867eb2 --- /dev/null +++ b/6.12.0/include-overrides/media/rc-map.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * rc-map.h - define RC map names used by RC drivers + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + */ + +#ifndef _MEDIA_RC_MAP_H +#define _MEDIA_RC_MAP_H + +#include +#include + +#define RC_PROTO_BIT_NONE 0ULL +#define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN) +#define RC_PROTO_BIT_OTHER BIT_ULL(RC_PROTO_OTHER) +#define RC_PROTO_BIT_RC5 BIT_ULL(RC_PROTO_RC5) +#define RC_PROTO_BIT_RC5X_20 BIT_ULL(RC_PROTO_RC5X_20) +#define RC_PROTO_BIT_RC5_SZ BIT_ULL(RC_PROTO_RC5_SZ) +#define RC_PROTO_BIT_JVC BIT_ULL(RC_PROTO_JVC) +#define RC_PROTO_BIT_SONY12 BIT_ULL(RC_PROTO_SONY12) +#define RC_PROTO_BIT_SONY15 BIT_ULL(RC_PROTO_SONY15) +#define RC_PROTO_BIT_SONY20 BIT_ULL(RC_PROTO_SONY20) +#define RC_PROTO_BIT_NEC BIT_ULL(RC_PROTO_NEC) +#define RC_PROTO_BIT_NECX BIT_ULL(RC_PROTO_NECX) +#define RC_PROTO_BIT_NEC32 BIT_ULL(RC_PROTO_NEC32) +#define RC_PROTO_BIT_SANYO BIT_ULL(RC_PROTO_SANYO) +#define RC_PROTO_BIT_MCIR2_KBD BIT_ULL(RC_PROTO_MCIR2_KBD) +#define RC_PROTO_BIT_MCIR2_MSE BIT_ULL(RC_PROTO_MCIR2_MSE) +#define RC_PROTO_BIT_RC6_0 BIT_ULL(RC_PROTO_RC6_0) +#define RC_PROTO_BIT_RC6_6A_20 BIT_ULL(RC_PROTO_RC6_6A_20) +#define RC_PROTO_BIT_RC6_6A_24 BIT_ULL(RC_PROTO_RC6_6A_24) +#define RC_PROTO_BIT_RC6_6A_32 BIT_ULL(RC_PROTO_RC6_6A_32) +#define RC_PROTO_BIT_RC6_MCE BIT_ULL(RC_PROTO_RC6_MCE) +#define RC_PROTO_BIT_SHARP BIT_ULL(RC_PROTO_SHARP) +#define RC_PROTO_BIT_XMP BIT_ULL(RC_PROTO_XMP) +#define RC_PROTO_BIT_CEC BIT_ULL(RC_PROTO_CEC) +#define RC_PROTO_BIT_IMON 
BIT_ULL(RC_PROTO_IMON) +#define RC_PROTO_BIT_RCMM12 BIT_ULL(RC_PROTO_RCMM12) +#define RC_PROTO_BIT_RCMM24 BIT_ULL(RC_PROTO_RCMM24) +#define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32) +#define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD) + +#if IS_ENABLED(CONFIG_IR_RC5_DECODER) +#define __RC_PROTO_RC5_CODEC \ + (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | RC_PROTO_BIT_RC5_SZ) +#else +#define __RC_PROTO_RC5_CODEC 0 +#endif + +#if IS_ENABLED(CONFIG_IR_JVC_DECODER) +#define __RC_PROTO_JVC_CODEC RC_PROTO_BIT_JVC +#else +#define __RC_PROTO_JVC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SONY_DECODER) +#define __RC_PROTO_SONY_CODEC \ + (RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20) +#else +#define __RC_PROTO_SONY_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_NEC_DECODER) +#define __RC_PROTO_NEC_CODEC \ + (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32) +#else +#define __RC_PROTO_NEC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SANYO_DECODER) +#define __RC_PROTO_SANYO_CODEC RC_PROTO_BIT_SANYO +#else +#define __RC_PROTO_SANYO_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_MCE_KBD_DECODER) +#define __RC_PROTO_MCE_KBD_CODEC \ + (RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE) +#else +#define __RC_PROTO_MCE_KBD_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RC6_DECODER) +#define __RC_PROTO_RC6_CODEC \ + (RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ + RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \ + RC_PROTO_BIT_RC6_MCE) +#else +#define __RC_PROTO_RC6_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SHARP_DECODER) +#define __RC_PROTO_SHARP_CODEC RC_PROTO_BIT_SHARP +#else +#define __RC_PROTO_SHARP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_XMP_DECODER) +#define __RC_PROTO_XMP_CODEC RC_PROTO_BIT_XMP +#else +#define __RC_PROTO_XMP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_IMON_DECODER) +#define __RC_PROTO_IMON_CODEC RC_PROTO_BIT_IMON +#else +#define __RC_PROTO_IMON_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RCMM_DECODER) +#define __RC_PROTO_RCMM_CODEC \ + 
(RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32) +#else +#define __RC_PROTO_RCMM_CODEC 0 +#endif + +/* All kernel-based codecs have encoders and decoders */ +#define RC_PROTO_BIT_ALL_IR_DECODER \ + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) + +#define RC_PROTO_BIT_ALL_IR_ENCODER \ + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) + +#define RC_SCANCODE_UNKNOWN(x) (x) +#define RC_SCANCODE_OTHER(x) (x) +#define RC_SCANCODE_NEC(addr, cmd) (((addr) << 8) | (cmd)) +#define RC_SCANCODE_NECX(addr, cmd) (((addr) << 8) | (cmd)) +#define RC_SCANCODE_NEC32(data) ((data) & 0xffffffff) +#define RC_SCANCODE_RC5(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC5_SZ(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC6_0(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC6_6A(vendor, sys, cmd) (((vendor) << 16) | ((sys) << 8) | (cmd)) + +/** + * struct rc_map_table - represents a scancode/keycode pair + * + * @scancode: scan code (u64) + * @keycode: Linux input keycode + */ +struct rc_map_table { + u64 scancode; + u32 keycode; +}; + +/** + * struct rc_map - represents a keycode map table + * + * @scan: pointer to struct &rc_map_table + * @size: Max number of entries + * @len: Number of entries that are in use + * @alloc: size of \*scan, in bytes + * @rc_proto: type of the remote controller protocol, as defined at + * enum &rc_proto + * @name: name of the key map table + * @lock: lock to protect access to this structure + */ +struct rc_map { + struct rc_map_table *scan; + unsigned int 
size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +/** + * struct rc_map_list - list of the registered &rc_map maps + * + * @list: pointer to struct &list_head + * @map: pointer to struct &rc_map + */ +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +#ifdef CONFIG_MEDIA_CEC_RC +/* + * rc_map_list from rc-cec.c + */ +extern struct rc_map_list cec_map; +#endif + +/* Routines from rc-map.c */ + +/** + * rc_map_register() - Registers a Remote Controller scancode map + * + * @map: pointer to struct rc_map_list + */ +int rc_map_register(struct rc_map_list *map); + +/** + * rc_map_unregister() - Unregisters a Remote Controller scancode map + * + * @map: pointer to struct rc_map_list + */ +void rc_map_unregister(struct rc_map_list *map); + +/** + * rc_map_get - gets an RC map from its name + * @name: name of the RC scancode map + */ +struct rc_map *rc_map_get(const char *name); + +/* Names of the several keytables defined in-kernel */ + +#define RC_MAP_ADSTECH_DVB_T_PCI "rc-adstech-dvb-t-pci" +#define RC_MAP_ALINK_DTU_M "rc-alink-dtu-m" +#define RC_MAP_ANYSEE "rc-anysee" +#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp" +#define RC_MAP_ASTROMETA_T2HYBRID "rc-astrometa-t2hybrid" +#define RC_MAP_ASUS_PC39 "rc-asus-pc39" +#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100" +#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600" +#define RC_MAP_ATI_X10 "rc-ati-x10" +#define RC_MAP_AVERMEDIA "rc-avermedia" +#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d" +#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus" +#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt" +#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a" +#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6" +#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks" +#define RC_MAP_AVERTV_303 "rc-avertv-303" +#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700" +#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1" +#define 
RC_MAP_BEELINK_MXIII "rc-beelink-mxiii" +#define RC_MAP_BEHOLD "rc-behold" +#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus" +#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old" +#define RC_MAP_CEC "rc-cec" +#define RC_MAP_CINERGY "rc-cinergy" +#define RC_MAP_CINERGY_1400 "rc-cinergy-1400" +#define RC_MAP_CT_90405 "rc-ct-90405" +#define RC_MAP_D680_DMB "rc-d680-dmb" +#define RC_MAP_DELOCK_61959 "rc-delock-61959" +#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec" +#define RC_MAP_DIB0700_RC5_TABLE "rc-dib0700-rc5" +#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin" +#define RC_MAP_DIGITTRADE "rc-digittrade" +#define RC_MAP_DM1105_NEC "rc-dm1105-nec" +#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" +#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" +#define RC_MAP_DREAMBOX "rc-dreambox" +#define RC_MAP_DTT200U "rc-dtt200u" +#define RC_MAP_DVBSKY "rc-dvbsky" +#define RC_MAP_DVICO_MCE "rc-dvico-mce" +#define RC_MAP_DVICO_PORTABLE "rc-dvico-portable" +#define RC_MAP_EMPTY "rc-empty" +#define RC_MAP_EM_TERRATEC "rc-em-terratec" +#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv" +#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2" +#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53" +#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube" +#define RC_MAP_EZTV "rc-eztv" +#define RC_MAP_FLYDVB "rc-flydvb" +#define RC_MAP_FLYVIDEO "rc-flyvideo" +#define RC_MAP_FUSIONHDTV_MCE "rc-fusionhdtv-mce" +#define RC_MAP_GADMEI_RM008Z "rc-gadmei-rm008z" +#define RC_MAP_GEEKBOX "rc-geekbox" +#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce" +#define RC_MAP_GOTVIEW7135 "rc-gotview7135" +#define RC_MAP_HAUPPAUGE "rc-hauppauge" +#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge" +#define RC_MAP_HISI_POPLAR "rc-hisi-poplar" +#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo" +#define RC_MAP_IMON_MCE "rc-imon-mce" +#define RC_MAP_IMON_PAD "rc-imon-pad" +#define RC_MAP_IMON_RSC "rc-imon-rsc" +#define RC_MAP_IODATA_BCTV7E "rc-iodata-bctv7e" +#define RC_MAP_IT913X_V1 "rc-it913x-v1" +#define 
RC_MAP_IT913X_V2 "rc-it913x-v2" +#define RC_MAP_KAIOMY "rc-kaiomy" +#define RC_MAP_KHADAS "rc-khadas" +#define RC_MAP_KHAMSIN "rc-khamsin" +#define RC_MAP_KWORLD_315U "rc-kworld-315u" +#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u" +#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog" +#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051" +#define RC_MAP_LME2510 "rc-lme2510" +#define RC_MAP_MANLI "rc-manli" +#define RC_MAP_MECOOL_KII_PRO "rc-mecool-kii-pro" +#define RC_MAP_MECOOL_KIII_PRO "rc-mecool-kiii-pro" +#define RC_MAP_MEDION_X10 "rc-medion-x10" +#define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer" +#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x" +#define RC_MAP_MINIX_NEO "rc-minix-neo" +#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii" +#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii" +#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere" +#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus" +#define RC_MAP_MYGICA_UTV3 "rc-mygica-utv3" +#define RC_MAP_NEBULA "rc-nebula" +#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs" +#define RC_MAP_NORWOOD "rc-norwood" +#define RC_MAP_NPGTECH "rc-npgtech" +#define RC_MAP_ODROID "rc-odroid" +#define RC_MAP_PCTV_SEDNA "rc-pctv-sedna" +#define RC_MAP_PINE64 "rc-pine64" +#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color" +#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey" +#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd" +#define RC_MAP_PIXELVIEW "rc-pixelview" +#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t" +#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12" +#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new" +#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel" +#define RC_MAP_PROTEUS_2309 "rc-proteus-2309" +#define RC_MAP_PURPLETV "rc-purpletv" +#define RC_MAP_PV951 "rc-pv951" +#define RC_MAP_RC5_TV "rc-rc5-tv" +#define RC_MAP_RC6_MCE "rc-rc6-mce" +#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys" +#define RC_MAP_REDDO "rc-reddo" +#define 
RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" +#define RC_MAP_STREAMZAP "rc-streamzap" +#define RC_MAP_SU3000 "rc-su3000" +#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini" +#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max" +#define RC_MAP_TBS_NEC "rc-tbs-nec" +#define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35" +#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2" +#define RC_MAP_TERRATEC_CINERGY_C_PCI "rc-terratec-cinergy-c-pci" +#define RC_MAP_TERRATEC_CINERGY_S2_HD "rc-terratec-cinergy-s2-hd" +#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs" +#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim" +#define RC_MAP_TERRATEC_SLIM_2 "rc-terratec-slim-2" +#define RC_MAP_TEVII_NEC "rc-tevii-nec" +#define RC_MAP_TIVO "rc-tivo" +#define RC_MAP_TOTAL_MEDIA_IN_HAND "rc-total-media-in-hand" +#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02" +#define RC_MAP_TREKSTOR "rc-trekstor" +#define RC_MAP_TT_1500 "rc-tt-1500" +#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci" +#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027" +#define RC_MAP_VEGA_S9X "rc-vega-s9x" +#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" +#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" +#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" +#define RC_MAP_KII_PRO "rc-videostrong-kii-pro" +#define RC_MAP_WETEK_HUB "rc-wetek-hub" +#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2" +#define RC_MAP_WINFAST "rc-winfast" +#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" +#define RC_MAP_X96MAX "rc-x96max" +#define RC_MAP_XBOX_360 "rc-xbox-360" +#define RC_MAP_XBOX_DVD "rc-xbox-dvd" +#define RC_MAP_ZX_IRDEC "rc-zx-irdec" + +/* + * Please, do not just append newer Remote Controller names at the end. 
+ * The names should be ordered in alphabetical order + */ + +#endif /* _MEDIA_RC_MAP_H */ diff --git a/6.12.0/include-overrides/media/rcar-fcp.h b/6.12.0/include-overrides/media/rcar-fcp.h new file mode 100644 index 0000000..179240f --- /dev/null +++ b/6.12.0/include-overrides/media/rcar-fcp.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rcar-fcp.h -- R-Car Frame Compression Processor Driver + * + * Copyright (C) 2016 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ +#ifndef __MEDIA_RCAR_FCP_H__ +#define __MEDIA_RCAR_FCP_H__ + +struct device_node; +struct rcar_fcp_device; + +#if IS_ENABLED(CONFIG_VIDEO_RENESAS_FCP) +struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np); +void rcar_fcp_put(struct rcar_fcp_device *fcp); +struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp); +int rcar_fcp_enable(struct rcar_fcp_device *fcp); +void rcar_fcp_disable(struct rcar_fcp_device *fcp); +#else +static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np) +{ + return ERR_PTR(-ENOENT); +} +static inline void rcar_fcp_put(struct rcar_fcp_device *fcp) { } +static inline struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp) +{ + return NULL; +} +static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp) +{ + return 0; +} +static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { } +#endif + +#endif /* __MEDIA_RCAR_FCP_H__ */ diff --git a/6.12.0/include-overrides/media/tuner-types.h b/6.12.0/include-overrides/media/tuner-types.h new file mode 100644 index 0000000..df76ac8 --- /dev/null +++ b/6.12.0/include-overrides/media/tuner-types.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * descriptions for simple tuners. 
+ */ + +#ifndef __TUNER_TYPES_H__ +#define __TUNER_TYPES_H__ + +/** + * enum param_type - type of the tuner pameters + * + * @TUNER_PARAM_TYPE_RADIO: Tuner params are for FM and/or AM radio + * @TUNER_PARAM_TYPE_PAL: Tuner params are for PAL color TV standard + * @TUNER_PARAM_TYPE_SECAM: Tuner params are for SECAM color TV standard + * @TUNER_PARAM_TYPE_NTSC: Tuner params are for NTSC color TV standard + * @TUNER_PARAM_TYPE_DIGITAL: Tuner params are for digital TV + */ +enum param_type { + TUNER_PARAM_TYPE_RADIO, + TUNER_PARAM_TYPE_PAL, + TUNER_PARAM_TYPE_SECAM, + TUNER_PARAM_TYPE_NTSC, + TUNER_PARAM_TYPE_DIGITAL, +}; + +/** + * struct tuner_range - define the frequencies supported by the tuner + * + * @limit: Max frequency supported by that range, in 62.5 kHz + * (TV) or 62.5 Hz (Radio), as defined by + * V4L2_TUNER_CAP_LOW. + * @config: Value of the band switch byte (BB) to setup this mode. + * @cb: Value of the CB byte to setup this mode. + * + * Please notice that digital tuners like xc3028/xc4000/xc5000 don't use + * those ranges, as they're defined inside the driver. This is used by + * analog tuners that are compatible with the "Philips way" to setup the + * tuners. On those devices, the tuner set is done via 4 bytes: + * + * #) divider byte1 (DB1) + * #) divider byte 2 (DB2) + * #) Control byte (CB) + * #) band switch byte (BB) + * + * Some tuners also have an additional optional Auxiliary byte (AB). + */ +struct tuner_range { + unsigned short limit; + unsigned char config; + unsigned char cb; +}; + +/** + * struct tuner_params - Parameters to be used to setup the tuner. Those + * are used by drivers/media/tuners/tuner-types.c in + * order to specify the tuner properties. Most of + * the parameters are for tuners based on tda9887 IF-PLL + * multi-standard analog TV/Radio demodulator, with is + * very common on legacy analog tuners. + * + * @type: Type of the tuner parameters, as defined at + * enum param_type. 
If the tuner supports multiple + * standards, an array should be used, with one + * row per different standard. + * @cb_first_if_lower_freq: Many Philips-based tuners have a comment in + * their datasheet like + * "For channel selection involving band + * switching, and to ensure smooth tuning to the + * desired channel without causing unnecessary + * charge pump action, it is recommended to + * consider the difference between wanted channel + * frequency and the current channel frequency. + * Unnecessary charge pump action will result + * in very low tuning voltage which may drive the + * oscillator to extreme conditions". + * Set cb_first_if_lower_freq to 1, if this check + * is required for this tuner. I tested this for + * PAL by first setting the TV frequency to + * 203 MHz and then switching to 96.6 MHz FM + * radio. The result was static unless the + * control byte was sent first. + * @has_tda9887: Set to 1 if this tuner uses a tda9887 + * @port1_fm_high_sensitivity: Many Philips tuners use tda9887 PORT1 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT1 to 1 to get proper FM reception. + * @port2_fm_high_sensitivity: Some Philips tuners use tda9887 PORT2 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT2 to 1 to get proper FM reception. + * @fm_gain_normal: Some Philips tuners use tda9887 cGainNormal to + * select the FM radio sensitivity. If this + * setting is 1, e register will use cGainNormal + * instead of cGainLow. + * @intercarrier_mode: Most tuners with a tda9887 use QSS mode. + * Some (cheaper) tuners use Intercarrier mode. + * If this setting is 1, then the tuner needs to + * be set to intercarrier mode. + * @port1_active: This setting sets the default value for PORT1. + * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B6 bit. + * @port2_active: This setting sets the default value for PORT2. 
+ * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B7 bit. + * @port1_invert_for_secam_lc: Sometimes PORT1 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port2_invert_for_secam_lc: Sometimes PORT2 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port1_set_for_fm_mono: Some cards require PORT1 to be 1 for mono Radio + * FM and 0 for stereo. + * @default_pll_gating_18: Select 18% (or according to datasheet 0%) + * L standard PLL gating, vs the driver default + * of 36%. + * @radio_if: IF to use in radio mode. Tuners with a + * separate radio IF filter seem to use 10.7, + * while those without use 33.3 for PAL/SECAM + * tuners and 41.3 for NTSC tuners. + * 0 = 10.7, 1 = 33.3, 2 = 41.3 + * @default_top_low: Default tda9887 TOP value in dB for the low + * band. Default is 0. Range: -16:+15 + * @default_top_mid: Default tda9887 TOP value in dB for the mid + * band. Default is 0. Range: -16:+15 + * @default_top_high: Default tda9887 TOP value in dB for the high + * band. Default is 0. Range: -16:+15 + * @default_top_secam_low: Default tda9887 TOP value in dB for SECAM-L/L' + * for the low band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_mid: Default tda9887 TOP value in dB for SECAM-L/L' + * for the mid band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_high: Default tda9887 TOP value in dB for SECAM-L/L' + * for the high band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @iffreq: Intermediate frequency (IF) used by the tuner + * on digital mode. + * @count: Size of the ranges array. 
+ * @ranges: Array with the frequency ranges supported by + * the tuner. + */ +struct tuner_params { + enum param_type type; + + unsigned int cb_first_if_lower_freq:1; + unsigned int has_tda9887:1; + unsigned int port1_fm_high_sensitivity:1; + unsigned int port2_fm_high_sensitivity:1; + unsigned int fm_gain_normal:1; + unsigned int intercarrier_mode:1; + unsigned int port1_active:1; + unsigned int port2_active:1; + unsigned int port1_invert_for_secam_lc:1; + unsigned int port2_invert_for_secam_lc:1; + unsigned int port1_set_for_fm_mono:1; + unsigned int default_pll_gating_18:1; + unsigned int radio_if:2; + signed int default_top_low:5; + signed int default_top_mid:5; + signed int default_top_high:5; + signed int default_top_secam_low:5; + signed int default_top_secam_mid:5; + signed int default_top_secam_high:5; + + u16 iffreq; + + unsigned int count; + struct tuner_range *ranges; +}; + +/** + * struct tunertype - describes the known tuners. + * + * @name: string with the tuner's name. + * @count: size of &struct tuner_params array. + * @params: pointer to &struct tuner_params array. + * + * @min: minimal tuner frequency, in 62.5 kHz step. + * should be multiplied to 16 to convert to MHz. + * @max: minimal tuner frequency, in 62.5 kHz step. + * Should be multiplied to 16 to convert to MHz. + * @stepsize: frequency step, in Hz. + * @initdata: optional byte sequence to initialize the tuner. + * @sleepdata: optional byte sequence to power down the tuner. 
+ */ +struct tunertype { + char *name; + unsigned int count; + struct tuner_params *params; + + u16 min; + u16 max; + u32 stepsize; + + u8 *initdata; + u8 *sleepdata; +}; + +extern struct tunertype tuners[]; +extern unsigned const int tuner_count; + +#endif diff --git a/6.12.0/include-overrides/media/tuner.h b/6.12.0/include-overrides/media/tuner.h new file mode 100644 index 0000000..c5fd6fa --- /dev/null +++ b/6.12.0/include-overrides/media/tuner.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * tuner.h - definition for different tuners + * + * Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) + * minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de) + */ + +#ifndef _TUNER_H +#define _TUNER_H +#ifdef __KERNEL__ + +#include +#include + +#define ADDR_UNSET (255) + +#define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ +#define TUNER_PHILIPS_PAL_I 1 +#define TUNER_PHILIPS_NTSC 2 +#define TUNER_PHILIPS_SECAM 3 /* you must actively select B/G, L, L` */ + +#define TUNER_ABSENT 4 +#define TUNER_PHILIPS_PAL 5 +#define TUNER_TEMIC_NTSC 6 /* 4032 FY5 (3X 7004, 9498, 9789) */ +#define TUNER_TEMIC_PAL_I 7 /* 4062 FY5 (3X 8501, 9957) */ + +#define TUNER_TEMIC_4036FY5_NTSC 8 /* 4036 FY5 (3X 1223, 1981, 7686) */ +#define TUNER_ALPS_TSBH1_NTSC 9 +#define TUNER_ALPS_TSBE1_PAL 10 +#define TUNER_ALPS_TSBB5_PAL_I 11 + +#define TUNER_ALPS_TSBE5_PAL 12 +#define TUNER_ALPS_TSBC5_PAL 13 +#define TUNER_TEMIC_4006FH5_PAL 14 /* 4006 FH5 (3X 9500, 9501, 7291) */ +#define TUNER_ALPS_TSHC6_NTSC 15 + +#define TUNER_TEMIC_PAL_DK 16 /* 4016 FY5 (3X 1392, 1393) */ +#define TUNER_PHILIPS_NTSC_M 17 +#define TUNER_TEMIC_4066FY5_PAL_I 18 /* 4066 FY5 (3X 7032, 7035) */ +#define TUNER_TEMIC_4006FN5_MULTI_PAL 19 /* B/G, I and D/K autodetected (3X 7595, 7606, 7657) */ + +#define TUNER_TEMIC_4009FR5_PAL 20 /* incl. FM radio (3X 7607, 7488, 7711) */ +#define TUNER_TEMIC_4039FR5_NTSC 21 /* incl. 
FM radio (3X 7246, 7578, 7732) */ +#define TUNER_TEMIC_4046FM5 22 /* you must actively select B/G, D/K, I, L, L` ! (3X 7804, 7806, 8103, 8104) */ +#define TUNER_PHILIPS_PAL_DK 23 + +#define TUNER_PHILIPS_FQ1216ME 24 /* you must actively select B/G/D/K, I, L, L` */ +#define TUNER_LG_PAL_I_FM 25 +#define TUNER_LG_PAL_I 26 +#define TUNER_LG_NTSC_FM 27 + +#define TUNER_LG_PAL_FM 28 +#define TUNER_LG_PAL 29 +#define TUNER_TEMIC_4009FN5_MULTI_PAL_FM 30 /* B/G, I and D/K autodetected (3X 8155, 8160, 8163) */ +#define TUNER_SHARP_2U5JF5540_NTSC 31 + +#define TUNER_Samsung_PAL_TCPM9091PD27 32 +#define TUNER_MT2032 33 +#define TUNER_TEMIC_4106FH5 34 /* 4106 FH5 (3X 7808, 7865) */ +#define TUNER_TEMIC_4012FY5 35 /* 4012 FY5 (3X 0971, 1099) */ + +#define TUNER_TEMIC_4136FY5 36 /* 4136 FY5 (3X 7708, 7746) */ +#define TUNER_LG_PAL_NEW_TAPC 37 +#define TUNER_PHILIPS_FM1216ME_MK3 38 +#define TUNER_LG_NTSC_NEW_TAPC 39 + +#define TUNER_HITACHI_NTSC 40 +#define TUNER_PHILIPS_PAL_MK 41 +#define TUNER_PHILIPS_FCV1236D 42 +#define TUNER_PHILIPS_FM1236_MK3 43 + +#define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */ + /* + * Microtune merged with Temic 12/31/1999 partially financed by Alps. 
+ * these may be similar to Temic + */ +#define TUNER_MICROTUNE_4049FM5 45 +#define TUNER_PANASONIC_VP27 46 +#define TUNER_LG_NTSC_TAPE 47 + +#define TUNER_TNF_8831BGFF 48 +#define TUNER_MICROTUNE_4042FI5 49 /* DViCO FusionHDTV 3 Gold-Q - 4042 FI5 (3X 8147) */ +#define TUNER_TCL_2002N 50 +#define TUNER_PHILIPS_FM1256_IH3 51 + +#define TUNER_THOMSON_DTT7610 52 +#define TUNER_PHILIPS_FQ1286 53 +#define TUNER_PHILIPS_TDA8290 54 +#define TUNER_TCL_2002MB 55 /* Hauppauge PVR-150 PAL */ + +#define TUNER_PHILIPS_FQ1216AME_MK4 56 /* Hauppauge PVR-150 PAL */ +#define TUNER_PHILIPS_FQ1236A_MK4 57 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_YMEC_TVF_8531MF 58 +#define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */ + +#define TUNER_THOMSON_DTT761X 60 /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */ +#define TUNER_TENA_9533_DI 61 +#define TUNER_TEA5767 62 /* Only FM Radio Tuner */ +#define TUNER_PHILIPS_FMD1216ME_MK3 63 + +#define TUNER_LG_TDVS_H06XF 64 /* TDVS H061F, H062F, H064F */ +#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ +#define TUNER_LG_TALN 66 +#define TUNER_PHILIPS_TD1316 67 + +#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ +#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ +#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_XC2028 71 + +#define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ +#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ +#define TUNER_TDA9887 74 /* This tuner should be used only internally */ +#define TUNER_TEA5761 75 /* Only FM Radio Tuner */ +#define TUNER_XC5000 76 /* Xceive Silicon Tuner */ +#define TUNER_TCL_MF02GIP_5N 77 /* TCL MF02GIP_5N */ +#define TUNER_PHILIPS_FMD1216MEX_MK3 78 +#define TUNER_PHILIPS_FM1216MK5 79 +#define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */ + +#define TUNER_PARTSNIC_PTI_5NF05 81 +#define TUNER_PHILIPS_CU1216L 82 +#define TUNER_NXP_TDA18271 83 +#define TUNER_SONY_BTF_PXN01Z 84 +#define 
TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */ +#define TUNER_TENA_TNF_5337 86 + +#define TUNER_XC4000 87 /* Xceive Silicon Tuner */ +#define TUNER_XC5000C 88 /* Xceive Silicon Tuner */ + +#define TUNER_SONY_BTF_PG472Z 89 /* PAL+SECAM */ +#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */ +#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */ +#define TUNER_SI2157 92 +#define TUNER_TENA_TNF_931D_DFDR1 93 + +/* tv card specific */ +#define TDA9887_PRESENT (1<<0) +#define TDA9887_PORT1_INACTIVE (1<<1) +#define TDA9887_PORT2_INACTIVE (1<<2) +#define TDA9887_QSS (1<<3) +#define TDA9887_INTERCARRIER (1<<4) +#define TDA9887_PORT1_ACTIVE (1<<5) +#define TDA9887_PORT2_ACTIVE (1<<6) +#define TDA9887_INTERCARRIER_NTSC (1<<7) +/* Tuner takeover point adjustment, in dB, -16 <= top <= 15 */ +#define TDA9887_TOP_MASK (0x3f << 8) +#define TDA9887_TOP_SET (1 << 13) +#define TDA9887_TOP(top) (TDA9887_TOP_SET | \ + (((16 + (top)) & 0x1f) << 8)) + +/* config options */ +#define TDA9887_DEEMPHASIS_MASK (3<<16) +#define TDA9887_DEEMPHASIS_NONE (1<<16) +#define TDA9887_DEEMPHASIS_50 (2<<16) +#define TDA9887_DEEMPHASIS_75 (3<<16) +#define TDA9887_AUTOMUTE (1<<18) +#define TDA9887_GATING_18 (1<<19) +#define TDA9887_GAIN_NORMAL (1<<20) +#define TDA9887_RIF_41_3 (1<<21) /* radio IF1 41.3 vs 33.3 */ + +/** + * enum tuner_mode - Mode of the tuner + * + * @T_RADIO: Tuner core will work in radio mode + * @T_ANALOG_TV: Tuner core will work in analog TV mode + * + * Older boards only had a single tuner device, but some devices have a + * separate tuner for radio. In any case, the tuner-core needs to know if + * the tuner chip(s) will be used in radio mode or analog TV mode, as, on + * radio mode, frequencies are specified on a different range than on TV + * mode. This enum is used by the tuner core in order to work with the + * proper tuner range and eventually use a different tuner chip while in + * radio mode. 
+ */ +enum tuner_mode { + T_RADIO = 1 << V4L2_TUNER_RADIO, + T_ANALOG_TV = 1 << V4L2_TUNER_ANALOG_TV, + /* Don't map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */ +}; + +/** + * struct tuner_setup - setup the tuner chipsets + * + * @addr: I2C address used to control the tuner device/chipset + * @type: Type of the tuner, as defined at the TUNER_* macros. + * Each different tuner model should have an unique + * identifier. + * @mode_mask: Mask with the allowed tuner modes: V4L2_TUNER_RADIO, + * V4L2_TUNER_ANALOG_TV and/or V4L2_TUNER_DIGITAL_TV, + * describing if the tuner should be used to support + * Radio, analog TV and/or digital TV. + * @config: Used to send tuner-specific configuration for complex + * tuners that require extra parameters to be set. + * Only a very few tuners require it and its usage on + * newer tuners should be avoided. + * @tuner_callback: Some tuners require to call back the bridge driver, + * in order to do some tasks like rising a GPIO at the + * bridge chipset, in order to do things like resetting + * the device. + * + * Older boards only had a single tuner device. Nowadays multiple tuner + * devices may be present on a single board. Using TUNER_SET_TYPE_ADDR + * to pass the tuner_setup structure it is possible to setup each tuner + * device in turn. + * + * Since multiple devices may be present it is no longer sufficient to + * send a command to a single i2c device. Instead you should broadcast + * the command to all i2c devices. + * + * By setting the mode_mask correctly you can select which commands are + * accepted by a specific tuner device. For example, set mode_mask to + * T_RADIO if the device is a radio-only tuner. That specific tuner will + * only accept commands when the tuner is in radio mode and ignore them + * when the tuner is set to TV mode. 
+ */ + +struct tuner_setup { + unsigned short addr; + unsigned int type; + unsigned int mode_mask; + void *config; + int (*tuner_callback)(void *dev, int component, int cmd, int arg); +}; + +#endif /* __KERNEL__ */ + +#endif /* _TUNER_H */ diff --git a/6.12.0/include-overrides/media/tveeprom.h b/6.12.0/include-overrides/media/tveeprom.h new file mode 100644 index 0000000..f37c9b1 --- /dev/null +++ b/6.12.0/include-overrides/media/tveeprom.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * tveeprom - Contains structures and functions to work with Hauppauge + * eeproms. + */ + +#include + +/** + * enum tveeprom_audio_processor - Specifies the type of audio processor + * used on a Hauppauge device. + * + * @TVEEPROM_AUDPROC_NONE: No audio processor present + * @TVEEPROM_AUDPROC_INTERNAL: The audio processor is internal to the + * video processor + * @TVEEPROM_AUDPROC_MSP: The audio processor is a MSPXXXX device + * @TVEEPROM_AUDPROC_OTHER: The audio processor is another device + */ +enum tveeprom_audio_processor { + TVEEPROM_AUDPROC_NONE, + TVEEPROM_AUDPROC_INTERNAL, + TVEEPROM_AUDPROC_MSP, + TVEEPROM_AUDPROC_OTHER, +}; + +/** + * struct tveeprom - Contains the fields parsed from Hauppauge eeproms + * + * @has_radio: 1 if the device has radio; 0 otherwise. + * + * @has_ir: If has_ir == 0, then it is unknown what the IR + * capabilities are. Otherwise: + * bit 0) 1 (= IR capabilities are known); + * bit 1) IR receiver present; + * bit 2) IR transmitter (blaster) present. + * + * @has_MAC_address: 0: no MAC, 1: MAC present, 2: unknown. + * @tuner_type: type of the tuner (TUNER_*, as defined at + * include/media/tuner.h). + * + * @tuner_formats: Supported analog TV standards (V4L2_STD_*). + * @tuner_hauppauge_model: Hauppauge's code for the device model number. + * @tuner2_type: type of the second tuner (TUNER_*, as defined + * at include/media/tuner.h). + * + * @tuner2_formats: Tuner 2 supported analog TV standards + * (V4L2_STD_*). 
+ * + * @tuner2_hauppauge_model: tuner 2 Hauppauge's code for the device model + * number. + * + * @audio_processor: analog audio decoder, as defined by enum + * tveeprom_audio_processor. + * + * @decoder_processor: Hauppauge's code for the decoder chipset. + * Unused by the drivers, as they probe the + * decoder based on the PCI or USB ID. + * + * @model: Hauppauge's model number + * + * @revision: Card revision number + * + * @serial_number: Card's serial number + * + * @rev_str: Card revision converted to number + * + * @MAC_address: MAC address for the network interface + */ +struct tveeprom { + u32 has_radio; + u32 has_ir; + u32 has_MAC_address; + + u32 tuner_type; + u32 tuner_formats; + u32 tuner_hauppauge_model; + + u32 tuner2_type; + u32 tuner2_formats; + u32 tuner2_hauppauge_model; + + u32 audio_processor; + u32 decoder_processor; + + u32 model; + u32 revision; + u32 serial_number; + char rev_str[5]; + u8 MAC_address[ETH_ALEN]; +}; + +/** + * tveeprom_hauppauge_analog - Fill struct tveeprom using the contents + * of the eeprom previously filled at + * @eeprom_data field. + * + * @tvee: Struct to where the eeprom parsed data will be filled; + * @eeprom_data: Array with the contents of the eeprom_data. It should + * contain 256 bytes filled with the contents of the + * eeprom read from the Hauppauge device. + */ +void tveeprom_hauppauge_analog(struct tveeprom *tvee, + unsigned char *eeprom_data); + +/** + * tveeprom_read - Reads the contents of the eeprom found at the Hauppauge + * devices. + * + * @c: I2C client struct + * @eedata: Array where the eeprom content will be stored. + * @len: Size of @eedata array. If the eeprom content will be latter + * be parsed by tveeprom_hauppauge_analog(), len should be, at + * least, 256. 
+ */ +int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); diff --git a/6.12.0/include-overrides/media/v4l2-async.h b/6.12.0/include-overrides/media/v4l2-async.h new file mode 100644 index 0000000..f26c323 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-async.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 asynchronous subdevice registration API + * + * Copyright (C) 2012-2013, Guennadi Liakhovetski + */ + +#ifndef V4L2_ASYNC_H +#define V4L2_ASYNC_H + +#include +#include + +struct dentry; +struct device; +struct device_node; +struct v4l2_device; +struct v4l2_subdev; +struct v4l2_async_notifier; + +/** + * enum v4l2_async_match_type - type of asynchronous subdevice logic to be used + * in order to identify a match + * + * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address + * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node + * + * This enum is used by the asynchronous connection logic to define the + * algorithm that will be used to match an asynchronous device. + */ +enum v4l2_async_match_type { + V4L2_ASYNC_MATCH_TYPE_I2C, + V4L2_ASYNC_MATCH_TYPE_FWNODE, +}; + +/** + * struct v4l2_async_match_desc - async connection match information + * + * @type: type of match that will be used + * @fwnode: pointer to &struct fwnode_handle to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE. + * @i2c: embedded struct with I2C parameters to be matched. + * Both @match.i2c.adapter_id and @match.i2c.address + * should be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. + * @i2c.adapter_id: + * I2C adapter ID to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. + * @i2c.address: + * I2C address to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. 
+ */ +struct v4l2_async_match_desc { + enum v4l2_async_match_type type; + union { + struct fwnode_handle *fwnode; + struct { + int adapter_id; + unsigned short address; + } i2c; + }; +}; + +/** + * struct v4l2_async_connection - sub-device connection descriptor, as known to + * a bridge + * + * @match: struct of match type and per-bus type matching data sets + * @notifier: the async notifier the connection is related to + * @asc_entry: used to add struct v4l2_async_connection objects to the + * notifier @waiting_list or @done_list + * @asc_subdev_entry: entry in struct v4l2_async_subdev.asc_list list + * @sd: the related sub-device + * + * When this struct is used as a member in a driver specific struct, the driver + * specific struct shall contain the &struct v4l2_async_connection as its first + * member. + */ +struct v4l2_async_connection { + struct v4l2_async_match_desc match; + struct v4l2_async_notifier *notifier; + struct list_head asc_entry; + struct list_head asc_subdev_entry; + struct v4l2_subdev *sd; +}; + +/** + * struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations + * @bound: a sub-device has been bound by the given connection + * @complete: All connections have been bound successfully. The complete + * callback is only executed for the root notifier. 
+ * @unbind: a subdevice is leaving + * @destroy: the asc is about to be freed + */ +struct v4l2_async_notifier_operations { + int (*bound)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc); + int (*complete)(struct v4l2_async_notifier *notifier); + void (*unbind)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc); + void (*destroy)(struct v4l2_async_connection *asc); +}; + +/** + * struct v4l2_async_notifier - v4l2_device notifier data + * + * @ops: notifier operations + * @v4l2_dev: v4l2_device of the root notifier, NULL otherwise + * @sd: sub-device that registered the notifier, NULL otherwise + * @parent: parent notifier + * @waiting_list: list of struct v4l2_async_connection, waiting for their + * drivers + * @done_list: list of struct v4l2_subdev, already probed + * @notifier_entry: member in a global list of notifiers + */ +struct v4l2_async_notifier { + const struct v4l2_async_notifier_operations *ops; + struct v4l2_device *v4l2_dev; + struct v4l2_subdev *sd; + struct v4l2_async_notifier *parent; + struct list_head waiting_list; + struct list_head done_list; + struct list_head notifier_entry; +}; + +/** + * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list + * + * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of + * &struct v4l2_subdev + * @endpoint: Endpoint fwnode agains which to match the sub-device + */ +struct v4l2_async_subdev_endpoint { + struct list_head async_subdev_endpoint_entry; + struct fwnode_handle *endpoint; +}; + +/** + * v4l2_async_debug_init - Initialize debugging tools. + * + * @debugfs_dir: pointer to the parent debugfs &struct dentry + */ +void v4l2_async_debug_init(struct dentry *debugfs_dir); + +/** + * v4l2_async_nf_init - Initialize a notifier. 
+ * + * @notifier: pointer to &struct v4l2_async_notifier + * @v4l2_dev: pointer to &struct v4l2_device + * + * This function initializes the notifier @asc_entry. It must be called + * before adding a subdevice to a notifier, using one of: + * v4l2_async_nf_add_fwnode_remote(), + * v4l2_async_nf_add_fwnode() or + * v4l2_async_nf_add_i2c(). + */ +void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev); + +/** + * v4l2_async_subdev_nf_init - Initialize a sub-device notifier. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @sd: pointer to &struct v4l2_subdev + * + * This function initializes the notifier @asc_list. It must be called + * before adding a subdevice to a notifier, using one of: + * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or + * v4l2_async_nf_add_i2c(). + */ +void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd); + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier, + struct fwnode_handle *fwnode, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_fwnode - Allocate and add a fwnode async + * subdev to the notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @fwnode: fwnode handle of the sub-device to be matched, pointer to + * &struct fwnode_handle + * @type: Type of the driver's async sub-device or connection struct. The + * &struct v4l2_async_connection shall be the first member of the + * driver's async struct, i.e. both begin at the same memory address. + * + * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the + * notifiers @asc_list. The function also gets a reference of the fwnode which + * is released later at notifier cleanup time. 
+ */ +#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \ + ((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type))) + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode + * remote async subdev to the + * notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @ep: local endpoint pointing to the remote connection to be matched, + * pointer to &struct fwnode_handle + * @type: Type of the driver's async connection struct. The &struct + * v4l2_async_connection shall be the first member of the driver's async + * connection struct, i.e. both begin at the same memory address. + * + * Gets the remote endpoint of a given local endpoint, set it up for fwnode + * matching and adds the async connection to the notifier's @asc_list. The + * function also gets a reference of the fwnode which is released later at + * notifier cleanup time. + * + * This is just like v4l2_async_nf_add_fwnode(), but with the + * exception that the fwnode refers to a local endpoint, not the remote one. + */ +#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \ + ((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type))) + +struct v4l2_async_connection * +__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, + int adapter_id, unsigned short address, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_i2c - Allocate and add an i2c async + * subdev to the notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @adapter: I2C adapter ID to be matched + * @address: I2C address of connection to be matched + * @type: Type of the driver's async connection struct. The &struct + * v4l2_async_connection shall be the first member of the driver's async + * connection struct, i.e. 
both begin at the same memory address. + * + * Same as v4l2_async_nf_add_fwnode() but for I2C matched + * connections. + */ +#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \ + ((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \ + sizeof(type))) + +/** + * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device + * matching list + * + * @sd: the sub-device + * @fwnode: the endpoint fwnode to match + * + * Add a fwnode to the async sub-device's matching list. This allows registering + * multiple async sub-devices from a single device. + * + * Note that calling v4l2_subdev_cleanup() as part of the sub-device's cleanup + * if endpoints have been added to the sub-device's fwnode matching list. + * + * Returns an error on failure, 0 on success. + */ +int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd, + struct fwnode_handle *fwnode); + +/** + * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection + * for a sub-device + * @sd: the sub-device + * + * Return an async connection for a sub-device, when there is a single + * one only. + */ +struct v4l2_async_connection * +v4l2_async_connection_unique(struct v4l2_subdev *sd); + +/** + * v4l2_async_nf_register - registers a subdevice asynchronous notifier + * + * @notifier: pointer to &struct v4l2_async_notifier + */ +int v4l2_async_nf_register(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_nf_unregister - unregisters a subdevice + * asynchronous notifier + * + * @notifier: pointer to &struct v4l2_async_notifier + */ +void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_nf_cleanup - clean up notifier resources + * @notifier: the notifier the resources of which are to be cleaned up + * + * Release memory resources related to a notifier, including the async + * connections allocated for the purposes of the notifier but not the notifier + * itself. 
The user is responsible for calling this function to clean up the + * notifier after calling v4l2_async_nf_add_fwnode_remote(), + * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c(). + * + * There is no harm from calling v4l2_async_nf_cleanup() in other + * cases as long as its memory has been zeroed after it has been + * allocated. + */ +void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_register_subdev - registers a sub-device to the asynchronous + * subdevice framework + * + * @sd: pointer to &struct v4l2_subdev + */ +#define v4l2_async_register_subdev(sd) \ + __v4l2_async_register_subdev(sd, THIS_MODULE) +int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module); + +/** + * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the + * asynchronous sub-device framework and + * parse set up common sensor related + * devices + * + * @sd: pointer to struct &v4l2_subdev + * + * This function is just like v4l2_async_register_subdev() with the exception + * that calling it will also parse firmware interfaces for remote references + * using v4l2_async_nf_parse_fwnode_sensor() and registers the + * async sub-devices. The sub-device is similarly unregistered by calling + * v4l2_async_unregister_subdev(). + * + * While registered, the subdev module is marked as in-use. + * + * An error is returned if the module is no longer loaded on any attempts + * to register it. 
+ */ +int __must_check +v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd); + +/** + * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous + * subdevice framework + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_async_unregister_subdev(struct v4l2_subdev *sd); +#endif diff --git a/6.12.0/include-overrides/media/v4l2-cci.h b/6.12.0/include-overrides/media/v4l2-cci.h new file mode 100644 index 0000000..4e96e90 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-cci.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MIPI Camera Control Interface (CCI) register access helpers. + * + * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com> + */ +#ifndef _V4L2_CCI_H +#define _V4L2_CCI_H + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/types.h> + +struct i2c_client; +struct regmap; + +/** + * struct cci_reg_sequence - An individual write from a sequence of CCI writes + * + * @reg: Register address, use CCI_REG#() macros to encode reg width + * @val: Register value + * + * Register/value pairs for sequences of writes. + */ +struct cci_reg_sequence { + u32 reg; + u64 val; +}; + +/* + * Macros to define register address with the register width encoded + * into the higher bits. + */ +#define CCI_REG_ADDR_MASK GENMASK(15, 0) +#define CCI_REG_WIDTH_SHIFT 16 +#define CCI_REG_WIDTH_MASK GENMASK(19, 16) +/* + * Private CCI register flags, for the use of drivers.
+ */ +#define CCI_REG_PRIVATE_SHIFT 28U +#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT) + +#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x) +#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3) +#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x) +#define CCI_REG_LE BIT(20) + +#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x)) + +/** + * cci_read() - Read a value from a single CCI register + * + * @map: Register map to read from + * @reg: Register address to read, use CCI_REG#() macros to encode reg width + * @val: Pointer to store read value + * @err: Optional pointer to store errors, if a previous error is set + * then the read will be skipped + * + * Return: %0 on success or a negative error code on failure. + */ +int cci_read(struct regmap *map, u32 reg, u64 *val, int *err); + +/** + * cci_write() - Write a value to a single CCI register + * + * @map: Register map to write to + * @reg: Register address to write, use CCI_REG#() macros to encode reg width + * @val: Value to be written + * @err: Optional pointer to store errors, if a previous error is set + * then the write will be skipped + * + * Return: %0 on success or a negative error code on failure. 
+ */ +int cci_write(struct regmap *map, u32 reg, u64 val, int *err); + +/** + * cci_update_bits() - Perform a read/modify/write cycle on + * a single CCI register + * + * @map: Register map to update + * @reg: Register address to update, use CCI_REG#() macros to encode reg width + * @mask: Bitmask to change + * @val: New value for bitmask + * @err: Optional pointer to store errors, if a previous error is set + * then the update will be skipped + * + * Note this uses read-modify-write to update the bits, atomicity with regards + * to other cci_*() register access functions is NOT guaranteed. + * + * Return: %0 on success or a negative error code on failure. + */ +int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err); + +/** + * cci_multi_reg_write() - Write multiple registers to the device + * + * @map: Register map to write to + * @regs: Array of structures containing register-address, -value pairs to be + * written, register-addresses use CCI_REG#() macros to encode reg width + * @num_regs: Number of registers to write + * @err: Optional pointer to store errors, if a previous error is set + * then the write will be skipped + * + * Write multiple registers to the device where the set of register, value + * pairs are supplied in any order, possibly not all in a single range. + * + * Use of the CCI_REG#() macros to encode reg width is mandatory. + * + * For raw lists of register-address, -value pairs with only 8 bit + * wide writes regmap_multi_reg_write() can be used instead. + * + * Return: %0 on success or a negative error code on failure. 
+ */ +int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs, + unsigned int num_regs, int *err); + +#if IS_ENABLED(CONFIG_V4L2_CCI_I2C) +/** + * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register + * access functions + * + * @client: i2c_client to create the regmap for + * @reg_addr_bits: register address width to use (8 or 16) + * + * Note the memory for the created regmap is devm() managed, tied to the client. + * + * Return: %0 on success or a negative error code on failure. + */ +struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client, + int reg_addr_bits); +#endif + +#endif diff --git a/6.12.0/include-overrides/media/v4l2-common.h b/6.12.0/include-overrides/media/v4l2-common.h new file mode 100644 index 0000000..63ad36f --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-common.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + v4l2 common internal API header + + This header contains internal shared ioctl definitions for use by the + internal low-level v4l2 drivers. + Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal + define, + + Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl> + + */ + +#ifndef V4L2_COMMON_H_ +#define V4L2_COMMON_H_ + +#include <linux/time.h> +#include <media/v4l2-dev.h> + +/* Common printk constructs for v4l-i2c drivers. These macros create a unique + prefix consisting of the driver name, the adapter number and the i2c + address. */ +#define v4l_printk(level, name, adapter, addr, fmt, arg...) \ + printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg) + +#define v4l_client_printk(level, client, fmt, arg...) \ + v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \ + (client)->addr, fmt , ## arg) + +#define v4l_err(client, fmt, arg...) \ + v4l_client_printk(KERN_ERR, client, fmt , ## arg) + +#define v4l_warn(client, fmt, arg...) \ + v4l_client_printk(KERN_WARNING, client, fmt , ## arg) + +#define v4l_info(client, fmt, arg...)
\ + v4l_client_printk(KERN_INFO, client, fmt , ## arg) + +/* These three macros assume that the debug level is set with a module + parameter called 'debug'. */ +#define v4l_dbg(level, debug, client, fmt, arg...) \ + do { \ + if (debug >= (level)) \ + v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \ + } while (0) + +/* Add a version of v4l_dbg to be used on drivers using dev_foo() macros */ +#define dev_dbg_lvl(__dev, __level, __debug, __fmt, __arg...) \ + do { \ + if (__debug >= (__level)) \ + dev_printk(KERN_DEBUG, __dev, __fmt, ##__arg); \ + } while (0) + +/* ------------------------------------------------------------------------- */ + +/* These printk constructs can be used with v4l2_device and v4l2_subdev */ +#define v4l2_printk(level, dev, fmt, arg...) \ + printk(level "%s: " fmt, (dev)->name , ## arg) + +#define v4l2_err(dev, fmt, arg...) \ + v4l2_printk(KERN_ERR, dev, fmt , ## arg) + +#define v4l2_warn(dev, fmt, arg...) \ + v4l2_printk(KERN_WARNING, dev, fmt , ## arg) + +#define v4l2_info(dev, fmt, arg...) \ + v4l2_printk(KERN_INFO, dev, fmt , ## arg) + +/* These three macros assume that the debug level is set with a module + parameter called 'debug'. */ +#define v4l2_dbg(level, debug, dev, fmt, arg...) \ + do { \ + if (debug >= (level)) \ + v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \ + } while (0) + +/** + * v4l2_ctrl_query_fill- Fill in a struct v4l2_queryctrl + * + * @qctrl: pointer to the &struct v4l2_queryctrl to be filled + * @min: minimum value for the control + * @max: maximum value for the control + * @step: control step + * @def: default value for the control + * + * Fills the &struct v4l2_queryctrl fields for the query control. + * + * .. note:: + * + * This function assumes that the @qctrl->id field is filled. + * + * Returns -EINVAL if the control is not known by the V4L2 core, 0 on success. 
+ */ + +int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, + s32 min, s32 max, s32 step, s32 def); + +/* ------------------------------------------------------------------------- */ + +struct v4l2_device; +struct v4l2_subdev; +struct v4l2_subdev_ops; + +/* I2C Helper functions */ +#include <linux/i2c.h> + +/** + * enum v4l2_i2c_tuner_type - specifies the range of tuner address that + * should be used when seeking for I2C devices. + * + * @ADDRS_RADIO: Radio tuner addresses. + * Represent the following I2C addresses: + * 0x10 (if compiled with tea5761 support) + * and 0x60. + * @ADDRS_DEMOD: Demod tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a and 0x4b. + * @ADDRS_TV: TV tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62, + * 0x63 and 0x64. + * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this + * excludes addresses used by the demodulator + * from the list of candidates. + * Represent the following I2C addresses: + * 0x60, 0x61, 0x62, 0x63 and 0x64. + * + * NOTE: All I2C addresses above use the 7-bit notation. + */ +enum v4l2_i2c_tuner_type { + ADDRS_RADIO, + ADDRS_DEMOD, + ADDRS_TV, + ADDRS_TV_WITH_DEMOD, +}; + +#if defined(CONFIG_VIDEO_V4L2_I2C) + +/** + * v4l2_i2c_new_subdev - Load an i2c module and return an initialized + * &struct v4l2_subdev. + * + * @v4l2_dev: pointer to &struct v4l2_device + * @adapter: pointer to struct i2c_adapter + * @client_type: name of the chip that's on the adapter. + * @addr: I2C address. If zero, it will use @probe_addrs + * @probe_addrs: array with a list of address. The last entry at such + * array should be %I2C_CLIENT_END. + * + * returns a &struct v4l2_subdev pointer.
+ */ +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs); + +/** + * v4l2_i2c_new_subdev_board - Load an i2c module and return an initialized + * &struct v4l2_subdev. + * + * @v4l2_dev: pointer to &struct v4l2_device + * @adapter: pointer to struct i2c_adapter + * @info: pointer to struct i2c_board_info used to replace the irq, + * platform_data and addr arguments. + * @probe_addrs: array with a list of address. The last entry at such + * array should be %I2C_CLIENT_END. + * + * returns a &struct v4l2_subdev pointer. + */ +struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs); + +/** + * v4l2_i2c_subdev_set_name - Set name for an I²C sub-device + * + * @sd: pointer to &struct v4l2_subdev + * @client: pointer to struct i2c_client + * @devname: the name of the device; if NULL, the I²C device drivers's name + * will be used + * @postfix: sub-device specific string to put right after the I²C device name; + * may be NULL + */ +void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix); + +/** + * v4l2_i2c_subdev_init - Initializes a &struct v4l2_subdev with data from + * an i2c_client struct. + * + * @sd: pointer to &struct v4l2_subdev + * @client: pointer to struct i2c_client + * @ops: pointer to &struct v4l2_subdev_ops + */ +void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops); + +/** + * v4l2_i2c_subdev_addr - returns i2c client address of &struct v4l2_subdev. + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the address of an I2C sub-device + */ +unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd); + +/** + * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe. 
+ * + * @type: type of the tuner to seek, as defined by + * &enum v4l2_i2c_tuner_type. + * + * NOTE: Use only if the tuner addresses are unknown. + */ +const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); + +/** + * v4l2_i2c_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline void +v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix) +{} + +static inline void +v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{} + +static inline unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + return I2C_CLIENT_END; +} + +static inline const unsigned short * +v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + return NULL; +} + +static inline void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{} + +#endif + +/* ------------------------------------------------------------------------- */ + +/* SPI Helper functions */ + +#include <linux/spi/spi.h> + +#if defined(CONFIG_SPI) + +/** + * v4l2_spi_new_subdev - Load an spi module and return an initialized + * &struct v4l2_subdev. + * + * + * @v4l2_dev: pointer to &struct v4l2_device. + * @ctlr: pointer to struct spi_controller. + * @info: pointer to struct spi_board_info. + * + * returns a &struct v4l2_subdev pointer.
+ */ +struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, struct spi_board_info *info); + +/** + * v4l2_spi_subdev_init - Initialize a v4l2_subdev with data from an + * spi_device struct. + * + * @sd: pointer to &struct v4l2_subdev + * @spi: pointer to struct spi_device. + * @ops: pointer to &struct v4l2_subdev_ops + */ +void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops); + +/** + * v4l2_spi_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, struct spi_board_info *info) +{ + return NULL; +} + +static inline void +v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{} + +static inline void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{} +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * FIXME: these remaining ioctls/structs should be removed as well, but they + * are still used in tuner-simple.c (TUNER_SET_CONFIG) and cx18/ivtv (RESET). + * To remove these ioctls some more cleanup is needed in those modules. + * + * It doesn't make much sense on documenting them, as what we really want is + * to get rid of them. + */ + +/* s_config */ +struct v4l2_priv_tun_config { + int tuner; + void *priv; +}; +#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) + +#define VIDIOC_INT_RESET _IOW ('d', 102, u32) + +/* ------------------------------------------------------------------------- */ + +/* Miscellaneous helper functions */ + +/** + * v4l_bound_align_image - adjust video dimensions according to + * a given constraints. + * + * @width: pointer to width that will be adjusted if needed. + * @wmin: minimum width. 
+ * @wmax: maximum width. + * @walign: least significant bit on width. + * @height: pointer to height that will be adjusted if needed. + * @hmin: minimum height. + * @hmax: maximum height. + * @halign: least significant bit on height. + * @salign: least significant bit for the image size (e. g. + * :math:`width * height`). + * + * Clip an image to have @width between @wmin and @wmax, and @height between + * @hmin and @hmax, inclusive. + * + * Additionally, the @width will be a multiple of :math:`2^{walign}`, + * the @height will be a multiple of :math:`2^{halign}`, and the overall + * size :math:`width * height` will be a multiple of :math:`2^{salign}`. + * + * .. note:: + * + * #. The clipping rectangle may be shrunk or enlarged to fit the alignment + * constraints. + * #. @wmax must not be smaller than @wmin. + * #. @hmax must not be smaller than @hmin. + * #. The alignments must not be so high there are no possible image + * sizes within the allowed bounds. + * #. @wmin and @hmin must be at least 1 (don't use 0). + * #. For @walign, @halign and @salign, if you don't care about a certain + * alignment, specify ``0``, as :math:`2^0 = 1` and one byte alignment + * is equivalent to no alignment. + * #. If you only want to adjust downward, specify a maximum that's the + * same as the initial value. + */ +void v4l_bound_align_image(unsigned int *width, unsigned int wmin, + unsigned int wmax, unsigned int walign, + unsigned int *height, unsigned int hmin, + unsigned int hmax, unsigned int halign, + unsigned int salign); + +/** + * v4l2_find_nearest_size - Find the nearest size among a discrete + * set of resolutions contained in an array of a driver specific struct. + * + * @array: a driver specific array of image sizes + * @array_size: the length of the driver specific array of image sizes + * @width_field: the name of the width field in the driver specific struct + * @height_field: the name of the height field in the driver specific struct + * @width: desired width. 
+ * @height: desired height. + * + * Finds the closest resolution to minimize the width and height differences + * between what requested and the supported resolutions. The size of the width + * and height fields in the driver specific must equal to that of u32, i.e. four + * bytes. + * + * Returns the best match or NULL if the length of the array is zero. + */ +#define v4l2_find_nearest_size(array, array_size, width_field, height_field, \ + width, height) \ + ({ \ + BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \ + sizeof((array)->height_field) != sizeof(u32)); \ + (typeof(&(array)[0]))__v4l2_find_nearest_size( \ + (array), array_size, sizeof(*(array)), \ + offsetof(typeof(*(array)), width_field), \ + offsetof(typeof(*(array)), height_field), \ + width, height); \ + }) +const void * +__v4l2_find_nearest_size(const void *array, size_t array_size, + size_t entry_size, size_t width_offset, + size_t height_offset, s32 width, s32 height); + +/** + * v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by + * calling the get_frame_interval op of the given subdev. It only works + * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the + * function name. + * + * @vdev: the struct video_device pointer. Used to determine the device caps. + * @sd: the sub-device pointer. + * @a: the VIDIOC_G_PARM argument. + */ +int v4l2_g_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a); + +/** + * v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by + * calling the set_frame_interval op of the given subdev. It only works + * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the + * function name. + * + * @vdev: the struct video_device pointer. Used to determine the device caps. + * @sd: the sub-device pointer. + * @a: the VIDIOC_S_PARM argument. 
+ */ +int v4l2_s_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a); + +/* Compare two v4l2_fract structs */ +#define V4L2_FRACT_COMPARE(a, OP, b) \ + ((u64)(a).numerator * (b).denominator OP \ + (u64)(b).numerator * (a).denominator) + +/* ------------------------------------------------------------------------- */ + +/* Pixel format and FourCC helpers */ + +/** + * enum v4l2_pixel_encoding - specifies the pixel encoding value + * + * @V4L2_PIXEL_ENC_UNKNOWN: Pixel encoding is unknown/un-initialized + * @V4L2_PIXEL_ENC_YUV: Pixel encoding is YUV + * @V4L2_PIXEL_ENC_RGB: Pixel encoding is RGB + * @V4L2_PIXEL_ENC_BAYER: Pixel encoding is Bayer + */ +enum v4l2_pixel_encoding { + V4L2_PIXEL_ENC_UNKNOWN = 0, + V4L2_PIXEL_ENC_YUV = 1, + V4L2_PIXEL_ENC_RGB = 2, + V4L2_PIXEL_ENC_BAYER = 3, +}; + +/** + * struct v4l2_format_info - information about a V4L2 format + * @format: 4CC format identifier (V4L2_PIX_FMT_*) + * @pixel_enc: Pixel encoding (see enum v4l2_pixel_encoding above) + * @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4). + * @comp_planes: Number of component planes, which includes the alpha plane (1 to 4). + * @bpp: Array of per-plane bytes per pixel + * @bpp_div: Array of per-plane bytes per pixel divisors to support fractional pixel sizes. 
+ * @hdiv: Horizontal chroma subsampling factor + * @vdiv: Vertical chroma subsampling factor + * @block_w: Per-plane macroblock pixel width (optional) + * @block_h: Per-plane macroblock pixel height (optional) + */ +struct v4l2_format_info { + u32 format; + u8 pixel_enc; + u8 mem_planes; + u8 comp_planes; + u8 bpp[4]; + u8 bpp_div[4]; + u8 hdiv; + u8 vdiv; + u8 block_w[4]; + u8 block_h[4]; +}; + +static inline bool v4l2_is_format_rgb(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_RGB; +} + +static inline bool v4l2_is_format_yuv(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_YUV; +} + +static inline bool v4l2_is_format_bayer(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_BAYER; +} + +const struct v4l2_format_info *v4l2_format_info(u32 format); +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize); +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height); +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, + u32 width, u32 height); + +/** + * v4l2_get_link_freq - Get link rate from transmitter + * + * @handler: The transmitter's control handler + * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on + * D-PHY, samples per clock on parallel. 0 otherwise. + * @div: The divisor between pixel rate and link frequency. Number of data lanes + * times two on D-PHY, 1 on parallel. 0 otherwise. + * + * This function is intended for obtaining the link frequency from the + * transmitter sub-devices. It returns the link rate, either from the + * V4L2_CID_LINK_FREQ control implemented by the transmitter, or value + * calculated based on the V4L2_CID_PIXEL_RATE implemented by the transmitter. 
+ * + * Return: + * * >0: Link frequency + * * %-ENOENT: Link frequency or pixel rate control not found + * * %-EINVAL: Invalid link frequency value + */ +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, + unsigned int div); + +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold); +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator); + +/** + * v4l2_link_freq_to_bitmap - Figure out platform-supported link frequencies + * @dev: The struct device + * @fw_link_freqs: Array of link frequencies from firmware + * @num_of_fw_link_freqs: Number of entries in @fw_link_freqs + * @driver_link_freqs: Array of link frequencies supported by the driver + * @num_of_driver_link_freqs: Number of entries in @driver_link_freqs + * @bitmap: Bitmap of driver-supported link frequencies found in @fw_link_freqs + * + * This function checks which driver-supported link frequencies are enabled in + * system firmware and sets the corresponding bits in @bitmap (after first + * zeroing it). + * + * Return: + * * %0: Success + * * %-ENOENT: No match found between driver-supported link frequencies and + * those available in firmware. + * * %-ENODATA: No link frequencies were specified in firmware. + */ +int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, + unsigned int num_of_fw_link_freqs, + const s64 *driver_link_freqs, + unsigned int num_of_driver_link_freqs, + unsigned long *bitmap); + +static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf) +{ + /* + * When the timestamp comes from 32-bit user space, there may be + * uninitialized data in tv_usec, so cast it to u32. + * Otherwise allow invalid input for backwards compatibility. 
+ */ + return buf->timestamp.tv_sec * NSEC_PER_SEC + + (u32)buf->timestamp.tv_usec * NSEC_PER_USEC; +} + +static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf, + u64 timestamp) +{ + struct timespec64 ts = ns_to_timespec64(timestamp); + + buf->timestamp.tv_sec = ts.tv_sec; + buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} + +static inline bool v4l2_is_colorspace_valid(__u32 colorspace) +{ + return colorspace > V4L2_COLORSPACE_DEFAULT && + colorspace < V4L2_COLORSPACE_LAST; +} + +static inline bool v4l2_is_xfer_func_valid(__u32 xfer_func) +{ + return xfer_func > V4L2_XFER_FUNC_DEFAULT && + xfer_func < V4L2_XFER_FUNC_LAST; +} + +static inline bool v4l2_is_ycbcr_enc_valid(__u8 ycbcr_enc) +{ + return ycbcr_enc > V4L2_YCBCR_ENC_DEFAULT && + ycbcr_enc < V4L2_YCBCR_ENC_LAST; +} + +static inline bool v4l2_is_hsv_enc_valid(__u8 hsv_enc) +{ + return hsv_enc == V4L2_HSV_ENC_180 || hsv_enc == V4L2_HSV_ENC_256; +} + +static inline bool v4l2_is_quant_valid(__u8 quantization) +{ + return quantization == V4L2_QUANTIZATION_FULL_RANGE || + quantization == V4L2_QUANTIZATION_LIM_RANGE; +} + +#endif /* V4L2_COMMON_H_ */ diff --git a/6.12.0/include-overrides/media/v4l2-ctrls.h b/6.12.0/include-overrides/media/v4l2-ctrls.h new file mode 100644 index 0000000..59679a4 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-ctrls.h @@ -0,0 +1,1591 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 controls support header. + * + * Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl> + */ + +#ifndef _V4L2_CTRLS_H +#define _V4L2_CTRLS_H + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/videodev2.h> +#include <media/media-request.h> + +/* forward references */ +struct file; +struct poll_table_struct; +struct v4l2_ctrl; +struct v4l2_ctrl_handler; +struct v4l2_ctrl_helper; +struct v4l2_fh; +struct v4l2_fwnode_device_properties; +struct v4l2_subdev; +struct v4l2_subscribed_event; +struct video_device; + +/** + * union v4l2_ctrl_ptr - A pointer to a control value. + * @p_s32: Pointer to a 32-bit signed value.
+ * @p_s64: Pointer to a 64-bit signed value. + * @p_u8: Pointer to a 8-bit unsigned value. + * @p_u16: Pointer to a 16-bit unsigned value. + * @p_u32: Pointer to a 32-bit unsigned value. + * @p_char: Pointer to a string. + * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure. + * @p_mpeg2_picture: Pointer to a MPEG2 picture structure. + * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure. + * @p_fwht_params: Pointer to a FWHT stateless parameters structure. + * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. + * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. + * @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix. + * @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params. + * @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params. + * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights. + * @p_vp8_frame: Pointer to a VP8 frame params structure. + * @p_vp9_compressed_hdr_probs: Pointer to a VP9 frame compressed header probs structure. + * @p_vp9_frame: Pointer to a VP9 frame params structure. + * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure. + * @p_hevc_pps: Pointer to an HEVC picture parameter set structure. + * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure. + * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure. + * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure. + * @p_area: Pointer to an area. + * @p_av1_sequence: Pointer to an AV1 sequence structure. + * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure. + * @p_av1_frame: Pointer to an AV1 frame structure. + * @p_av1_film_grain: Pointer to an AV1 film grain structure. + * @p: Pointer to a compound value. + * @p_const: Pointer to a constant compound value. 
+ */ +union v4l2_ctrl_ptr { + s32 *p_s32; + s64 *p_s64; + u8 *p_u8; + u16 *p_u16; + u32 *p_u32; + char *p_char; + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; + struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; + struct v4l2_area *p_area; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry; + struct v4l2_ctrl_av1_frame *p_av1_frame; + struct v4l2_ctrl_av1_film_grain *p_av1_film_grain; + void *p; + const void *p_const; +}; + +/** + * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a + * void pointer + * @ptr: The void pointer + */ +static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr) +{ + union v4l2_ctrl_ptr p = { .p = ptr }; + + return p; +} + +/** + * struct v4l2_ctrl_ops - The control operations that the driver has to provide. + * + * @g_volatile_ctrl: Get a new value for this control. Generally only relevant + * for volatile (and usually read-only) controls such as a control + * that returns the current signal strength which changes + * continuously. + * If not set, then the currently cached value will be returned. 
+ * @try_ctrl: Test whether the control's value is valid. Only relevant when + * the usual min/max/step checks are not sufficient. + * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The + * ctrl->handler->lock is held when these ops are called, so no + * one else can access controls owned by that handler. + */ +struct v4l2_ctrl_ops { + int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl); + int (*try_ctrl)(struct v4l2_ctrl *ctrl); + int (*s_ctrl)(struct v4l2_ctrl *ctrl); +}; + +/** + * struct v4l2_ctrl_type_ops - The control type operations that the driver + * has to provide. + * + * @equal: return true if all ctrl->elems array elements are equal. + * @init: initialize the value for array elements from from_idx to ctrl->elems. + * @log: log the value. + * @validate: validate the value for ctrl->new_elems array elements. + * Return 0 on success and a negative value otherwise. + */ +struct v4l2_ctrl_type_ops { + bool (*equal)(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr); + void (*log)(const struct v4l2_ctrl *ctrl); + int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); +}; + +/** + * typedef v4l2_ctrl_notify_fnc - typedef for a notify argument with a function + * that should be called when a control value has changed. + * + * @ctrl: pointer to struct &v4l2_ctrl + * @priv: control private data + * + * This typedef definition is used as an argument to v4l2_ctrl_notify() + * and as an argument at struct &v4l2_ctrl_handler. + */ +typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); + +/** + * struct v4l2_ctrl - The control structure. + * + * @node: The list node. + * @ev_subs: The list of control event subscriptions. + * @handler: The handler that owns the control. + * @cluster: Point to start of cluster array. + * @ncontrols: Number of controls in cluster array. 
+ * @done: Internal flag: set for each processed control. + * @is_new: Set when the user specified a new value for this control. It + * is also set when called from v4l2_ctrl_handler_setup(). Drivers + * should never set this flag. + * @has_changed: Set when the current value differs from the new value. Drivers + * should never use this flag. + * @is_private: If set, then this control is private to its handler and it + * will not be added to any other handlers. Drivers can set + * this flag. + * @is_auto: If set, then this control selects whether the other cluster + * members are in 'automatic' mode or 'manual' mode. This is + * used for autogain/gain type clusters. Drivers should never + * set this flag directly. + * @is_int: If set, then this control has a simple integer value (i.e. it + * uses ctrl->val). + * @is_string: If set, then this control has type %V4L2_CTRL_TYPE_STRING. + * @is_ptr: If set, then this control is an array and/or has type >= + * %V4L2_CTRL_COMPOUND_TYPES + * and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct + * v4l2_ext_control uses field p to point to the data. + * @is_array: If set, then this control contains an N-dimensional array. + * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array. + * If this is set, then @is_array is also set. + * @has_volatiles: If set, then one or more members of the cluster are volatile. + * Drivers should never touch this flag. + * @call_notify: If set, then call the handler's notify function whenever the + * control's value changes. + * @manual_mode_value: If the is_auto flag is set, then this is the value + * of the auto control that determines if that control is in + * manual mode. So if the value of the auto control equals this + * value, then the whole cluster is in manual mode. Drivers should + * never set this flag directly. + * @ops: The control ops. + * @type_ops: The control type ops. + * @id: The control ID. + * @name: The control name. 
+ * @type: The control type.
+ * @minimum: The control's minimum value.
+ * @maximum: The control's maximum value.
+ * @default_value: The control's default value.
+ * @step: The control's step value for non-menu controls.
+ * @elems: The number of elements in the N-dimensional array.
+ * @elem_size: The size in bytes of the control.
+ * @new_elems: The number of elements in p_new. This is the same as @elems,
+ * except for dynamic arrays. In that case it is in the range of
+ * 1 to @p_array_alloc_elems.
+ * @dims: The size of each dimension.
+ * @nr_of_dims:The number of dimensions in @dims.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 32 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a u64 or a bit array.
+ * @qmenu: A const char * array for all menu items. Array entries that are
+ * empty strings ("") correspond to non-existing menu items (this
+ * is in addition to the menu_skip_mask above). The last entry
+ * must be NULL.
+ * Used only if the @type is %V4L2_CTRL_TYPE_MENU.
+ * @qmenu_int: A 64-bit integer array with integer menu items.
+ * The size of array must be equal to the menu size, e. g.:
+ * :math:`ceil(\frac{maximum - minimum}{step}) + 1`.
+ * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU.
+ * @flags: The control's flags.
+ * @priv: The control's private pointer. For use by the driver. It is
+ * untouched by the control framework. Note that this pointer is
+ * not freed when the control is deleted. Should this be needed
+ * then a new internal bitfield can be added to tell the framework
+ * to free this pointer.
+ * @p_array: Pointer to the allocated array. Only valid if @is_array is true.
+ * @p_array_alloc_elems: The number of elements in the allocated + * array for both the cur and new values. So @p_array is actually + * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if + * @is_array is true. + * @cur: Structure to store the current value. + * @cur.val: The control's current value, if the @type is represented via + * a u32 integer (see &enum v4l2_ctrl_type). + * @val: The control's new s32 value. + * @p_def: The control's default value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). + * @p_cur: The control's current value represented via a union which + * provides a standard way of accessing control types + * through a pointer. + * @p_new: The control's new value represented via a union which provides + * a standard way of accessing control types + * through a pointer. + */ +struct v4l2_ctrl { + /* Administrative fields */ + struct list_head node; + struct list_head ev_subs; + struct v4l2_ctrl_handler *handler; + struct v4l2_ctrl **cluster; + unsigned int ncontrols; + + unsigned int done:1; + + unsigned int is_new:1; + unsigned int has_changed:1; + unsigned int is_private:1; + unsigned int is_auto:1; + unsigned int is_int:1; + unsigned int is_string:1; + unsigned int is_ptr:1; + unsigned int is_array:1; + unsigned int is_dyn_array:1; + unsigned int has_volatiles:1; + unsigned int call_notify:1; + unsigned int manual_mode_value:8; + + const struct v4l2_ctrl_ops *ops; + const struct v4l2_ctrl_type_ops *type_ops; + u32 id; + const char *name; + enum v4l2_ctrl_type type; + s64 minimum, maximum, default_value; + u32 elems; + u32 elem_size; + u32 new_elems; + u32 dims[V4L2_CTRL_MAX_DIMS]; + u32 nr_of_dims; + union { + u64 step; + u64 menu_skip_mask; + }; + union { + const char * const *qmenu; + const s64 *qmenu_int; + }; + unsigned long flags; + void *priv; + void *p_array; + u32 p_array_alloc_elems; + s32 val; + struct { + s32 val; + } cur; + + union 
v4l2_ctrl_ptr p_def; + union v4l2_ctrl_ptr p_new; + union v4l2_ctrl_ptr p_cur; +}; + +/** + * struct v4l2_ctrl_ref - The control reference. + * + * @node: List node for the sorted list. + * @next: Single-link list node for the hash. + * @ctrl: The actual control information. + * @helper: Pointer to helper struct. Used internally in + * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``. + * @from_other_dev: If true, then @ctrl was defined in another + * device than the &struct v4l2_ctrl_handler. + * @req_done: Internal flag: if the control handler containing this control + * reference is bound to a media request, then this is set when + * the control has been applied. This prevents applying controls + * from a cluster with multiple controls twice (when the first + * control of a cluster is applied, they all are). + * @p_req_valid: If set, then p_req contains the control value for the request. + * @p_req_array_enomem: If set, then p_req is invalid since allocating space for + * an array failed. Attempting to read this value shall + * result in ENOMEM. Only valid if ctrl->is_array is true. + * @p_req_array_alloc_elems: The number of elements allocated for the + * array. Only valid if @p_req_valid and ctrl->is_array are + * true. + * @p_req_elems: The number of elements in @p_req. This is the same as + * ctrl->elems, except for dynamic arrays. In that case it is in + * the range of 1 to @p_req_array_alloc_elems. Only valid if + * @p_req_valid is true. + * @p_req: If the control handler containing this control reference + * is bound to a media request, then this points to the + * value of the control that must be applied when the request + * is executed, or to the value of the control at the time + * that the request was completed. If @p_req_valid is false, + * then this control was never set for this request and the + * control will not be updated when this request is applied. + * + * Each control handler has a list of these refs. 
The list_head is used to + * keep a sorted-by-control-ID list of all controls, while the next pointer + * is used to link the control in the hash's bucket. + */ +struct v4l2_ctrl_ref { + struct list_head node; + struct v4l2_ctrl_ref *next; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_helper *helper; + bool from_other_dev; + bool req_done; + bool p_req_valid; + bool p_req_array_enomem; + u32 p_req_array_alloc_elems; + u32 p_req_elems; + union v4l2_ctrl_ptr p_req; +}; + +/** + * struct v4l2_ctrl_handler - The control handler keeps track of all the + * controls: both the controls owned by the handler and those inherited + * from other handlers. + * + * @_lock: Default for "lock". + * @lock: Lock to control access to this handler and its controls. + * May be replaced by the user right after init. + * @ctrls: The list of controls owned by this handler. + * @ctrl_refs: The list of control references. + * @cached: The last found control reference. It is common that the same + * control is needed multiple times, so this is a simple + * optimization. + * @buckets: Buckets for the hashing. Allows for quick control lookup. + * @notify: A notify callback that is called whenever the control changes + * value. + * Note that the handler's lock is held when the notify function + * is called! + * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. + * @nr_of_buckets: Total number of buckets in the array. + * @error: The error code of the first failed control addition. + * @request_is_queued: True if the request was queued. + * @requests: List to keep track of open control handler request objects. + * For the parent control handler (@req_obj.ops == NULL) this + * is the list header. When the parent control handler is + * removed, it has to unbind and put all these requests since + * they refer to the parent. + * @requests_queued: List of the queued requests. This determines the order + * in which these controls are applied. 
Once the request is + * completed it is removed from this list. + * @req_obj: The &struct media_request_object, used to link into a + * &struct media_request. This request object has a refcount. + */ +struct v4l2_ctrl_handler { + struct mutex _lock; + struct mutex *lock; + struct list_head ctrls; + struct list_head ctrl_refs; + struct v4l2_ctrl_ref *cached; + struct v4l2_ctrl_ref **buckets; + v4l2_ctrl_notify_fnc notify; + void *notify_priv; + u16 nr_of_buckets; + int error; + bool request_is_queued; + struct list_head requests; + struct list_head requests_queued; + struct media_request_object req_obj; +}; + +/** + * struct v4l2_ctrl_config - Control configuration structure. + * + * @ops: The control ops. + * @type_ops: The control type ops. Only needed for compound controls. + * @id: The control ID. + * @name: The control name. + * @type: The control type. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value for non-menu controls. + * @def: The control's default value. + * @p_def: The control's default value for compound controls. + * @dims: The size of each dimension. + * @elem_size: The size in bytes of the control. + * @flags: The control's flags. + * @menu_skip_mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @qmenu: A const char * array for all menu items. Array entries that are + * empty strings ("") correspond to non-existing menu items (this + * is in addition to the menu_skip_mask above). The last entry + * must be NULL. + * @qmenu_int: A const s64 integer array for all menu items of the type + * V4L2_CTRL_TYPE_INTEGER_MENU. 
+ * @is_private: If set, then this control is private to its handler and it + * will not be added to any other handlers. + */ +struct v4l2_ctrl_config { + const struct v4l2_ctrl_ops *ops; + const struct v4l2_ctrl_type_ops *type_ops; + u32 id; + const char *name; + enum v4l2_ctrl_type type; + s64 min; + s64 max; + u64 step; + s64 def; + union v4l2_ctrl_ptr p_def; + u32 dims[V4L2_CTRL_MAX_DIMS]; + u32 elem_size; + u32 flags; + u64 menu_skip_mask; + const char * const *qmenu; + const s64 *qmenu_int; + unsigned int is_private:1; +}; + +/** + * v4l2_ctrl_fill - Fill in the control fields based on the control ID. + * + * @id: ID of the control + * @name: pointer to be filled with a string with the name of the control + * @type: pointer for storing the type of the control + * @min: pointer for storing the minimum value for the control + * @max: pointer for storing the maximum value for the control + * @step: pointer for storing the control step + * @def: pointer for storing the default value for the control + * @flags: pointer for storing the flags to be used on the control + * + * This works for all standard V4L2 controls. + * For non-standard controls it will only fill in the given arguments + * and @name content will be set to %NULL. + * + * This function will overwrite the contents of @name, @type and @flags. + * The contents of @min, @max, @step and @def may be modified depending on + * the type. + * + * .. note:: + * + * Do not use in drivers! It is used internally for backwards compatibility + * control handling only. Once all drivers are converted to use the new + * control framework this function will no longer be exported. + */ +void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags); + + +/** + * v4l2_ctrl_handler_init_class() - Initialize the control handler. + * @hdl: The control handler. + * @nr_of_controls_hint: A hint of how many controls this handler is + * expected to refer to. 
This is the total number, so including
+ * any inherited controls. It doesn't have to be precise, but if
+ * it is way off, then you either waste memory (too many buckets
+ * are allocated) or the control lookup becomes slower (not enough
+ * buckets are allocated, so there are more slow list lookups).
+ * It will always work, though.
+ * @key: Used by the lock validator if CONFIG_LOCKDEP is set.
+ * @name: Used by the lock validator if CONFIG_LOCKDEP is set.
+ *
+ * .. attention::
+ *
+ * Never use this call directly, always use the v4l2_ctrl_handler_init()
+ * macro that hides the @key and @name arguments.
+ *
+ * Return: returns an error if the buckets could not be allocated. This
+ * error will also be stored in @hdl->error.
+ */
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned int nr_of_controls_hint,
+ struct lock_class_key *key, const char *name);
+
+#ifdef CONFIG_LOCKDEP
+
+/**
+ * v4l2_ctrl_handler_init - helper function to create a static struct
+ * &lock_class_key and calls v4l2_ctrl_handler_init_class()
+ *
+ * @hdl: The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ * expected to refer to. This is the total number, so including
+ * any inherited controls. It doesn't have to be precise, but if
+ * it is way off, then you either waste memory (too many buckets
+ * are allocated) or the control lookup becomes slower (not enough
+ * buckets are allocated, so there are more slow list lookups).
+ * It will always work, though.
+ *
+ * This helper function creates a static struct &lock_class_key and
+ * calls v4l2_ctrl_handler_init_class(), providing a proper name for the lock
+ * validator.
+ *
+ * Use this helper function to initialize a control handler.
+ */ +#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \ +( \ + ({ \ + static struct lock_class_key _key; \ + v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, \ + &_key, \ + KBUILD_BASENAME ":" \ + __stringify(__LINE__) ":" \ + "(" #hdl ")->_lock"); \ + }) \ +) +#else +#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \ + v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL) +#endif + +/** + * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free + * the control list. + * @hdl: The control handler. + * + * Does nothing if @hdl == NULL. + */ +void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_lock() - Helper function to lock the handler + * associated with the control. + * @ctrl: The control to lock. + */ +static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl) +{ + mutex_lock(ctrl->handler->lock); +} + +/** + * v4l2_ctrl_unlock() - Helper function to unlock the handler + * associated with the control. + * @ctrl: The control to unlock. + */ +static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl) +{ + mutex_unlock(ctrl->handler->lock); +} + +/** + * __v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging + * to the handler to initialize the hardware to the current control values. The + * caller is responsible for acquiring the control handler mutex on behalf of + * __v4l2_ctrl_handler_setup(). + * @hdl: The control handler. + * + * Button controls will be skipped, as are read-only controls. + * + * If @hdl == NULL, then this just returns 0. + */ +int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging + * to the handler to initialize the hardware to the current control values. + * @hdl: The control handler. + * + * Button controls will be skipped, as are read-only controls. + * + * If @hdl == NULL, then this just returns 0. 
+ */ +int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler. + * @hdl: The control handler. + * @prefix: The prefix to use when logging the control values. If the + * prefix does not end with a space, then ": " will be added + * after the prefix. If @prefix == NULL, then no prefix will be + * used. + * + * For use with VIDIOC_LOG_STATUS. + * + * Does nothing if @hdl == NULL. + */ +void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, + const char *prefix); + +/** + * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2 + * control. + * + * @hdl: The control handler. + * @cfg: The control's configuration data. + * @priv: The control's driver-specific private data. + * + * If the &v4l2_ctrl struct could not be allocated then NULL is returned + * and @hdl->error is set to the error code (if it wasn't set already). + */ +struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_config *cfg, + void *priv); + +/** + * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu + * control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * If the &v4l2_ctrl struct could not be allocated, or the control + * ID is not known, then NULL is returned and @hdl->error is set to the + * appropriate error code (if it wasn't set already). + * + * If @id refers to a menu control, then this function will return NULL. + * + * Use v4l2_ctrl_new_std_menu() when adding menu controls. 
+ */ +struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, s64 min, s64 max, u64 step, + s64 def); + +/** + * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 + * menu control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @def: The control's default value. + * + * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value + * determines which menu items are to be skipped. + * + * If @id refers to a non-menu control, then this function will return NULL. + */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 max, u64 mask, u8 def); + +/** + * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control + * with driver specific menu. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @def: The control's default value. + * @qmenu: The new menu. + * + * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific + * menu of this control. 
+ *
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 max,
+ u64 mask, u8 def,
+ const char * const *qmenu);
+
+/**
+ * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2
+ * compound control.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @p_def: The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but with support to compound controls, thanks
+ * to the @p_def field. Use v4l2_ctrl_ptr_create() to create @p_def from a
+ * pointer. Use v4l2_ctrl_ptr_create(NULL) if the default value of the
+ * compound control should be all zeroes.
+ *
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id,
+ const union v4l2_ctrl_ptr p_def);
+
+/**
+ * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @def: The control's default value.
+ * @qmenu_int: The control's menu entries.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally
+ * takes as an argument an array of integers determining the menu items.
+ *
+ * If @id refers to a non-integer-menu control, then this function will
+ * return %NULL.
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 max, u8 def,
+ const s64 *qmenu_int);
+
+/**
+ * typedef v4l2_ctrl_filter - Typedef to define the filter function to be
+ * used when adding a control handler.
+ *
+ * @ctrl: pointer to struct &v4l2_ctrl.
+ */
+
+typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
+
+/**
+ * v4l2_ctrl_add_handler() - Add all controls from handler @add to
+ * handler @hdl.
+ *
+ * @hdl: The control handler.
+ * @add: The control handler whose controls you want to add to + * the @hdl control handler. + * @filter: This function will filter which controls should be added. + * @from_other_dev: If true, then the controls in @add were defined in another + * device than @hdl. + * + * Does nothing if either of the two handlers is a NULL pointer. + * If @filter is NULL, then all controls are added. Otherwise only those + * controls for which @filter returns true will be added. + * In case of an error @hdl->error will be set to the error code (if it + * wasn't set already). + */ +int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *add, + v4l2_ctrl_filter filter, + bool from_other_dev); + +/** + * v4l2_ctrl_radio_filter() - Standard filter for radio controls. + * + * @ctrl: The control that is filtered. + * + * This will return true for any controls that are valid for radio device + * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM + * transmitter class controls. + * + * This function is to be used with v4l2_ctrl_add_handler(). + */ +bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging + * to that cluster. + * + * @ncontrols: The number of controls in this cluster. + * @controls: The cluster control array of size @ncontrols. + */ +void v4l2_ctrl_cluster(unsigned int ncontrols, struct v4l2_ctrl **controls); + + +/** + * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging + * to that cluster and set it up for autofoo/foo-type handling. + * + * @ncontrols: The number of controls in this cluster. + * @controls: The cluster control array of size @ncontrols. The first control + * must be the 'auto' control (e.g. autogain, autoexposure, etc.) + * @manual_val: The value for the first control in the cluster that equals the + * manual setting. 
+ * @set_volatile: If true, then all controls except the first auto control will + * be volatile. + * + * Use for control groups where one control selects some automatic feature and + * the other controls are only active whenever the automatic feature is turned + * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs + * red and blue balance, etc. + * + * The behavior of such controls is as follows: + * + * When the autofoo control is set to automatic, then any manual controls + * are set to inactive and any reads will call g_volatile_ctrl (if the control + * was marked volatile). + * + * When the autofoo control is set to manual, then any manual controls will + * be marked active, and any reads will just return the current value without + * going through g_volatile_ctrl. + * + * In addition, this function will set the %V4L2_CTRL_FLAG_UPDATE flag + * on the autofoo control and %V4L2_CTRL_FLAG_INACTIVE on the foo control(s) + * if autofoo is in auto mode. + */ +void v4l2_ctrl_auto_cluster(unsigned int ncontrols, + struct v4l2_ctrl **controls, + u8 manual_val, bool set_volatile); + + +/** + * v4l2_ctrl_find() - Find a control with the given ID. + * + * @hdl: The control handler. + * @id: The control ID to find. + * + * If @hdl == NULL this will return NULL as well. Will lock the handler so + * do not use from inside &v4l2_ctrl_ops. + */ +struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); + +/** + * v4l2_ctrl_activate() - Make the control active or inactive. + * @ctrl: The control to (de)activate. + * @active: True if the control should become active. + * + * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically. + * Does nothing if @ctrl == NULL. + * This will usually be called from within the s_ctrl op. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * + * This function assumes that the control handler is locked. 
+ */ +void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active); + +/** + * __v4l2_ctrl_grab() - Unlocked variant of v4l2_ctrl_grab. + * + * @ctrl: The control to (de)activate. + * @grabbed: True if the control should become grabbed. + * + * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. + * Does nothing if @ctrl == NULL. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * This will usually be called when starting or stopping streaming in the + * driver. + * + * This function assumes that the control handler is locked by the caller. + */ +void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed); + +/** + * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed. + * + * @ctrl: The control to (de)activate. + * @grabbed: True if the control should become grabbed. + * + * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. + * Does nothing if @ctrl == NULL. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * This will usually be called when starting or stopping streaming in the + * driver. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. + */ +static inline void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) +{ + if (!ctrl) + return; + + v4l2_ctrl_lock(ctrl); + __v4l2_ctrl_grab(ctrl, grabbed); + v4l2_ctrl_unlock(ctrl); +} + +/** + *__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() + * + * @ctrl: The control to update. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * Update the range of a control on the fly. This works for control types + * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the + * @step value is interpreted as a menu_skip_mask. + * + * An error is returned if one of the range arguments is invalid for this + * control type. 
+ * + * The caller is responsible for acquiring the control handler mutex on behalf + * of __v4l2_ctrl_modify_range(). + */ +int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def); + +/** + * v4l2_ctrl_modify_range() - Update the range of a control. + * + * @ctrl: The control to update. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * Update the range of a control on the fly. This works for control types + * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the + * @step value is interpreted as a menu_skip_mask. + * + * An error is returned if one of the range arguments is invalid for this + * control type. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. + */ +static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_modify_range(ctrl, min, max, step, def); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + *__v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions() + * + * @ctrl: The control to update. + * @dims: The control's new dimensions. + * + * Update the dimensions of an array control on the fly. The elements of the + * array are reset to their default value, even if the dimensions are + * unchanged. + * + * An error is returned if @dims is invalid for this control. + * + * The caller is responsible for acquiring the control handler mutex on behalf + * of __v4l2_ctrl_modify_dimensions(). + * + * Note: calling this function when the same control is used in pending requests + * is untested. It should work (a request with the wrong size of the control + * will drop that control silently), but it will be very confusing. 
+ */ +int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]); + +/** + * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control. + * + * @ctrl: The control to update. + * @dims: The control's new dimensions. + * + * Update the dimensions of an array control on the fly. The elements of the + * array are reset to their default value, even if the dimensions are + * unchanged. + * + * An error is returned if @dims is invalid for this control type. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. + * + * Note: calling this function when the same control is used in pending requests + * is untested. It should work (a request with the wrong size of the control + * will drop that control silently), but it will be very confusing. + */ +static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_modify_dimensions(ctrl, dims); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * v4l2_ctrl_notify() - Function to set a notify callback for a control. + * + * @ctrl: The control. + * @notify: The callback function. + * @priv: The callback private handle, passed as argument to the callback. + * + * This function sets a callback function for the control. If @ctrl is NULL, + * then it will do nothing. If @notify is NULL, then the notify callback will + * be removed. + * + * There can be only one notify. If another already exists, then a WARN_ON + * will be issued and the function will do nothing. + */ +void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, + void *priv); + +/** + * v4l2_ctrl_get_name() - Get the name of the control + * + * @id: The control ID. + * + * This function returns the name of the given control ID or NULL if it isn't + * a known control. 
+ */
+const char *v4l2_ctrl_get_name(u32 id);
+
+/**
+ * v4l2_ctrl_get_menu() - Get the menu string array of the control
+ *
+ * @id: The control ID.
+ *
+ * This function returns the NULL-terminated menu string array name of the
+ * given control ID or NULL if it isn't a known menu control.
+ */
+const char * const *v4l2_ctrl_get_menu(u32 id);
+
+/**
+ * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
+ *
+ * @id: The control ID.
+ * @len: The size of the integer array.
+ *
+ * This function returns the integer array of the given control ID or NULL if it
+ * isn't a known integer menu control.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
+
+/**
+ * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from
+ * within a driver.
+ *
+ * @ctrl: The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
+s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
+
+/**
+ * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl().
+ *
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
+int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val);
+
+/**
+ * v4l2_ctrl_s_ctrl() - Helper function to set the control's value from
+ * within a driver.
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ * + * This function is for integer type controls only. + */ +static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl(ctrl, val); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value + * from within a driver. + * + * @ctrl: The control. + * + * This returns the control's value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl); + +/** + * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val); + +/** + * v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value + * from within a driver. + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_int64(ctrl, val); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). + * + * @ctrl: The control. 
+ * @s: The new string. + * + * This sets the control's new string safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for string type controls only. + */ +int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s); + +/** + * v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value + * from within a driver. + * + * @ctrl: The control. + * @s: The new string. + * + * This sets the control's new string safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for string type controls only. + */ +static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_string(ctrl, s); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * __v4l2_ctrl_s_ctrl_compound() - Unlocked variant to set a compound control + * + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. + * + * This sets the control's new compound payload safely by going through the + * control framework. This function assumes the control's handler is already + * locked, allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for compound type controls only. + */ +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p); + +/** + * v4l2_ctrl_s_ctrl_compound() - Helper function to set a compound control + * from within a driver. + * + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. + * + * This sets the control's new compound payload safely by going through the + * control framework. 
This function will lock the control's handler, so it
+ * cannot be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for compound type controls only.
+ */
+static inline int v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type,
+ const void *p)
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_s_ctrl_compound(ctrl, type, p);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
+
+/* Helper defines for area type controls */
+#define __v4l2_ctrl_s_ctrl_area(ctrl, area) \
+ __v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area))
+#define v4l2_ctrl_s_ctrl_area(ctrl, area) \
+ v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area))
+
+/* Internal helper functions that deal with control events. */
+extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
+
+/**
+ * v4l2_ctrl_replace - Function to be used as a callback to
+ * &struct v4l2_subscribed_event_ops replace\(\)
+ *
+ * @old: pointer to struct &v4l2_event with the reported
+ * event;
+ * @new: pointer to struct &v4l2_event with the modified
+ * event;
+ */
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new);
+
+/**
+ * v4l2_ctrl_merge - Function to be used as a callback to
+ * &struct v4l2_subscribed_event_ops merge\(\)
+ *
+ * @old: pointer to struct &v4l2_event with the reported
+ * event;
+ * @new: pointer to struct &v4l2_event with the merged
+ * event;
+ */
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new);
+
+/**
+ * v4l2_ctrl_log_status - helper function to implement %VIDIOC_LOG_STATUS ioctl
+ *
+ * @file: pointer to struct file
+ * @fh: unused. Kept just to be compatible to the arguments expected by
+ * &struct v4l2_ioctl_ops.vidioc_log_status.
+ *
+ * Can be used as a vidioc_log_status function that just dumps all controls
+ * associated with the filehandle.
+ */ +int v4l2_ctrl_log_status(struct file *file, void *fh); + +/** + * v4l2_ctrl_subscribe_event - Subscribes to an event + * + * + * @fh: pointer to struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * + * Can be used as a vidioc_subscribe_event function that just subscribes + * control events. + */ +int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + +/** + * v4l2_ctrl_poll - function to be used as a callback to the poll() + * That just polls for control events. + * + * @file: pointer to struct file + * @wait: pointer to struct poll_table_struct + */ +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); + +/** + * v4l2_ctrl_request_setup - helper function to apply control values in a request + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This is a helper function to call the control handler's s_ctrl callback with + * the control values contained in the request. Do note that this approach of + * applying control values in a request is only applicable to memory-to-memory + * devices. + */ +int v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *parent); + +/** + * v4l2_ctrl_request_complete - Complete a control handler request object + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This function is to be called on each control handler that may have had a + * request object associated with it, i.e. control handlers of a driver that + * supports requests. + * + * The function first obtains the values of any volatile controls in the control + * handler and attach them to the request. Then, the function completes the + * request object. 
+ */
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_hdl_find - Find the control handler in the request
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This function finds the control handler in the request. It may return
+ * NULL if not found. When done, you must call v4l2_ctrl_request_hdl_put()
+ * with the returned handler pointer.
+ *
+ * If the request is not in state VALIDATING or QUEUED, then this function
+ * will always return NULL.
+ *
+ * Note that in state VALIDATING the req_queue_mutex is held, so
+ * no objects can be added or deleted from the request.
+ *
+ * In state QUEUED it is the driver that will have to ensure this.
+ */
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_hdl_put - Put the control handler
+ *
+ * @hdl: Put this control handler
+ *
+ * This function releases the control handler previously obtained from
+ * v4l2_ctrl_request_hdl_find().
+ */
+static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl)
+{
+ if (hdl)
+ media_request_object_put(&hdl->req_obj);
+}
+
+/**
+ * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID.
+ *
+ * @hdl: The control handler from the request.
+ * @id: The ID of the control to find.
+ *
+ * This function returns a pointer to the control if this control is
+ * part of the request or NULL otherwise.
+ */
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
+
+/* Helpers for ioctl_ops */
+
+/**
+ * v4l2_queryctrl - Helper function to implement
+ * :ref:`VIDIOC_QUERYCTRL <vidioc_queryctrl>` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @qc: pointer to &struct v4l2_queryctrl
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc);
+
+/**
+ * v4l2_query_ext_ctrl - Helper function to implement
+ * :ref:`VIDIOC_QUERY_EXT_CTRL <vidioc_queryctrl>` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @qc: pointer to &struct v4l2_query_ext_ctrl
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_query_ext_ctrl *qc);
+
+/**
+ * v4l2_querymenu - Helper function to implement
+ * :ref:`VIDIOC_QUERYMENU <vidioc_queryctrl>` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @qm: pointer to &struct v4l2_querymenu
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm);
+
+/**
+ * v4l2_g_ctrl - Helper function to implement
+ * :ref:`VIDIOC_G_CTRL <vidioc_g_ctrl>` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @ctrl: pointer to &struct v4l2_control
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *ctrl);
+
+/**
+ * v4l2_s_ctrl - Helper function to implement
+ * :ref:`VIDIOC_S_CTRL <vidioc_g_ctrl>` ioctl
+ *
+ * @fh: pointer to &struct v4l2_fh
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ *
+ * @ctrl: pointer to &struct v4l2_control
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *ctrl);
+
+/**
+ * v4l2_g_ext_ctrls - Helper function to implement
+ * :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @vdev: pointer to &struct video_device
+ * @mdev: pointer to &struct media_device
+ * @c: pointer to &struct v4l2_ext_controls
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *c);
+
+/**
+ * v4l2_try_ext_ctrls - Helper function to implement
+ * :ref:`VIDIOC_TRY_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @vdev: pointer to &struct video_device
+ * @mdev: pointer to &struct media_device
+ * @c: pointer to &struct v4l2_ext_controls
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *c);
+
+/**
+ * v4l2_s_ext_ctrls - Helper function to implement
+ * :ref:`VIDIOC_S_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
+ *
+ * @fh: pointer to &struct v4l2_fh
+ * @hdl: pointer to &struct v4l2_ctrl_handler
+ * @vdev: pointer to &struct video_device
+ * @mdev: pointer to &struct media_device
+ * @c: pointer to &struct v4l2_ext_controls
+ *
+ * If hdl == NULL then they will all return -EINVAL.
+ */
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *c);
+
+/**
+ * v4l2_ctrl_subdev_subscribe_event - Helper function to implement
+ * as a &struct v4l2_subdev_core_ops subscribe_event function
+ * that just subscribes control events.
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @fh: pointer to &struct v4l2_fh
+ * @sub: pointer to &struct v4l2_event_subscription
+ */
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+/**
+ * v4l2_ctrl_subdev_log_status - Log all controls owned by subdev's control
+ * handler.
+ * + * @sd: pointer to &struct v4l2_subdev + */ +int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd); + +/** + * v4l2_ctrl_new_fwnode_properties() - Register controls for the device + * properties + * + * @hdl: pointer to &struct v4l2_ctrl_handler to register controls on + * @ctrl_ops: pointer to &struct v4l2_ctrl_ops to register controls with + * @p: pointer to &struct v4l2_fwnode_device_properties + * + * This function registers controls associated to device properties, using the + * property values contained in @p parameter, if the property has been set to + * a value. + * + * Currently the following v4l2 controls are parsed and registered: + * - V4L2_CID_CAMERA_ORIENTATION + * - V4L2_CID_CAMERA_SENSOR_ROTATION; + * + * Controls already registered by the caller with the @hdl control handler are + * not overwritten. Callers should register the controls they want to handle + * themselves before calling this function. + * + * Return: 0 on success, a negative error code on failure. + */ +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p); + +/** + * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @ptr1: A v4l2 control value. + * @ptr2: A v4l2 control value. + * + * Return: true if values are equal, otherwise false. + */ +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + +/** + * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @from_idx: Starting element index. + * @ptr: The v4l2 control value. + * + * Return: void + */ +void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr); + +/** + * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback. + * + * @ctrl: The v4l2_ctrl pointer. 
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl);
+
+/**
+ * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
+
+#endif
diff --git a/6.12.0/include-overrides/media/v4l2-dev.h b/6.12.0/include-overrides/media/v4l2-dev.h
new file mode 100644
index 0000000..d82dfdb
--- /dev/null
+++ b/6.12.0/include-overrides/media/v4l2-dev.h
@@ -0,0 +1,644 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * V 4 L 2 D R I V E R H E L P E R A P I
+ *
+ * Moved from videodev2.h
+ *
+ * Some commonly needed functions for drivers (v4l2-common.o module)
+ */
+#ifndef _V4L2_DEV_H
+#define _V4L2_DEV_H
+
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+
+#define VIDEO_MAJOR 81
+
+/**
+ * enum vfl_devnode_type - type of V4L2 device node
+ *
+ * @VFL_TYPE_VIDEO: for video input/output devices
+ * @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext)
+ * @VFL_TYPE_RADIO: for radio tuners
+ * @VFL_TYPE_SUBDEV: for V4L2 subdevices
+ * @VFL_TYPE_SDR: for Software Defined Radio tuners
+ * @VFL_TYPE_TOUCH: for touch sensors
+ * @VFL_TYPE_MAX: number of VFL types, must always be last in the enum
+ */
+enum vfl_devnode_type {
+ VFL_TYPE_VIDEO,
+ VFL_TYPE_VBI,
+ VFL_TYPE_RADIO,
+ VFL_TYPE_SUBDEV,
+ VFL_TYPE_SDR,
+ VFL_TYPE_TOUCH,
+ VFL_TYPE_MAX /* Shall be the last one */
+};
+
+/**
+ * enum vfl_devnode_direction - Identifies if a &struct video_device
+ * corresponds to a receiver, a transmitter or a mem-to-mem device.
+ *
+ * @VFL_DIR_RX: device is a receiver.
+ * @VFL_DIR_TX: device is a transmitter.
+ * @VFL_DIR_M2M: device is a memory to memory device.
+ *
+ * Note: Ignored if &enum vfl_devnode_type is %VFL_TYPE_SUBDEV.
+ */ +enum vfl_devnode_direction { + VFL_DIR_RX, + VFL_DIR_TX, + VFL_DIR_M2M, +}; + +struct v4l2_ioctl_callbacks; +struct video_device; +struct v4l2_device; +struct v4l2_ctrl_handler; + +/** + * enum v4l2_video_device_flags - Flags used by &struct video_device + * + * @V4L2_FL_REGISTERED: + * indicates that a &struct video_device is registered. + * Drivers can clear this flag if they want to block all future + * device access. It is cleared by video_unregister_device. + * @V4L2_FL_USES_V4L2_FH: + * indicates that file->private_data points to &struct v4l2_fh. + * This flag is set by the core when v4l2_fh_init() is called. + * All new drivers should use it. + * @V4L2_FL_QUIRK_INVERTED_CROP: + * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and + * compose are swapped. If this flag is set, then the selection + * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c. + * This allows those drivers to correctly implement the selection API, + * but the old crop API will still work as expected in order to preserve + * backwards compatibility. + * Never set this flag for new drivers. + * @V4L2_FL_SUBDEV_RO_DEVNODE: + * indicates that the video device node is registered in read-only mode. + * The flag only applies to device nodes registered for sub-devices, it is + * set by the core when the sub-devices device nodes are registered with + * v4l2_device_register_ro_subdev_nodes() and used by the sub-device ioctl + * handler to restrict access to some ioctl calls. + */ +enum v4l2_video_device_flags { + V4L2_FL_REGISTERED = 0, + V4L2_FL_USES_V4L2_FH = 1, + V4L2_FL_QUIRK_INVERTED_CROP = 2, + V4L2_FL_SUBDEV_RO_DEVNODE = 3, +}; + +/* Priority helper functions */ + +/** + * struct v4l2_prio_state - stores the priority states + * + * @prios: array with elements to store the array priorities + * + * + * .. note:: + * The size of @prios array matches the number of priority types defined + * by enum &v4l2_priority. 
+ */ +struct v4l2_prio_state { + atomic_t prios[4]; +}; + +/** + * v4l2_prio_init - initializes a struct v4l2_prio_state + * + * @global: pointer to &struct v4l2_prio_state + */ +void v4l2_prio_init(struct v4l2_prio_state *global); + +/** + * v4l2_prio_change - changes the v4l2 file handler priority + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: pointer to the desired priority, as defined by enum &v4l2_priority + * @new: Priority type requested, as defined by enum &v4l2_priority. + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, + enum v4l2_priority new); + +/** + * v4l2_prio_open - Implements the priority logic for a file handler open + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: pointer to the desired priority, as defined by enum &v4l2_priority + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local); + +/** + * v4l2_prio_close - Implements the priority logic for a file handler close + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: priority to be released, as defined by enum &v4l2_priority + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local); + +/** + * v4l2_prio_max - Return the maximum priority, as stored at the @global array. + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global); + +/** + * v4l2_prio_check - Implements the priority logic for a file handler close + * + * @global: pointer to the &struct v4l2_prio_state of the device node. 
+ * @local: desired priority, as defined by enum &v4l2_priority local + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local); + +/** + * struct v4l2_file_operations - fs operations used by a V4L2 device + * + * @owner: pointer to struct module + * @read: operations needed to implement the read() syscall + * @write: operations needed to implement the write() syscall + * @poll: operations needed to implement the poll() syscall + * @unlocked_ioctl: operations needed to implement the ioctl() syscall + * @compat_ioctl32: operations needed to implement the ioctl() syscall for + * the special case where the Kernel uses 64 bits instructions, but + * the userspace uses 32 bits. + * @get_unmapped_area: called by the mmap() syscall, used when %!CONFIG_MMU + * @mmap: operations needed to implement the mmap() syscall + * @open: operations needed to implement the open() syscall + * @release: operations needed to implement the release() syscall + * + * .. note:: + * + * Those operations are used to implemente the fs struct file_operations + * at the V4L2 drivers. The V4L2 core overrides the fs ops with some + * extra logic needed by the subsystem. 
+ */ +struct v4l2_file_operations { + struct module *owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); +#ifdef CONFIG_COMPAT + long (*compat_ioctl32) (struct file *, unsigned int, unsigned long); +#endif + unsigned long (*get_unmapped_area) (struct file *, unsigned long, + unsigned long, unsigned long, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + int (*open) (struct file *); + int (*release) (struct file *); +}; + +/* + * Newer version of video_device, handled by videodev2.c + * This version moves redundant code from video device code to + * the common handler + */ + +/** + * struct video_device - Structure used to create and manage the V4L2 device + * nodes. + * + * @entity: &struct media_entity + * @intf_devnode: pointer to &struct media_intf_devnode + * @pipe: &struct media_pipeline + * @fops: pointer to &struct v4l2_file_operations for the video device + * @device_caps: device capabilities as used in v4l2_capabilities + * @dev: &struct device for the video device + * @cdev: character device + * @v4l2_dev: pointer to &struct v4l2_device parent + * @dev_parent: pointer to &struct device parent + * @ctrl_handler: Control handler associated with this device node. + * May be NULL. + * @queue: &struct vb2_queue associated with this device node. May be NULL. + * @prio: pointer to &struct v4l2_prio_state with device's Priority state. + * If NULL, then v4l2_dev->prio will be used. + * @name: video device name + * @vfl_type: V4L device type, as defined by &enum vfl_devnode_type + * @vfl_dir: V4L receiver, transmitter or m2m + * @minor: device node 'minor'. It is set to -1 if the registration failed + * @num: number of the video device node + * @flags: video device flags. Use bitops to set/clear/test flags. 
+ * Contains a set of &enum v4l2_video_device_flags. + * @index: attribute to differentiate multiple indices on one physical device + * @fh_lock: Lock for all v4l2_fhs + * @fh_list: List of &struct v4l2_fh + * @dev_debug: Internal device debug flags, not for use by drivers + * @tvnorms: Supported tv norms + * + * @release: video device release() callback + * @ioctl_ops: pointer to &struct v4l2_ioctl_ops with ioctl callbacks + * + * @valid_ioctls: bitmap with the valid ioctls for this device + * @lock: pointer to &struct mutex serialization lock + * + * .. note:: + * Only set @dev_parent if that can't be deduced from @v4l2_dev. + */ + +struct video_device { +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_entity entity; + struct media_intf_devnode *intf_devnode; + struct media_pipeline pipe; +#endif + const struct v4l2_file_operations *fops; + + u32 device_caps; + + /* sysfs */ + struct device dev; + struct cdev *cdev; + + struct v4l2_device *v4l2_dev; + struct device *dev_parent; + + struct v4l2_ctrl_handler *ctrl_handler; + + struct vb2_queue *queue; + + struct v4l2_prio_state *prio; + + /* device info */ + char name[64]; + enum vfl_devnode_type vfl_type; + enum vfl_devnode_direction vfl_dir; + int minor; + u16 num; + unsigned long flags; + int index; + + /* V4L2 file handles */ + spinlock_t fh_lock; + struct list_head fh_list; + + int dev_debug; + + v4l2_std_id tvnorms; + + /* callbacks */ + void (*release)(struct video_device *vdev); + const struct v4l2_ioctl_ops *ioctl_ops; + DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); + + struct mutex *lock; +}; + +/** + * media_entity_to_video_device - Returns a &struct video_device from + * the &struct media_entity embedded on it. + * + * @__entity: pointer to &struct media_entity + */ +#define media_entity_to_video_device(__entity) \ + container_of(__entity, struct video_device, entity) + +/** + * to_video_device - Returns a &struct video_device from the + * &struct device embedded on it. 
+ *
+ * @cd: pointer to &struct device
+ */
+#define to_video_device(cd) container_of(cd, struct video_device, dev)
+
+/**
+ * __video_register_device - register video4linux devices
+ *
+ * @vdev: struct video_device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
+ * @nr: which device node number is desired:
+ * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ * @warn_if_nr_in_use: warn if the desired device node number
+ * was already in use and another number was chosen instead.
+ * @owner: module that owns the video device node
+ *
+ * The registration code assigns minor numbers and device node numbers
+ * based on the requested type and registers the new device node with
+ * the kernel.
+ *
+ * This function assumes that struct video_device was zeroed when it
+ * was allocated and does not contain any stale data.
+ *
+ * An error is returned if no free minor or device node number could be
+ * found, or if the registration of the device node failed.
+ *
+ * Returns 0 on success.
+ *
+ * .. note::
+ *
+ * This function is meant to be used only inside the V4L2 core.
+ * Drivers should use video_register_device() or
+ * video_register_device_no_warn().
+ */
+int __must_check __video_register_device(struct video_device *vdev,
+ enum vfl_devnode_type type,
+ int nr, int warn_if_nr_in_use,
+ struct module *owner);
+
+/**
+ * video_register_device - register video4linux devices
+ *
+ * @vdev: struct video_device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
+ * @nr: which device node number is desired:
+ * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ *
+ * Internally, it calls __video_register_device(). Please see its
+ * documentation for more details.
+ *
+ * .. note::
+ * if video_register_device fails, the release() callback of
+ * &struct video_device structure is *not* called, so the caller
+ * is responsible for freeing any data. Usually that means that
+ * video_device_release() should be called on failure.
+ */
+static inline int __must_check video_register_device(struct video_device *vdev,
+ enum vfl_devnode_type type,
+ int nr)
+{
+ return __video_register_device(vdev, type, nr, 1, vdev->fops->owner);
+}
+
+/**
+ * video_register_device_no_warn - register video4linux devices
+ *
+ * @vdev: struct video_device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
+ * @nr: which device node number is desired:
+ * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ *
+ * This function is identical to video_register_device() except that no
+ * warning is issued if the desired device node number was already in use.
+ *
+ * Internally, it calls __video_register_device(). Please see its
+ * documentation for more details.
+ *
+ * .. note::
+ * if video_register_device fails, the release() callback of
+ * &struct video_device structure is *not* called, so the caller
+ * is responsible for freeing any data. Usually that means that
+ * video_device_release() should be called on failure.
+ */
+static inline int __must_check
+video_register_device_no_warn(struct video_device *vdev,
+ enum vfl_devnode_type type, int nr)
+{
+ return __video_register_device(vdev, type, nr, 0, vdev->fops->owner);
+}
+
+/**
+ * video_unregister_device - Unregister video devices.
+ *
+ * @vdev: &struct video_device to unregister
+ *
+ * Does nothing if vdev == NULL or if video_is_registered() returns false.
+ */
+void video_unregister_device(struct video_device *vdev);
+
+/**
+ * video_device_alloc - helper function to alloc &struct video_device
+ *
+ * Returns NULL if %-ENOMEM or a &struct video_device on success.
+ */
+struct video_device * __must_check video_device_alloc(void);
+
+/**
+ * video_device_release - helper function to release &struct video_device
+ *
+ * @vdev: pointer to &struct video_device
+ *
+ * Can also be used for video_device->release\(\).
+ */ +void video_device_release(struct video_device *vdev); + +/** + * video_device_release_empty - helper function to implement the + * video_device->release\(\) callback. + * + * @vdev: pointer to &struct video_device + * + * This release function does nothing. + * + * It should be used when the video_device is a static global struct. + * + * .. note:: + * Having a static video_device is a dubious construction at best. + */ +void video_device_release_empty(struct video_device *vdev); + +/** + * v4l2_disable_ioctl- mark that a given command isn't implemented. + * shouldn't use core locking + * + * @vdev: pointer to &struct video_device + * @cmd: ioctl command + * + * This function allows drivers to provide just one v4l2_ioctl_ops struct, but + * disable ioctls based on the specific card that is actually found. + * + * .. note:: + * + * This must be called before video_register_device. + * See also the comments for determine_valid_ioctls(). + */ +static inline void v4l2_disable_ioctl(struct video_device *vdev, + unsigned int cmd) +{ + if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + set_bit(_IOC_NR(cmd), vdev->valid_ioctls); +} + +/** + * video_get_drvdata - gets private data from &struct video_device. + * + * @vdev: pointer to &struct video_device + * + * returns a pointer to the private data + */ +static inline void *video_get_drvdata(struct video_device *vdev) +{ + return dev_get_drvdata(&vdev->dev); +} + +/** + * video_set_drvdata - sets private data from &struct video_device. + * + * @vdev: pointer to &struct video_device + * @data: private data pointer + */ +static inline void video_set_drvdata(struct video_device *vdev, void *data) +{ + dev_set_drvdata(&vdev->dev, data); +} + +/** + * video_devdata - gets &struct video_device from struct file. + * + * @file: pointer to struct file + */ +struct video_device *video_devdata(struct file *file); + +/** + * video_drvdata - gets private data from &struct video_device using the + * struct file. 
+ * + * @file: pointer to struct file + * + * This is function combines both video_get_drvdata() and video_devdata() + * as this is used very often. + */ +static inline void *video_drvdata(struct file *file) +{ + return video_get_drvdata(video_devdata(file)); +} + +/** + * video_device_node_name - returns the video device name + * + * @vdev: pointer to &struct video_device + * + * Returns the device name string + */ +static inline const char *video_device_node_name(struct video_device *vdev) +{ + return dev_name(&vdev->dev); +} + +/** + * video_is_registered - returns true if the &struct video_device is registered. + * + * + * @vdev: pointer to &struct video_device + */ +static inline int video_is_registered(struct video_device *vdev) +{ + return test_bit(V4L2_FL_REGISTERED, &vdev->flags); +} + +#if defined(CONFIG_MEDIA_CONTROLLER) + +/** + * video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as streaming. The given pipeline object is + * assigned to every pad in the pipeline and stored in the media_pad pipe + * field. + * + * Calls to this function can be nested, in which case the same number of + * video_device_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * video_device_pipeline_start(). + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_start(). + */ +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * __video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. 
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_start()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_start().
+ */
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as not streaming. The media_pad pipe field
+ * is reset to %NULL.
+ *
+ * If multiple calls to media_pipeline_start() have been made, the same
+ * number of calls to this function are required to mark the pipeline as not
+ * streaming.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_stop().
+ */
+void video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * __video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * .. note:: This is the non-locking version of media_pipeline_stop()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_stop().
+ */
+void __video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * video_device_pipeline_alloc_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ *
+ * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start()
+ * but instead of working on a given pipeline the function will use an
+ * existing pipeline if the video device is already part of a pipeline, or
+ * allocate a new pipeline.
+ *
+ * Calls to video_device_pipeline_alloc_start() must be matched with
+ * video_device_pipeline_stop().
+ */ +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev); + +/** + * video_device_pipeline - Get the media pipeline a video device is part of + * @vdev: The video device + * + * This function returns the media pipeline that a video device has been + * associated with when constructing the pipeline with + * video_device_pipeline_start(). The pointer remains valid until + * video_device_pipeline_stop() is called. + * + * Return: The media_pipeline the video device is part of, or NULL if the video + * device is not part of any pipeline. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_entity_pipeline(). + */ +struct media_pipeline *video_device_pipeline(struct video_device *vdev); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +#endif /* _V4L2_DEV_H */ diff --git a/6.12.0/include-overrides/media/v4l2-device.h b/6.12.0/include-overrides/media/v4l2-device.h new file mode 100644 index 0000000..dd897a3 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-device.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + V4L2 device support header. + + Copyright (C) 2008 Hans Verkuil + + */ + +#ifndef _V4L2_DEVICE_H +#define _V4L2_DEVICE_H + +#include +#include +#include + +struct v4l2_ctrl_handler; + +/** + * struct v4l2_device - main struct to for V4L2 device drivers + * + * @dev: pointer to struct device. + * @mdev: pointer to struct media_device, may be NULL. + * @subdevs: used to keep track of the registered subdevs + * @lock: lock this struct; can be used by the driver as well + * if this struct is embedded into a larger struct. + * @name: unique device name, by default the driver name + bus ID + * @notify: notify operation called by some sub-devices. + * @ctrl_handler: The control handler. May be %NULL. + * @prio: Device's priority state + * @ref: Keep track of the references to this struct. + * @release: Release function that is called when the ref count + * goes to 0. 
+ * + * Each instance of a V4L2 device should create the v4l2_device struct, + * either stand-alone or embedded in a larger struct. + * + * It allows easy access to sub-devices (see v4l2-subdev.h) and provides + * basic V4L2 device-level support. + * + * .. note:: + * + * #) @dev->driver_data points to this struct. + * #) @dev might be %NULL if there is no parent device + */ +struct v4l2_device { + struct device *dev; + struct media_device *mdev; + struct list_head subdevs; + spinlock_t lock; + char name[36]; + void (*notify)(struct v4l2_subdev *sd, + unsigned int notification, void *arg); + struct v4l2_ctrl_handler *ctrl_handler; + struct v4l2_prio_state prio; + struct kref ref; + void (*release)(struct v4l2_device *v4l2_dev); +}; + +/** + * v4l2_device_get - gets a V4L2 device reference + * + * @v4l2_dev: pointer to struct &v4l2_device + * + * This is an ancillary routine meant to increment the usage for the + * struct &v4l2_device pointed by @v4l2_dev. + */ +static inline void v4l2_device_get(struct v4l2_device *v4l2_dev) +{ + kref_get(&v4l2_dev->ref); +} + +/** + * v4l2_device_put - puts a V4L2 device reference + * + * @v4l2_dev: pointer to struct &v4l2_device + * + * This is an ancillary routine meant to decrement the usage for the + * struct &v4l2_device pointed by @v4l2_dev. + */ +int v4l2_device_put(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_register - Initialize v4l2_dev and make @dev->driver_data + * point to @v4l2_dev. + * + * @dev: pointer to struct &device + * @v4l2_dev: pointer to struct &v4l2_device + * + * .. note:: + * @dev may be %NULL in rare cases (ISA devices). + * In such case the caller must fill in the @v4l2_dev->name field + * before calling this function. 
+ */ +int __must_check v4l2_device_register(struct device *dev, + struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_set_name - Optional function to initialize the + * name field of struct &v4l2_device + * + * @v4l2_dev: pointer to struct &v4l2_device + * @basename: base name for the device name + * @instance: pointer to a static atomic_t var with the instance usage for + * the device driver. + * + * v4l2_device_set_name() initializes the name field of struct &v4l2_device + * using the driver name and a driver-global atomic_t instance. + * + * This function will increment the instance counter and returns the + * instance value used in the name. + * + * Example: + * + * static atomic_t drv_instance = ATOMIC_INIT(0); + * + * ... + * + * instance = v4l2_device_set_name(&\ v4l2_dev, "foo", &\ drv_instance); + * + * The first time this is called the name field will be set to foo0 and + * this function returns 0. If the name ends with a digit (e.g. cx18), + * then the name will be set to cx18-0 since cx180 would look really odd. + */ +int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, + atomic_t *instance); + +/** + * v4l2_device_disconnect - Change V4L2 device state to disconnected. + * + * @v4l2_dev: pointer to struct v4l2_device + * + * Should be called when the USB parent disconnects. + * Since the parent disappears, this ensures that @v4l2_dev doesn't have + * an invalid parent pointer. + * + * .. note:: This function sets @v4l2_dev->dev to NULL. + */ +void v4l2_device_disconnect(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_unregister - Unregister all sub-devices and any other + * resources related to @v4l2_dev. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +void v4l2_device_unregister(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_register_subdev - Registers a subdev with a v4l2 device. 
+ * + * @v4l2_dev: pointer to struct &v4l2_device + * @sd: pointer to &struct v4l2_subdev + * + * While registered, the subdev module is marked as in-use. + * + * An error is returned if the module is no longer loaded on any attempts + * to register it. + */ +#define v4l2_device_register_subdev(v4l2_dev, sd) \ + __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE) +int __must_check __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct module *module); + +/** + * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. + * + * @sd: pointer to &struct v4l2_subdev + * + * .. note :: + * + * Can also be called if the subdev wasn't registered. In such + * case, it will do nothing. + */ +void v4l2_device_unregister_subdev(struct v4l2_subdev *sd); + +/** + * __v4l2_device_register_subdev_nodes - Registers device nodes for + * all subdevs of the v4l2 device that are marked with the + * %V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * + * @v4l2_dev: pointer to struct v4l2_device + * @read_only: subdevices read-only flag. True to register the subdevices + * device nodes in read-only mode, false to allow full access to the + * subdevice userspace API. + */ +int __must_check +__v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only); + +/** + * v4l2_device_register_subdev_nodes - Registers subdevices device nodes with + * unrestricted access to the subdevice userspace operations + * + * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation + * for more details. 
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline int __must_check
+v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return __v4l2_device_register_subdev_nodes(v4l2_dev, false);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * v4l2_device_register_ro_subdev_nodes - Registers subdevices device nodes
+ * in read-only mode
+ *
+ * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation
+ * for more details.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline int __must_check
+v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return __v4l2_device_register_subdev_nodes(v4l2_dev, true);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * v4l2_subdev_notify - Sends a notification to v4l2_device.
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @notification: type of notification. Please notice that the notification
+ * type is driver-specific.
+ * @arg: arguments for the notification. Those are specific to each
+ * notification type.
+ */
+static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ if (sd && sd->v4l2_dev && sd->v4l2_dev->notify)
+ sd->v4l2_dev->notify(sd, notification, arg);
+}
+
+/**
+ * v4l2_device_supports_requests - Test if requests are supported.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+{
+ return v4l2_dev->mdev && v4l2_dev->mdev->ops &&
+ v4l2_dev->mdev->ops->req_queue;
+}
+
+/* Helper macros to iterate over all subdevs. */
+
+/**
+ * v4l2_device_for_each_subdev - Helper macro that iterates over all
+ * sub-devices of a given &v4l2_device.
+ *
+ * @sd: pointer that will be filled by the macro with all
+ * &struct v4l2_subdev pointer used as an iterator by the loop.
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * + * This macro iterates over all sub-devices owned by the @v4l2_dev device. + * It acts as a for loop iterator and executes the next statement with + * the @sd variable pointing to each sub-device in turn. + */ +#define v4l2_device_for_each_subdev(sd, v4l2_dev) \ + list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) + +/** + * __v4l2_device_call_subdevs_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev pointer used as an iterator by the loop. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \ + do { \ + list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \ + if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ + (sd)->ops->o->f((sd) , ##args); \ + } while (0) + +/** + * __v4l2_device_call_subdevs - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. 
+ * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, cond, o, \ + f , ##args); \ + } while (0) + +/** + * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev sub-devices associated with @v4l2_dev. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, zero + * otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \ +({ \ + long __err = 0; \ + \ + list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) { \ + if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ + __err = (sd)->ops->o->f((sd) , ##args); \ + if (__err && __err != -ENOIOCTLCMD) \ + break; \ + } \ + (__err == -ENOIOCTLCMD) ? 0 : __err; \ +}) + +/** + * __v4l2_device_call_subdevs_until_err - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. 
+ * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ + f , ##args); \ +}) + +/** + * v4l2_device_call_all - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ + ##args); \ + } while (0) + +/** + * v4l2_device_call_until_err - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver, until an error occurs. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. 
+ * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ + ##args); \ +}) + +/** + * v4l2_device_mask_call_all - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) 
\ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ + } while (0) + +/** + * v4l2_device_mask_call_until_err - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ +}) + + +/** + * v4l2_device_has_op - checks if any subdev with matching grpid has a + * given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. 
+ */ +#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \ +({ \ + struct v4l2_subdev *__sd; \ + bool __result = false; \ + list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ + if ((grpid) && __sd->grp_id != (grpid)) \ + continue; \ + if (v4l2_subdev_has_op(__sd, o, f)) { \ + __result = true; \ + break; \ + } \ + } \ + __result; \ +}) + +/** + * v4l2_device_mask_has_op - checks if any subdev with matching group + * mask has a given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + */ +#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \ +({ \ + struct v4l2_subdev *__sd; \ + bool __result = false; \ + list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ + if ((grpmsk) && !(__sd->grp_id & (grpmsk))) \ + continue; \ + if (v4l2_subdev_has_op(__sd, o, f)) { \ + __result = true; \ + break; \ + } \ + } \ + __result; \ +}) + +#endif diff --git a/6.12.0/include-overrides/media/v4l2-dv-timings.h b/6.12.0/include-overrides/media/v4l2-dv-timings.h new file mode 100644 index 0000000..c64096b --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-dv-timings.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-dv-timings - Internal header with dv-timings helper functions + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef __V4L2_DV_TIMINGS_H +#define __V4L2_DV_TIMINGS_H + +#include + +/** + * v4l2_calc_timeperframe - helper function to calculate timeperframe based + * v4l2_dv_timings fields. + * @t: Timings for the video mode. 
+ * + * Calculates the expected timeperframe using the pixel clock value and + * horizontal/vertical measures. This means that v4l2_dv_timings structure + * must be correctly and fully filled. + */ +struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t); + +/* + * v4l2_dv_timings_presets: list of all dv_timings presets. + */ +extern const struct v4l2_dv_timings v4l2_dv_timings_presets[]; + +/** + * typedef v4l2_check_dv_timings_fnc - timings check callback + * + * @t: the v4l2_dv_timings struct. + * @handle: a handle from the driver. + * + * Returns true if the given timings are valid. + */ +typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle); + +/** + * v4l2_valid_dv_timings() - are these timings valid? + * + * @t: the v4l2_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fnc: callback to check if this timing is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * Returns true if the given dv_timings struct is supported by the + * hardware capabilities and the callback function (if non-NULL), returns + * false otherwise. + */ +bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV + * timings based on capabilities + * + * @t: the v4l2_enum_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fnc: callback to check if this timing is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * This enumerates dv_timings using the full list of possible CEA-861 and DMT + * timings, filtering out any timings that are not supported based on the + * hardware capabilities and the callback function (if non-NULL). + * + * If a valid timing for the given index is found, it will fill in @t and + * return 0, otherwise it returns -EINVAL. 
+ */ +int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_find_dv_timings_cap() - Find the closest timings struct + * + * @t: the v4l2_enum_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @pclock_delta: maximum delta between t->pixelclock and the timing struct + * under consideration. + * @fnc: callback to check if a given timings struct is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * This function tries to map the given timings to an entry in the + * full list of possible CEA-861 and DMT timings, filtering out any timings + * that are not supported based on the hardware capabilities and the callback + * function (if non-NULL). + * + * On success it will fill in @t with the found timings and it returns true. + * On failure it will return false. + */ +bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + unsigned pclock_delta, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_find_dv_timings_cea861_vic() - find timings based on CEA-861 VIC + * @t: the timings data. + * @vic: CEA-861 VIC code + * + * On success it will fill in @t with the found timings and it returns true. + * On failure it will return false. + */ +bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic); + +/** + * v4l2_match_dv_timings() - do two timings match? + * + * @measured: the measured timings data. + * @standard: the timings according to the standard. + * @pclock_delta: maximum delta in Hz between standard->pixelclock and + * the measured timings. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. + * + * Returns true if the two timings match, returns false otherwise. 
+ */ +bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured, + const struct v4l2_dv_timings *standard, + unsigned pclock_delta, bool match_reduced_fps); + +/** + * v4l2_print_dv_timings() - log the contents of a dv_timings struct + * @dev_prefix:device prefix for each log line. + * @prefix: additional prefix for each log line, may be NULL. + * @t: the timings data. + * @detailed: if true, give a detailed log. + */ +void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, + const struct v4l2_dv_timings *t, bool detailed); + +/** + * v4l2_detect_cvt - detect if the given timings follow the CVT standard + * + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. + * @active_width: active width of image (does not include blanking). This + * information is needed only in case of version 2 of reduced blanking. + * In other cases, this parameter does not have any effect on timings. + * @polarities: the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced: if this flag is true, it indicates interlaced format + * @cap: the v4l2_dv_timings_cap capabilities. + * @fmt: the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid CVT format. If so, then it will return true, and fmt will be filled + * in with the found CVT timings. + */ +bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, unsigned int active_width, + u32 polarities, bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); + +/** + * v4l2_detect_gtf - detect if the given timings follow the GTF standard + * + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. 
+ * @polarities: the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced: if this flag is true, it indicates interlaced format + * @aspect: preferred aspect ratio. GTF has no method of determining the + * aspect ratio in order to derive the image width from the + * image height, so it has to be passed explicitly. Usually + * the native screen aspect ratio is used for this. If it + * is not filled in correctly, then 16:9 will be assumed. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fmt: the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid GTF format. If so, then it will return true, and fmt will be filled + * in with the found GTF timings. + */ +bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, u32 polarities, bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); + +/** + * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes + * 0x15 and 0x16 from the EDID. + * + * @hor_landscape: byte 0x15 from the EDID. + * @vert_portrait: byte 0x16 from the EDID. + * + * Determines the aspect ratio from the EDID. + * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: + * "Horizontal and Vertical Screen Size or Aspect Ratio" + */ +struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait); + +/** + * v4l2_dv_timings_aspect_ratio - calculate the aspect ratio based on the + * v4l2_dv_timings information. + * + * @t: the timings data. + */ +struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t); + +/** + * can_reduce_fps - check if conditions for reduced fps are true. + * @bt: v4l2 timing structure + * + * For different timings reduced fps is allowed if the following conditions + * are met: + * + * - For CVT timings: if reduced blanking v2 (vsync == 8) is true. 
+ * - For CEA861 timings: if %V4L2_DV_FL_CAN_REDUCE_FPS flag is true. + */ +static inline bool can_reduce_fps(struct v4l2_bt_timings *bt) +{ + if ((bt->standards & V4L2_DV_BT_STD_CVT) && (bt->vsync == 8)) + return true; + + if ((bt->standards & V4L2_DV_BT_STD_CEA861) && + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS)) + return true; + + return false; +} + +/** + * struct v4l2_hdmi_colorimetry - describes the HDMI colorimetry information + * @colorspace: enum v4l2_colorspace, the colorspace + * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @quantization: enum v4l2_quantization, colorspace quantization + * @xfer_func: enum v4l2_xfer_func, colorspace transfer function + */ +struct v4l2_hdmi_colorimetry { + enum v4l2_colorspace colorspace; + enum v4l2_ycbcr_encoding ycbcr_enc; + enum v4l2_quantization quantization; + enum v4l2_xfer_func xfer_func; +}; + +struct hdmi_avi_infoframe; +struct hdmi_vendor_infoframe; + +struct v4l2_hdmi_colorimetry +v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, + const struct hdmi_vendor_infoframe *hdmi, + unsigned int height); + +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset); +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr); +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input); +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); + +#endif diff --git a/6.12.0/include-overrides/media/v4l2-event.h b/6.12.0/include-overrides/media/v4l2-event.h new file mode 100644 index 0000000..3a0e258 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-event.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-event.h + * + * V4L2 events. + * + * Copyright (C) 2009--2010 Nokia Corporation. 
+ * + * Contact: Sakari Ailus + */ + +#ifndef V4L2_EVENT_H +#define V4L2_EVENT_H + +#include +#include +#include + +struct v4l2_fh; +struct v4l2_subdev; +struct v4l2_subscribed_event; +struct video_device; + +/** + * struct v4l2_kevent - Internal kernel event struct. + * @list: List node for the v4l2_fh->available list. + * @sev: Pointer to parent v4l2_subscribed_event. + * @event: The event itself. + * @ts: The timestamp of the event. + */ +struct v4l2_kevent { + struct list_head list; + struct v4l2_subscribed_event *sev; + struct v4l2_event event; + u64 ts; +}; + +/** + * struct v4l2_subscribed_event_ops - Subscribed event operations. + * + * @add: Optional callback, called when a new listener is added + * @del: Optional callback, called when a listener stops listening + * @replace: Optional callback that can replace event 'old' with event 'new'. + * @merge: Optional callback that can merge event 'old' into event 'new'. + */ +struct v4l2_subscribed_event_ops { + int (*add)(struct v4l2_subscribed_event *sev, unsigned int elems); + void (*del)(struct v4l2_subscribed_event *sev); + void (*replace)(struct v4l2_event *old, const struct v4l2_event *new); + void (*merge)(const struct v4l2_event *old, struct v4l2_event *new); +}; + +/** + * struct v4l2_subscribed_event - Internal struct representing a subscribed + * event. + * + * @list: List node for the v4l2_fh->subscribed list. + * @type: Event type. + * @id: Associated object ID (e.g. control ID). 0 if there isn't any. + * @flags: Copy of v4l2_event_subscription->flags. + * @fh: Filehandle that subscribed to this event. + * @node: List node that hooks into the object's event list + * (if there is one). + * @ops: v4l2_subscribed_event_ops + * @elems: The number of elements in the events array. + * @first: The index of the events containing the oldest available event. + * @in_use: The number of queued events. + * @events: An array of @elems events. 
+ */ +struct v4l2_subscribed_event { + struct list_head list; + u32 type; + u32 id; + u32 flags; + struct v4l2_fh *fh; + struct list_head node; + const struct v4l2_subscribed_event_ops *ops; + unsigned int elems; + unsigned int first; + unsigned int in_use; + struct v4l2_kevent events[] __counted_by(elems); +}; + +/** + * v4l2_event_dequeue - Dequeue events from video device. + * + * @fh: pointer to struct v4l2_fh + * @event: pointer to struct v4l2_event + * @nonblocking: if not zero, waits for an event to arrive + */ +int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, + int nonblocking); + +/** + * v4l2_event_queue - Queue events to video device. + * + * @vdev: pointer to &struct video_device + * @ev: pointer to &struct v4l2_event + * + * The event will be queued for all &struct v4l2_fh file handlers. + * + * .. note:: + * The driver's only responsibility is to fill in the type and the data + * fields. The other fields will be filled in by V4L2. + */ +void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev); + +/** + * v4l2_event_queue_fh - Queue events to video device. + * + * @fh: pointer to &struct v4l2_fh + * @ev: pointer to &struct v4l2_event + * + * + * The event will be queued only for the specified &struct v4l2_fh file handler. + * + * .. note:: + * The driver's only responsibility is to fill in the type and the data + * fields. The other fields will be filled in by V4L2. + */ +void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev); + +/** + * v4l2_event_wake_all - Wake all filehandles. + * + * Used when unregistering a video device. + * + * @vdev: pointer to &struct video_device + */ +void v4l2_event_wake_all(struct video_device *vdev); + +/** + * v4l2_event_pending - Check if an event is available + * + * @fh: pointer to &struct v4l2_fh + * + * Returns the number of pending events. 
+ */ +int v4l2_event_pending(struct v4l2_fh *fh); + +/** + * v4l2_event_subscribe - Subscribes to an event + * + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * @elems: size of the events queue + * @ops: pointer to &v4l2_subscribed_event_ops + * + * .. note:: + * + * if @elems is zero, the framework will fill in a default value, + * with is currently 1 element. + */ +int v4l2_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub, + unsigned int elems, + const struct v4l2_subscribed_event_ops *ops); +/** + * v4l2_event_unsubscribe - Unsubscribes to an event + * + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); +/** + * v4l2_event_unsubscribe_all - Unsubscribes to all events + * + * @fh: pointer to &struct v4l2_fh + */ +void v4l2_event_unsubscribe_all(struct v4l2_fh *fh); + +/** + * v4l2_event_subdev_unsubscribe - Subdev variant of v4l2_event_unsubscribe() + * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * + * .. note:: + * + * This function should be used for the &struct v4l2_subdev_core_ops + * %unsubscribe_event field. + */ +int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +/** + * v4l2_src_change_event_subscribe - helper function that calls + * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE. + * + * @fh: pointer to struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_src_change_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); +/** + * v4l2_src_change_event_subdev_subscribe - Variant of v4l2_event_subscribe(), + * meant to subscribe only events of the type %V4L2_EVENT_SOURCE_CHANGE. 
+ * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +#endif /* V4L2_EVENT_H */ diff --git a/6.12.0/include-overrides/media/v4l2-fh.h b/6.12.0/include-overrides/media/v4l2-fh.h new file mode 100644 index 0000000..b5b3e00 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-fh.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-fh.h + * + * V4L2 file handle. Store per file handle data for the V4L2 + * framework. Using file handles is optional for the drivers. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#ifndef V4L2_FH_H +#define V4L2_FH_H + +#include +#include +#include +#include + +struct video_device; +struct v4l2_ctrl_handler; + +/** + * struct v4l2_fh - Describes a V4L2 file handler + * + * @list: list of file handlers + * @vdev: pointer to &struct video_device + * @ctrl_handler: pointer to &struct v4l2_ctrl_handler + * @prio: priority of the file handler, as defined by &enum v4l2_priority + * + * @wait: event' s wait queue + * @subscribe_lock: serialise changes to the subscribed list; guarantee that + * the add and del event callbacks are orderly called + * @subscribed: list of subscribed events + * @available: list of events waiting to be dequeued + * @navailable: number of available events at @available list + * @sequence: event sequence number + * + * @m2m_ctx: pointer to &struct v4l2_m2m_ctx + */ +struct v4l2_fh { + struct list_head list; + struct video_device *vdev; + struct v4l2_ctrl_handler *ctrl_handler; + enum v4l2_priority prio; + + /* Events */ + wait_queue_head_t wait; + struct mutex subscribe_lock; + struct list_head subscribed; + struct list_head available; + unsigned int navailable; + u32 sequence; + + struct v4l2_m2m_ctx *m2m_ctx; +}; + +/** + * v4l2_fh_init - 
Initialise the file handle. + * + * @fh: pointer to &struct v4l2_fh + * @vdev: pointer to &struct video_device + * + * Parts of the V4L2 framework using the + * file handles should be initialised in this function. Must be called + * from driver's v4l2_file_operations->open\(\) handler if the driver + * uses &struct v4l2_fh. + */ +void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev); + +/** + * v4l2_fh_add - Add the fh to the list of file handles on a video_device. + * + * @fh: pointer to &struct v4l2_fh + * + * .. note:: + * The @fh file handle must be initialised first. + */ +void v4l2_fh_add(struct v4l2_fh *fh); + +/** + * v4l2_fh_open - Ancillary routine that can be used as the open\(\) op + * of v4l2_file_operations. + * + * @filp: pointer to struct file + * + * It allocates a v4l2_fh and inits and adds it to the &struct video_device + * associated with the file pointer. + */ +int v4l2_fh_open(struct file *filp); + +/** + * v4l2_fh_del - Remove file handle from the list of file handles. + * + * @fh: pointer to &struct v4l2_fh + * + * On error filp->private_data will be %NULL, otherwise it will point to + * the &struct v4l2_fh. + * + * .. note:: + * Must be called in v4l2_file_operations->release\(\) handler if the driver + * uses &struct v4l2_fh. + */ +void v4l2_fh_del(struct v4l2_fh *fh); + +/** + * v4l2_fh_exit - Release resources related to a file handle. + * + * @fh: pointer to &struct v4l2_fh + * + * Parts of the V4L2 framework using the v4l2_fh must release their + * resources here, too. + * + * .. note:: + * Must be called in v4l2_file_operations->release\(\) handler if the + * driver uses &struct v4l2_fh. + */ +void v4l2_fh_exit(struct v4l2_fh *fh); + +/** + * v4l2_fh_release - Ancillary routine that can be used as the release\(\) op + * of v4l2_file_operations. + * + * @filp: pointer to struct file + * + * It deletes and exits the v4l2_fh associated with the file pointer and + * frees it. 
It will do nothing if filp->private_data (the pointer to the + * v4l2_fh struct) is %NULL. + * + * This function always returns 0. + */ +int v4l2_fh_release(struct file *filp); + +/** + * v4l2_fh_is_singular - Returns 1 if this filehandle is the only filehandle + * opened for the associated video_device. + * + * @fh: pointer to &struct v4l2_fh + * + * If @fh is NULL, then it returns 0. + */ +int v4l2_fh_is_singular(struct v4l2_fh *fh); + +/** + * v4l2_fh_is_singular_file - Returns 1 if this filehandle is the only + * filehandle opened for the associated video_device. + * + * @filp: pointer to struct file + * + * This is a helper function variant of v4l2_fh_is_singular() with uses + * struct file as argument. + * + * If filp->private_data is %NULL, then it will return 0. + */ +static inline int v4l2_fh_is_singular_file(struct file *filp) +{ + return v4l2_fh_is_singular(filp->private_data); +} + +#endif /* V4L2_EVENT_H */ diff --git a/6.12.0/include-overrides/media/v4l2-flash-led-class.h b/6.12.0/include-overrides/media/v4l2-flash-led-class.h new file mode 100644 index 0000000..b106e7a --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-flash-led-class.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 flash LED sub-device registration helpers. 
+ * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski + */ + +#ifndef _V4L2_FLASH_H +#define _V4L2_FLASH_H + +#include +#include + +struct led_classdev_flash; +struct led_classdev; +struct v4l2_flash; +enum led_brightness; + +/** + * struct v4l2_flash_ctrl_data - flash control initialization data, filled + * basing on the features declared by the LED flash + * class driver in the v4l2_flash_config + * @config: initialization data for a control + * @cid: contains v4l2 flash control id if the config + * field was initialized, 0 otherwise + */ +struct v4l2_flash_ctrl_data { + struct v4l2_ctrl_config config; + u32 cid; +}; + +/** + * struct v4l2_flash_ops - V4L2 flash operations + * + * @external_strobe_set: Setup strobing the flash by hardware pin state + * assertion. + * @intensity_to_led_brightness: Convert intensity to brightness in a device + * specific manner + * @led_brightness_to_intensity: convert brightness to intensity in a device + * specific manner. 
+ */ +struct v4l2_flash_ops { + int (*external_strobe_set)(struct v4l2_flash *v4l2_flash, + bool enable); + enum led_brightness (*intensity_to_led_brightness) + (struct v4l2_flash *v4l2_flash, s32 intensity); + s32 (*led_brightness_to_intensity) + (struct v4l2_flash *v4l2_flash, enum led_brightness); +}; + +/** + * struct v4l2_flash_config - V4L2 Flash sub-device initialization data + * @dev_name: the name of the media entity, + * unique in the system + * @intensity: non-flash strobe constraints for the LED + * @flash_faults: bitmask of flash faults that the LED flash class + * device can report; corresponding LED_FAULT* bit + * definitions are available in the header file + * + * @has_external_strobe: external strobe capability + */ +struct v4l2_flash_config { + char dev_name[32]; + struct led_flash_setting intensity; + u32 flash_faults; + unsigned int has_external_strobe:1; +}; + +/** + * struct v4l2_flash - Flash sub-device context + * @fled_cdev: LED flash class device controlled by this sub-device + * @iled_cdev: LED class device representing indicator LED associated + * with the LED flash class device + * @ops: V4L2 specific flash ops + * @sd: V4L2 sub-device + * @hdl: flash controls handler + * @ctrls: array of pointers to controls, whose values define + * the sub-device state + */ +struct v4l2_flash { + struct led_classdev_flash *fled_cdev; + struct led_classdev *iled_cdev; + const struct v4l2_flash_ops *ops; + + struct v4l2_subdev sd; + struct v4l2_ctrl_handler hdl; + struct v4l2_ctrl **ctrls; +}; + +/** + * v4l2_subdev_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_subdev embedded on it. + * + * @sd: pointer to &struct v4l2_subdev + */ +static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash( + struct v4l2_subdev *sd) +{ + return container_of(sd, struct v4l2_flash, sd); +} + +/** + * v4l2_ctrl_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_ctrl embedded on it. 
+ * + * @c: pointer to &struct v4l2_ctrl + */ +static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) +{ + return container_of(c->handler, struct v4l2_flash, hdl); +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +/** + * v4l2_flash_init - initialize V4L2 flash led sub-device + * @dev: flash device, e.g. an I2C device + * @fwn: fwnode_handle of the LED, may be NULL if the same as device's + * @fled_cdev: LED flash class device to wrap + * @ops: V4L2 Flash device ops + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * The ops pointer is stored by the V4L2 flash framework. No + * references are held to config nor its contents once this function + * has returned. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. + */ +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config); + +/** + * v4l2_flash_indicator_init - initialize V4L2 indicator sub-device + * @dev: flash device, e.g. an I2C device + * @fwn: fwnode_handle of the LED, may be NULL if the same as device's + * @iled_cdev: LED flash class device representing the indicator LED + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * The ops pointer is stored by the V4L2 flash framework. No + * references are held to config nor its contents once this function + * has returned. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. 
+ */ +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, struct v4l2_flash_config *config); + +/** + * v4l2_flash_release - release V4L2 Flash sub-device + * @v4l2_flash: the V4L2 Flash sub-device to release + * + * Release V4L2 Flash sub-device. + */ +void v4l2_flash_release(struct v4l2_flash *v4l2_flash); + +#else +static inline struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ +} +#endif /* CONFIG_V4L2_FLASH_LED_CLASS */ + +#endif /* _V4L2_FLASH_H */ diff --git a/6.12.0/include-overrides/media/v4l2-fwnode.h b/6.12.0/include-overrides/media/v4l2-fwnode.h new file mode 100644 index 0000000..f7c57c7 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-fwnode.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 fwnode binding parsing library + * + * Copyright (c) 2016 Intel Corporation. + * Author: Sakari Ailus + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#ifndef _V4L2_FWNODE_H +#define _V4L2_FWNODE_H + +#include +#include +#include +#include + +#include + +/** + * struct v4l2_fwnode_endpoint - the endpoint data structure + * @base: fwnode endpoint of the v4l2_fwnode + * @bus_type: bus type + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel. 
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). + * @link_frequencies: array of supported link frequencies + * @nr_of_link_frequencies: number of elements in link_frequenccies array + */ +struct v4l2_fwnode_endpoint { + struct fwnode_endpoint base; + enum v4l2_mbus_type bus_type; + struct { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; + u64 *link_frequencies; + unsigned int nr_of_link_frequencies; +}; + +/** + * V4L2_FWNODE_PROPERTY_UNSET - identify a non initialized property + * + * All properties in &struct v4l2_fwnode_device_properties are initialized + * to this value. + */ +#define V4L2_FWNODE_PROPERTY_UNSET (-1U) + +/** + * enum v4l2_fwnode_orientation - possible device orientation + * @V4L2_FWNODE_ORIENTATION_FRONT: device installed on the front side + * @V4L2_FWNODE_ORIENTATION_BACK: device installed on the back side + * @V4L2_FWNODE_ORIENTATION_EXTERNAL: device externally located + */ +enum v4l2_fwnode_orientation { + V4L2_FWNODE_ORIENTATION_FRONT, + V4L2_FWNODE_ORIENTATION_BACK, + V4L2_FWNODE_ORIENTATION_EXTERNAL +}; + +/** + * struct v4l2_fwnode_device_properties - fwnode device properties + * @orientation: device orientation. 
See &enum v4l2_fwnode_orientation + * @rotation: device rotation + */ +struct v4l2_fwnode_device_properties { + enum v4l2_fwnode_orientation orientation; + unsigned int rotation; +}; + +/** + * struct v4l2_fwnode_link - a link between two endpoints + * @local_node: pointer to device_node of this endpoint + * @local_port: identifier of the port this endpoint belongs to + * @local_id: identifier of the id this endpoint belongs to + * @remote_node: pointer to device_node of the remote endpoint + * @remote_port: identifier of the port the remote endpoint belongs to + * @remote_id: identifier of the id the remote endpoint belongs to + */ +struct v4l2_fwnode_link { + struct fwnode_handle *local_node; + unsigned int local_port; + unsigned int local_id; + struct fwnode_handle *remote_node; + unsigned int remote_port; + unsigned int remote_id; +}; + +/** + * enum v4l2_connector_type - connector type + * @V4L2_CONN_UNKNOWN: unknown connector type, no V4L2 connector configuration + * @V4L2_CONN_COMPOSITE: analog composite connector + * @V4L2_CONN_SVIDEO: analog svideo connector + */ +enum v4l2_connector_type { + V4L2_CONN_UNKNOWN, + V4L2_CONN_COMPOSITE, + V4L2_CONN_SVIDEO, +}; + +/** + * struct v4l2_connector_link - connector link data structure + * @head: structure to be used to add the link to the + * &struct v4l2_fwnode_connector + * @fwnode_link: &struct v4l2_fwnode_link link between the connector and the + * device the connector belongs to. + */ +struct v4l2_connector_link { + struct list_head head; + struct v4l2_fwnode_link fwnode_link; +}; + +/** + * struct v4l2_fwnode_connector_analog - analog connector data structure + * @sdtv_stds: sdtv standards this connector supports, set to V4L2_STD_ALL + * if no restrictions are specified. 
+ */ +struct v4l2_fwnode_connector_analog { + v4l2_std_id sdtv_stds; +}; + +/** + * struct v4l2_fwnode_connector - the connector data structure + * @name: the connector device name + * @label: optional connector label + * @type: connector type + * @links: list of all connector &struct v4l2_connector_link links + * @nr_of_links: total number of links + * @connector: connector configuration + * @connector.analog: analog connector configuration + * &struct v4l2_fwnode_connector_analog + */ +struct v4l2_fwnode_connector { + const char *name; + const char *label; + enum v4l2_connector_type type; + struct list_head links; + unsigned int nr_of_links; + + union { + struct v4l2_fwnode_connector_analog analog; + /* future connectors */ + } connector; +}; + +/** + * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties + * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property + * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus + * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus + * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus + * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type + * @V4L2_FWNODE_BUS_TYPE_DPI: Video Parallel Interface bus + * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types + */ +enum v4l2_fwnode_bus_type { + V4L2_FWNODE_BUS_TYPE_GUESS = 0, + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_FWNODE_BUS_TYPE_BT656, + V4L2_FWNODE_BUS_TYPE_DPI, + NR_OF_V4L2_FWNODE_BUS_TYPE +}; + +/** + * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties + * @fwnode: pointer to the endpoint's fwnode handle + * @vep: pointer to the V4L2 fwnode data structure + * + * This function parses the V4L2 fwnode endpoint specific parameters from the 
+ * firmware. There are two ways to use this function, either by letting it + * obtain the type of the bus (by setting the @vep.bus_type field to + * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum + * v4l2_mbus_type types. + * + * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type" + * property to determine the type when it is available. The caller is + * responsible for validating the contents of @vep.bus_type field after the call + * returns. + * + * As a deprecated functionality to support older DT bindings without "bus-type" + * property for devices that support multiple types, if the "bus-type" property + * does not exist, the function will attempt to guess the type based on the + * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW + * DRIVERS OR BINDINGS. + * + * It is also possible to set @vep.bus_type corresponding to an actual bus. In + * this case the function will only attempt to parse properties related to this + * bus, and it will return an error if the value of the "bus-type" property + * corresponds to a different bus. + * + * The caller is required to initialise all fields of @vep, either with + * explicitly values, or by zeroing them. + * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * NOTE: This function does not parse "link-frequencies" property as its size is + * not known in advance. Please use v4l2_fwnode_endpoint_alloc_parse() if you + * need properties of variable size. 
+ * + * Return: %0 on success or a negative error code on failure: + * %-ENOMEM on memory allocation failure + * %-EINVAL on parsing failure + * %-ENXIO on mismatching bus types + */ +int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by + * v4l2_fwnode_endpoint_alloc_parse() + * @vep: the V4L2 fwnode the resources of which are to be released + * + * It is safe to call this function with NULL argument or on a V4L2 fwnode the + * parsing of which failed. + */ +void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties + * @fwnode: pointer to the endpoint's fwnode handle + * @vep: pointer to the V4L2 fwnode data structure + * + * This function parses the V4L2 fwnode endpoint specific parameters from the + * firmware. There are two ways to use this function, either by letting it + * obtain the type of the bus (by setting the @vep.bus_type field to + * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum + * v4l2_mbus_type types. + * + * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type" + * property to determine the type when it is available. The caller is + * responsible for validating the contents of @vep.bus_type field after the call + * returns. + * + * As a deprecated functionality to support older DT bindings without "bus-type" + * property for devices that support multiple types, if the "bus-type" property + * does not exist, the function will attempt to guess the type based on the + * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW + * DRIVERS OR BINDINGS. + * + * It is also possible to set @vep.bus_type corresponding to an actual bus. 
In + * this case the function will only attempt to parse properties related to this + * bus, and it will return an error if the value of the "bus-type" property + * corresponds to a different bus. + * + * The caller is required to initialise all fields of @vep, either with + * explicit values, or by zeroing them. + * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * v4l2_fwnode_endpoint_alloc_parse() has two important differences to + * v4l2_fwnode_endpoint_parse(): + * + * 1. It also parses variable size data. + * + * 2. The memory it has allocated to store the variable size data must be freed + * using v4l2_fwnode_endpoint_free() when no longer needed. + * + * Return: %0 on success or a negative error code on failure: + * %-ENOMEM on memory allocation failure + * %-EINVAL on parsing failure + * %-ENXIO on mismatching bus types + */ +int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_parse_link() - parse a link between two endpoints + * @fwnode: pointer to the endpoint's fwnode at the local end of the link + * @link: pointer to the V4L2 fwnode link data structure + * + * Fill the link structure with the local and remote nodes and port numbers. + * The local_node and remote_node fields are set to point to the local and + * remote port's parent nodes respectively (the port parent node being the + * parent node of the port node if that node isn't a 'ports' node, or the + * grand-parent node of the port node otherwise). + * + * A reference is taken to both the local and remote nodes, the caller must use + * v4l2_fwnode_put_link() to drop the references when done with the + * link. + * + * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be + * found.
+ */ +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_link *link); + +/** + * v4l2_fwnode_put_link() - drop references to nodes in a link + * @link: pointer to the V4L2 fwnode link data structure + * + * Drop references to the local and remote nodes in the link. This function + * must be called on every link parsed with v4l2_fwnode_parse_link(). + */ +void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link); + +/** + * v4l2_fwnode_connector_free() - free the V4L2 connector acquired memory + * @connector: the V4L2 connector resources of which are to be released + * + * Free all allocated memory and put all links acquired by + * v4l2_fwnode_connector_parse() and v4l2_fwnode_connector_add_link(). + * + * It is safe to call this function with NULL argument or on a V4L2 connector + * the parsing of which failed. + */ +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_parse() - initialize the 'struct v4l2_fwnode_connector' + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to or to the connector endpoint fwnode handle. + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Fill the &struct v4l2_fwnode_connector with the connector type, label and + * all &enum v4l2_connector_type specific connector data. The label is optional + * so it is set to %NULL if no one was found. The function initialize the links + * to zero. Adding links to the connector is done by calling + * v4l2_fwnode_connector_add_link(). + * + * The memory allocated for the label must be freed when no longer needed. + * Freeing the memory is done by v4l2_fwnode_connector_free(). 
+ * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode is invalid + * * %-ENOTCONN if connector type is unknown or connector device can't be found + */ +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_add_link - add a link between a connector node and + * a v4l2-subdev node. + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Add a new &struct v4l2_connector_link link to the + * &struct v4l2_fwnode_connector connector links list. The link local_node + * points to the connector node, the remote_node to the host v4l2 (sub)dev. + * + * The taken references to remote_node and local_node must be dropped and the + * allocated memory must be freed when no longer needed. Both is done by calling + * v4l2_fwnode_connector_free(). + * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode or @connector is invalid or @connector type is unknown + * * %-ENOMEM on link memory allocation failure + * * %-ENOTCONN if remote connector device can't be found + * * %-ENOLINK if link parsing between v4l2 (sub)dev and connector fails + */ +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_device_parse() - parse fwnode device properties + * @dev: pointer to &struct device + * @props: pointer to &struct v4l2_fwnode_device_properties where to store the + * parsed properties values + * + * This function parses and validates the V4L2 fwnode device properties from the + * firmware interface, and fills the @struct v4l2_fwnode_device_properties + * provided by the caller. 
+ * + * Return: + * % 0 on success + * %-EINVAL if a parsed property value is not valid + */ +int v4l2_fwnode_device_parse(struct device *dev, + struct v4l2_fwnode_device_properties *props); + +/* Helper macros to access the connector links. */ + +/** v4l2_connector_first_link - Helper macro to get the first + * &struct v4l2_fwnode_connector link + * @v4l2c: &struct v4l2_fwnode_connector owning the connector links + * + * This macro returns the first added &struct v4l2_connector_link connector + * link or @NULL if the connector has no links. + */ +#define v4l2_connector_first_link(v4l2c) \ + list_first_entry_or_null(&(v4l2c)->links, \ + struct v4l2_connector_link, head) + +/** v4l2_connector_last_link - Helper macro to get the last + * &struct v4l2_fwnode_connector link + * @v4l2c: &struct v4l2_fwnode_connector owning the connector links + * + * This macro returns the last &struct v4l2_connector_link added connector link. + */ +#define v4l2_connector_last_link(v4l2c) \ + list_last_entry(&(v4l2c)->links, struct v4l2_connector_link, head) + +#endif /* _V4L2_FWNODE_H */ diff --git a/6.12.0/include-overrides/media/v4l2-h264.h b/6.12.0/include-overrides/media/v4l2-h264.h new file mode 100644 index 0000000..0d9eaa9 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-h264.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Helper functions for H264 codecs. + * + * Copyright (c) 2019 Collabora, Ltd.
+ * + * Author: Boris Brezillon + */ + +#ifndef _MEDIA_V4L2_H264_H +#define _MEDIA_V4L2_H264_H + +#include + +/** + * struct v4l2_h264_reflist_builder - Reference list builder object + * + * @refs.top_field_order_cnt: top field order count + * @refs.bottom_field_order_cnt: bottom field order count + * @refs.frame_num: reference frame number + * @refs.longterm: set to true for a long term reference + * @refs: array of references + * @cur_pic_order_count: picture order count of the frame being decoded + * @cur_pic_fields: fields present in the frame being decoded + * @unordered_reflist: unordered list of references. Will be used to generate + * ordered P/B0/B1 lists + * @num_valid: number of valid references in the refs array + * + * This object stores the context of the P/B0/B1 reference list builder. + * This procedure is described in section '8.2.4 Decoding process for reference + * picture lists construction' of the H264 spec. + */ +struct v4l2_h264_reflist_builder { + struct { + s32 top_field_order_cnt; + s32 bottom_field_order_cnt; + int frame_num; + u16 longterm : 1; + } refs[V4L2_H264_NUM_DPB_ENTRIES]; + + s32 cur_pic_order_count; + u8 cur_pic_fields; + + struct v4l2_h264_reference unordered_reflist[V4L2_H264_REF_LIST_LEN]; + u8 num_valid; +}; + +void +v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b, + const struct v4l2_ctrl_h264_decode_params *dec_params, + const struct v4l2_ctrl_h264_sps *sps, + const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]); + +/** + * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists + * + * @builder: reference list builder context + * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry + * is a v4l2_h264_reference structure + * @b1_reflist: 32 sized array used to store the B1 reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the B0/B1 reference lists. 
This procedure is described + * in section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass B0/B1 reference lists to the hardware. + */ +void +v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *b0_reflist, + struct v4l2_h264_reference *b1_reflist); + +/** + * v4l2_h264_build_p_ref_list() - Build the P reference list + * + * @builder: reference list builder context + * @reflist: 32 sized array used to store the P reference list. Each entry + * is a v4l2_h264_reference structure + * + * This function builds the P reference list. This procedure is described in + * section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass a P reference list to the hardware. + */ +void +v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist); + +#endif /* _MEDIA_V4L2_H264_H */ diff --git a/6.12.0/include-overrides/media/v4l2-image-sizes.h b/6.12.0/include-overrides/media/v4l2-image-sizes.h new file mode 100644 index 0000000..24a7a0b --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-image-sizes.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Standard image size definitions + * + * Copyright (C) 2013, Sylwester Nawrocki + */ +#ifndef _IMAGE_SIZES_H +#define _IMAGE_SIZES_H + +#define CIF_WIDTH 352 +#define CIF_HEIGHT 288 + +#define HD_720_WIDTH 1280 +#define HD_720_HEIGHT 720 + +#define HD_1080_WIDTH 1920 +#define HD_1080_HEIGHT 1080 + +#define QCIF_WIDTH 176 +#define QCIF_HEIGHT 144 + +#define QQCIF_WIDTH 88 +#define QQCIF_HEIGHT 72 + +#define QQVGA_WIDTH 160 +#define QQVGA_HEIGHT 120 + +#define QVGA_WIDTH 320 +#define QVGA_HEIGHT 240 + +#define SVGA_WIDTH 800 +#define SVGA_HEIGHT 600 + +#define SXGA_WIDTH 1280 +#define
SXGA_HEIGHT 1024 + +#define VGA_WIDTH 640 +#define VGA_HEIGHT 480 + +#define UXGA_WIDTH 1600 +#define UXGA_HEIGHT 1200 + +#define XGA_WIDTH 1024 +#define XGA_HEIGHT 768 + +#endif /* _IMAGE_SIZES_H */ diff --git a/6.12.0/include-overrides/media/v4l2-ioctl.h b/6.12.0/include-overrides/media/v4l2-ioctl.h new file mode 100644 index 0000000..bdbb7e5 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-ioctl.h @@ -0,0 +1,796 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + * Moved from videodev2.h + * + * Some commonly needed functions for drivers (v4l2-common.o module) + */ +#ifndef _V4L2_IOCTL_H +#define _V4L2_IOCTL_H + +#include +#include +#include +#include +#include /* need __user */ +#include + +struct v4l2_fh; + +/** + * struct v4l2_ioctl_ops - describe operations for each V4L2 ioctl + * + * @vidioc_querycap: pointer to the function that implements + * :ref:`VIDIOC_QUERYCAP ` ioctl + * @vidioc_enum_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for video capture in single and multi plane mode + * @vidioc_enum_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for video overlay + * @vidioc_enum_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for video output in single and multi plane mode + * @vidioc_enum_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for Software Defined Radio capture + * @vidioc_enum_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for Software Defined Radio output + * @vidioc_enum_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for metadata capture + * @vidioc_enum_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT ` ioctl logic + * for metadata output + * 
@vidioc_g_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video capture + * in single plane mode + * @vidioc_g_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video overlay + * @vidioc_g_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_g_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video overlay output + * @vidioc_g_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for raw VBI capture + * @vidioc_g_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for raw VBI output + * @vidioc_g_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for sliced VBI capture + * @vidioc_g_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for sliced VBI output + * @vidioc_g_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_g_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_g_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_g_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_g_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for metadata capture + * @vidioc_g_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for metadata output + * @vidioc_s_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT 
` ioctl logic for video capture + * in single plane mode + * @vidioc_s_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video overlay + * @vidioc_s_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_s_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video overlay output + * @vidioc_s_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for raw VBI capture + * @vidioc_s_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for raw VBI output + * @vidioc_s_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for sliced VBI capture + * @vidioc_s_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for sliced VBI output + * @vidioc_s_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_s_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_s_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_s_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_s_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for metadata capture + * @vidioc_s_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for metadata output + * @vidioc_try_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video capture + * in single plane mode + * 
@vidioc_try_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video overlay + * @vidioc_try_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_try_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video overlay + * output + * @vidioc_try_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for raw VBI capture + * @vidioc_try_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for raw VBI output + * @vidioc_try_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for sliced VBI + * capture + * @vidioc_try_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for sliced VBI output + * @vidioc_try_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_try_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_try_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_try_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_try_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for metadata capture + * @vidioc_try_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for metadata output + * @vidioc_reqbufs: pointer to the function that implements + * :ref:`VIDIOC_REQBUFS ` ioctl + * @vidioc_querybuf: pointer to the function that implements + * 
:ref:`VIDIOC_QUERYBUF ` ioctl + * @vidioc_qbuf: pointer to the function that implements + * :ref:`VIDIOC_QBUF ` ioctl + * @vidioc_expbuf: pointer to the function that implements + * :ref:`VIDIOC_EXPBUF ` ioctl + * @vidioc_dqbuf: pointer to the function that implements + * :ref:`VIDIOC_DQBUF ` ioctl + * @vidioc_create_bufs: pointer to the function that implements + * :ref:`VIDIOC_CREATE_BUFS ` ioctl + * @vidioc_prepare_buf: pointer to the function that implements + * :ref:`VIDIOC_PREPARE_BUF ` ioctl + * @vidioc_remove_bufs: pointer to the function that implements + * :ref:`VIDIOC_REMOVE_BUFS ` ioctl + * @vidioc_overlay: pointer to the function that implements + * :ref:`VIDIOC_OVERLAY ` ioctl + * @vidioc_g_fbuf: pointer to the function that implements + * :ref:`VIDIOC_G_FBUF ` ioctl + * @vidioc_s_fbuf: pointer to the function that implements + * :ref:`VIDIOC_S_FBUF ` ioctl + * @vidioc_streamon: pointer to the function that implements + * :ref:`VIDIOC_STREAMON ` ioctl + * @vidioc_streamoff: pointer to the function that implements + * :ref:`VIDIOC_STREAMOFF ` ioctl + * @vidioc_g_std: pointer to the function that implements + * :ref:`VIDIOC_G_STD ` ioctl + * @vidioc_s_std: pointer to the function that implements + * :ref:`VIDIOC_S_STD ` ioctl + * @vidioc_querystd: pointer to the function that implements + * :ref:`VIDIOC_QUERYSTD ` ioctl + * @vidioc_enum_input: pointer to the function that implements + * :ref:`VIDIOC_ENUM_INPUT ` ioctl + * @vidioc_g_input: pointer to the function that implements + * :ref:`VIDIOC_G_INPUT ` ioctl + * @vidioc_s_input: pointer to the function that implements + * :ref:`VIDIOC_S_INPUT ` ioctl + * @vidioc_enum_output: pointer to the function that implements + * :ref:`VIDIOC_ENUM_OUTPUT ` ioctl + * @vidioc_g_output: pointer to the function that implements + * :ref:`VIDIOC_G_OUTPUT ` ioctl + * @vidioc_s_output: pointer to the function that implements + * :ref:`VIDIOC_S_OUTPUT ` ioctl + * @vidioc_queryctrl: pointer to the function that implements 
+ * :ref:`VIDIOC_QUERYCTRL ` ioctl + * @vidioc_query_ext_ctrl: pointer to the function that implements + * :ref:`VIDIOC_QUERY_EXT_CTRL ` ioctl + * @vidioc_g_ctrl: pointer to the function that implements + * :ref:`VIDIOC_G_CTRL ` ioctl + * @vidioc_s_ctrl: pointer to the function that implements + * :ref:`VIDIOC_S_CTRL ` ioctl + * @vidioc_g_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_G_EXT_CTRLS ` ioctl + * @vidioc_s_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_S_EXT_CTRLS ` ioctl + * @vidioc_try_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_TRY_EXT_CTRLS ` ioctl + * @vidioc_querymenu: pointer to the function that implements + * :ref:`VIDIOC_QUERYMENU ` ioctl + * @vidioc_enumaudio: pointer to the function that implements + * :ref:`VIDIOC_ENUMAUDIO ` ioctl + * @vidioc_g_audio: pointer to the function that implements + * :ref:`VIDIOC_G_AUDIO ` ioctl + * @vidioc_s_audio: pointer to the function that implements + * :ref:`VIDIOC_S_AUDIO ` ioctl + * @vidioc_enumaudout: pointer to the function that implements + * :ref:`VIDIOC_ENUMAUDOUT ` ioctl + * @vidioc_g_audout: pointer to the function that implements + * :ref:`VIDIOC_G_AUDOUT ` ioctl + * @vidioc_s_audout: pointer to the function that implements + * :ref:`VIDIOC_S_AUDOUT ` ioctl + * @vidioc_g_modulator: pointer to the function that implements + * :ref:`VIDIOC_G_MODULATOR ` ioctl + * @vidioc_s_modulator: pointer to the function that implements + * :ref:`VIDIOC_S_MODULATOR ` ioctl + * @vidioc_g_pixelaspect: pointer to the function that implements + * the pixelaspect part of the :ref:`VIDIOC_CROPCAP ` ioctl + * @vidioc_g_selection: pointer to the function that implements + * :ref:`VIDIOC_G_SELECTION ` ioctl + * @vidioc_s_selection: pointer to the function that implements + * :ref:`VIDIOC_S_SELECTION ` ioctl + * @vidioc_g_jpegcomp: pointer to the function that implements + * :ref:`VIDIOC_G_JPEGCOMP ` ioctl + * @vidioc_s_jpegcomp: pointer to the function 
that implements + * :ref:`VIDIOC_S_JPEGCOMP ` ioctl + * @vidioc_g_enc_index: pointer to the function that implements + * :ref:`VIDIOC_G_ENC_INDEX ` ioctl + * @vidioc_encoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_ENCODER_CMD ` ioctl + * @vidioc_try_encoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_TRY_ENCODER_CMD ` ioctl + * @vidioc_decoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_DECODER_CMD ` ioctl + * @vidioc_try_decoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_TRY_DECODER_CMD ` ioctl + * @vidioc_g_parm: pointer to the function that implements + * :ref:`VIDIOC_G_PARM ` ioctl + * @vidioc_s_parm: pointer to the function that implements + * :ref:`VIDIOC_S_PARM ` ioctl + * @vidioc_g_tuner: pointer to the function that implements + * :ref:`VIDIOC_G_TUNER ` ioctl + * @vidioc_s_tuner: pointer to the function that implements + * :ref:`VIDIOC_S_TUNER ` ioctl + * @vidioc_g_frequency: pointer to the function that implements + * :ref:`VIDIOC_G_FREQUENCY ` ioctl + * @vidioc_s_frequency: pointer to the function that implements + * :ref:`VIDIOC_S_FREQUENCY ` ioctl + * @vidioc_enum_freq_bands: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FREQ_BANDS ` ioctl + * @vidioc_g_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_SLICED_VBI_CAP ` ioctl + * @vidioc_log_status: pointer to the function that implements + * :ref:`VIDIOC_LOG_STATUS ` ioctl + * @vidioc_s_hw_freq_seek: pointer to the function that implements + * :ref:`VIDIOC_S_HW_FREQ_SEEK ` ioctl + * @vidioc_g_register: pointer to the function that implements + * :ref:`VIDIOC_DBG_G_REGISTER ` ioctl + * @vidioc_s_register: pointer to the function that implements + * :ref:`VIDIOC_DBG_S_REGISTER ` ioctl + * @vidioc_g_chip_info: pointer to the function that implements + * :ref:`VIDIOC_DBG_G_CHIP_INFO ` ioctl + * @vidioc_enum_framesizes: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FRAMESIZES 
` ioctl + * @vidioc_enum_frameintervals: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FRAMEINTERVALS ` ioctl + * @vidioc_s_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_S_DV_TIMINGS ` ioctl + * @vidioc_g_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_G_DV_TIMINGS ` ioctl + * @vidioc_query_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_QUERY_DV_TIMINGS ` ioctl + * @vidioc_enum_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_ENUM_DV_TIMINGS ` ioctl + * @vidioc_dv_timings_cap: pointer to the function that implements + * :ref:`VIDIOC_DV_TIMINGS_CAP ` ioctl + * @vidioc_g_edid: pointer to the function that implements + * :ref:`VIDIOC_G_EDID ` ioctl + * @vidioc_s_edid: pointer to the function that implements + * :ref:`VIDIOC_S_EDID ` ioctl + * @vidioc_subscribe_event: pointer to the function that implements + * :ref:`VIDIOC_SUBSCRIBE_EVENT ` ioctl + * @vidioc_unsubscribe_event: pointer to the function that implements + * :ref:`VIDIOC_UNSUBSCRIBE_EVENT ` ioctl + * @vidioc_default: pointed used to allow other ioctls + */ +struct v4l2_ioctl_ops { + /* ioctl callbacks */ + + /* VIDIOC_QUERYCAP handler */ + int (*vidioc_querycap)(struct file *file, void *fh, + struct v4l2_capability *cap); + + /* VIDIOC_ENUM_FMT handlers */ + int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + + /* VIDIOC_G_FMT handlers */ + 
int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); + + /* VIDIOC_S_FMT handlers */ + int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int 
(*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); + + /* VIDIOC_TRY_FMT handlers */ + int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh, + struct v4l2_format *f); + int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); + + /* Buffer handlers */ + int (*vidioc_reqbufs)(struct file *file, void *fh, + struct v4l2_requestbuffers *b); + int (*vidioc_querybuf)(struct file *file, void *fh, + struct 
v4l2_buffer *b); + int (*vidioc_qbuf)(struct file *file, void *fh, + struct v4l2_buffer *b); + int (*vidioc_expbuf)(struct file *file, void *fh, + struct v4l2_exportbuffer *e); + int (*vidioc_dqbuf)(struct file *file, void *fh, + struct v4l2_buffer *b); + + int (*vidioc_create_bufs)(struct file *file, void *fh, + struct v4l2_create_buffers *b); + int (*vidioc_prepare_buf)(struct file *file, void *fh, + struct v4l2_buffer *b); + int (*vidioc_remove_bufs)(struct file *file, void *fh, + struct v4l2_remove_buffers *d); + + int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i); + int (*vidioc_g_fbuf)(struct file *file, void *fh, + struct v4l2_framebuffer *a); + int (*vidioc_s_fbuf)(struct file *file, void *fh, + const struct v4l2_framebuffer *a); + + /* Stream on/off */ + int (*vidioc_streamon)(struct file *file, void *fh, + enum v4l2_buf_type i); + int (*vidioc_streamoff)(struct file *file, void *fh, + enum v4l2_buf_type i); + + /* + * Standard handling + * + * Note: ENUMSTD is handled by videodev.c + */ + int (*vidioc_g_std)(struct file *file, void *fh, v4l2_std_id *norm); + int (*vidioc_s_std)(struct file *file, void *fh, v4l2_std_id norm); + int (*vidioc_querystd)(struct file *file, void *fh, v4l2_std_id *a); + + /* Input handling */ + int (*vidioc_enum_input)(struct file *file, void *fh, + struct v4l2_input *inp); + int (*vidioc_g_input)(struct file *file, void *fh, unsigned int *i); + int (*vidioc_s_input)(struct file *file, void *fh, unsigned int i); + + /* Output handling */ + int (*vidioc_enum_output)(struct file *file, void *fh, + struct v4l2_output *a); + int (*vidioc_g_output)(struct file *file, void *fh, unsigned int *i); + int (*vidioc_s_output)(struct file *file, void *fh, unsigned int i); + + /* Control handling */ + int (*vidioc_queryctrl)(struct file *file, void *fh, + struct v4l2_queryctrl *a); + int (*vidioc_query_ext_ctrl)(struct file *file, void *fh, + struct v4l2_query_ext_ctrl *a); + int (*vidioc_g_ctrl)(struct file *file, void *fh, 
+ struct v4l2_control *a); + int (*vidioc_s_ctrl)(struct file *file, void *fh, + struct v4l2_control *a); + int (*vidioc_g_ext_ctrls)(struct file *file, void *fh, + struct v4l2_ext_controls *a); + int (*vidioc_s_ext_ctrls)(struct file *file, void *fh, + struct v4l2_ext_controls *a); + int (*vidioc_try_ext_ctrls)(struct file *file, void *fh, + struct v4l2_ext_controls *a); + int (*vidioc_querymenu)(struct file *file, void *fh, + struct v4l2_querymenu *a); + + /* Audio ioctls */ + int (*vidioc_enumaudio)(struct file *file, void *fh, + struct v4l2_audio *a); + int (*vidioc_g_audio)(struct file *file, void *fh, + struct v4l2_audio *a); + int (*vidioc_s_audio)(struct file *file, void *fh, + const struct v4l2_audio *a); + + /* Audio out ioctls */ + int (*vidioc_enumaudout)(struct file *file, void *fh, + struct v4l2_audioout *a); + int (*vidioc_g_audout)(struct file *file, void *fh, + struct v4l2_audioout *a); + int (*vidioc_s_audout)(struct file *file, void *fh, + const struct v4l2_audioout *a); + int (*vidioc_g_modulator)(struct file *file, void *fh, + struct v4l2_modulator *a); + int (*vidioc_s_modulator)(struct file *file, void *fh, + const struct v4l2_modulator *a); + /* Crop ioctls */ + int (*vidioc_g_pixelaspect)(struct file *file, void *fh, + int buf_type, struct v4l2_fract *aspect); + int (*vidioc_g_selection)(struct file *file, void *fh, + struct v4l2_selection *s); + int (*vidioc_s_selection)(struct file *file, void *fh, + struct v4l2_selection *s); + /* Compression ioctls */ + int (*vidioc_g_jpegcomp)(struct file *file, void *fh, + struct v4l2_jpegcompression *a); + int (*vidioc_s_jpegcomp)(struct file *file, void *fh, + const struct v4l2_jpegcompression *a); + int (*vidioc_g_enc_index)(struct file *file, void *fh, + struct v4l2_enc_idx *a); + int (*vidioc_encoder_cmd)(struct file *file, void *fh, + struct v4l2_encoder_cmd *a); + int (*vidioc_try_encoder_cmd)(struct file *file, void *fh, + struct v4l2_encoder_cmd *a); + int (*vidioc_decoder_cmd)(struct file 
*file, void *fh, + struct v4l2_decoder_cmd *a); + int (*vidioc_try_decoder_cmd)(struct file *file, void *fh, + struct v4l2_decoder_cmd *a); + + /* Stream type-dependent parameter ioctls */ + int (*vidioc_g_parm)(struct file *file, void *fh, + struct v4l2_streamparm *a); + int (*vidioc_s_parm)(struct file *file, void *fh, + struct v4l2_streamparm *a); + + /* Tuner ioctls */ + int (*vidioc_g_tuner)(struct file *file, void *fh, + struct v4l2_tuner *a); + int (*vidioc_s_tuner)(struct file *file, void *fh, + const struct v4l2_tuner *a); + int (*vidioc_g_frequency)(struct file *file, void *fh, + struct v4l2_frequency *a); + int (*vidioc_s_frequency)(struct file *file, void *fh, + const struct v4l2_frequency *a); + int (*vidioc_enum_freq_bands)(struct file *file, void *fh, + struct v4l2_frequency_band *band); + + /* Sliced VBI cap */ + int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *fh, + struct v4l2_sliced_vbi_cap *a); + + /* Log status ioctl */ + int (*vidioc_log_status)(struct file *file, void *fh); + + int (*vidioc_s_hw_freq_seek)(struct file *file, void *fh, + const struct v4l2_hw_freq_seek *a); + + /* Debugging ioctls */ +#ifdef CONFIG_VIDEO_ADV_DEBUG + int (*vidioc_g_register)(struct file *file, void *fh, + struct v4l2_dbg_register *reg); + int (*vidioc_s_register)(struct file *file, void *fh, + const struct v4l2_dbg_register *reg); + + int (*vidioc_g_chip_info)(struct file *file, void *fh, + struct v4l2_dbg_chip_info *chip); +#endif + + int (*vidioc_enum_framesizes)(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize); + + int (*vidioc_enum_frameintervals)(struct file *file, void *fh, + struct v4l2_frmivalenum *fival); + + /* DV Timings IOCTLs */ + int (*vidioc_s_dv_timings)(struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int (*vidioc_g_dv_timings)(struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int (*vidioc_query_dv_timings)(struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int 
(*vidioc_enum_dv_timings)(struct file *file, void *fh, + struct v4l2_enum_dv_timings *timings); + int (*vidioc_dv_timings_cap)(struct file *file, void *fh, + struct v4l2_dv_timings_cap *cap); + int (*vidioc_g_edid)(struct file *file, void *fh, + struct v4l2_edid *edid); + int (*vidioc_s_edid)(struct file *file, void *fh, + struct v4l2_edid *edid); + + int (*vidioc_subscribe_event)(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + + /* For other private ioctls */ + long (*vidioc_default)(struct file *file, void *fh, + bool valid_prio, unsigned int cmd, void *arg); +}; + + +/* v4l debugging and diagnostics */ + +/* Device debug flags to be used with the video device debug attribute */ + +/* Just log the ioctl name + error code */ +#define V4L2_DEV_DEBUG_IOCTL 0x01 +/* Log the ioctl name arguments + error code */ +#define V4L2_DEV_DEBUG_IOCTL_ARG 0x02 +/* Log the file operations open, release, mmap and get_unmapped_area */ +#define V4L2_DEV_DEBUG_FOP 0x04 +/* Log the read and write file operations and the VIDIOC_(D)QBUF ioctls */ +#define V4L2_DEV_DEBUG_STREAMING 0x08 +/* Log poll() */ +#define V4L2_DEV_DEBUG_POLL 0x10 +/* Log controls */ +#define V4L2_DEV_DEBUG_CTRL 0x20 + +/* Video standard functions */ + +/** + * v4l2_norm_to_name - Ancillary routine to analog TV standard name from its ID. + * + * @id: analog TV standard ID. + * + * Return: returns a string with the name of the analog TV standard. + * If the standard is not found or if @id points to multiple standard, + * it returns "Unknown". + */ +const char *v4l2_norm_to_name(v4l2_std_id id); + +/** + * v4l2_video_std_frame_period - Ancillary routine that fills a + * struct &v4l2_fract pointer with the default framerate fraction. + * + * @id: analog TV standard ID. 
+ * @frameperiod: struct &v4l2_fract pointer to be filled + * + */ +void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod); + +/** + * v4l2_video_std_construct - Ancillary routine that fills in the fields of + * a &v4l2_standard structure according to the @id parameter. + * + * @vs: struct &v4l2_standard pointer to be filled + * @id: analog TV standard ID. + * @name: name of the standard to be used + * + * .. note:: + * + * This ancillary routine is obsolete. Shouldn't be used on newer drivers. + */ +int v4l2_video_std_construct(struct v4l2_standard *vs, + int id, const char *name); + +/** + * v4l_video_std_enumstd - Ancillary routine that fills in the fields of + * a &v4l2_standard structure according to the @id and @vs->index + * parameters. + * + * @vs: struct &v4l2_standard pointer to be filled. + * @id: analog TV standard ID. + * + */ +int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id); + +/** + * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a + * human-readable format. + * + * @prefix: prefix to be added at the ioctl prints. + * @cmd: ioctl name + * + * .. note:: + * + * If prefix != %NULL, then it will issue a + * ``printk(KERN_DEBUG "%s: ", prefix)`` first. + */ +void v4l_printk_ioctl(const char *prefix, unsigned int cmd); + +struct video_device; + +/* names for fancy debug output */ +extern const char *v4l2_field_names[]; +extern const char *v4l2_type_names[]; + +#ifdef CONFIG_COMPAT +/** + * v4l2_compat_ioctl32 -32 Bits compatibility layer for 64 bits processors + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. 
+ */ +long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +unsigned int v4l2_compat_translate_cmd(unsigned int cmd); +int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd); +int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd); +int v4l2_compat_get_array_args(struct file *file, void *mbuf, + void __user *user_ptr, size_t array_size, + unsigned int cmd, void *arg); +int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr, + void *mbuf, size_t array_size, + unsigned int cmd, void *arg); + +/** + * typedef v4l2_kioctl - Typedef used to pass an ioctl handler. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + */ +typedef long (*v4l2_kioctl)(struct file *file, unsigned int cmd, void *arg); + +/** + * video_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will handle the ioctl + * + * .. note:: + * + * This routine should be used only inside the V4L2 core. + */ +long int video_usercopy(struct file *file, unsigned int cmd, + unsigned long int arg, v4l2_kioctl func); + +/** + * video_ioctl2 - Handles a V4L2 ioctl. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Method used to hancle an ioctl. Should be used to fill the + * &v4l2_ioctl_ops.unlocked_ioctl on all V4L2 drivers. + */ +long int video_ioctl2(struct file *file, + unsigned int cmd, unsigned long int arg); + +/* + * The user space interpretation of the 'v4l2_event' differs + * based on the 'time_t' definition on 32-bit architectures, so + * the kernel has to handle both. + * This is the old version for 32-bit architectures. 
+ */ +struct v4l2_event_time32 { + __u32 type; + union { + struct v4l2_event_vsync vsync; + struct v4l2_event_ctrl ctrl; + struct v4l2_event_frame_sync frame_sync; + struct v4l2_event_src_change src_change; + struct v4l2_event_motion_det motion_det; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + +#define VIDIOC_DQEVENT_TIME32 _IOR('V', 89, struct v4l2_event_time32) + +struct v4l2_buffer_time32 { + __u32 index; + __u32 type; + __u32 bytesused; + __u32 flags; + __u32 field; + struct old_timeval32 timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; + union { + __u32 offset; + unsigned long userptr; + struct v4l2_plane *planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + union { + __s32 request_fd; + __u32 reserved; + }; +}; +#define VIDIOC_QUERYBUF_TIME32 _IOWR('V', 9, struct v4l2_buffer_time32) +#define VIDIOC_QBUF_TIME32 _IOWR('V', 15, struct v4l2_buffer_time32) +#define VIDIOC_DQBUF_TIME32 _IOWR('V', 17, struct v4l2_buffer_time32) +#define VIDIOC_PREPARE_BUF_TIME32 _IOWR('V', 93, struct v4l2_buffer_time32) + +#endif /* _V4L2_IOCTL_H */ diff --git a/6.12.0/include-overrides/media/v4l2-jpeg.h b/6.12.0/include-overrides/media/v4l2-jpeg.h new file mode 100644 index 0000000..b65658a --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-jpeg.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 JPEG helpers header + * + * Copyright (C) 2019 Pengutronix, Philipp Zabel + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) + */ + +#ifndef _V4L2_JPEG_H +#define _V4L2_JPEG_H + +#include + +#define V4L2_JPEG_MAX_COMPONENTS 4 +#define V4L2_JPEG_MAX_TABLES 4 +/* + * Prefixes used to generate huffman table class and destination identifiers as + * described below: + * + * V4L2_JPEG_LUM_HT | V4L2_JPEG_DC_HT : Prefix for Luma DC coefficients + * huffman table + * V4L2_JPEG_LUM_HT | V4L2_JPEG_AC_HT : 
Prefix for Luma AC coefficients + * huffman table + * V4L2_JPEG_CHR_HT | V4L2_JPEG_DC_HT : Prefix for Chroma DC coefficients + * huffman table + * V4L2_JPEG_CHR_HT | V4L2_JPEG_AC_HT : Prefix for Chroma AC coefficients + * huffman table + */ +#define V4L2_JPEG_LUM_HT 0x00 +#define V4L2_JPEG_CHR_HT 0x01 +#define V4L2_JPEG_DC_HT 0x00 +#define V4L2_JPEG_AC_HT 0x10 + +/* Length of reference huffman tables as provided in Table K.3 of ITU-T.81 */ +#define V4L2_JPEG_REF_HT_AC_LEN 178 +#define V4L2_JPEG_REF_HT_DC_LEN 28 + +/* Array size for 8x8 block of samples or DCT coefficient */ +#define V4L2_JPEG_PIXELS_IN_BLOCK 64 + +/** + * struct v4l2_jpeg_reference - reference into the JPEG buffer + * @start: pointer to the start of the referenced segment or table + * @length: size of the referenced segment or table + * + * Wnen referencing marker segments, start points right after the marker code, + * and length is the size of the segment parameters, excluding the marker code. + */ +struct v4l2_jpeg_reference { + u8 *start; + size_t length; +}; + +/* B.2.2 Frame header syntax */ + +/** + * struct v4l2_jpeg_frame_component_spec - frame component-specification + * @component_identifier: C[i] + * @horizontal_sampling_factor: H[i] + * @vertical_sampling_factor: V[i] + * @quantization_table_selector: quantization table destination selector Tq[i] + */ +struct v4l2_jpeg_frame_component_spec { + u8 component_identifier; + u8 horizontal_sampling_factor; + u8 vertical_sampling_factor; + u8 quantization_table_selector; +}; + +/** + * struct v4l2_jpeg_frame_header - JPEG frame header + * @height: Y + * @width: X + * @precision: P + * @num_components: Nf + * @component: component-specification, see v4l2_jpeg_frame_component_spec + * @subsampling: decoded subsampling from component-specification + */ +struct v4l2_jpeg_frame_header { + u16 height; + u16 width; + u8 precision; + u8 num_components; + struct v4l2_jpeg_frame_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + enum 
v4l2_jpeg_chroma_subsampling subsampling; +}; + +/* B.2.3 Scan header syntax */ + +/** + * struct v4l2_jpeg_scan_component_spec - scan component-specification + * @component_selector: Cs[j] + * @dc_entropy_coding_table_selector: Td[j] + * @ac_entropy_coding_table_selector: Ta[j] + */ +struct v4l2_jpeg_scan_component_spec { + u8 component_selector; + u8 dc_entropy_coding_table_selector; + u8 ac_entropy_coding_table_selector; +}; + +/** + * struct v4l2_jpeg_scan_header - JPEG scan header + * @num_components: Ns + * @component: component-specification, see v4l2_jpeg_scan_component_spec + */ +struct v4l2_jpeg_scan_header { + u8 num_components; /* Ns */ + struct v4l2_jpeg_scan_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + /* Ss, Se, Ah, and Al are not used by any driver */ +}; + +/** + * enum v4l2_jpeg_app14_tf - APP14 transform flag + * According to Rec. ITU-T T.872 (06/2012) 6.5.3 + * APP14 segment is for color encoding, it contains a transform flag, + * which may have values of 0, 1 and 2 and are interpreted as follows: + * @V4L2_JPEG_APP14_TF_CMYK_RGB: CMYK for images encoded with four components + * RGB for images encoded with three components + * @V4L2_JPEG_APP14_TF_YCBCR: an image encoded with three components using YCbCr + * @V4L2_JPEG_APP14_TF_YCCK: an image encoded with four components using YCCK + * @V4L2_JPEG_APP14_TF_UNKNOWN: indicate app14 is not present + */ +enum v4l2_jpeg_app14_tf { + V4L2_JPEG_APP14_TF_CMYK_RGB = 0, + V4L2_JPEG_APP14_TF_YCBCR = 1, + V4L2_JPEG_APP14_TF_YCCK = 2, + V4L2_JPEG_APP14_TF_UNKNOWN = -1, +}; + +/** + * struct v4l2_jpeg_header - parsed JPEG header + * @sof: pointer to frame header and size + * @sos: pointer to scan header and size + * @num_dht: number of entries in @dht + * @dht: pointers to huffman tables and sizes + * @num_dqt: number of entries in @dqt + * @dqt: pointers to quantization tables and sizes + * @frame: parsed frame header + * @scan: pointer to parsed scan header, optional + * @quantization_tables: 
references to four quantization tables, optional + * @huffman_tables: references to four Huffman tables in DC0, DC1, AC0, AC1 + * order, optional + * @restart_interval: number of MCU per restart interval, Ri + * @ecs_offset: buffer offset in bytes to the entropy coded segment + * @app14_tf: transform flag from app14 data + * + * When this structure is passed to v4l2_jpeg_parse_header, the optional scan, + * quantization_tables, and huffman_tables pointers must be initialized to NULL + * or point at valid memory. + */ +struct v4l2_jpeg_header { + struct v4l2_jpeg_reference sof; + struct v4l2_jpeg_reference sos; + unsigned int num_dht; + struct v4l2_jpeg_reference dht[V4L2_JPEG_MAX_TABLES]; + unsigned int num_dqt; + struct v4l2_jpeg_reference dqt[V4L2_JPEG_MAX_TABLES]; + + struct v4l2_jpeg_frame_header frame; + struct v4l2_jpeg_scan_header *scan; + struct v4l2_jpeg_reference *quantization_tables; + struct v4l2_jpeg_reference *huffman_tables; + u16 restart_interval; + size_t ecs_offset; + enum v4l2_jpeg_app14_tf app14_tf; +}; + +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out); + +int v4l2_jpeg_parse_frame_header(void *buf, size_t len, + struct v4l2_jpeg_frame_header *frame_header); +int v4l2_jpeg_parse_scan_header(void *buf, size_t len, + struct v4l2_jpeg_scan_header *scan_header); +int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision, + struct v4l2_jpeg_reference *q_tables); +int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len, + struct v4l2_jpeg_reference *huffman_tables); + +extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN]; +extern const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN]; +extern const u8 
v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN]; +extern const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN]; + +#endif diff --git a/6.12.0/include-overrides/media/v4l2-mc.h b/6.12.0/include-overrides/media/v4l2-mc.h new file mode 100644 index 0000000..1837c9f --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-mc.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * v4l2-mc.h - Media Controller V4L2 types and prototypes + * + * Copyright (C) 2016 Mauro Carvalho Chehab + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + */ + +#ifndef _V4L2_MC_H +#define _V4L2_MC_H + +#include +#include +#include +#include + +/* We don't need to include pci.h or usb.h here */ +struct pci_dev; +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER +/** + * v4l2_mc_create_media_graph() - create Media Controller links at the graph. + * + * @mdev: pointer to the &media_device struct. + * + * Add links between the entities commonly found on PC customer's hardware at + * the V4L2 side: camera sensors, audio and video PLL-IF decoders, tuners, + * analog TV decoder and I/O entities (video, VBI and Software Defined Radio). + * + * .. note:: + * + * Webcams are modelled on a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW are hidden. While such mapping is enough for v4l2 + * interface centric PC-consumer's hardware, V4L2 subdev centric camera + * hardware should not use this routine, as it will not build the right graph. + */ +int v4l2_mc_create_media_graph(struct media_device *mdev); + +/** + * v4l_enable_media_source() - Hold media source for exclusive use + * if free + * + * @vdev: pointer to struct video_device + * + * This interface calls enable_source handler to determine if + * media source is free for use. 
The enable_source handler is + * responsible for checking is the media source is free and + * start a pipeline between the media source and the media + * entity associated with the video device. This interface + * should be called from v4l2-core and dvb-core interfaces + * that change the source configuration. + * + * Return: returns zero on success or a negative error code. + */ +int v4l_enable_media_source(struct video_device *vdev); + +/** + * v4l_disable_media_source() - Release media source + * + * @vdev: pointer to struct video_device + * + * This interface calls disable_source handler to release + * the media source. The disable_source handler stops the + * active media pipeline between the media source and the + * media entity associated with the video device. + * + * Return: returns zero on success or a negative error code. + */ +void v4l_disable_media_source(struct video_device *vdev); + +/* + * v4l_vb2q_enable_media_tuner - Hold media source for exclusive use + * if free. + * @q - pointer to struct vb2_queue + * + * Wrapper for v4l_enable_media_source(). This function should + * be called from v4l2-core to enable the media source with + * pointer to struct vb2_queue as the input argument. Some + * v4l2-core interfaces don't have access to video device and + * this interface finds the struct video_device for the q and + * calls v4l_enable_media_source(). + */ +int v4l_vb2q_enable_media_source(struct vb2_queue *q); + +/** + * v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a + * source subdev to a sink pad. + * + * @src_sd: pointer to a source subdev + * @sink: pointer to a sink pad + * @flags: the link flags + * + * This function searches for fwnode endpoint connections from a source + * subdevice to a single sink pad, and if suitable connections are found, + * translates them into media links to that pad. 
The function can be + * called by the sink, in its v4l2-async notifier bound callback, to create + * links from a bound source subdevice. + * + * The @flags argument specifies the link flags. The caller shall ensure that + * the flags are valid regardless of the number of links that may be created. + * For instance, setting the MEDIA_LNK_FL_ENABLED flag will cause all created + * links to be enabled, which isn't valid if more than one link is created. + * + * .. note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink, u32 flags); + +/** + * v4l2_create_fwnode_links - Create fwnode-based links from a source + * subdev to a sink subdev. + * + * @src_sd: pointer to a source subdevice + * @sink_sd: pointer to a sink subdevice + * + * This function searches for any and all fwnode endpoint connections + * between source and sink subdevices, and translates them into media + * links. The function can be called by the sink subdevice, in its + * v4l2-async notifier subdev bound callback, to create all links from + * a bound source subdevice. + * + * .. note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd); + +/** + * v4l2_pipeline_pm_get - Increase the use count of a pipeline + * @entity: The root entity of a pipeline + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. 
+ * + * Update the use count of all entities in the pipeline and power entities on. + * + * This function is intended to be called in video node open. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_pipeline_pm_get(struct media_entity *entity); + +/** + * v4l2_pipeline_pm_put - Decrease the use count of a pipeline + * @entity: The root entity of a pipeline + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * Update the use count of all entities in the pipeline and power entities off. + * + * This function is intended to be called in video node release. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + */ +void v4l2_pipeline_pm_put(struct media_entity *entity); + + +/** + * v4l2_pipeline_link_notify - Link management notification callback + * @link: The link + * @flags: New link flags that will be applied + * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*) + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * React to link management on powered pipelines by updating the use count of + * all entities in the source and sink sides of the link. Entities are powered + * on or off accordingly. The use of this function should be paired + * with v4l2_pipeline_pm_{get,put}(). + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. This function will not fail for disconnection + * events. 
+ */ +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification); + +#else /* CONFIG_MEDIA_CONTROLLER */ + +static inline int v4l2_mc_create_media_graph(struct media_device *mdev) +{ + return 0; +} + +static inline int v4l_enable_media_source(struct video_device *vdev) +{ + return 0; +} + +static inline void v4l_disable_media_source(struct video_device *vdev) +{ +} + +static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + return 0; +} + +static inline int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return 0; +} + +static inline void v4l2_pipeline_pm_put(struct media_entity *entity) +{} + +static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + return 0; +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ +#endif /* _V4L2_MC_H */ diff --git a/6.12.0/include-overrides/media/v4l2-mediabus.h b/6.12.0/include-overrides/media/v4l2-mediabus.h new file mode 100644 index 0000000..5bce6e4 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-mediabus.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski + */ + +#ifndef V4L2_MEDIABUS_H +#define V4L2_MEDIABUS_H + +#include +#include + +/* + * How to use the V4L2_MBUS_* flags: + * Flags are defined for each of the possible states and values of a media + * bus configuration parameter. One and only one bit of each group of flags + * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config + * operation to ensure that no conflicting settings are specified when + * reporting the media bus configuration. For example, it is invalid to set or + * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the + * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag + * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set. 
+ * + * TODO: replace the existing V4L2_MBUS_* flags with structures of fields + * to avoid conflicting settings. + * + * In example: + * #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) + * #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) + * will be replaced by a field whose value reports the intended active state of + * the signal: + * unsigned int v4l2_mbus_hsync_active : 1; + */ + +/* Parallel flags */ +/* + * The client runs in master or in slave mode. By "Master mode" an operation + * mode is meant, when the client (e.g., a camera sensor) is producing + * horizontal and vertical synchronisation. In "Slave mode" the host is + * providing these signals to the slave. + */ +#define V4L2_MBUS_MASTER BIT(0) +#define V4L2_MBUS_SLAVE BIT(1) +/* + * Signal polarity flags + * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused + * V4L2_MBUS_[HV]SYNC* flags should be also used for specifying + * configuration of hardware that uses [HV]REF signals + */ +#define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) +#define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) +#define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4) +#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5) +#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6) +#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7) +#define V4L2_MBUS_PCLK_SAMPLE_DUALEDGE BIT(8) +#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(9) +#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(10) +/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */ +#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(11) +/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */ +#define V4L2_MBUS_FIELD_EVEN_LOW BIT(12) +/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */ +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(13) +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(14) +#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(15) +#define V4L2_MBUS_DATA_ENABLE_LOW BIT(16) + +/* Serial flags */ +/* Clock non-continuous mode support. 
*/ +#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0) + +#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8 + +/** + * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @data_lanes: an array of physical data lane indexes + * @clock_lane: physical lane index of the clock lane + * @num_data_lanes: number of data lanes + * @lane_polarities: polarity of the lanes. The order is the same of + * the physical lanes. + */ +struct v4l2_mbus_config_mipi_csi2 { + unsigned int flags; + unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES]; + unsigned char clock_lane; + unsigned char num_data_lanes; + bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; +}; + +/** + * struct v4l2_mbus_config_parallel - parallel data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @bus_width: bus width in bits + * @data_shift: data shift in bits + */ +struct v4l2_mbus_config_parallel { + unsigned int flags; + unsigned char bus_width; + unsigned char data_shift; +}; + +/** + * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration + * @clock_inv: polarity of clock/strobe signal + * false - not inverted, true - inverted + * @strobe: false - data/clock, true - data/strobe + * @lane_polarity: the polarities of the clock (index 0) and data lanes + * index (1) + * @data_lane: the number of the data lane + * @clock_lane: the number of the clock lane + */ +struct v4l2_mbus_config_mipi_csi1 { + unsigned char clock_inv:1; + unsigned char strobe:1; + bool lane_polarity[2]; + unsigned char data_lane; + unsigned char clock_lane; +}; + +/** + * enum v4l2_mbus_type - media bus type + * @V4L2_MBUS_UNKNOWN: unknown bus type, no V4L2 mediabus configuration + * @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync + * @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can + * also be used for BT.1120 + * @V4L2_MBUS_CSI1: MIPI CSI-1 serial interface + * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2) + * 
@V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY + * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY + * @V4L2_MBUS_DPI: MIPI VIDEO DPI interface + * @V4L2_MBUS_INVALID: invalid bus type (keep as last) + */ +enum v4l2_mbus_type { + V4L2_MBUS_UNKNOWN, + V4L2_MBUS_PARALLEL, + V4L2_MBUS_BT656, + V4L2_MBUS_CSI1, + V4L2_MBUS_CCP2, + V4L2_MBUS_CSI2_DPHY, + V4L2_MBUS_CSI2_CPHY, + V4L2_MBUS_DPI, + V4L2_MBUS_INVALID, +}; + +/** + * struct v4l2_mbus_config - media bus configuration + * @type: interface type + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel or BT.656. + * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). + */ +struct v4l2_mbus_config { + enum v4l2_mbus_type type; + union { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; +}; + +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_pix_format fields from a &struct v4l2_mbus_framefmt. 
+ * + * @pix_fmt: pointer to &struct v4l2_pix_format to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ +static inline void +v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_fmt->width = mbus_fmt->width; + pix_fmt->height = mbus_fmt->height; + pix_fmt->field = mbus_fmt->field; + pix_fmt->colorspace = mbus_fmt->colorspace; + pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_fmt->quantization = mbus_fmt->quantization; + pix_fmt->xfer_func = mbus_fmt->xfer_func; +} + +/** + * v4l2_fill_mbus_format - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a + * data format code. + * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model + * @code: data format code (from &enum v4l2_mbus_pixelcode) + */ +static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format *pix_fmt, + u32 code) +{ + mbus_fmt->width = pix_fmt->width; + mbus_fmt->height = pix_fmt->height; + mbus_fmt->field = pix_fmt->field; + mbus_fmt->colorspace = pix_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_fmt->quantization; + mbus_fmt->xfer_func = pix_fmt->xfer_func; + mbus_fmt->code = code; +} + +/** + * v4l2_fill_pix_format_mplane - Ancillary routine that fills a &struct + * v4l2_pix_format_mplane fields from a media bus structure. 
+ * + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ +static inline void +v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_mp_fmt->width = mbus_fmt->width; + pix_mp_fmt->height = mbus_fmt->height; + pix_mp_fmt->field = mbus_fmt->field; + pix_mp_fmt->colorspace = mbus_fmt->colorspace; + pix_mp_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_mp_fmt->quantization = mbus_fmt->quantization; + pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; +} + +/** + * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane. + * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model + */ +static inline void +v4l2_fill_mbus_format_mplane(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format_mplane *pix_mp_fmt) +{ + mbus_fmt->width = pix_mp_fmt->width; + mbus_fmt->height = pix_mp_fmt->height; + mbus_fmt->field = pix_mp_fmt->field; + mbus_fmt->colorspace = pix_mp_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_mp_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_mp_fmt->quantization; + mbus_fmt->xfer_func = pix_mp_fmt->xfer_func; +} + +#endif diff --git a/6.12.0/include-overrides/media/v4l2-mem2mem.h b/6.12.0/include-overrides/media/v4l2-mem2mem.h new file mode 100644 index 0000000..0af330c --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-mem2mem.h @@ -0,0 +1,902 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Memory-to-memory device framework for Video for Linux 2. + * + * Helper functions for devices that use memory buffers for both source + * and destination. + * + * Copyright (c) 2009 Samsung Electronics Co., Ltd. 
+ * Pawel Osciak, + * Marek Szyprowski, + */ + +#ifndef _MEDIA_V4L2_MEM2MEM_H +#define _MEDIA_V4L2_MEM2MEM_H + +#include + +/** + * struct v4l2_m2m_ops - mem-to-mem device driver callbacks + * @device_run: required. Begin the actual job (transaction) inside this + * callback. + * The job does NOT have to end before this callback returns + * (and it will be the usual case). When the job finishes, + * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() + * has to be called. + * @job_ready: optional. Should return 0 if the driver does not have a job + * fully prepared to run yet (i.e. it will not be able to finish a + * transaction without sleeping). If not provided, it will be + * assumed that one source and one destination buffer are all + * that is required for the driver to perform one full transaction. + * This method may not sleep. + * @job_abort: optional. Informs the driver that it has to abort the currently + * running transaction as soon as possible (i.e. as soon as it can + * stop the device safely; e.g. in the next interrupt handler), + * even if the transaction would not have been finished by then. + * After the driver performs the necessary steps, it has to call + * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as + * if the transaction ended normally. + * This function does not have to (and will usually not) wait + * until the device enters a state when it can be stopped. + */ +struct v4l2_m2m_ops { + void (*device_run)(void *priv); + int (*job_ready)(void *priv); + void (*job_abort)(void *priv); +}; + +struct video_device; +struct v4l2_m2m_dev; + +/** + * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be + * processed + * + * @q: pointer to struct &vb2_queue + * @rdy_queue: List of V4L2 mem-to-mem queues + * @rdy_spinlock: spin lock to protect the struct usage + * @num_rdy: number of buffers ready to be processed + * @buffered: is the queue buffered? 
+ * + * Queue for buffers ready to be processed as soon as this + * instance receives access to the device. + */ + +struct v4l2_m2m_queue_ctx { + struct vb2_queue q; + + struct list_head rdy_queue; + spinlock_t rdy_spinlock; + u8 num_rdy; + bool buffered; +}; + +/** + * struct v4l2_m2m_ctx - Memory to memory context structure + * + * @q_lock: struct &mutex lock + * @new_frame: valid in the device_run callback: if true, then this + * starts a new frame; if false, then this is a new slice + * for an existing frame. This is always true unless + * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which + * indicates slicing support. + * @is_draining: indicates device is in draining phase + * @last_src_buf: indicate the last source buffer for draining + * @next_buf_last: next capture queud buffer will be tagged as last + * @has_stopped: indicate the device has been stopped + * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE + * queue is not streaming. This allows firmware to + * analyze the bitstream header which arrives on the + * OUTPUT queue. The driver must implement the job_ready + * callback correctly to make sure that the requirements + * for actual decoding are met. + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @cap_q_ctx: Capture (output to memory) queue context + * @out_q_ctx: Output (input from memory) queue context + * @queue: List of memory to memory contexts + * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c: + * %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT. + * @finished: Wait queue used to signalize when a job queue finished. + * @priv: Instance private data + * + * The memory to memory context is specific to a file handle, NOT to e.g. + * a device. 
+ */ +struct v4l2_m2m_ctx { + /* optional cap/out vb2 queues lock */ + struct mutex *q_lock; + + bool new_frame; + + bool is_draining; + struct vb2_v4l2_buffer *last_src_buf; + bool next_buf_last; + bool has_stopped; + bool ignore_cap_streaming; + + /* internal use only */ + struct v4l2_m2m_dev *m2m_dev; + + struct v4l2_m2m_queue_ctx cap_q_ctx; + + struct v4l2_m2m_queue_ctx out_q_ctx; + + /* For device job queue */ + struct list_head queue; + unsigned long job_flags; + wait_queue_head_t finished; + + void *priv; +}; + +/** + * struct v4l2_m2m_buffer - Memory to memory buffer + * + * @vb: pointer to struct &vb2_v4l2_buffer + * @list: list of m2m buffers + */ +struct v4l2_m2m_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +/** + * v4l2_m2m_get_curr_priv() - return driver private data for the currently + * running instance or NULL if no instance is running + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + */ +void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_get_vq() - return vb2_queue for the given type + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to + * the pending job queue and add it if so. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * There are three basic requirements an instance has to meet to be able to run: + * 1) at least one source buffer has to be queued, + * 2) at least one destination buffer has to be queued, + * 3) streaming has to be on. + * + * If a queue is buffered (for example a decoder hardware ringbuffer that has + * to be drained before doing streamoff), allow scheduling without v4l2 buffers + * on that queue. 
+ * + * There may also be additional, custom requirements. In such case the driver + * should supply a custom callback (job_ready in v4l2_m2m_ops) that should + * return 1 if the instance is ready. + * An example of the above could be an instance that requires more than one + * src/dst buffer per transaction. + */ +void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_job_finish() - inform the framework that a job has been finished + * and have it clean up + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * Called by a driver to yield back the device after it has finished with it. + * Should be called as soon as possible after reaching a state which allows + * other instances to take control of the device. + * + * This function has to be called only after &v4l2_m2m_ops->device_run + * callback has been called on the driver. To prevent recursion, it should + * not be called directly from the &v4l2_m2m_ops->device_run callback though. + */ +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with + * state and inform the framework that a job has been finished and have it + * clean up + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @state: vb2 buffer state passed to v4l2_m2m_buf_done(). + * + * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this + * function instead of job_finish() to take held buffers into account. It is + * optional for other drivers. + * + * This function removes the source buffer from the ready list and returns + * it with the given state. The same is done for the destination buffer, unless + * it is marked 'held'. In that case the buffer is kept on the ready list. 
+ * + * After that the job is finished (see job_finish()). + * + * This allows for multiple output buffers to be used to fill in a single + * capture buffer. This is typically used by stateless decoders where + * multiple e.g. H.264 slices contribute to a single decoded frame. + */ +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx, + enum vb2_buffer_state state); + +static inline void +v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) +{ + vb2_buffer_done(&buf->vb2_buf, state); +} + +/** + * v4l2_m2m_clear_state() - clear encoding/decoding state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = false; +} + +/** + * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = true; +} + +/** + * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session + * draining management state of next queued capture buffer + * + * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify + * the end of the capture session. 
+ * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->is_draining && m2m_ctx->next_buf_last; +} + +/** + * v4l2_m2m_has_stopped() - return the current encoding/decoding session + * stopped state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->has_stopped; +} + +/** + * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining + * state in the current encoding/decoding session + * + * This will identify the last output buffer queued before a session stop + * was required, leading to an actual encoding/decoding session stop state + * in the encoding/decoding process after being processed. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +static inline bool +v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf; +} + +/** + * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job + * to finish + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Called by a driver in the suspend hook. Stop new jobs from being run, and + * wait for current running job to finish. 
+ */ +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_resume() - resume job running and try to run a queued job + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Called by a driver in the resume hook. This reverts the operation of + * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if + * there is any. + */ +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @reqbufs: pointer to struct &v4l2_requestbuffers + */ +int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_requestbuffers *reqbufs); + +/** + * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + * + * See v4l2_m2m_mmap() documentation for details. 
+ */ +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_create_bufs() - create a source or destination buffer, depending + * on the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @create: pointer to struct &v4l2_create_buffers + */ +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create); + +/** + * v4l2_m2m_expbuf() - export a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @eb: pointer to struct &v4l2_exportbuffer + */ +int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_exportbuffer *eb); + +/** + * v4l2_m2m_streamon() - turn on 
streaming for a video queue + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_streamoff() - turn off streaming for a video queue + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding + * session state when a start of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding + * session state when a stop of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_encoder_cmd() - execute an encoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @ec: pointer to the encoder command + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec); + +/** + * v4l2_m2m_decoder_cmd() - execute a decoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @dc: pointer to the decoder command + */ +int v4l2_m2m_decoder_cmd(struct 
file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc); + +/** + * v4l2_m2m_poll() - poll replacement, for destination buffers only + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @wait: pointer to struct &poll_table_struct + * + * Call from the driver's poll() function. Will poll both queues. If a buffer + * is available to dequeue (with dqbuf) from the source queue, this will + * indicate that a non-blocking write can be performed, while read will be + * returned in case of the destination queue. + */ +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait); + +/** + * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vma: pointer to struct &vm_area_struct + * + * Call from driver's mmap() function. Will handle mmap() for both queues + * seamlessly for the video buffer, which will receive normal per-queue offsets + * and proper vb2 queue pointers. The differentiation is made outside + * vb2 by adding a predefined offset to buffers from one of the queues + * and subtracting it before passing it back to vb2. Only drivers (and + * thus applications) receive modified offsets. + */ +int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#endif +/** + * v4l2_m2m_init() - initialize per-driver m2m data + * + * @m2m_ops: pointer to struct v4l2_m2m_ops + * + * Usually called from driver's ``probe()`` function. 
+ * + * Return: returns an opaque pointer to the internal data to handle M2M context + */ +struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); + +#if defined(CONFIG_MEDIA_CONTROLLER) +void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev); +int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function); +#else +static inline void +v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) +{ +} + +static inline int +v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function) +{ + return 0; +} +#endif + +/** + * v4l2_m2m_release() - cleans up and frees a m2m_dev structure + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Usually called from driver's ``remove()`` function. + */ +void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_ctx_init() - allocate and initialize a m2m context + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @drv_priv: driver's instance private data + * @queue_init: a callback for queue type-specific initialization function + * to be used for initializing vb2_queues + * + * Usually called from driver's ``open()`` function. + */ +struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, + void *drv_priv, + int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); + +static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx, + bool buffered) +{ + m2m_ctx->out_q_ctx.buffered = buffered; +} + +static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx, + bool buffered) +{ + m2m_ctx->cap_q_ctx.buffered = buffered; +} + +/** + * v4l2_m2m_ctx_release() - release m2m context + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * Usually called from driver's release() function. 
+ */ +void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &vb2_v4l2_buffer + * + * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback. + */ +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for + * use + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; +} + +/** + * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers + * ready for use + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; +} + +/** + * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx 
*m2m_ctx) +{ + return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx); +} + +/** + * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_last_src_buf() - return last source buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx); +} + +/** + * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_src_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** 
+ * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) +{ + return &m2m_ctx->out_q_ctx.q; +} + +/** + * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) +{ + return &m2m_ctx->cap_q_ctx.q; +} + +/** + * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and + * return it + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready + * buffers and return it + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx); +} + +/** + * 
v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of + * ready buffers and return it + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready + * buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + * @vbuf: the buffer to be removed + */ +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list + * of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf); +} + +/** + * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the + * list of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf); +} + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx); + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx); +} + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx); +} + +/** + * 
v4l2_m2m_buf_copy_metadata() - copy buffer metadata from + * the output buffer to the capture buffer + * + * @out_vb: the output buffer that is the source of the metadata. + * @cap_vb: the capture buffer that will receive the metadata. + * @copy_frame_flags: copy the KEY/B/PFRAME flags as well. + * + * This helper function copies the timestamp, timecode (if the TIMECODE + * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME + * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb. + * + * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME + * flags are not copied. This is typically needed for encoders that + * set this bits explicitly. + */ +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags); + +/* v4l2 request helper */ + +void v4l2_m2m_request_queue(struct media_request *req); + +/* v4l2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb); +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh, + struct v4l2_create_buffers *create); +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *d); +int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh, + struct v4l2_exportbuffer *eb); +int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, + enum v4l2_buf_type type); +int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, + enum v4l2_buf_type type); +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh, + struct 
v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait); + +#endif /* _MEDIA_V4L2_MEM2MEM_H */ + diff --git a/6.12.0/include-overrides/media/v4l2-rect.h b/6.12.0/include-overrides/media/v4l2-rect.h new file mode 100644 index 0000000..bd587d0 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-rect.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-rect.h - v4l2_rect helper functions + * + * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _V4L2_RECT_H_ +#define _V4L2_RECT_H_ + +#include + +/** + * v4l2_rect_set_size_to() - copy the width/height values. + * @r: rect whose width and height fields will be set + * @size: rect containing the width and height fields you need. + */ +static inline void v4l2_rect_set_size_to(struct v4l2_rect *r, + const struct v4l2_rect *size) +{ + r->width = size->width; + r->height = size->height; +} + +/** + * v4l2_rect_set_min_size() - width and height of r should be >= min_size. 
+ * @r: rect whose width and height will be modified + * @min_size: rect containing the minimal width and height + */ +static inline void v4l2_rect_set_min_size(struct v4l2_rect *r, + const struct v4l2_rect *min_size) +{ + if (r->width < min_size->width) + r->width = min_size->width; + if (r->height < min_size->height) + r->height = min_size->height; +} + +/** + * v4l2_rect_set_max_size() - width and height of r should be <= max_size + * @r: rect whose width and height will be modified + * @max_size: rect containing the maximum width and height + */ +static inline void v4l2_rect_set_max_size(struct v4l2_rect *r, + const struct v4l2_rect *max_size) +{ + if (r->width > max_size->width) + r->width = max_size->width; + if (r->height > max_size->height) + r->height = max_size->height; +} + +/** + * v4l2_rect_map_inside()- r should be inside boundary. + * @r: rect that will be modified + * @boundary: rect containing the boundary for @r + */ +static inline void v4l2_rect_map_inside(struct v4l2_rect *r, + const struct v4l2_rect *boundary) +{ + v4l2_rect_set_max_size(r, boundary); + if (r->left < boundary->left) + r->left = boundary->left; + if (r->top < boundary->top) + r->top = boundary->top; + if (r->left + r->width > boundary->left + boundary->width) + r->left = boundary->left + boundary->width - r->width; + if (r->top + r->height > boundary->top + boundary->height) + r->top = boundary->top + boundary->height - r->height; +} + +/** + * v4l2_rect_same_size() - return true if r1 has the same size as r2 + * @r1: rectangle. + * @r2: rectangle. + * + * Return true if both rectangles have the same size. + */ +static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return r1->width == r2->width && r1->height == r2->height; +} + +/** + * v4l2_rect_same_position() - return true if r1 has the same position as r2 + * @r1: rectangle. + * @r2: rectangle. 
+ * + * Return true if both rectangles have the same position + */ +static inline bool v4l2_rect_same_position(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return r1->top == r2->top && r1->left == r2->left; +} + +/** + * v4l2_rect_equal() - return true if r1 equals r2 + * @r1: rectangle. + * @r2: rectangle. + * + * Return true if both rectangles have the same size and position. + */ +static inline bool v4l2_rect_equal(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return v4l2_rect_same_size(r1, r2) && v4l2_rect_same_position(r1, r2); +} + +/** + * v4l2_rect_intersect() - calculate the intersection of two rects. + * @r: intersection of @r1 and @r2. + * @r1: rectangle. + * @r2: rectangle. + */ +static inline void v4l2_rect_intersect(struct v4l2_rect *r, + const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + int right, bottom; + + r->top = max(r1->top, r2->top); + r->left = max(r1->left, r2->left); + bottom = min(r1->top + r1->height, r2->top + r2->height); + right = min(r1->left + r1->width, r2->left + r2->width); + r->height = max(0, bottom - r->top); + r->width = max(0, right - r->left); +} + +/** + * v4l2_rect_scale() - scale rect r by to/from + * @r: rect to be scaled. + * @from: from rectangle. + * @to: to rectangle. + * + * This scales rectangle @r horizontally by @to->width / @from->width and + * vertically by @to->height / @from->height. + * + * Typically @r is a rectangle inside @from and you want the rectangle as + * it would appear after scaling @from to @to. So the resulting @r will + * be the scaled rectangle inside @to. 
+ */ +static inline void v4l2_rect_scale(struct v4l2_rect *r, + const struct v4l2_rect *from, + const struct v4l2_rect *to) +{ + if (from->width == 0 || from->height == 0) { + r->left = r->top = r->width = r->height = 0; + return; + } + r->left = (((r->left - from->left) * to->width) / from->width) & ~1; + r->width = ((r->width * to->width) / from->width) & ~1; + r->top = ((r->top - from->top) * to->height) / from->height; + r->height = (r->height * to->height) / from->height; +} + +/** + * v4l2_rect_overlap() - do r1 and r2 overlap? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 and @r2 overlap. + */ +static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + /* + * IF the left side of r1 is to the right of the right side of r2 OR + * the left side of r2 is to the right of the right side of r1 THEN + * they do not overlap. + */ + if (r1->left >= r2->left + r2->width || + r2->left >= r1->left + r1->width) + return false; + /* + * IF the top side of r1 is below the bottom of r2 OR + * the top side of r2 is below the bottom of r1 THEN + * they do not overlap. + */ + if (r1->top >= r2->top + r2->height || + r2->top >= r1->top + r1->height) + return false; + return true; +} + +/** + * v4l2_rect_enclosed() - is r1 enclosed in r2? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 is enclosed in @r2. 
+ */ +static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1, + struct v4l2_rect *r2) +{ + if (r1->left < r2->left || r1->top < r2->top) + return false; + if (r1->left + r1->width > r2->left + r2->width) + return false; + if (r1->top + r1->height > r2->top + r2->height) + return false; + + return true; +} + +#endif diff --git a/6.12.0/include-overrides/media/v4l2-subdev.h b/6.12.0/include-overrides/media/v4l2-subdev.h new file mode 100644 index 0000000..8daa092 --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-subdev.h @@ -0,0 +1,2005 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 sub-device support header. + * + * Copyright (C) 2008 Hans Verkuil + */ + +#ifndef _V4L2_SUBDEV_H +#define _V4L2_SUBDEV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* generic v4l2_device notify callback notification values */ +#define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32) +#define V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ 0x00000001 +#define V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED 0x00000002 +#define V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN 0x00000004 +#define V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN 0x00000008 + +#define V4L2_SUBDEV_IR_TX_NOTIFY _IOW('v', 1, u32) +#define V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ 0x00000001 + +#define V4L2_DEVICE_NOTIFY_EVENT _IOW('v', 2, struct v4l2_event) + +struct v4l2_device; +struct v4l2_ctrl_handler; +struct v4l2_event; +struct v4l2_event_subscription; +struct v4l2_fh; +struct v4l2_subdev; +struct v4l2_subdev_fh; +struct tuner_setup; +struct v4l2_mbus_frame_desc; +struct led_classdev; + +/** + * struct v4l2_decode_vbi_line - used to decode_vbi_line + * + * @is_second_field: Set to 0 for the first (odd) field; + * set to 1 for the second (even) field. + * @p: Pointer to the sliced VBI data from the decoder. On exit, points to + * the start of the payload. + * @line: Line number of the sliced VBI data (1-23) + * @type: VBI service type (V4L2_SLICED_*). 
0 if no service found + */ +struct v4l2_decode_vbi_line { + u32 is_second_field; + u8 *p; + u32 line; + u32 type; +}; + +/* + * Sub-devices are devices that are connected somehow to the main bridge + * device. These devices are usually audio/video muxers/encoders/decoders or + * sensors and webcam controllers. + * + * Usually these devices are controlled through an i2c bus, but other buses + * may also be used. + * + * The v4l2_subdev struct provides a way of accessing these devices in a + * generic manner. Most operations that these sub-devices support fall in + * a few categories: core ops, audio ops, video ops and tuner ops. + * + * More categories can be added if needed, although this should remain a + * limited set (no more than approx. 8 categories). + * + * Each category has its own set of ops that subdev drivers can implement. + * + * A subdev driver can leave the pointer to the category ops NULL if + * it does not implement them (e.g. an audio subdev will generally not + * implement the video category ops). The exception is the core category: + * this must always be present. + * + * These ops are all used internally so it is no problem to change, remove + * or add ops or move ops from one to another category. Currently these + * ops are based on the original ioctls, but since ops are not limited to + * one argument there is room for improvement here once all i2c subdev + * drivers are converted to use these ops. + */ + +/* + * Core ops: it is highly recommended to implement at least these ops: + * + * log_status + * g_register + * s_register + * + * This provides basic debugging support. + * + * The ioctl ops is meant for generic ioctl-like commands. Depending on + * the use-case it might be better to use subdev-specific ops (currently + * not yet implemented) since ops provide proper type-checking. + */ + +/** + * enum v4l2_subdev_io_pin_bits - Subdevice external IO pin configuration + * bits + * + * @V4L2_SUBDEV_IO_PIN_DISABLE: disables a pin config. 
ENABLE assumed. + * @V4L2_SUBDEV_IO_PIN_OUTPUT: set it if pin is an output. + * @V4L2_SUBDEV_IO_PIN_INPUT: set it if pin is an input. + * @V4L2_SUBDEV_IO_PIN_SET_VALUE: to set the output value via + * &struct v4l2_subdev_io_pin_config->value. + * @V4L2_SUBDEV_IO_PIN_ACTIVE_LOW: pin active is bit 0. + * Otherwise, ACTIVE HIGH is assumed. + */ +enum v4l2_subdev_io_pin_bits { + V4L2_SUBDEV_IO_PIN_DISABLE = 0, + V4L2_SUBDEV_IO_PIN_OUTPUT = 1, + V4L2_SUBDEV_IO_PIN_INPUT = 2, + V4L2_SUBDEV_IO_PIN_SET_VALUE = 3, + V4L2_SUBDEV_IO_PIN_ACTIVE_LOW = 4, +}; + +/** + * struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration + * + * @flags: bitmask with flags for this pin's config, whose bits are defined by + * &enum v4l2_subdev_io_pin_bits. + * @pin: Chip external IO pin to configure + * @function: Internal signal pad/function to route to IO pin + * @value: Initial value for pin - e.g. GPIO output value + * @strength: Pin drive strength + */ +struct v4l2_subdev_io_pin_config { + u32 flags; + u8 pin; + u8 function; + u8 value; + u8 strength; +}; + +/** + * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs + * + * @log_status: callback for VIDIOC_LOG_STATUS() ioctl handler code. + * + * @s_io_pin_config: configure one or more chip I/O pins for chips that + * multiplex different internal signal pads out to IO pins. This function + * takes a pointer to an array of 'n' pin configuration entries, one for + * each pin being configured. This function could be called at times + * other than just subdevice initialization. + * + * @init: initialize the sensor registers to some sort of reasonable default + * values. Do not use for new drivers and should be removed in existing + * drivers. + * + * @load_fw: load firmware. + * + * @reset: generic reset command. The argument selects which subsystems to + * reset. Passing 0 will always reset the whole chip. Do not use for new + * drivers without discussing this first on the linux-media mailinglist. 
+ * There should be no reason normally to reset a device. + * + * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with + * a direction argument if needed. + * + * @command: called by in-kernel drivers in order to call functions internal + * to subdev drivers driver that have a separate callback. + * + * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core. + * used to provide support for private ioctls used on the driver. + * + * @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel, + * in order to fix data passed from/to userspace. + * + * @g_register: callback for VIDIOC_DBG_G_REGISTER() ioctl handler code. + * + * @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code. + * + * @s_power: puts subdevice in power saving mode (on == 0) or normal operation + * mode (on == 1). DEPRECATED. See + * Documentation/driver-api/media/camera-sensor.rst . pre_streamon and + * post_streamoff callbacks can be used for e.g. setting the bus to LP-11 + * mode before s_stream is called. + * + * @interrupt_service_routine: Called by the bridge chip's interrupt service + * handler, when an interrupt status has be raised due to this subdev, + * so that this subdev can handle the details. It may schedule work to be + * performed later. It must not sleep. **Called from an IRQ context**. + * + * @subscribe_event: used by the drivers to request the control framework that + * for it to be warned when the value of a control changes. + * + * @unsubscribe_event: remove event subscription from the control framework. 
+ */ +struct v4l2_subdev_core_ops { + int (*log_status)(struct v4l2_subdev *sd); + int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n, + struct v4l2_subdev_io_pin_config *pincfg); + int (*init)(struct v4l2_subdev *sd, u32 val); + int (*load_fw)(struct v4l2_subdev *sd); + int (*reset)(struct v4l2_subdev *sd, u32 val); + int (*s_gpio)(struct v4l2_subdev *sd, u32 val); + long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); + long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); +#ifdef CONFIG_COMPAT + long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd, + unsigned long arg); +#endif +#ifdef CONFIG_VIDEO_ADV_DEBUG + int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg); + int (*s_register)(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg); +#endif + int (*s_power)(struct v4l2_subdev *sd, int on); + int (*interrupt_service_routine)(struct v4l2_subdev *sd, + u32 status, bool *handled); + int (*subscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); + int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +}; + +/** + * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened + * in radio mode. + * + * @standby: puts the tuner in standby mode. It will be woken up + * automatically the next time it is used. + * + * @s_radio: callback that switches the tuner to radio mode. + * drivers should explicitly call it when a tuner ops should + * operate on radio mode, before being able to handle it. + * Used on devices that have both AM/FM radio receiver and TV. + * + * @s_frequency: callback for VIDIOC_S_FREQUENCY() ioctl handler code. + * + * @g_frequency: callback for VIDIOC_G_FREQUENCY() ioctl handler code. + * freq->type must be filled in. Normally done by video_ioctl2() + * or the bridge driver. + * + * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS() ioctl handler code. 
+ * + * @g_tuner: callback for VIDIOC_G_TUNER() ioctl handler code. + * + * @s_tuner: callback for VIDIOC_S_TUNER() ioctl handler code. @vt->type must be + * filled in. Normally done by video_ioctl2 or the + * bridge driver. + * + * @g_modulator: callback for VIDIOC_G_MODULATOR() ioctl handler code. + * + * @s_modulator: callback for VIDIOC_S_MODULATOR() ioctl handler code. + * + * @s_type_addr: sets tuner type and its I2C addr. + * + * @s_config: sets tda9887 specific stuff, like port1, port2 and qss + * + * .. note:: + * + * On devices that have both AM/FM and TV, it is up to the driver + * to explicitly call s_radio when the tuner should be switched to + * radio mode, before handling other &struct v4l2_subdev_tuner_ops + * that would require it. An example of such usage is:: + * + * static void s_frequency(void *priv, const struct v4l2_frequency *f) + * { + * ... + * if (f.type == V4L2_TUNER_RADIO) + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_radio); + * ... + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency); + * } + */ +struct v4l2_subdev_tuner_ops { + int (*standby)(struct v4l2_subdev *sd); + int (*s_radio)(struct v4l2_subdev *sd); + int (*s_frequency)(struct v4l2_subdev *sd, const struct v4l2_frequency *freq); + int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq); + int (*enum_freq_bands)(struct v4l2_subdev *sd, struct v4l2_frequency_band *band); + int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); + int (*s_tuner)(struct v4l2_subdev *sd, const struct v4l2_tuner *vt); + int (*g_modulator)(struct v4l2_subdev *sd, struct v4l2_modulator *vm); + int (*s_modulator)(struct v4l2_subdev *sd, const struct v4l2_modulator *vm); + int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type); + int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); +}; + +/** + * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings + * + * @s_clock_freq: set the frequency (in Hz) of the audio 
clock output. + * Used to slave an audio processor to the video decoder, ensuring that + * audio and video remain synchronized. Usual values for the frequency + * are 48000, 44100 or 32000 Hz. If the frequency is not supported, then + * -EINVAL is returned. + * + * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard + * way to select I2S clock used by driving digital audio streams at some + * board designs. Usual values for the frequency are 1024000 and 2048000. + * If the frequency is not supported, then %-EINVAL is returned. + * + * @s_routing: used to define the input and/or output pins of an audio chip, + * and any additional configuration data. + * Never attempt to use user-level input IDs (e.g. Composite, S-Video, + * Tuner) at this level. An i2c device shouldn't know about whether an + * input pin is connected to a Composite connector, become on another + * board or platform it might be connected to something else entirely. + * The calling driver is responsible for mapping a user-level input to + * the right pins on the i2c device. + * + * @s_stream: used to notify the audio code that stream will start or has + * stopped. + */ +struct v4l2_subdev_audio_ops { + int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); + int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq); + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_stream)(struct v4l2_subdev *sd, int enable); +}; + +/** + * struct v4l2_mbus_frame_desc_entry_csi2 + * + * @vc: CSI-2 virtual channel + * @dt: CSI-2 data type ID + */ +struct v4l2_mbus_frame_desc_entry_csi2 { + u8 vc; + u8 dt; +}; + +/** + * enum v4l2_mbus_frame_desc_flags - media bus frame description flags + * + * @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX: + * Indicates that &struct v4l2_mbus_frame_desc_entry->length field + * specifies maximum data length. + * @V4L2_MBUS_FRAME_DESC_FL_BLOB: + * Indicates that the format does not have line offsets, i.e. 
+ * the receiver should use 1D DMA. + */ +enum v4l2_mbus_frame_desc_flags { + V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = BIT(0), + V4L2_MBUS_FRAME_DESC_FL_BLOB = BIT(1), +}; + +/** + * struct v4l2_mbus_frame_desc_entry - media bus frame description structure + * + * @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags. + * @stream: stream in routing configuration + * @pixelcode: media bus pixel code, valid if @flags + * %FRAME_DESC_FL_BLOB is not set. + * @length: number of octets per frame, valid if @flags + * %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set. + * @bus: Bus-specific frame descriptor parameters + * @bus.csi2: CSI-2-specific bus configuration + */ +struct v4l2_mbus_frame_desc_entry { + enum v4l2_mbus_frame_desc_flags flags; + u32 stream; + u32 pixelcode; + u32 length; + union { + struct v4l2_mbus_frame_desc_entry_csi2 csi2; + } bus; +}; + + /* + * If this number is too small, it should be dropped altogether and the + * API switched to a dynamic number of frame descriptor entries. + */ +#define V4L2_FRAME_DESC_ENTRY_MAX 8 + +/** + * enum v4l2_mbus_frame_desc_type - media bus frame description type + * + * @V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED: + * Undefined frame desc type. Drivers should not use this, it is + * for backwards compatibility. + * @V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL: + * Parallel media bus. + * @V4L2_MBUS_FRAME_DESC_TYPE_CSI2: + * CSI-2 media bus. Frame desc parameters must be set in + * &struct v4l2_mbus_frame_desc_entry->csi2. 
+ */ +enum v4l2_mbus_frame_desc_type { + V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED = 0, + V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL, + V4L2_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +/** + * struct v4l2_mbus_frame_desc - media bus data frame description + * @type: type of the bus (enum v4l2_mbus_frame_desc_type) + * @entry: frame descriptors array + * @num_entries: number of entries in @entry array + */ +struct v4l2_mbus_frame_desc { + enum v4l2_mbus_frame_desc_type type; + struct v4l2_mbus_frame_desc_entry entry[V4L2_FRAME_DESC_ENTRY_MAX]; + unsigned short num_entries; +}; + +/** + * enum v4l2_subdev_pre_streamon_flags - Flags for pre_streamon subdev core op + * + * @V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP: Set the transmitter to either LP-11 + * or LP-111 mode before call to s_stream(). + */ +enum v4l2_subdev_pre_streamon_flags { + V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP = BIT(0), +}; + +/** + * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened + * in video mode. + * + * @s_routing: see s_routing in audio_ops, except this version is for video + * devices. + * + * @s_crystal_freq: sets the frequency of the crystal used to generate the + * clocks in Hz. An extra flags field allows device specific configuration + * regarding clock frequency dividers, etc. If not used, then set flags + * to 0. If the frequency is not supported, then -EINVAL is returned. + * + * @g_std: callback for VIDIOC_G_STD() ioctl handler code. + * + * @s_std: callback for VIDIOC_S_STD() ioctl handler code. + * + * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by + * video input devices. + * + * @g_std_output: get current standard for video OUTPUT devices. This is ignored + * by video input devices. + * + * @querystd: callback for VIDIOC_QUERYSTD() ioctl handler code. + * + * @g_tvnorms: get &v4l2_std_id with all standards supported by the video + * CAPTURE device. This is ignored by video output devices. 
+ * + * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video + * OUTPUT device. This is ignored by video capture devices. + * + * @g_input_status: get input status. Same as the status field in the + * &struct v4l2_input + * + * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the + * sub-device. Failure on stop will remove any resources acquired in + * streaming start, while the error code is still returned by the driver. + * The caller shall track the subdev state, and shall not start or stop an + * already started or stopped subdev. Also see call_s_stream wrapper in + * v4l2-subdev.c. + * + * New drivers should instead implement &v4l2_subdev_pad_ops.enable_streams + * and &v4l2_subdev_pad_ops.disable_streams operations, and use + * v4l2_subdev_s_stream_helper for the &v4l2_subdev_video_ops.s_stream + * operation to support legacy users. + * + * Drivers should also not call the .s_stream() subdev operation directly, + * but use the v4l2_subdev_enable_streams() and + * v4l2_subdev_disable_streams() helpers. + * + * @g_pixelaspect: callback to return the pixelaspect ratio. + * + * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev + * can adjust @size to a lower value and must not write more data to the + * buffer starting at @data than the original value of @size. + * + * @pre_streamon: May be called before streaming is actually started, to help + * initialising the bus. Current usage is to set a CSI-2 transmitter to + * LP-11 or LP-111 mode before streaming. See &enum + * v4l2_subdev_pre_streamon_flags. + * + * pre_streamon shall return error if it cannot perform the operation as + * indicated by the flags argument. In particular, -EACCES indicates lack + * of support for the operation. The caller shall call post_streamoff for + * each successful call of pre_streamon. + * + * @post_streamoff: Called after streaming is stopped, but if and only if + * pre_streamon was called earlier. 
+ */ +struct v4l2_subdev_video_ops { + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags); + int (*g_std)(struct v4l2_subdev *sd, v4l2_std_id *norm); + int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm); + int (*s_std_output)(struct v4l2_subdev *sd, v4l2_std_id std); + int (*g_std_output)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*querystd)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_tvnorms)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_input_status)(struct v4l2_subdev *sd, u32 *status); + int (*s_stream)(struct v4l2_subdev *sd, int enable); + int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect); + int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf, + unsigned int *size); + int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags); + int (*post_streamoff)(struct v4l2_subdev *sd); +}; + +/** + * struct v4l2_subdev_vbi_ops - Callbacks used when v4l device was opened + * in video mode via the vbi device node. + * + * @decode_vbi_line: video decoders that support sliced VBI need to implement + * this ioctl. Field p of the &struct v4l2_decode_vbi_line is set to the + * start of the VBI data that was generated by the decoder. The driver + * then parses the sliced VBI data and sets the other fields in the + * struct accordingly. The pointer p is updated to point to the start of + * the payload which can be copied verbatim into the data field of the + * &struct v4l2_sliced_vbi_data. If no valid VBI data was found, then the + * type field is set to 0 on return. + * + * @s_vbi_data: used to generate VBI signals on a video signal. + * &struct v4l2_sliced_vbi_data is filled with the data packets that + * should be output. Note that if you set the line field to 0, then that + * VBI signal is disabled. 
If no valid VBI data was found, then the type + * field is set to 0 on return. + * + * @g_vbi_data: used to obtain the sliced VBI packet from a readback register. + * Not all video decoders support this. If no data is available because + * the readback register contains invalid or erroneous data %-EIO is + * returned. Note that you must fill in the 'id' member and the 'field' + * member (to determine whether CC data from the first or second field + * should be obtained). + * + * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP() ioctl handler + * code. + * + * @s_raw_fmt: setup the video encoder/decoder for raw VBI. + * + * @g_sliced_fmt: retrieve the current sliced VBI settings. + * + * @s_sliced_fmt: setup the sliced VBI settings. + */ +struct v4l2_subdev_vbi_ops { + int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); + int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data); + int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data); + int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap); + int (*s_raw_fmt)(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt); + int (*g_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt); + int (*s_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt); +}; + +/** + * struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations + * @g_skip_top_lines: number of lines at the top of the image to be skipped. + * This is needed for some sensors, which always corrupt + * several top lines of the output image, or which send their + * metadata in them. + * @g_skip_frames: number of frames to skip at stream start. This is needed for + * buggy sensors that generate faulty frames when they are + * turned on. 
+ */
+struct v4l2_subdev_sensor_ops {
+ int (*g_skip_top_lines)(struct v4l2_subdev *sd, u32 *lines);
+ int (*g_skip_frames)(struct v4l2_subdev *sd, u32 *frames);
+};
+
+/**
+ * enum v4l2_subdev_ir_mode - describes the type of IR supported
+ *
+ * @V4L2_SUBDEV_IR_MODE_PULSE_WIDTH: IR uses struct ir_raw_event records
+ */
+enum v4l2_subdev_ir_mode {
+ V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
+};
+
+/**
+ * struct v4l2_subdev_ir_parameters - Parameters for IR TX or RX
+ *
+ * @bytes_per_data_element: bytes per data element of data in read or
+ * write call.
+ * @mode: IR mode as defined by &enum v4l2_subdev_ir_mode.
+ * @enable: device is active if true
+ * @interrupt_enable: IR interrupts are enabled if true
+ * @shutdown: if true: set hardware to low/no power, false: normal mode
+ *
+ * @modulation: if true, it uses carrier, if false: baseband
+ * @max_pulse_width: maximum pulse width in ns, valid only for baseband signal
+ * @carrier_freq: carrier frequency in Hz, valid only for modulated signal
+ * @duty_cycle: duty cycle percentage, valid only for modulated signal
+ * @invert_level: invert signal level
+ *
+ * @invert_carrier_sense: Send 0/space as a carrier burst. used only in TX.
+ *
+ * @noise_filter_min_width: min time of a valid pulse, in ns. Used only for RX.
+ * @carrier_range_lower: Lower carrier range, in Hz, valid only for modulated
+ * signal. Used only for RX.
+ * @carrier_range_upper: Upper carrier range, in Hz, valid only for modulated
+ * signal. Used only for RX.
+ * @resolution: The receive resolution, in ns. Used only for RX.
+ */
+struct v4l2_subdev_ir_parameters {
+ unsigned int bytes_per_data_element;
+ enum v4l2_subdev_ir_mode mode;
+
+ bool enable;
+ bool interrupt_enable;
+ bool shutdown;
+
+ bool modulation;
+ u32 max_pulse_width;
+ unsigned int carrier_freq;
+ unsigned int duty_cycle;
+ bool invert_level;
+
+ /* Tx only */
+ bool invert_carrier_sense;
+
+ /* Rx only */
+ u32 noise_filter_min_width;
+ unsigned int carrier_range_lower;
+ unsigned int carrier_range_upper;
+ u32 resolution;
+};
+
+/**
+ * struct v4l2_subdev_ir_ops - operations for IR subdevices
+ *
+ * @rx_read: Reads received codes or pulse width data.
+ * The semantics are similar to a non-blocking read() call.
+ * @rx_g_parameters: Get the current operating parameters and state of
+ * the IR receiver.
+ * @rx_s_parameters: Set the current operating parameters and state of
+ * the IR receiver. It is recommended to call
+ * [rt]x_g_parameters first to fill out the current state, and only change
+ * the fields that need to be changed. Upon return, the actual device
+ * operating parameters and state will be returned. Note that hardware
+ * limitations may prevent the actual settings from matching the requested
+ * settings - e.g. an actual carrier setting of 35,904 Hz when 36,000 Hz
+ * was requested. An exception is when the shutdown parameter is true.
+ * The last used operational parameters will be returned, but the actual
+ * state of the hardware may be different to minimize power consumption and
+ * processing when shutdown is true.
+ *
+ * @tx_write: Writes codes or pulse width data for transmission.
+ * The semantics are similar to a non-blocking write() call.
+ * @tx_g_parameters: Get the current operating parameters and state of
+ * the IR transmitter.
+ * @tx_s_parameters: Set the current operating parameters and state of
+ * the IR transmitter. It is recommended to call
+ * [rt]x_g_parameters first to fill out the current state, and only change
+ * the fields that need to be changed.
Upon return, the actual device
+ * operating parameters and state will be returned. Note that hardware
+ * limitations may prevent the actual settings from matching the requested
+ * settings - e.g. an actual carrier setting of 35,904 Hz when 36,000 Hz
+ * was requested. An exception is when the shutdown parameter is true.
+ * The last used operational parameters will be returned, but the actual
+ * state of the hardware may be different to minimize power consumption and
+ * processing when shutdown is true.
+ */
+struct v4l2_subdev_ir_ops {
+ /* Receiver */
+ int (*rx_read)(struct v4l2_subdev *sd, u8 *buf, size_t count,
+ ssize_t *num);
+
+ int (*rx_g_parameters)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *params);
+ int (*rx_s_parameters)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *params);
+
+ /* Transmitter */
+ int (*tx_write)(struct v4l2_subdev *sd, u8 *buf, size_t count,
+ ssize_t *num);
+
+ int (*tx_g_parameters)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *params);
+ int (*tx_s_parameters)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *params);
+};
+
+/**
+ * struct v4l2_subdev_pad_config - Used for storing subdev pad information.
+ *
+ * @format: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ */
+struct v4l2_subdev_pad_config {
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
+};
+
+/**
+ * struct v4l2_subdev_stream_config - Used for storing stream configuration.
+ *
+ * @pad: pad number
+ * @stream: stream number
+ * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
+ * @fmt: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ *
+ * This structure stores configuration for a stream.
+ */ +struct v4l2_subdev_stream_config { + u32 pad; + u32 stream; + bool enabled; + + struct v4l2_mbus_framefmt fmt; + struct v4l2_rect crop; + struct v4l2_rect compose; + struct v4l2_fract interval; +}; + +/** + * struct v4l2_subdev_stream_configs - A collection of stream configs. + * + * @num_configs: number of entries in @config. + * @configs: an array of &struct v4l2_subdev_stream_configs. + */ +struct v4l2_subdev_stream_configs { + u32 num_configs; + struct v4l2_subdev_stream_config *configs; +}; + +/** + * struct v4l2_subdev_krouting - subdev routing table + * + * @len_routes: length of routes array, in routes + * @num_routes: number of routes + * @routes: &struct v4l2_subdev_route + * + * This structure contains the routing table for a subdev. + */ +struct v4l2_subdev_krouting { + unsigned int len_routes; + unsigned int num_routes; + struct v4l2_subdev_route *routes; +}; + +/** + * struct v4l2_subdev_state - Used for storing subdev state information. + * + * @_lock: default for 'lock' + * @lock: mutex for the state. May be replaced by the user. + * @sd: the sub-device which the state is related to + * @pads: &struct v4l2_subdev_pad_config array + * @routing: routing table for the subdev + * @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS) + * + * This structure only needs to be passed to the pad op if the 'which' field + * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For + * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL. + */ +struct v4l2_subdev_state { + /* lock for the struct v4l2_subdev_state fields */ + struct mutex _lock; + struct mutex *lock; + struct v4l2_subdev *sd; + struct v4l2_subdev_pad_config *pads; + struct v4l2_subdev_krouting routing; + struct v4l2_subdev_stream_configs stream_configs; +}; + +/** + * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations + * + * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler + * code. 
+ * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler + * code. + * + * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL() ioctl + * handler code. + * + * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT() ioctl handler code. + * + * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT() ioctl handler code. + * + * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION() ioctl handler code. + * + * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code. + * + * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL() + * ioctl handler code. + * + * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL() + * ioctl handler code. + * + * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code. + * + * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code. + * + * @s_dv_timings: Set custom dv timings in the sub device. This is used + * when sub device is capable of setting detailed timing information + * in the hardware to generate/detect the video signal. + * + * @g_dv_timings: Get custom dv timings in the sub device. + * + * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code. + * + * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler + * code. + * + * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS() ioctl handler + * code. + * + * @link_validate: used by the media controller code to check if the links + * that belongs to a pipeline can be used for stream. + * + * @get_frame_desc: get the current low level media bus frame parameters. + * + * @set_frame_desc: set the low level media bus frame parameters, @fd array + * may be adjusted by the subdev driver to device capabilities. + * + * @get_mbus_config: get the media bus configuration of a remote sub-device. 
+ * The media bus configuration is usually retrieved from the + * firmware interface at sub-device probe time, immediately + * applied to the hardware and eventually adjusted by the + * driver. Remote sub-devices (usually video receivers) shall + * use this operation to query the transmitting end bus + * configuration in order to adjust their own one accordingly. + * Callers should make sure they get the most up-to-date as + * possible configuration from the remote end, likely calling + * this operation as close as possible to stream on time. The + * operation shall fail if the pad index it has been called on + * is not valid or in case of unrecoverable failures. + * + * @set_routing: Enable or disable data connection routes described in the + * subdevice routing table. Subdevs that implement this operation + * must set the V4L2_SUBDEV_FL_STREAMS flag. + * + * @enable_streams: Enable the streams defined in streams_mask on the given + * source pad. Subdevs that implement this operation must use the active + * state management provided by the subdev core (enabled through a call to + * v4l2_subdev_init_finalize() at initialization time). Do not call + * directly, use v4l2_subdev_enable_streams() instead. + * + * @disable_streams: Disable the streams defined in streams_mask on the given + * source pad. Subdevs that implement this operation must use the active + * state management provided by the subdev core (enabled through a call to + * v4l2_subdev_init_finalize() at initialization time). Do not call + * directly, use v4l2_subdev_disable_streams() instead. 
+ */ +struct v4l2_subdev_pad_ops { + int (*enum_mbus_code)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code); + int (*enum_frame_size)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse); + int (*enum_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie); + int (*get_fmt)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + int (*set_fmt)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + int (*get_selection)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel); + int (*set_selection)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel); + int (*get_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*set_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*s_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*g_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*query_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*dv_timings_cap)(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap); + int (*enum_dv_timings)(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *timings); +#ifdef CONFIG_MEDIA_CONTROLLER + int (*link_validate)(struct v4l2_subdev *sd, struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); +#endif /* CONFIG_MEDIA_CONTROLLER */ 
+ int (*get_frame_desc)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd); + int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd); + int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config); + int (*set_routing)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *route); + int (*enable_streams)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask); + int (*disable_streams)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask); +}; + +/** + * struct v4l2_subdev_ops - Subdev operations + * + * @core: pointer to &struct v4l2_subdev_core_ops. Can be %NULL + * @tuner: pointer to &struct v4l2_subdev_tuner_ops. Can be %NULL + * @audio: pointer to &struct v4l2_subdev_audio_ops. Can be %NULL + * @video: pointer to &struct v4l2_subdev_video_ops. Can be %NULL + * @vbi: pointer to &struct v4l2_subdev_vbi_ops. Can be %NULL + * @ir: pointer to &struct v4l2_subdev_ir_ops. Can be %NULL + * @sensor: pointer to &struct v4l2_subdev_sensor_ops. Can be %NULL + * @pad: pointer to &struct v4l2_subdev_pad_ops. Can be %NULL + */ +struct v4l2_subdev_ops { + const struct v4l2_subdev_core_ops *core; + const struct v4l2_subdev_tuner_ops *tuner; + const struct v4l2_subdev_audio_ops *audio; + const struct v4l2_subdev_video_ops *video; + const struct v4l2_subdev_vbi_ops *vbi; + const struct v4l2_subdev_ir_ops *ir; + const struct v4l2_subdev_sensor_ops *sensor; + const struct v4l2_subdev_pad_ops *pad; +}; + +/** + * struct v4l2_subdev_internal_ops - V4L2 subdev internal ops + * + * @init_state: initialize the subdev state to default values + * + * @registered: called when this subdev is registered. When called the v4l2_dev + * field is set to the correct v4l2_device. + * + * @unregistered: called when this subdev is unregistered. 
When called the + * v4l2_dev field is still set to the correct v4l2_device. + * + * @open: called when the subdev device node is opened by an application. + * + * @close: called when the subdev device node is closed. Please note that + * it is possible for @close to be called after @unregistered! + * + * @release: called when the last user of the subdev device is gone. This + * happens after the @unregistered callback and when the last open + * filehandle to the v4l-subdevX device node was closed. If no device + * node was created for this sub-device, then the @release callback + * is called right after the @unregistered callback. + * The @release callback is typically used to free the memory containing + * the v4l2_subdev structure. It is almost certainly required for any + * sub-device that sets the V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * + * .. note:: + * Never call this from drivers, only the v4l2 framework can call + * these ops. + */ +struct v4l2_subdev_internal_ops { + int (*init_state)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state); + int (*registered)(struct v4l2_subdev *sd); + void (*unregistered)(struct v4l2_subdev *sd); + int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); + int (*close)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); + void (*release)(struct v4l2_subdev *sd); +}; + +/* Set this flag if this subdev is a i2c device. */ +#define V4L2_SUBDEV_FL_IS_I2C (1U << 0) +/* Set this flag if this subdev is a spi device. */ +#define V4L2_SUBDEV_FL_IS_SPI (1U << 1) +/* Set this flag if this subdev needs a device node. */ +#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2) +/* + * Set this flag if this subdev generates events. + * Note controls can send events, thus drivers exposing controls + * should set this flag. + */ +#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3) +/* + * Set this flag if this subdev supports multiplexed streams. 
This means
+ * that the driver supports routing and handles the stream parameter in its
+ * v4l2_subdev_pad_ops handlers. More specifically, this means:
+ *
+ * - Centrally managed subdev active state is enabled
+ * - Legacy pad config is _not_ supported (state->pads is NULL)
+ * - Routing ioctls are available
+ * - Multiple streams per pad are supported
+ */
+#define V4L2_SUBDEV_FL_STREAMS (1U << 4)
+
+struct regulator_bulk_data;
+
+/**
+ * struct v4l2_subdev_platform_data - regulators config struct
+ *
+ * @regulators: Optional regulators used to power on/off the subdevice
+ * @num_regulators: Number of regulators
+ * @host_priv: Per-subdevice data, specific for a certain video host device
+ */
+struct v4l2_subdev_platform_data {
+ struct regulator_bulk_data *regulators;
+ int num_regulators;
+
+ void *host_priv;
+};
+
+/**
+ * struct v4l2_subdev - describes a V4L2 sub-device
+ *
+ * @entity: pointer to &struct media_entity
+ * @list: List of sub-devices
+ * @owner: The owner is the same as the driver's &struct device owner.
+ * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
+ * owner. Initialized by v4l2_device_register_subdev().
+ * @flags: subdev flags. Can be:
+ * %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device;
+ * %V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is a spi device;
+ * %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if this subdev needs a
+ * device node;
+ * %V4L2_SUBDEV_FL_HAS_EVENTS - Set this flag if this subdev generates
+ * events.
+ *
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @ops: pointer to struct &v4l2_subdev_ops
+ * @internal_ops: pointer to struct &v4l2_subdev_internal_ops.
+ * Never call these internal ops from within a driver!
+ * @ctrl_handler: The control handler of this subdev. May be NULL.
+ * @name: Name of the sub-device. Please notice that the name must be unique.
+ * @grp_id: can be used to group similar subdevs.
Value is driver-specific + * @dev_priv: pointer to private data + * @host_priv: pointer to private data used by the device where the subdev + * is attached. + * @devnode: subdev device node + * @dev: pointer to the physical device, if any + * @fwnode: The fwnode_handle of the subdev, usually the same as + * either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL). + * @async_list: Links this subdev to a global subdev_list or + * @notifier->done_list list. + * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of + * &struct v4l2_async_subdev_endpoint. + * @subdev_notifier: A sub-device notifier implicitly registered for the sub- + * device using v4l2_async_register_subdev_sensor(). + * @asc_list: Async connection list, of &struct + * v4l2_async_connection.subdev_entry. + * @pdata: common part of subdevice platform data + * @state_lock: A pointer to a lock used for all the subdev's states, set by the + * driver. This is optional. If NULL, each state instance will get + * a lock of its own. + * @privacy_led: Optional pointer to a LED classdev for the privacy LED for sensors. + * @active_state: Active state for the subdev (NULL for subdevs tracking the + * state internally). Initialized by calling + * v4l2_subdev_init_finalize(). + * @enabled_pads: Bitmask of enabled pads used by v4l2_subdev_enable_streams() + * and v4l2_subdev_disable_streams() helper functions for + * fallback cases. + * @s_stream_enabled: Tracks whether streaming has been enabled with s_stream. + * This is only for call_s_stream() internal use. + * + * Each instance of a subdev driver should create this struct, either + * stand-alone or embedded in a larger struct. + * + * This structure should be initialized by v4l2_subdev_init() or one of + * its variants: v4l2_spi_subdev_init(), v4l2_i2c_subdev_init(). 
+ */ +struct v4l2_subdev { +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_entity entity; +#endif + struct list_head list; + struct module *owner; + bool owner_v4l2_dev; + u32 flags; + struct v4l2_device *v4l2_dev; + const struct v4l2_subdev_ops *ops; + const struct v4l2_subdev_internal_ops *internal_ops; + struct v4l2_ctrl_handler *ctrl_handler; + char name[52]; + u32 grp_id; + void *dev_priv; + void *host_priv; + struct video_device *devnode; + struct device *dev; + struct fwnode_handle *fwnode; + struct list_head async_list; + struct list_head async_subdev_endpoint_list; + struct v4l2_async_notifier *subdev_notifier; + struct list_head asc_list; + struct v4l2_subdev_platform_data *pdata; + struct mutex *state_lock; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + + struct led_classdev *privacy_led; + + /* + * TODO: active_state should most likely be changed from a pointer to an + * embedded field. For the time being it's kept as a pointer to more + * easily catch uses of active_state in the cases where the driver + * doesn't support it. + */ + struct v4l2_subdev_state *active_state; + u64 enabled_pads; + bool s_stream_enabled; +}; + + +/** + * media_entity_to_v4l2_subdev - Returns a &struct v4l2_subdev from + * the &struct media_entity embedded in it. + * + * @ent: pointer to &struct media_entity. + */ +#define media_entity_to_v4l2_subdev(ent) \ +({ \ + typeof(ent) __me_sd_ent = (ent); \ + \ + __me_sd_ent ? \ + container_of(__me_sd_ent, struct v4l2_subdev, entity) : \ + NULL; \ +}) + +/** + * vdev_to_v4l2_subdev - Returns a &struct v4l2_subdev from + * the &struct video_device embedded on it. 
+ * + * @vdev: pointer to &struct video_device + */ +#define vdev_to_v4l2_subdev(vdev) \ + ((struct v4l2_subdev *)video_get_drvdata(vdev)) + +/** + * struct v4l2_subdev_fh - Used for storing subdev information per file handle + * + * @vfh: pointer to &struct v4l2_fh + * @state: pointer to &struct v4l2_subdev_state + * @owner: module pointer to the owner of this file handle + * @client_caps: bitmask of ``V4L2_SUBDEV_CLIENT_CAP_*`` + */ +struct v4l2_subdev_fh { + struct v4l2_fh vfh; + struct module *owner; +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + struct v4l2_subdev_state *state; + u64 client_caps; +#endif +}; + +/** + * to_v4l2_subdev_fh - Returns a &struct v4l2_subdev_fh from + * the &struct v4l2_fh embedded on it. + * + * @fh: pointer to &struct v4l2_fh + */ +#define to_v4l2_subdev_fh(fh) \ + container_of(fh, struct v4l2_subdev_fh, vfh) + +extern const struct v4l2_file_operations v4l2_subdev_fops; + +/** + * v4l2_set_subdevdata - Sets V4L2 dev private device data + * + * @sd: pointer to &struct v4l2_subdev + * @p: pointer to the private device data to be stored. + */ +static inline void v4l2_set_subdevdata(struct v4l2_subdev *sd, void *p) +{ + sd->dev_priv = p; +} + +/** + * v4l2_get_subdevdata - Gets V4L2 dev private device data + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the pointer to the private device data to be stored. + */ +static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd) +{ + return sd->dev_priv; +} + +/** + * v4l2_set_subdev_hostdata - Sets V4L2 dev private host data + * + * @sd: pointer to &struct v4l2_subdev + * @p: pointer to the private data to be stored. + */ +static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p) +{ + sd->host_priv = p; +} + +/** + * v4l2_get_subdev_hostdata - Gets V4L2 dev private data + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the pointer to the private host data to be stored. 
+ */ +static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd) +{ + return sd->host_priv; +} + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode + * endpoint, assuming 1:1 port:pad + * + * @entity: Pointer to the subdev entity + * @endpoint: Pointer to a parsed fwnode endpoint + * + * This function can be used as the .get_fwnode_pad operation for + * subdevices that map port numbers and pad indexes 1:1. If the endpoint + * is owned by the subdevice, the function returns the endpoint port + * number. + * + * Returns the endpoint port number on success or a negative error code. + */ +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint); + +/** + * v4l2_subdev_link_validate_default - validates a media link + * + * @sd: pointer to &struct v4l2_subdev + * @link: pointer to &struct media_link + * @source_fmt: pointer to &struct v4l2_subdev_format + * @sink_fmt: pointer to &struct v4l2_subdev_format + * + * This function ensures that width, height and the media bus pixel + * code are equal on both source and sink of the link. + */ +int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); + +/** + * v4l2_subdev_link_validate - validates a media link + * + * @link: pointer to &struct media_link + * + * This function calls the subdev's link_validate ops to validate + * if a media link is valid for streaming. It also internally + * calls v4l2_subdev_link_validate_default() to ensure that + * width, height and the media bus pixel code are equal on both + * source and sink of the link. + * + * The function can be used as a drop-in &media_entity_ops.link_validate + * implementation for v4l2_subdev instances. 
It supports all links between + * subdevs, as well as links between subdevs and video devices, provided that + * the video devices also implement their &media_entity_ops.link_validate + * operation. + */ +int v4l2_subdev_link_validate(struct media_link *link); + +/** + * v4l2_subdev_has_pad_interdep - MC has_pad_interdep implementation for subdevs + * + * @entity: pointer to &struct media_entity + * @pad0: pad number for the first pad + * @pad1: pad number for the second pad + * + * This function is an implementation of the + * media_entity_operations.has_pad_interdep operation for subdevs that + * implement the multiplexed streams API (as indicated by the + * V4L2_SUBDEV_FL_STREAMS subdev flag). + * + * It considers two pads interdependent if there is an active route between pad0 + * and pad1. + */ +bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1); + +/** + * __v4l2_subdev_state_alloc - allocate v4l2_subdev_state + * + * @sd: pointer to &struct v4l2_subdev for which the state is being allocated. + * @lock_name: name of the state lock + * @key: lock_class_key for the lock + * + * Must call __v4l2_subdev_state_free() when state is no longer needed. + * + * Not to be called directly by the drivers. + */ +struct v4l2_subdev_state *__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, + const char *lock_name, + struct lock_class_key *key); + +/** + * __v4l2_subdev_state_free - free a v4l2_subdev_state + * + * @state: v4l2_subdev_state to be freed. + * + * Not to be called directly by the drivers. + */ +void __v4l2_subdev_state_free(struct v4l2_subdev_state *state); + +/** + * v4l2_subdev_init_finalize() - Finalizes the initialization of the subdevice + * @sd: The subdev + * + * This function finalizes the initialization of the subdev, including + * allocation of the active state for the subdev. 
+ * + * This function must be called by the subdev drivers that use the centralized + * active state, after the subdev struct has been initialized and + * media_entity_pads_init() has been called, but before registering the + * subdev. + * + * The user must call v4l2_subdev_cleanup() when the subdev is being removed. + */ +#define v4l2_subdev_init_finalize(sd) \ + ({ \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":sd->active_state->lock"; \ + __v4l2_subdev_init_finalize(sd, name, &__key); \ + }) + +int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, + struct lock_class_key *key); + +/** + * v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice + * @sd: The subdevice + * + * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of + * its release if resources have been associated with it using + * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize(). + */ +void v4l2_subdev_cleanup(struct v4l2_subdev *sd); + +/* + * A macro to generate the macro or function name for sub-devices state access + * wrapper macros below. + */ +#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \ + __v4l2_subdev_state_get_ ## NAME ## ARG + +/* + * A macro to constify the return value of the state accessors when the state + * parameter is const. + */ +#define __v4l2_subdev_state_constify_ret(state, value) \ + _Generic(state, \ + const struct v4l2_subdev_state *: (const typeof(*(value)) *)(value), \ + struct v4l2_subdev_state *: (value) \ + ) + +/** + * v4l2_subdev_state_get_format() - Get pointer to a stream format + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad + + * stream in the subdev state. + * + * For stream-unaware drivers the format for the corresponding pad is returned. + * If the pad does not exist, NULL is returned. 
+ */ +/* + * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with + * two or three arguments. The purpose of the __v4l2_subdev_state_gen_call() + * macro is to come up with the name of the function or macro to call, using + * the last two arguments (_stream and _pad). The selected function or macro is + * then called using the arguments specified by the caller. The + * __v4l2_subdev_state_constify_ret() macro constifies the returned pointer + * when the state is const, allowing the state accessors to guarantee + * const-correctness in all cases. + * + * A similar arrangement is used for v4l2_subdev_state_crop(), + * v4l2_subdev_state_compose() and v4l2_subdev_state_get_interval() below. + */ +#define v4l2_subdev_state_get_format(state, pad, ...) \ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_format_pad(state, pad) \ + __v4l2_subdev_state_get_format(state, pad, 0) +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +/** + * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to crop rectangle for the given pad + stream in the + * subdev state. + * + * For stream-unaware drivers the crop rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_crop(state, pad, ...) 
\ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_crop_pad(state, pad) \ + __v4l2_subdev_state_get_crop(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream); + +/** + * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to compose rectangle for the given pad + stream in the + * subdev state. + * + * For stream-unaware drivers the compose rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_compose(state, pad, ...) \ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_compose_pad(state, pad) \ + __v4l2_subdev_state_get_compose(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +/** + * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to the frame interval for the given pad + stream in + * the subdev state. + * + * For stream-unaware drivers the frame interval for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_interval(state, pad, ...) 
\ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_interval_pad(state, pad) \ + __v4l2_subdev_state_get_interval(state, pad, 0) +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +/** + * v4l2_subdev_get_fmt() - Fill format based on state + * @sd: subdevice + * @state: subdevice state + * @format: pointer to &struct v4l2_subdev_format + * + * Fill @format->format field based on the information in the @format struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_fmt if the subdev driver does not need to + * do anything special in their get_fmt op. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + +/** + * v4l2_subdev_get_frame_interval() - Fill frame interval based on state + * @sd: subdevice + * @state: subdevice state + * @fi: pointer to &struct v4l2_subdev_frame_interval + * + * Fill @fi->interval field based on the information in the @fi struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_frame_interval if the subdev driver does + * not need to do anything special in their get_frame_interval op. + * + * Returns 0 on success, error value otherwise. 
+ */ +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi); + +/** + * v4l2_subdev_set_routing() - Set given routing to subdev state + * @sd: The subdevice + * @state: The subdevice state + * @routing: Routing that will be copied to subdev state + * + * This will release old routing table (if any) from the state, allocate + * enough space for the given routing, and copy the routing. + * + * This can be used from the subdev driver's set_routing op, after validating + * the routing. + */ +int v4l2_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing); + +struct v4l2_subdev_route * +__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, + struct v4l2_subdev_route *route); + +/** + * for_each_active_route - iterate on all active routes of a routing table + * @routing: The routing table + * @route: The route iterator + */ +#define for_each_active_route(routing, route) \ + for ((route) = NULL; \ + ((route) = __v4l2_subdev_next_active_route((routing), (route)));) + +/** + * v4l2_subdev_set_routing_with_fmt() - Set given routing and format to subdev + * state + * @sd: The subdevice + * @state: The subdevice state + * @routing: Routing that will be copied to subdev state + * @fmt: Format used to initialize all the streams + * + * This is the same as v4l2_subdev_set_routing, but additionally initializes + * all the streams using the given format. 
+ */ +int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing, + const struct v4l2_mbus_framefmt *fmt); + +/** + * v4l2_subdev_routing_find_opposite_end() - Find the opposite stream + * @routing: routing used to find the opposite side + * @pad: pad id + * @stream: stream id + * @other_pad: pointer used to return the opposite pad + * @other_stream: pointer used to return the opposite stream + * + * This function uses the routing table to find the pad + stream which is + * opposite the given pad + stream. + * + * @other_pad and/or @other_stream can be NULL if the caller does not need the + * value. + * + * Returns 0 on success, or -EINVAL if no matching route is found. + */ +int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, + u32 pad, u32 stream, u32 *other_pad, + u32 *other_stream); + +/** + * v4l2_subdev_state_get_opposite_stream_format() - Get pointer to opposite + * stream format + * @state: subdevice state + * @pad: pad id + * @stream: stream id + * + * This returns a pointer to &struct v4l2_mbus_framefmt for the pad + stream + * that is opposite the given pad + stream in the subdev state. + * + * If the state does not contain the given pad + stream, NULL is returned. + */ +struct v4l2_mbus_framefmt * +v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, + u32 pad, u32 stream); + +/** + * v4l2_subdev_state_xlate_streams() - Translate streams from one pad to another + * + * @state: Subdevice state + * @pad0: The first pad + * @pad1: The second pad + * @streams: Streams bitmask on the first pad + * + * Streams on sink pads of a subdev are routed to source pads as expressed in + * the subdev state routing table. Stream numbers don't necessarily match on + * the sink and source side of a route. 
This function translates stream numbers + * on @pad0, expressed as a bitmask in @streams, to the corresponding streams + * on @pad1 using the routing table from the @state. It returns the stream mask + * on @pad1, and updates @streams with the streams that have been found in the + * routing table. + * + * @pad0 and @pad1 must be a sink and a source, in any order. + * + * Return: The bitmask of streams of @pad1 that are routed to @streams on @pad0. + */ +u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, + u32 pad0, u32 pad1, u64 *streams); + +/** + * enum v4l2_subdev_routing_restriction - Subdevice internal routing restrictions + * + * @V4L2_SUBDEV_ROUTING_NO_1_TO_N: + * an input stream shall not be routed to multiple output streams (stream + * duplication) + * @V4L2_SUBDEV_ROUTING_NO_N_TO_1: + * multiple input streams shall not be routed to the same output stream + * (stream merging) + * @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: + * all streams from a sink pad must be routed to a single source pad + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: + * all streams on a source pad must originate from a single sink pad + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: + * source pads shall not contain multiplexed streams + * @V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: + * sink pads shall not contain multiplexed streams + * @V4L2_SUBDEV_ROUTING_ONLY_1_TO_1: + * only non-overlapping 1-to-1 stream routing is allowed (a combination of + * @V4L2_SUBDEV_ROUTING_NO_1_TO_N and @V4L2_SUBDEV_ROUTING_NO_N_TO_1) + * @V4L2_SUBDEV_ROUTING_NO_STREAM_MIX: + * all streams from a sink pad must be routed to a single source pad, and + * that source pad shall not get routes from any other sink pad + * (a combination of @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX and + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) + * @V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING: + * no multiplexed streams allowed on either source or sink sides. 
+ */ +enum v4l2_subdev_routing_restriction { + V4L2_SUBDEV_ROUTING_NO_1_TO_N = BIT(0), + V4L2_SUBDEV_ROUTING_NO_N_TO_1 = BIT(1), + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX = BIT(2), + V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX = BIT(3), + V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING = BIT(4), + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING = BIT(5), + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 = + V4L2_SUBDEV_ROUTING_NO_1_TO_N | + V4L2_SUBDEV_ROUTING_NO_N_TO_1, + V4L2_SUBDEV_ROUTING_NO_STREAM_MIX = + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX | + V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX, + V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING = + V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING | + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING, +}; + +/** + * v4l2_subdev_routing_validate() - Verify that routes comply with driver + * constraints + * @sd: The subdevice + * @routing: Routing to verify + * @disallow: Restrictions on routes + * + * This verifies that the given routing complies with the @disallow constraints. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, + const struct v4l2_subdev_krouting *routing, + enum v4l2_subdev_routing_restriction disallow); + +/** + * v4l2_subdev_enable_streams() - Enable streams on a pad + * @sd: The subdevice + * @pad: The pad + * @streams_mask: Bitmask of streams to enable + * + * This function enables streams on a source @pad of a subdevice. The pad is + * identified by its index, while the streams are identified by the + * @streams_mask bitmask. This allows enabling multiple streams on a pad at + * once. + * + * Enabling a stream that is already enabled isn't allowed. If @streams_mask + * contains an already enabled stream, this function returns -EALREADY without + * performing any operation. + * + * Per-stream enable is only available for subdevs that implement the + * .enable_streams() and .disable_streams() operations. 
For other subdevs, this + * function implements a best-effort compatibility by calling the .s_stream() + * operation, limited to subdevs that have a single source pad. + * + * Return: + * * 0: Success + * * -EALREADY: One of the streams in streams_mask is already enabled + * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad + * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is + * impossible because the subdev has multiple source pads + */ +int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask); + +/** + * v4l2_subdev_disable_streams() - Disable streams on a pad + * @sd: The subdevice + * @pad: The pad + * @streams_mask: Bitmask of streams to disable + * + * This function disables streams on a source @pad of a subdevice. The pad is + * identified by its index, while the streams are identified by the + * @streams_mask bitmask. This allows disabling multiple streams on a pad at + * once. + * + * Disabling a streams that is not enabled isn't allowed. If @streams_mask + * contains a disabled stream, this function returns -EALREADY without + * performing any operation. + * + * Per-stream disable is only available for subdevs that implement the + * .enable_streams() and .disable_streams() operations. For other subdevs, this + * function implements a best-effort compatibility by calling the .s_stream() + * operation, limited to subdevs that have a single source pad. 
+ * + * Return: + * * 0: Success + * * -EALREADY: One of the streams in streams_mask is not enabled + * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad + * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is + * impossible because the subdev has multiple source pads + */ +int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask); + +/** + * v4l2_subdev_s_stream_helper() - Helper to implement the subdev s_stream + * operation using enable_streams and disable_streams + * @sd: The subdevice + * @enable: Enable or disable streaming + * + * Subdevice drivers that implement the streams-aware + * &v4l2_subdev_pad_ops.enable_streams and &v4l2_subdev_pad_ops.disable_streams + * operations can use this helper to implement the legacy + * &v4l2_subdev_video_ops.s_stream operation. + * + * This helper can only be used by subdevs that have a single source pad. + * + * Return: 0 on success, or a negative error code otherwise. + */ +int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable); + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/** + * v4l2_subdev_lock_state() - Locks the subdev state + * @state: The subdevice state + * + * Locks the given subdev state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state) +{ + mutex_lock(state->lock); +} + +/** + * v4l2_subdev_unlock_state() - Unlocks the subdev state + * @state: The subdevice state + * + * Unlocks the given subdev state. + */ +static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state) +{ + mutex_unlock(state->lock); +} + +/** + * v4l2_subdev_lock_states - Lock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Locks the state of two sub-devices. + * + * The states must be unlocked with v4l2_subdev_unlock_states() after use. 
+ * + * This differs from calling v4l2_subdev_lock_state() on both states so that if + * the states share the same lock, the lock is acquired only once (so no + * deadlock occurs). The caller is responsible for ensuring the locks will + * always be acquired in the same order. + */ +static inline void v4l2_subdev_lock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_lock(state1->lock); + if (state1->lock != state2->lock) + mutex_lock(state2->lock); +} + +/** + * v4l2_subdev_unlock_states() - Unlock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Unlocks the state of two sub-devices. + * + * This differs from calling v4l2_subdev_unlock_state() on both states so that + * if the states share the same lock, the lock is released only once. + */ +static inline void v4l2_subdev_unlock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_unlock(state1->lock); + if (state1->lock != state2->lock) + mutex_unlock(state2->lock); +} + +/** + * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state + * is unlocked and returns it + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls + * lockdep_assert_not_held() to issue a warning if the state is locked. + * + * This function is to be used e.g. when getting the active state for the sole + * purpose of passing it forward, without accessing the state fields. 
+ */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_not_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state + * is locked and returns it + * + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls lockdep_assert_held() + * to issue a warning if the state is not locked. + * + * This function is to be used when the caller knows that the active state is + * already locked. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev + * state for the subdevice + * @sd: The subdevice + * + * Returns the locked active state for the subdevice, or NULL if the subdev + * does not support active state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + v4l2_subdev_lock_state(sd->active_state); + return sd->active_state; +} + +/** + * v4l2_subdev_init - initializes the sub-device struct + * + * @sd: pointer to the &struct v4l2_subdev to be initialized + * @ops: pointer to &struct v4l2_subdev_ops. + */ +void v4l2_subdev_init(struct v4l2_subdev *sd, + const struct v4l2_subdev_ops *ops); + +extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; + +/** + * v4l2_subdev_call - call an operation of a v4l2_subdev. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. 
+ * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Example: err = v4l2_subdev_call(sd, video, s_std, norm); + */ +#define v4l2_subdev_call(sd, o, f, args...) \ + ({ \ + struct v4l2_subdev *__sd = (sd); \ + int __result; \ + if (!__sd) \ + __result = -ENODEV; \ + else if (!(__sd->ops->o && __sd->ops->o->f)) \ + __result = -ENOIOCTLCMD; \ + else if (v4l2_subdev_call_wrappers.o && \ + v4l2_subdev_call_wrappers.o->f) \ + __result = v4l2_subdev_call_wrappers.o->f( \ + __sd, ##args); \ + else \ + __result = __sd->ops->o->f(__sd, ##args); \ + __result; \ + }) + +/** + * v4l2_subdev_call_state_active - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev its active state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call(), except that this version can only be + * used for ops that take a subdev state as a parameter. The macro will get the + * active state, lock it before calling the op and unlock it after the call. + */ +#define v4l2_subdev_call_state_active(sd, o, f, args...) 
\ + ({ \ + int __result; \ + struct v4l2_subdev_state *state; \ + state = v4l2_subdev_get_unlocked_active_state(sd); \ + if (state) \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + if (state) \ + v4l2_subdev_unlock_state(state); \ + __result; \ + }) + +/** + * v4l2_subdev_call_state_try - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev a newly allocated try state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call_state_active(), except that as this + * version allocates a new state, this is only usable for + * V4L2_SUBDEV_FORMAT_TRY use cases. + * + * Note: only legacy non-MC drivers may need this macro. + */ +#define v4l2_subdev_call_state_try(sd, o, f, args...) \ + ({ \ + int __result; \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":state->lock"; \ + struct v4l2_subdev_state *state = \ + __v4l2_subdev_state_alloc(sd, name, &__key); \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + v4l2_subdev_unlock_state(state); \ + __v4l2_subdev_state_free(state); \ + __result; \ + }) + +/** + * v4l2_subdev_has_op - Checks if a subdev defines a certain operation. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: The group of callback functions in &struct v4l2_subdev_ops + * which @f is a part of. + * @f: callback function to be checked for its existence. 
+ */ +#define v4l2_subdev_has_op(sd, o, f) \ + ((sd)->ops->o && (sd)->ops->o->f) + +/** + * v4l2_subdev_notify_event() - Delivers event notification for subdevice + * @sd: The subdev for which to deliver the event + * @ev: The event to deliver + * + * Will deliver the specified event to all userspace event listeners which are + * subscribed to the v42l subdev event queue as well as to the bridge driver + * using the notify callback. The notification type for the notify callback + * will be %V4L2_DEVICE_NOTIFY_EVENT. + */ +void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev); + +/** + * v4l2_subdev_is_streaming() - Returns if the subdevice is streaming + * @sd: The subdevice + * + * v4l2_subdev_is_streaming() tells if the subdevice is currently streaming. + * "Streaming" here means whether .s_stream() or .enable_streams() has been + * successfully called, and the streaming has not yet been disabled. + * + * If the subdevice implements .enable_streams() this function must be called + * while holding the active state lock. + */ +bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd); + +#endif /* _V4L2_SUBDEV_H */ diff --git a/6.12.0/include-overrides/media/v4l2-vp9.h b/6.12.0/include-overrides/media/v4l2-vp9.h new file mode 100644 index 0000000..05478ad --- /dev/null +++ b/6.12.0/include-overrides/media/v4l2-vp9.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Helper functions for vp9 codecs. + * + * Copyright (c) 2021 Collabora, Ltd. + * + * Author: Andrzej Pietrasiewicz + */ + +#ifndef _MEDIA_V4L2_VP9_H +#define _MEDIA_V4L2_VP9_H + +#include + +/** + * struct v4l2_vp9_frame_mv_context - motion vector-related probabilities + * + * @joint: motion vector joint probabilities. + * @sign: motion vector sign probabilities. + * @classes: motion vector class probabilities. + * @class0_bit: motion vector class0 bit probabilities. + * @bits: motion vector bits probabilities. 
+ * @class0_fr: motion vector class0 fractional bit probabilities. + * @fr: motion vector fractional bit probabilities. + * @class0_hp: motion vector class0 high precision fractional bit probabilities. + * @hp: motion vector high precision fractional bit probabilities. + * + * A member of v4l2_vp9_frame_context. + */ +struct v4l2_vp9_frame_mv_context { + u8 joint[3]; + u8 sign[2]; + u8 classes[2][10]; + u8 class0_bit[2]; + u8 bits[2][10]; + u8 class0_fr[2][2][3]; + u8 fr[2][3]; + u8 class0_hp[2]; + u8 hp[2]; +}; + +/** + * struct v4l2_vp9_frame_context - frame probabilities, including motion-vector related + * + * @tx8: TX 8x8 probabilities. + * @tx16: TX 16x16 probabilities. + * @tx32: TX 32x32 probabilities. + * @coef: coefficient probabilities. + * @skip: skip probabilities. + * @inter_mode: inter mode probabilities. + * @interp_filter: interpolation filter probabilities. + * @is_inter: is inter-block probabilities. + * @comp_mode: compound prediction mode probabilities. + * @single_ref: single ref probabilities. + * @comp_ref: compound ref probabilities. + * @y_mode: Y prediction mode probabilities. + * @uv_mode: UV prediction mode probabilities. + * @partition: partition probabilities. + * @mv: motion vector probabilities. + * + * Drivers which need to keep track of frame context(s) can use this struct. + * The members correspond to probability tables, which are specified only implicitly in the + * vp9 spec. Section 10.5 "Default probability tables" contains all the types of involved + * tables, i.e. the actual tables are of the same kind, and when they are reset (which is + * mandated by the spec sometimes) they are overwritten with values from the default tables. 
+ */ +struct v4l2_vp9_frame_context { + u8 tx8[2][1]; + u8 tx16[2][2]; + u8 tx32[2][3]; + u8 coef[4][2][2][6][6][3]; + u8 skip[3]; + u8 inter_mode[7][3]; + u8 interp_filter[4][2]; + u8 is_inter[4]; + u8 comp_mode[5]; + u8 single_ref[5][2]; + u8 comp_ref[5]; + u8 y_mode[4][9]; + u8 uv_mode[10][9]; + u8 partition[16][3]; + + struct v4l2_vp9_frame_mv_context mv; +}; + +/** + * struct v4l2_vp9_frame_symbol_counts - pointers to arrays of symbol counts + * + * @partition: partition counts. + * @skip: skip counts. + * @intra_inter: is inter-block counts. + * @tx32p: TX32 counts. + * @tx16p: TX16 counts. + * @tx8p: TX8 counts. + * @y_mode: Y prediction mode counts. + * @uv_mode: UV prediction mode counts. + * @comp: compound prediction mode counts. + * @comp_ref: compound ref counts. + * @single_ref: single ref counts. + * @mv_mode: inter mode counts. + * @filter: interpolation filter counts. + * @mv_joint: motion vector joint counts. + * @sign: motion vector sign counts. + * @classes: motion vector class counts. + * @class0: motion vector class0 bit counts. + * @bits: motion vector bits counts. + * @class0_fp: motion vector class0 fractional bit counts. + * @fp: motion vector fractional bit counts. + * @class0_hp: motion vector class0 high precision fractional bit counts. + * @hp: motion vector high precision fractional bit counts. + * @coeff: coefficient counts. + * @eob: eob counts + * + * The fields correspond to what is specified in section 8.3 "Clear counts process" of the spec. + * Different pieces of hardware can report the counts in different order, so we cannot rely on + * simply overlaying a struct on a relevant block of memory. Instead we provide pointers to + * arrays or array of pointers to arrays in case of coeff, or array of pointers for eob. 
+ */ +struct v4l2_vp9_frame_symbol_counts { + u32 (*partition)[16][4]; + u32 (*skip)[3][2]; + u32 (*intra_inter)[4][2]; + u32 (*tx32p)[2][4]; + u32 (*tx16p)[2][4]; + u32 (*tx8p)[2][2]; + u32 (*y_mode)[4][10]; + u32 (*uv_mode)[10][10]; + u32 (*comp)[5][2]; + u32 (*comp_ref)[5][2]; + u32 (*single_ref)[5][2][2]; + u32 (*mv_mode)[7][4]; + u32 (*filter)[4][3]; + u32 (*mv_joint)[4]; + u32 (*sign)[2][2]; + u32 (*classes)[2][11]; + u32 (*class0)[2][2]; + u32 (*bits)[2][10][2]; + u32 (*class0_fp)[2][2][4]; + u32 (*fp)[2][4]; + u32 (*class0_hp)[2][2]; + u32 (*hp)[2][2]; + u32 (*coeff[4][2][2][6][6])[3]; + u32 *eob[4][2][2][6][6][2]; +}; + +extern const u8 v4l2_vp9_kf_y_mode_prob[10][10][9]; /* Section 10.4 of the spec */ +extern const u8 v4l2_vp9_kf_partition_probs[16][3]; /* Section 10.4 of the spec */ +extern const u8 v4l2_vp9_kf_uv_mode_prob[10][9]; /* Section 10.4 of the spec */ +extern const struct v4l2_vp9_frame_context v4l2_vp9_default_probs; /* Section 10.5 of the spec */ + +/** + * v4l2_vp9_fw_update_probs() - Perform forward update of vp9 probabilities + * + * @probs: current probabilities values + * @deltas: delta values from compressed header + * @dec_params: vp9 frame decoding parameters + * + * This function performs forward updates of probabilities for the vp9 boolean decoder. + * The frame header can contain a directive to update the probabilities (deltas), if so, then + * the deltas are provided in the header, too. The userspace parses those and passes the said + * deltas struct to the kernel. + */ +void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params); + +/** + * v4l2_vp9_reset_frame_ctx() - Reset appropriate frame context + * + * @dec_params: vp9 frame decoding parameters + * @frame_context: array of the 4 frame contexts + * + * This function resets appropriate frame contexts, based on what's in dec_params. 
+ * + * Returns the frame context index after the update, which might be reset to zero if + * mandated by the spec. + */ +u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params, + struct v4l2_vp9_frame_context *frame_context); + +/** + * v4l2_vp9_adapt_coef_probs() - Perform backward update of vp9 coefficients probabilities + * + * @probs: current probabilities values + * @counts: values of symbol counts after the current frame has been decoded + * @use_128: flag to request that 128 is used as update factor if true, otherwise 112 is used + * @frame_is_intra: flag indicating that FrameIsIntra is true + * + * This function performs backward updates of coefficients probabilities for the vp9 boolean + * decoder. After a frame has been decoded the counts of how many times a given symbol has + * occurred are known and are used to update the probability of each symbol. + */ +void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + bool use_128, + bool frame_is_intra); + +/** + * v4l2_vp9_adapt_noncoef_probs() - Perform backward update of vp9 non-coefficients probabilities + * + * @probs: current probabilities values + * @counts: values of symbol counts after the current frame has been decoded + * @reference_mode: specifies the type of inter prediction to be used. See + * &v4l2_vp9_reference_mode for more details + * @interpolation_filter: specifies the filter selection used for performing inter prediction. + * See &v4l2_vp9_interpolation_filter for more details + * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details + * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags + * + * This function performs backward updates of non-coefficients probabilities for the vp9 boolean + * decoder. After a frame has been decoded the counts of how many times a given symbol has + * occurred are known and are used to update the probability of each symbol. 
+ */ +void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + u8 reference_mode, u8 interpolation_filter, u8 tx_mode, + u32 flags); + +/** + * v4l2_vp9_seg_feat_enabled() - Check if a segmentation feature is enabled + * + * @feature_enabled: array of 8-bit flags (for all segments) + * @feature: id of the feature to check + * @segid: id of the segment to look up + * + * This function returns true if a given feature is active in a given segment. + */ +bool +v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled, + unsigned int feature, + unsigned int segid); + +#endif /* _MEDIA_V4L2_VP9_H */ diff --git a/6.12.0/include-overrides/media/videobuf2-core.h b/6.12.0/include-overrides/media/videobuf2-core.h new file mode 100644 index 0000000..9b02aeb --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-core.h @@ -0,0 +1,1348 @@ +/* + * videobuf2-core.h - Video Buffer 2 Core Framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_CORE_H +#define _MEDIA_VIDEOBUF2_CORE_H + +#include +#include +#include +#include +#include +#include +#include + +#define VB2_MAX_FRAME (32) +#define VB2_MAX_PLANES (8) + +/** + * enum vb2_memory - type of memory model used to make the buffers visible + * on userspace. + * + * @VB2_MEMORY_UNKNOWN: Buffer status is unknown or it is not used yet on + * userspace. + * @VB2_MEMORY_MMAP: The buffers are allocated by the Kernel and it is + * memory mapped via mmap() ioctl. This model is + * also used when the user is using the buffers via + * read() or write() system calls. + * @VB2_MEMORY_USERPTR: The buffers was allocated in userspace and it is + * memory mapped via mmap() ioctl. 
+ * @VB2_MEMORY_DMABUF: The buffers are passed to userspace via DMA buffer. + */ +enum vb2_memory { + VB2_MEMORY_UNKNOWN = 0, + VB2_MEMORY_MMAP = 1, + VB2_MEMORY_USERPTR = 2, + VB2_MEMORY_DMABUF = 4, +}; + +struct vb2_fileio_data; +struct vb2_threadio_data; +struct vb2_buffer; + +/** + * struct vb2_mem_ops - memory handling/memory allocator operations. + * @alloc: allocate video memory and, optionally, allocator private data, + * return ERR_PTR() on failure or a pointer to allocator private, + * per-buffer data on success; the returned private structure + * will then be passed as @buf_priv argument to other ops in this + * structure. The size argument to this function shall be + * *page aligned*. + * @put: inform the allocator that the buffer will no longer be used; + * usually will result in the allocator freeing the buffer (if + * no other users of this buffer are present); the @buf_priv + * argument is the allocator private per-buffer structure + * previously returned from the alloc callback. + * @get_dmabuf: acquire userspace memory for a hardware operation; used for + * DMABUF memory types. + * @get_userptr: acquire userspace memory for a hardware operation; used for + * USERPTR memory types; vaddr is the address passed to the + * videobuf2 layer when queuing a video buffer of USERPTR type; + * should return an allocator private per-buffer structure + * associated with the buffer on success, ERR_PTR() on failure; + * the returned private structure will then be passed as @buf_priv + * argument to other ops in this structure. + * @put_userptr: inform the allocator that a USERPTR buffer will no longer + * be used. + * @prepare: called every time the buffer is passed from userspace to the + * driver, useful for cache synchronisation, optional. + * @finish: called every time the buffer is passed back from the driver + * to the userspace, also optional. 
+ * @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation; + * used for DMABUF memory types; dev is the alloc device + * dbuf is the shared dma_buf; returns ERR_PTR() on failure; + * allocator private per-buffer structure on success; + * this needs to be used for further accesses to the buffer. + * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF + * buffer is no longer used; the @buf_priv argument is the + * allocator private per-buffer structure previously returned + * from the attach_dmabuf callback. + * @map_dmabuf: request for access to the dmabuf from allocator; the allocator + * of dmabuf is informed that this driver is going to use the + * dmabuf. + * @unmap_dmabuf: releases access control to the dmabuf - allocator is notified + * that this driver is done using the dmabuf for now. + * @vaddr: return a kernel virtual address to a given memory buffer + * associated with the passed private structure or NULL if no + * such mapping exists. + * @cookie: return allocator specific cookie for a given memory buffer + * associated with the passed private structure or NULL if not + * available. + * @num_users: return the current number of users of a memory buffer; + * return 1 if the videobuf2 layer (or actually the driver using + * it) is the only user. + * @mmap: setup a userspace mapping for a given memory buffer under + * the provided virtual memory region. + * + * Those operations are used by the videobuf2 core to implement the memory + * handling/memory allocators for each type of supported streaming I/O method. + * + * .. note:: + * #) Required ops for USERPTR types: get_userptr, put_userptr. + * + * #) Required ops for MMAP types: alloc, put, num_users, mmap. + * + * #) Required ops for read/write access types: alloc, put, num_users, vaddr. + * + * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, + * map_dmabuf, unmap_dmabuf. 
+ */ +struct vb2_mem_ops { + void *(*alloc)(struct vb2_buffer *vb, + struct device *dev, + unsigned long size); + void (*put)(void *buf_priv); + struct dma_buf *(*get_dmabuf)(struct vb2_buffer *vb, + void *buf_priv, + unsigned long flags); + + void *(*get_userptr)(struct vb2_buffer *vb, + struct device *dev, + unsigned long vaddr, + unsigned long size); + void (*put_userptr)(void *buf_priv); + + void (*prepare)(void *buf_priv); + void (*finish)(void *buf_priv); + + void *(*attach_dmabuf)(struct vb2_buffer *vb, + struct device *dev, + struct dma_buf *dbuf, + unsigned long size); + void (*detach_dmabuf)(void *buf_priv); + int (*map_dmabuf)(void *buf_priv); + void (*unmap_dmabuf)(void *buf_priv); + + void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv); + void *(*cookie)(struct vb2_buffer *vb, void *buf_priv); + + unsigned int (*num_users)(void *buf_priv); + + int (*mmap)(void *buf_priv, struct vm_area_struct *vma); +}; + +/** + * struct vb2_plane - plane information. + * @mem_priv: private data with this plane. + * @dbuf: dma_buf - shared buffer object. + * @dbuf_mapped: flag to show whether dbuf is mapped or not + * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a + * previous plane of the buffer. + * @bytesused: number of bytes occupied by data in the plane (payload). + * @length: size of this plane (NOT the payload) in bytes. The maximum + * valid size is MAX_UINT - PAGE_SIZE. + * @min_length: minimum required size of this plane (NOT the payload) in bytes. + * @length is always greater or equal to @min_length, and like + * @length, it is limited to MAX_UINT - PAGE_SIZE. + * @m: Union with memtype-specific data. + * @m.offset: when memory in the associated struct vb2_buffer is + * %VB2_MEMORY_MMAP, equals the offset from the start of + * the device memory for this plane (or is a "cookie" that + * should be passed to mmap() called on the video node). 
+ * @m.userptr: when memory is %VB2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane. + * @m.fd: when memory is %VB2_MEMORY_DMABUF, a userspace file + * descriptor associated with this plane. + * @data_offset: offset in the plane to the start of data; usually 0, + * unless there is a header in front of the data. + * + * Should contain enough information to be able to cover all the fields + * of &struct v4l2_plane at videodev2.h. + */ +struct vb2_plane { + void *mem_priv; + struct dma_buf *dbuf; + unsigned int dbuf_mapped; + bool dbuf_duplicated; + unsigned int bytesused; + unsigned int length; + unsigned int min_length; + union { + unsigned int offset; + unsigned long userptr; + int fd; + } m; + unsigned int data_offset; +}; + +/** + * enum vb2_io_modes - queue access methods. + * @VB2_MMAP: driver supports MMAP with streaming API. + * @VB2_USERPTR: driver supports USERPTR with streaming API. + * @VB2_READ: driver supports read() style access. + * @VB2_WRITE: driver supports write() style access. + * @VB2_DMABUF: driver supports DMABUF with streaming API. + */ +enum vb2_io_modes { + VB2_MMAP = BIT(0), + VB2_USERPTR = BIT(1), + VB2_READ = BIT(2), + VB2_WRITE = BIT(3), + VB2_DMABUF = BIT(4), +}; + +/** + * enum vb2_buffer_state - current video buffer state. + * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control. + * @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request. + * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf2. + * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf2, but not in driver. + * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used + * in a hardware operation. + * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf2, but + * not yet dequeued to userspace. + * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer + * has ended with an error, which will be reported + * to the userspace when it is dequeued. 
+ */ +enum vb2_buffer_state { + VB2_BUF_STATE_DEQUEUED, + VB2_BUF_STATE_IN_REQUEST, + VB2_BUF_STATE_PREPARING, + VB2_BUF_STATE_QUEUED, + VB2_BUF_STATE_ACTIVE, + VB2_BUF_STATE_DONE, + VB2_BUF_STATE_ERROR, +}; + +struct vb2_queue; + +/** + * struct vb2_buffer - represents a video buffer. + * @vb2_queue: pointer to &struct vb2_queue with the queue to + * which this driver belongs. + * @index: id number of the buffer. + * @type: buffer type. + * @memory: the method, in which the actual data is passed. + * @num_planes: number of planes in the buffer + * on an internal driver queue. + * @timestamp: frame timestamp in ns. + * @request: the request this buffer is associated with. + * @req_obj: used to bind this buffer to a request. This + * request object has a refcount. + */ +struct vb2_buffer { + struct vb2_queue *vb2_queue; + unsigned int index; + unsigned int type; + unsigned int memory; + unsigned int num_planes; + u64 timestamp; + struct media_request *request; + struct media_request_object req_obj; + + /* private: internal use only + * + * state: current buffer state; do not change + * synced: this buffer has been synced for DMA, i.e. the + * 'prepare' memop was called. It is cleared again + * after the 'finish' memop is called. + * prepared: this buffer has been prepared, i.e. the + * buf_prepare op was called. It is cleared again + * after the 'buf_finish' op is called. + * copied_timestamp: the timestamp of this capture buffer was copied + * from an output buffer. + * skip_cache_sync_on_prepare: when set buffer's ->prepare() function + * skips cache sync/invalidation. + * skip_cache_sync_on_finish: when set buffer's ->finish() function + * skips cache sync/invalidation. 
+ * planes: per-plane information; do not change + * queued_entry: entry on the queued buffers list, which holds + * all buffers queued from userspace + * done_entry: entry on the list that stores all buffers ready + * to be dequeued to userspace + */ + enum vb2_buffer_state state; + unsigned int synced:1; + unsigned int prepared:1; + unsigned int copied_timestamp:1; + unsigned int skip_cache_sync_on_prepare:1; + unsigned int skip_cache_sync_on_finish:1; + + struct vb2_plane planes[VB2_MAX_PLANES]; + struct list_head queued_entry; + struct list_head done_entry; +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Counters for how often these buffer-related ops are + * called. Used to check for unbalanced ops. + */ + u32 cnt_mem_alloc; + u32 cnt_mem_put; + u32 cnt_mem_get_dmabuf; + u32 cnt_mem_get_userptr; + u32 cnt_mem_put_userptr; + u32 cnt_mem_prepare; + u32 cnt_mem_finish; + u32 cnt_mem_attach_dmabuf; + u32 cnt_mem_detach_dmabuf; + u32 cnt_mem_map_dmabuf; + u32 cnt_mem_unmap_dmabuf; + u32 cnt_mem_vaddr; + u32 cnt_mem_cookie; + u32 cnt_mem_num_users; + u32 cnt_mem_mmap; + + u32 cnt_buf_out_validate; + u32 cnt_buf_init; + u32 cnt_buf_prepare; + u32 cnt_buf_finish; + u32 cnt_buf_cleanup; + u32 cnt_buf_queue; + u32 cnt_buf_request_complete; + + /* This counts the number of calls to vb2_buffer_done() */ + u32 cnt_buf_done; +#endif +}; + +/** + * struct vb2_ops - driver-specific callbacks. + * + * These operations are not called from interrupt context except where + * mentioned specifically. + * + * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS() + * handlers before memory allocation. It can be called + * twice: if the original number of requested buffers + * could not be allocated, then it will be called a + * second time with the actually allocated number of + * buffers to verify if that is OK. 
+ * The driver should return the required number of buffers + * in \*num_buffers, the required number of planes per + * buffer in \*num_planes, the size of each plane should be + * set in the sizes\[\] array and optional per-plane + * allocator specific device in the alloc_devs\[\] array. + * When called from VIDIOC_REQBUFS(), \*num_planes == 0, + * the driver has to use the currently configured format to + * determine the plane sizes and \*num_buffers is the total + * number of buffers that are being allocated. When called + * from VIDIOC_CREATE_BUFS(), \*num_planes != 0 and it + * describes the requested number of planes and sizes\[\] + * contains the requested plane sizes. In this case + * \*num_buffers are being allocated additionally to + * the buffers already allocated. If either \*num_planes + * or the requested sizes are invalid callback must return %-EINVAL. + * @wait_prepare: release any locks taken while calling vb2 functions; + * it is called before an ioctl needs to wait for a new + * buffer to arrive; required to avoid a deadlock in + * blocking access type. + * @wait_finish: reacquire all locks released in the previous callback; + * required to continue operation after sleeping while + * waiting for a new buffer to arrive. + * @buf_out_validate: called when the output buffer is prepared or queued + * to a request; drivers can use this to validate + * userspace-provided information; this is required only + * for OUTPUT queues. + * @buf_init: called once after allocating a buffer (in MMAP case) + * or after acquiring a new USERPTR buffer; drivers may + * perform additional buffer-related initialization; + * initialization failure (return != 0) will prevent + * queue setup from completing successfully; optional. 
+ * @buf_prepare: called every time the buffer is queued from userspace + * and from the VIDIOC_PREPARE_BUF() ioctl; drivers may + * perform any initialization required before each + * hardware operation in this callback; drivers can + * access/modify the buffer here as it is still synced for + * the CPU; drivers that support VIDIOC_CREATE_BUFS() must + * also validate the buffer size; if an error is returned, + * the buffer will not be queued in driver; optional. + * @buf_finish: called before every dequeue of the buffer back to + * userspace; the buffer is synced for the CPU, so drivers + * can access/modify the buffer contents; drivers may + * perform any operations required before userspace + * accesses the buffer; optional. The buffer state can be + * one of the following: %DONE and %ERROR occur while + * streaming is in progress, and the %PREPARED state occurs + * when the queue has been canceled and all pending + * buffers are being returned to their default %DEQUEUED + * state. Typically you only have to do something if the + * state is %VB2_BUF_STATE_DONE, since in all other cases + * the buffer contents will be ignored anyway. + * @buf_cleanup: called once before the buffer is freed; drivers may + * perform any additional cleanup; optional. + * @prepare_streaming: called once to prepare for 'streaming' state; this is + * where validation can be done to verify everything is + * okay and streaming resources can be claimed. It is + * called when the VIDIOC_STREAMON ioctl is called. The + * actual streaming starts when @start_streaming is called. + * Optional. 
+ * @start_streaming: called once to enter 'streaming' state; the driver may + * receive buffers with @buf_queue callback + * before @start_streaming is called; the driver gets the + * number of already queued buffers in count parameter; + * driver can return an error if hardware fails, in that + * case all buffers that have been already given by + * the @buf_queue callback are to be returned by the driver + * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED. + * If you need a minimum number of buffers before you can + * start streaming, then set + * &vb2_queue->min_queued_buffers. If that is non-zero + * then @start_streaming won't be called until at least + * that many buffers have been queued up by userspace. + * @stop_streaming: called when 'streaming' state must be disabled; driver + * should stop any DMA transactions or wait until they + * finish and give back all buffers it got from &buf_queue + * callback by calling vb2_buffer_done() with either + * %VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR; may use + * vb2_wait_for_all_buffers() function + * @unprepare_streaming:called as counterpart to @prepare_streaming; any claimed + * streaming resources can be released here. It is + * called when the VIDIOC_STREAMOFF ioctls is called or + * when the streaming filehandle is closed. Optional. + * @buf_queue: passes buffer vb to the driver; driver may start + * hardware operation on this buffer; driver should give + * the buffer back by calling vb2_buffer_done() function; + * it is always called after calling VIDIOC_STREAMON() + * ioctl; might be called before @start_streaming callback + * if user pre-queued buffers before calling + * VIDIOC_STREAMON(). + * @buf_request_complete: a buffer that was never queued to the driver but is + * associated with a queued request was canceled. + * The driver will have to mark associated objects in the + * request as completed; required if requests are + * supported. 
+ */ +struct vb2_ops { + int (*queue_setup)(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]); + + void (*wait_prepare)(struct vb2_queue *q); + void (*wait_finish)(struct vb2_queue *q); + + int (*buf_out_validate)(struct vb2_buffer *vb); + int (*buf_init)(struct vb2_buffer *vb); + int (*buf_prepare)(struct vb2_buffer *vb); + void (*buf_finish)(struct vb2_buffer *vb); + void (*buf_cleanup)(struct vb2_buffer *vb); + + int (*prepare_streaming)(struct vb2_queue *q); + int (*start_streaming)(struct vb2_queue *q, unsigned int count); + void (*stop_streaming)(struct vb2_queue *q); + void (*unprepare_streaming)(struct vb2_queue *q); + + void (*buf_queue)(struct vb2_buffer *vb); + + void (*buf_request_complete)(struct vb2_buffer *vb); +}; + +/** + * struct vb2_buf_ops - driver-specific callbacks. + * + * @verify_planes_array: Verify that a given user space structure contains + * enough planes for the buffer. This is called + * for each dequeued buffer. + * @init_buffer: given a &vb2_buffer initialize the extra data after + * struct vb2_buffer. + * For V4L2 this is a &struct vb2_v4l2_buffer. + * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure. + * For V4L2 this is a &struct v4l2_buffer. + * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer. + * If the userspace structure is invalid, then this op + * will return an error. + * @copy_timestamp: copy the timestamp from a userspace structure to + * the &struct vb2_buffer. + */ +struct vb2_buf_ops { + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); + void (*init_buffer)(struct vb2_buffer *vb); + void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); + int (*fill_vb2_buffer)(struct vb2_buffer *vb, struct vb2_plane *planes); + void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb); +}; + +/** + * struct vb2_queue - a videobuf2 queue. 
+ * + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the types defined on &enum v4l2_buf_type. + * @io_modes: supported io methods (see &enum vb2_io_modes). + * @dev: device to use for the default allocation context if the driver + * doesn't fill in the @alloc_devs array. + * @dma_attrs: DMA attributes to use for the DMA. + * @bidirectional: when this flag is set the DMA direction for the buffers of + * this queue will be overridden with %DMA_BIDIRECTIONAL direction. + * This is useful in cases where the hardware (firmware) writes to + * a buffer which is mapped as read (%DMA_TO_DEVICE), or reads from + * buffer which is mapped for write (%DMA_FROM_DEVICE) in order + * to satisfy some internal hardware restrictions or adds a padding + * needed by the processing algorithm. In case the DMA mapping is + * not bidirectional but the hardware (firmware) trying to access + * the buffer (in the opposite direction) this could lead to an + * IOMMU protection faults. + * @fileio_read_once: report EOF after reading the first buffer + * @fileio_write_immediately: queue buffer after each write() call + * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver + * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF + * has not been called. This is a vb1 idiom that has been adopted + * also by vb2. + * @supports_requests: this queue supports the Request API. + * @requires_requests: this queue requires the Request API. If this is set to 1, + * then supports_requests must be set to 1 as well. + * @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first + * time this is called. Set to 0 when the queue is canceled. + * If this is 1, then you cannot queue buffers from a request. + * @uses_requests: requests are used for this queue. Set to 1 the first time + * a request is queued. Set to 0 when the queue is canceled. 
+ * If this is 1, then you cannot queue buffers directly. + * @allow_cache_hints: when set user-space can pass cache management hints in + * order to skip cache flush/invalidation on ->prepare() or/and + * ->finish(). + * @non_coherent_mem: when set queue will attempt to allocate buffers using + * non-coherent memory. + * @lock: pointer to a mutex that protects the &struct vb2_queue. The + * driver can set this to a mutex to let the v4l2 core serialize + * the queuing ioctls. If the driver wants to handle locking + * itself, then this should be set to NULL. This lock is not used + * by the videobuf2 core API. + * @owner: The filehandle that 'owns' the buffers, i.e. the filehandle + * that called reqbufs, create_buffers or started fileio. + * This field is not used by the videobuf2 core API, but it allows + * drivers to easily associate an owner filehandle with the queue. + * @ops: driver-specific callbacks + * @mem_ops: memory allocator specific callbacks + * @buf_ops: callbacks to deliver buffer information. + * between user-space and kernel-space. + * @drv_priv: driver private data. + * @subsystem_flags: Flags specific to the subsystem (V4L2/DVB/etc.). Not used + * by the vb2 core. + * @buf_struct_size: size of the driver-specific buffer structure; + * "0" indicates the driver doesn't want to use a custom buffer + * structure type. In that case a subsystem-specific struct + * will be used (in the case of V4L2 that is + * ``sizeof(struct vb2_v4l2_buffer)``). The first field of the + * driver-specific buffer structure must be the subsystem-specific + * struct (vb2_v4l2_buffer in the case of V4L2). + * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and + * ``V4L2_BUF_FLAG_TSTAMP_SRC_*`` + * @gfp_flags: additional gfp flags used when allocating the buffers. + * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32 + * to force the buffer allocation to a specific memory zone. 
+ * @min_queued_buffers: the minimum number of queued buffers needed before + * @start_streaming can be called. Used when a DMA engine + * cannot be started unless at least this number of buffers + * have been queued into the driver. + * VIDIOC_REQBUFS will ensure at least @min_queued_buffers + 1 + * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not + * modify the requested buffer count. + * @min_reqbufs_allocation: the minimum number of buffers to be allocated when + * calling VIDIOC_REQBUFS. Note that VIDIOC_CREATE_BUFS will *not* + * modify the requested buffer count and does not use this field. + * Drivers can set this if there has to be a certain number of + * buffers available for the hardware to work effectively. + * This allows calling VIDIOC_REQBUFS with a buffer count of 1 and + * it will be automatically adjusted to a workable buffer count. + * If set, then @min_reqbufs_allocation must be larger than + * @min_queued_buffers + 1. + * If this field is > 3, then it is highly recommended that the + * driver implements the V4L2_CID_MIN_BUFFERS_FOR_CAPTURE/OUTPUT + * control. + * @alloc_devs: &struct device memory type/allocator-specific per-plane device + */ +/* + * Private elements (won't appear at the uAPI book): + * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped + * @memory: current memory type used + * @dma_dir: DMA mapping direction. + * @bufs: videobuf2 buffer structures. If it is non-NULL then + * bufs_bitmap is also non-NULL. + * @bufs_bitmap: bitmap tracking whether each bufs[] entry is used + * @max_num_buffers: upper limit of number of allocated/used buffers. + * If set to 0 v4l2 core will change it VB2_MAX_FRAME + * for backward compatibility. + * @queued_list: list of buffers currently queued from userspace + * @queued_count: number of buffers queued and ready for streaming. 
+ * @owned_by_drv_count: number of buffers owned by the driver + * @done_list: list of buffers ready to be dequeued to userspace + * @done_lock: lock to protect done_list list + * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued + * @streaming: current streaming state + * @start_streaming_called: @start_streaming was called successfully and we + * started streaming. + * @error: a fatal error occurred on the queue + * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for + * buffers. Only set for capture queues if qbuf has not yet been + * called since poll() needs to return %EPOLLERR in that situation. + * @waiting_in_dqbuf: set by the core for the duration of a blocking DQBUF, when + * it has to wait for a buffer to become available with vb2_queue->lock + * released. Used to prevent destroying the queue by other threads. + * @is_multiplanar: set if buffer type is multiplanar + * @is_output: set if buffer type is output + * @is_busy: set if at least one buffer has been allocated at some time. + * @copy_timestamp: set if vb2-core should set timestamps + * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the + * last decoded buffer was already dequeued. Set for capture queues + * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued. + * @fileio: file io emulator internal data, used only if emulator is active + * @threadio: thread io internal data, used only if thread is active + * @name: queue name, used for logging purpose. Initialized automatically + * if left empty by drivers. 
+ */ +struct vb2_queue { + unsigned int type; + unsigned int io_modes; + struct device *dev; + unsigned long dma_attrs; + unsigned int bidirectional:1; + unsigned int fileio_read_once:1; + unsigned int fileio_write_immediately:1; + unsigned int allow_zero_bytesused:1; + unsigned int quirk_poll_must_check_waiting_for_buffers:1; + unsigned int supports_requests:1; + unsigned int requires_requests:1; + unsigned int uses_qbuf:1; + unsigned int uses_requests:1; + unsigned int allow_cache_hints:1; + unsigned int non_coherent_mem:1; + + struct mutex *lock; + void *owner; + + const struct vb2_ops *ops; + const struct vb2_mem_ops *mem_ops; + const struct vb2_buf_ops *buf_ops; + + void *drv_priv; + u32 subsystem_flags; + unsigned int buf_struct_size; + u32 timestamp_flags; + gfp_t gfp_flags; + u32 min_queued_buffers; + u32 min_reqbufs_allocation; + + struct device *alloc_devs[VB2_MAX_PLANES]; + + /* private: internal use only */ + struct mutex mmap_lock; + unsigned int memory; + enum dma_data_direction dma_dir; + struct vb2_buffer **bufs; + unsigned long *bufs_bitmap; + unsigned int max_num_buffers; + + struct list_head queued_list; + unsigned int queued_count; + + atomic_t owned_by_drv_count; + struct list_head done_list; + spinlock_t done_lock; + wait_queue_head_t done_wq; + + unsigned int streaming:1; + unsigned int start_streaming_called:1; + unsigned int error:1; + unsigned int waiting_for_buffers:1; + unsigned int waiting_in_dqbuf:1; + unsigned int is_multiplanar:1; + unsigned int is_output:1; + unsigned int is_busy:1; + unsigned int copy_timestamp:1; + unsigned int last_buffer_dequeued:1; + + struct vb2_fileio_data *fileio; + struct vb2_threadio_data *threadio; + + char name[32]; + +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Counters for how often these queue-related ops are + * called. Used to check for unbalanced ops. 
+ */ + u32 cnt_queue_setup; + u32 cnt_wait_prepare; + u32 cnt_wait_finish; + u32 cnt_prepare_streaming; + u32 cnt_start_streaming; + u32 cnt_stop_streaming; + u32 cnt_unprepare_streaming; +#endif +}; + +/** + * vb2_queue_allows_cache_hints() - Return true if the queue allows cache + * and memory consistency hints. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + */ +static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q) +{ + return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP; +} + +/** + * vb2_plane_vaddr() - Return a kernel virtual address of a given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the address is to be returned. + * + * This function returns a kernel virtual address of a given plane if + * such a mapping exist, NULL otherwise. + */ +void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); + +/** + * vb2_plane_cookie() - Return allocator specific cookie for the given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the cookie is to be returned. + * + * This function returns an allocator specific cookie for a given plane if + * available, NULL otherwise. The allocator should provide some simple static + * inline function, which would convert this cookie to the allocator specific + * type that can be used directly by the driver to access the buffer. This can + * be for example physical address, pointer to scatter list or IOMMU mapping. + */ +void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); + +/** + * vb2_buffer_done() - inform videobuf2 that an operation on a buffer + * is finished. + * @vb: pointer to &struct vb2_buffer to be used. + * @state: state of the buffer, as defined by &enum vb2_buffer_state. 
+ * Either %VB2_BUF_STATE_DONE if the operation finished + * successfully, %VB2_BUF_STATE_ERROR if the operation finished + * with an error or %VB2_BUF_STATE_QUEUED. + * + * This function should be called by the driver after a hardware operation on + * a buffer is finished and the buffer may be returned to userspace. The driver + * cannot use this buffer anymore until it is queued back to it by videobuf + * by the means of &vb2_ops->buf_queue callback. Only buffers previously queued + * to the driver by &vb2_ops->buf_queue can be passed to this function. + * + * While streaming a buffer can only be returned in state DONE or ERROR. + * The &vb2_ops->start_streaming op can also return them in case the DMA engine + * cannot be started for some reason. In that case the buffers should be + * returned with state QUEUED to put them back into the queue. + */ +void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); + +/** + * vb2_discard_done() - discard all buffers marked as DONE. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function is intended to be used with suspend/resume operations. It + * discards all 'done' buffers as they would be too old to be requested after + * resume. + * + * Drivers must stop the hardware and synchronize with interrupt handlers and/or + * delayed works before calling this function to make sure no buffer will be + * touched by the driver and/or hardware. + */ +void vb2_discard_done(struct vb2_queue *q); + +/** + * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function will wait until all buffers that have been given to the driver + * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It + * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair. + * It is intended to be called with all locks taken, for example from + * &vb2_ops->stop_streaming callback. 
+ */ +int vb2_wait_for_all_buffers(struct vb2_queue *q); + +/** + * vb2_core_querybuf() - query video buffer information. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer struct passed from userspace. + * + * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * The passed buffer should have been verified. + * + * This function fills the relevant information for the userspace. + * + * Return: returns zero on success; an error code otherwise. + */ +void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); + +/** + * vb2_core_reqbufs() - Initiate streaming. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. Currently, the only + * used flag is %V4L2_MEMORY_FLAG_NON_COHERENT. + * @count: requested buffer count. + * + * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) verifies streaming parameters passed from the userspace; + * #) sets up the queue; + * #) negotiates number of buffers and planes per buffer with the driver + * to be used during streaming; + * #) allocates internal buffer structures (&struct vb2_buffer), according to + * the agreed parameters; + * #) for MMAP memory type, allocates actual video memory, using the + * memory handling/allocation routines provided during queue initialization. + * + * If req->count is 0, all the memory will be freed instead. + * + * If the queue has been allocated previously by a previous vb2_core_reqbufs() + * call and the queue is not busy, memory will be reallocated. + * + * Return: returns zero on success; an error code otherwise. 
+ */ +int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count); + +/** + * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. + * @count: requested buffer count. + * @requested_planes: number of planes requested. + * @requested_sizes: array with the size of the planes. + * @first_index: index of the first created buffer, all allocated buffers have + * indices in the range [first_index..first_index+count-1] + * + * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * This function: + * + * #) verifies parameter sanity; + * #) calls the &vb2_ops->queue_setup queue operation; + * #) performs any necessary memory allocations. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count, + unsigned int requested_planes, + const unsigned int requested_sizes[], + unsigned int *first_index); + +/** + * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace + * to the kernel. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + * + * Videobuf2 core helper to implement VIDIOC_PREPARE_BUF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * The passed buffer should have been verified. + * + * This function calls vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed. 
+ * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); + +/** + * vb2_core_remove_bufs() - + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @start: first index of the range of buffers to remove. + * @count: number of buffers to remove. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count); + +/** + * vb2_core_qbuf() - Queue a buffer from userspace + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer structure passed from userspace to + * v4l2_ioctl_ops->vidioc_qbuf handler in driver + * @req: pointer to &struct media_request, may be NULL. + * + * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) If @req is non-NULL, then the buffer will be bound to this + * media request and it returns. The buffer will be prepared and + * queued to the driver (i.e. the next two steps) when the request + * itself is queued. + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; + * #) if streaming is on, queues the buffer in driver by the means of + * &vb2_ops->buf_queue callback for processing. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb, + struct media_request *req); + +/** + * vb2_core_dqbuf() - Dequeue a buffer to the userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @pindex: pointer to the buffer index. May be NULL + * @pb: buffer structure passed from userspace to + * v4l2_ioctl_ops->vidioc_dqbuf handler in driver. 
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no + * buffers ready for dequeuing are present. Normally the driver + * would be passing (file->f_flags & O_NONBLOCK) here. + * + * Videobuf2 core helper to implement VIDIOC_DQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) calls buf_finish callback in the driver (if provided), in which + * driver can perform any additional operations that may be required before + * returning the buffer to userspace, such as cache sync, + * #) the buffer struct members are filled with relevant information for + * the userspace. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, + bool nonblocking); + +/** + * vb2_core_streamon() - Implements VB2 stream ON logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMON() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_streamon(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_streamoff() - Implements VB2 stream OFF logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMOFF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_expbuf() - Export a buffer as a file descriptor. 
+ * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @fd: pointer to the file descriptor associated with DMABUF + * (set by driver). + * @type: buffer type. + * @vb: pointer to struct &vb2_buffer. + * @plane: index of the plane to be exported, 0 for single plane queues + * @flags: file flags for newly created file, as defined at + * include/uapi/asm-generic/fcntl.h. + * Currently, only the %O_CLOEXEC flag + * is supported; refer to the manual of the open() syscall for more details. + * + * + * Videobuf2 core helper to implement VIDIOC_EXPBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, + struct vb2_buffer *vb, unsigned int plane, unsigned int flags); + +/** + * vb2_core_queue_init() - initialize a videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * This structure should be allocated in driver + * + * The &vb2_queue structure should be allocated by the driver. The driver is + * responsible for clearing its content and setting initial values for some + * required entries before calling this function. + * + * .. note:: + * + * The following fields at @q should be set before calling this function: + * &vb2_queue->ops, &vb2_queue->mem_ops, &vb2_queue->type. + */ +int vb2_core_queue_init(struct vb2_queue *q); + +/** + * vb2_core_queue_release() - stop streaming, release the queue and free memory + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function stops streaming and performs necessary clean ups, including + * freeing video buffer memory. The driver is responsible for freeing + * the &struct vb2_queue itself. + */ +void vb2_core_queue_release(struct vb2_queue *q); + +/** + * vb2_queue_error() - signal a fatal error on the queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * + * Flag that a fatal unrecoverable error has occurred and wake up all processes + * waiting on the queue. Polling will now set %EPOLLERR and queuing and dequeuing + * buffers will return %-EIO. + * + * The error flag will be cleared when canceling the queue, either from + * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this + * function before starting the stream, otherwise the error flag will remain set + * until the queue is released when closing the device node. + */ +void vb2_queue_error(struct vb2_queue *q); + +/** + * vb2_mmap() - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * Should be called from mmap file operation handler of a driver. + * This function maps one plane of one of the available video buffers to + * userspace. To map whole video memory allocated on reqbufs, this function + * has to be called once per each plane per each buffer previously allocated. + * + * When the userspace application calls mmap, it passes to it an offset returned + * to it earlier by the means of &v4l2_ioctl_ops->vidioc_querybuf handler. + * That offset acts as a "cookie", which is then used to identify the plane + * to be mapped. + * + * This function finds a plane with a matching offset and a mapping is performed + * by the means of a provided memory operation. + * + * The return values from this function are intended to be directly returned + * from the mmap handler in driver. + */ +int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +/** + * vb2_get_unmapped_area - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @addr: memory address. + * @len: buffer size. + * @pgoff: page offset. + * @flags: memory flags. 
+ * + * This function is used in noMMU platforms to propose address mapping + * for a given buffer. It's intended to be used as a handler for the + * &file_operations->get_unmapped_area operation. + * + * The mmap() syscall routines will call this function + * to get a proposed address for the mapping, when ``!CONFIG_MMU``. + */ +unsigned long vb2_get_unmapped_area(struct vb2_queue *q, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#endif + +/** + * vb2_core_poll() - implements poll syscall() logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: &struct file argument passed to the poll + * file operation handler. + * @wait: &poll_table wait argument passed to the poll + * file operation handler. + * + * This function implements poll file operation handler for a driver. + * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will + * be informed that the file descriptor of a video device is available for + * reading. + * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor + * will be reported as available for writing. + * + * The return values from this function are intended to be directly returned + * from poll handler in driver. + */ +__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, + poll_table *wait); + +/** + * vb2_read() - implements read() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @data: pointer to target userspace buffer + * @count: number of bytes to read + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking) + */ +size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblock); +/** + * vb2_write() - implements write() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * @data: pointed to target userspace buffer + * @count: number of bytes to write + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) + */ +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, + loff_t *ppos, int nonblock); + +/** + * typedef vb2_thread_fnc - callback function for use with vb2_thread. + * + * @vb: pointer to struct &vb2_buffer. + * @priv: pointer to a private data. + * + * This is called whenever a buffer is dequeued in the thread. + */ +typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); + +/** + * vb2_thread_start() - start a thread for the given queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @fnc: &vb2_thread_fnc callback function. + * @priv: priv pointer passed to the callback function. + * @thread_name:the name of the thread. This will be prefixed with "vb2-". + * + * This starts a thread that will queue and dequeue until an error occurs + * or vb2_thread_stop() is called. + * + * .. attention:: + * + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailing list first. + */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, + const char *thread_name); + +/** + * vb2_thread_stop() - stop the thread for the given queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +int vb2_thread_stop(struct vb2_queue *q); + +/** + * vb2_is_streaming() - return streaming status of the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline bool vb2_is_streaming(struct vb2_queue *q) +{ + return q->streaming; +} + +/** + * vb2_fileio_is_active() - return true if fileio is active. + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * + * This returns true if read() or write() is used to stream the data + * as opposed to stream I/O. This is almost never an important distinction, + * except in rare cases. One such case is that using read() or write() to + * stream a format using %V4L2_FIELD_ALTERNATE is not allowed since there + * is no way you can pass the field information of each buffer to/from + * userspace. A driver that supports this field format should check for + * this in the &vb2_ops->queue_setup op and reject it if this function returns + * true. + */ +static inline bool vb2_fileio_is_active(struct vb2_queue *q) +{ + return q->fileio; +} + +/** + * vb2_get_num_buffers() - get the number of buffer in a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q) +{ + if (q->bufs_bitmap) + return bitmap_weight(q->bufs_bitmap, q->max_num_buffers); + + return 0; +} + +/** + * vb2_is_busy() - return busy status of the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function checks if queue has any buffers allocated. + */ +static inline bool vb2_is_busy(struct vb2_queue *q) +{ + return !!q->is_busy; +} + +/** + * vb2_get_drv_priv() - return driver private data associated with the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline void *vb2_get_drv_priv(struct vb2_queue *q) +{ + return q->drv_priv; +} + +/** + * vb2_set_plane_payload() - set bytesused for the plane @plane_no. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + * @size: payload in bytes. + */ +static inline void vb2_set_plane_payload(struct vb2_buffer *vb, + unsigned int plane_no, unsigned long size) +{ + /* + * size must never be larger than the buffer length, so + * warn and clamp to the buffer length if that's the case. 
+ */ + if (plane_no < vb->num_planes) { + if (WARN_ON_ONCE(size > vb->planes[plane_no].length)) + size = vb->planes[plane_no].length; + vb->planes[plane_no].bytesused = size; + } +} + +/** + * vb2_get_plane_payload() - get bytesused for the plane plane_no + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + */ +static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, + unsigned int plane_no) +{ + if (plane_no < vb->num_planes) + return vb->planes[plane_no].bytesused; + return 0; +} + +/** + * vb2_plane_size() - return plane size in bytes. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which size should be returned. + */ +static inline unsigned long +vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) +{ + if (plane_no < vb->num_planes) + return vb->planes[plane_no].length; + return 0; +} + +/** + * vb2_start_streaming_called() - return streaming status of driver. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline bool vb2_start_streaming_called(struct vb2_queue *q) +{ + return q->start_streaming_called; +} + +/** + * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) +{ + q->last_buffer_dequeued = false; +} + +/** + * vb2_get_buffer() - get a buffer from a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @index: buffer index + * + * This function obtains a buffer from a queue, by its index. + * Keep in mind that there is no refcounting involved in this + * operation, so the buffer lifetime should be taken into + * consideration. 
+ */ +static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, + unsigned int index) +{ + if (!q->bufs) + return NULL; + + if (index >= q->max_num_buffers) + return NULL; + + if (test_bit(index, q->bufs_bitmap)) + return q->bufs[index]; + return NULL; +} + +/* + * The following functions are not part of the vb2 core API, but are useful + * functions for videobuf2-*. + */ + +/** + * vb2_buffer_in_use() - return true if the buffer is in use and + * the queue cannot be freed (by the means of VIDIOC_REQBUFS(0)) call. + * + * @vb: buffer for which plane size should be returned. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); + +/** + * vb2_verify_memory_type() - Check whether the memory type and buffer type + * passed to a buffer operation are compatible with the queue. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory model, as defined by enum &vb2_memory. + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the types defined on enum &v4l2_buf_type. + */ +int vb2_verify_memory_type(struct vb2_queue *q, + enum vb2_memory memory, unsigned int type); + +/** + * vb2_request_object_is_buffer() - return true if the object is a buffer + * + * @obj: the request object. + */ +bool vb2_request_object_is_buffer(struct media_request_object *obj); + +/** + * vb2_request_buffer_cnt() - return the number of buffers in the request + * + * @req: the request. 
+ */ +unsigned int vb2_request_buffer_cnt(struct media_request *req); + +#endif /* _MEDIA_VIDEOBUF2_CORE_H */ diff --git a/6.12.0/include-overrides/media/videobuf2-dma-contig.h b/6.12.0/include-overrides/media/videobuf2-dma-contig.h new file mode 100644 index 0000000..5be313c --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-dma-contig.h @@ -0,0 +1,32 @@ +/* + * videobuf2-dma-contig.h - DMA contig memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _MEDIA_VIDEOBUF2_DMA_CONTIG_H +#define _MEDIA_VIDEOBUF2_DMA_CONTIG_H + +#include <linux/dma-mapping.h> +#include <media/videobuf2-core.h> + +static inline dma_addr_t +vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) +{ + dma_addr_t *addr = vb2_plane_cookie(vb, plane_no); + + return *addr; +} + +int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size); +static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { } + +extern const struct vb2_mem_ops vb2_dma_contig_memops; + +#endif diff --git a/6.12.0/include-overrides/media/videobuf2-dma-sg.h b/6.12.0/include-overrides/media/videobuf2-dma-sg.h new file mode 100644 index 0000000..f28fcb0 --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-dma-sg.h @@ -0,0 +1,26 @@ +/* + * videobuf2-dma-sg.h - DMA scatter/gather memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _MEDIA_VIDEOBUF2_DMA_SG_H +#define _MEDIA_VIDEOBUF2_DMA_SG_H + +#include + +static inline struct sg_table *vb2_dma_sg_plane_desc( + struct vb2_buffer *vb, unsigned int plane_no) +{ + return (struct sg_table *)vb2_plane_cookie(vb, plane_no); +} + +extern const struct vb2_mem_ops vb2_dma_sg_memops; + +#endif diff --git a/6.12.0/include-overrides/media/videobuf2-dvb.h b/6.12.0/include-overrides/media/videobuf2-dvb.h new file mode 100644 index 0000000..2d577b9 --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-dvb.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VIDEOBUF2_DVB_H_ +#define _VIDEOBUF2_DVB_H_ + +#include +#include +#include +#include +#include +#include + +/* We don't actually need to include media-device.h here */ +struct media_device; + +/* + * TODO: This header file should be replaced with videobuf2-core.h + * Currently, vb2_thread is not a stuff of videobuf2-core, + * since vb2_thread has many dependencies on videobuf2-v4l2. + */ + +struct vb2_dvb { + /* filling that the job of the driver */ + char *name; + struct dvb_frontend *frontend; + struct vb2_queue dvbq; + + /* vb2-dvb state info */ + struct mutex lock; + int nfeeds; + + /* vb2_dvb_(un)register manages this */ + struct dvb_demux demux; + struct dmxdev dmxdev; + struct dmx_frontend fe_hw; + struct dmx_frontend fe_mem; + struct dvb_net net; +}; + +struct vb2_dvb_frontend { + struct list_head felist; + int id; + struct vb2_dvb dvb; +}; + +struct vb2_dvb_frontends { + struct list_head felist; + struct mutex lock; + struct dvb_adapter adapter; + int active_fe_id; /* Indicates which frontend in the felist is in use */ + int gate; /* Frontend with gate control 0=!MFE,1=fe0,2=fe1 etc */ +}; + +int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, + struct module *module, + void *adapter_priv, + struct device *device, + struct media_device *mdev, + short *adapter_nr, + int mfe_shared); + +void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f); + +struct 
vb2_dvb_frontend *vb2_dvb_alloc_frontend(struct vb2_dvb_frontends *f, int id); +void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f); + +struct vb2_dvb_frontend *vb2_dvb_get_frontend(struct vb2_dvb_frontends *f, int id); +int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f, struct dvb_frontend *p); + +#endif /* _VIDEOBUF2_DVB_H_ */ diff --git a/6.12.0/include-overrides/media/videobuf2-memops.h b/6.12.0/include-overrides/media/videobuf2-memops.h new file mode 100644 index 0000000..4b5b84f --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-memops.h @@ -0,0 +1,41 @@ +/* + * videobuf2-memops.h - generic memory handling routines for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _MEDIA_VIDEOBUF2_MEMOPS_H +#define _MEDIA_VIDEOBUF2_MEMOPS_H + +#include +#include +#include + +/** + * struct vb2_vmarea_handler - common vma refcount tracking handler. + * + * @refcount: pointer to &refcount_t entry in the buffer. + * @put: callback to function that decreases buffer refcount. + * @arg: argument for @put callback. 
+ */ +struct vb2_vmarea_handler { + refcount_t *refcount; + void (*put)(void *arg); + void *arg; +}; + +extern const struct vm_operations_struct vb2_common_vm_ops; + +struct frame_vector *vb2_create_framevec(unsigned long start, + unsigned long length, + bool write); +void vb2_destroy_framevec(struct frame_vector *vec); + +#endif diff --git a/6.12.0/include-overrides/media/videobuf2-v4l2.h b/6.12.0/include-overrides/media/videobuf2-v4l2.h new file mode 100644 index 0000000..77ce823 --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-v4l2.h @@ -0,0 +1,392 @@ +/* + * videobuf2-v4l2.h - V4L2 driver helper framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_V4L2_H +#define _MEDIA_VIDEOBUF2_V4L2_H + +#include +#include + +#if VB2_MAX_FRAME != VIDEO_MAX_FRAME +#error VB2_MAX_FRAME != VIDEO_MAX_FRAME +#endif + +#if VB2_MAX_PLANES != VIDEO_MAX_PLANES +#error VB2_MAX_PLANES != VIDEO_MAX_PLANES +#endif + +struct video_device; + +/** + * struct vb2_v4l2_buffer - video buffer information for v4l2. + * + * @vb2_buf: embedded struct &vb2_buffer. + * @flags: buffer informational flags. + * @field: field order of the image in the buffer, as defined by + * &enum v4l2_field. + * @timecode: frame timecode. + * @sequence: sequence count of this frame. + * @request_fd: the request_fd associated with this buffer + * @is_held: if true, then this capture buffer was held + * @planes: plane information (userptr/fd, length, bytesused, data_offset). + * + * Should contain enough information to be able to cover all the fields + * of &struct v4l2_buffer at ``videodev2.h``. 
+ */ +struct vb2_v4l2_buffer { + struct vb2_buffer vb2_buf; + + __u32 flags; + __u32 field; + struct v4l2_timecode timecode; + __u32 sequence; + __s32 request_fd; + bool is_held; + struct vb2_plane planes[VB2_MAX_PLANES]; +}; + +/* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */ +#define VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 0) + +/* + * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer * + */ +#define to_vb2_v4l2_buffer(vb) \ + container_of(vb, struct vb2_v4l2_buffer, vb2_buf) + +/** + * vb2_find_buffer() - Find a buffer with given timestamp + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @timestamp: the timestamp to find. + * + * Returns the buffer with the given @timestamp, or NULL if not found. + */ +struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp); + +int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); + +/** + * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies + * the memory and type values. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @req: &struct v4l2_requestbuffers passed from userspace to + * &v4l2_ioctl_ops->vidioc_reqbufs handler in driver. + */ +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); + +/** + * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies + * the memory and type values. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @create: creation parameters, passed from userspace to + * &v4l2_ioctl_ops->vidioc_create_bufs handler in driver + */ +int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); + +/** + * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @mdev: pointer to &struct media_device, may be NULL. 
+ * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_prepare_buf ioctl handler + * of a driver. + * + * This function: + * + * #) verifies the passed buffer, + * #) calls &vb2_ops->buf_prepare callback in the driver (if provided), + * in which driver-specific buffer initialization can be performed. + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, + * then bind the prepared buffer to the request. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + */ +int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); + +/** + * vb2_qbuf() - Queue a buffer from userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @mdev: pointer to &struct media_device, may be NULL. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_qbuf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_qbuf handler of a driver. + * + * This function: + * + * #) verifies the passed buffer; + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, + * then bind the buffer to the request. + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; + * #) if streaming is on, queues the buffer in driver by the means of + * &vb2_ops->buf_queue callback for processing. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver. + */ +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); + +/** + * vb2_expbuf() - Export a buffer as a file descriptor + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * @eb: export buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_expbuf handler in driver + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_expbuf handler in driver. + */ +int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); + +/** + * vb2_dqbuf() - Dequeue a buffer to the userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver + * @nonblocking: if true, this call will not sleep waiting for a buffer if no + * buffers ready for dequeuing are present. Normally the driver + * would be passing (&file->f_flags & %O_NONBLOCK) here + * + * Should be called from &v4l2_ioctl_ops->vidioc_dqbuf ioctl handler + * of a driver. + * + * This function: + * + * #) verifies the passed buffer; + * #) calls &vb2_ops->buf_finish callback in the driver (if provided), in which + * driver can perform any additional operations that may be required before + * returning the buffer to userspace, such as cache sync; + * #) the buffer struct members are filled with relevant information for + * the userspace. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_dqbuf handler in driver. + */ +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); + +/** + * vb2_streamon - start streaming + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamon handler, + * as defined by &enum v4l2_buf_type. + * + * Should be called from &v4l2_ioctl_ops->vidioc_streamon handler of a driver. 
+ * + * This function: + * + * 1) verifies current state + * 2) passes any previously queued buffers to the driver and starts streaming + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_streamon handler in the driver. + */ +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); + +/** + * vb2_streamoff - stop streaming + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamoff handler + * + * Should be called from vidioc_streamoff handler of a driver. + * + * This function: + * + * #) verifies current state, + * #) stop streaming and dequeues any queued buffers, including those previously + * passed to the driver (after waiting for the driver to finish). + * + * This call can be used for pausing playback. + * The return values from this function are intended to be directly returned + * from vidioc_streamoff handler in the driver + */ +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); + +/** + * vb2_queue_init() - initialize a videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * The vb2_queue structure should be allocated by the driver. The driver is + * responsible of clearing it's content and setting initial values for some + * required entries before calling this function. + * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer + * to the struct vb2_queue description in include/media/videobuf2-core.h + * for more information. + */ +int __must_check vb2_queue_init(struct vb2_queue *q); + +/** + * vb2_queue_init_name() - initialize a videobuf2 queue with a name + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @name: the queue name + * + * This function initializes the vb2_queue exactly like vb2_queue_init(), + * and additionally sets the queue name. 
The queue name is used for logging + * purpose, and should uniquely identify the queue within the context of the + * device it belongs to. This is useful to attribute kernel log messages to the + * right queue for m2m devices or other devices that handle multiple queues. + */ +int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name); + +/** + * vb2_queue_release() - stop streaming, release the queue and free memory + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function stops streaming and performs necessary clean ups, including + * freeing video buffer memory. The driver is responsible for freeing + * the vb2_queue structure itself. + */ +void vb2_queue_release(struct vb2_queue *q); + +/** + * vb2_queue_change_type() - change the type of an inactive vb2_queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*) + * + * This function changes the type of the vb2_queue. This is only possible + * if the queue is not busy (i.e. no buffers have been allocated). + * + * vb2_queue_change_type() can be used to support multiple buffer types using + * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and + * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type() + * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus + * "lock" the buffer type until the buffers have been released. + */ +int vb2_queue_change_type(struct vb2_queue *q, unsigned int type); + +/** + * vb2_poll() - implements poll userspace operation + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: file argument passed to the poll file operation handler + * @wait: wait argument passed to the poll file operation handler + * + * This function implements poll file operation handler for a driver. 
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will + * be informed that the file descriptor of a video device is available for + * reading. + * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor + * will be reported as available for writing. + * + * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any + * pending events. + * + * The return values from this function are intended to be directly returned + * from poll handler in driver. + */ +__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); + +/* + * The following functions are not part of the vb2 core API, but are simple + * helper functions that you can use in your struct v4l2_file_operations, + * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock + * or video_device->lock is set, and they will set and test the queue owner + * (vb2_queue->owner) to check if the calling filehandle is permitted to do the + * queuing operation. + */ + +/** + * vb2_queue_is_busy() - check if the queue is busy + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: file through which the vb2 queue access is performed + * + * The queue is considered busy if it has an owner and the owner is not the + * @file. + * + * Queue ownership is acquired and checked by some of the v4l2_ioctl_ops helpers + * below. Drivers can also use this function directly when they need to + * open-code ioctl handlers, for instance to add additional checks between the + * queue ownership test and the call to the corresponding vb2 operation. 
+ */ +static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file) +{ + return q->owner && q->owner != file->private_data; +} + +/* struct v4l2_ioctl_ops helpers */ + +int vb2_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p); +int vb2_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p); +int vb2_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *p); +int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *p); +int vb2_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *p); + +/* struct v4l2_file_operations helpers */ + +int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); +int vb2_fop_release(struct file *file); +int _vb2_fop_release(struct file *file, struct mutex *lock); +ssize_t vb2_fop_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); +ssize_t vb2_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); +__poll_t vb2_fop_poll(struct file *file, poll_table *wait); +#ifndef CONFIG_MMU +unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); +#endif + +/** + * vb2_video_unregister_device - unregister the video device and release queue + * + * @vdev: pointer to &struct video_device + * + * If the driver uses vb2_fop_release()/_vb2_fop_release(), then it should use + * vb2_video_unregister_device() instead of video_unregister_device(). 
+ * + * This function will call video_unregister_device() and then release the + * vb2_queue if streaming is in progress. This will stop streaming and + * this will simplify the unbind sequence since after this call all subdevs + * will have stopped streaming as well. + */ +void vb2_video_unregister_device(struct video_device *vdev); + +/** + * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue + * + * @vq: pointer to &struct vb2_queue + * + * ..note:: only use if vq->lock is non-NULL. + */ +void vb2_ops_wait_prepare(struct vb2_queue *vq); + +/** + * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue + * + * @vq: pointer to &struct vb2_queue + * + * ..note:: only use if vq->lock is non-NULL. + */ +void vb2_ops_wait_finish(struct vb2_queue *vq); + +struct media_request; +int vb2_request_validate(struct media_request *req); +void vb2_request_queue(struct media_request *req); + +#endif /* _MEDIA_VIDEOBUF2_V4L2_H */ diff --git a/6.12.0/include-overrides/media/videobuf2-vmalloc.h b/6.12.0/include-overrides/media/videobuf2-vmalloc.h new file mode 100644 index 0000000..a63fe66 --- /dev/null +++ b/6.12.0/include-overrides/media/videobuf2-vmalloc.h @@ -0,0 +1,20 @@ +/* + * videobuf2-vmalloc.h - vmalloc memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _MEDIA_VIDEOBUF2_VMALLOC_H +#define _MEDIA_VIDEOBUF2_VMALLOC_H + +#include + +extern const struct vb2_mem_ops vb2_vmalloc_memops; + +#endif diff --git a/6.12.0/include-overrides/media/vsp1.h b/6.12.0/include-overrides/media/vsp1.h new file mode 100644 index 0000000..48f4a50 --- /dev/null +++ b/6.12.0/include-overrides/media/vsp1.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * vsp1.h -- R-Car VSP1 API + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ +#ifndef __MEDIA_VSP1_H__ +#define __MEDIA_VSP1_H__ + +#include +#include +#include + +struct device; + +int vsp1_du_init(struct device *dev); + +#define VSP1_DU_STATUS_COMPLETE BIT(0) +#define VSP1_DU_STATUS_WRITEBACK BIT(1) + +/** + * struct vsp1_du_lif_config - VSP LIF configuration + * @width: output frame width + * @height: output frame height + * @interlaced: true for interlaced pipelines + * @callback: frame completion callback function (optional). When a callback + * is provided, the VSP driver guarantees that it will be called once + * and only once for each vsp1_du_atomic_flush() call. 
+ * @callback_data: data to be passed to the frame completion callback + */ +struct vsp1_du_lif_config { + unsigned int width; + unsigned int height; + bool interlaced; + + void (*callback)(void *data, unsigned int status, u32 crc); + void *callback_data; +}; + +int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_lif_config *cfg); + +/** + * struct vsp1_du_atomic_config - VSP atomic configuration parameters + * @pixelformat: plane pixel format (V4L2 4CC) + * @pitch: line pitch in bytes for the first plane + * @mem: DMA memory address for each plane of the frame buffer + * @src: source rectangle in the frame buffer (integer coordinates) + * @dst: destination rectangle on the display (integer coordinates) + * @alpha: alpha value (0: fully transparent, 255: fully opaque) + * @zpos: Z position of the plane (from 0 to number of planes minus 1) + * @premult: true for premultiplied alpha + */ +struct vsp1_du_atomic_config { + u32 pixelformat; + unsigned int pitch; + dma_addr_t mem[3]; + struct v4l2_rect src; + struct v4l2_rect dst; + unsigned int alpha; + unsigned int zpos; + bool premult; +}; + +/** + * enum vsp1_du_crc_source - Source used for CRC calculation + * @VSP1_DU_CRC_NONE: CRC calculation disabled + * @VSP1_DU_CRC_PLANE: Perform CRC calculation on an input plane + * @VSP1_DU_CRC_OUTPUT: Perform CRC calculation on the composed output + */ +enum vsp1_du_crc_source { + VSP1_DU_CRC_NONE, + VSP1_DU_CRC_PLANE, + VSP1_DU_CRC_OUTPUT, +}; + +/** + * struct vsp1_du_crc_config - VSP CRC computation configuration parameters + * @source: source for CRC calculation + * @index: index of the CRC source plane (when source is set to plane) + */ +struct vsp1_du_crc_config { + enum vsp1_du_crc_source source; + unsigned int index; +}; + +/** + * struct vsp1_du_writeback_config - VSP writeback configuration parameters + * @pixelformat: plane pixel format (V4L2 4CC) + * @pitch: line pitch in bytes for the first plane + * @mem: DMA memory address 
for each plane of the frame buffer + */ +struct vsp1_du_writeback_config { + u32 pixelformat; + unsigned int pitch; + dma_addr_t mem[3]; +}; + +/** + * struct vsp1_du_atomic_pipe_config - VSP atomic pipe configuration parameters + * @crc: CRC computation configuration + * @writeback: writeback configuration + */ +struct vsp1_du_atomic_pipe_config { + struct vsp1_du_crc_config crc; + struct vsp1_du_writeback_config writeback; +}; + +void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index); +int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, + unsigned int rpf, + const struct vsp1_du_atomic_config *cfg); +void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_atomic_pipe_config *cfg); +int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt); +void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt); + +#endif /* __MEDIA_VSP1_H__ */ diff --git a/dkms.conf b/dkms.conf index 67e67f8..e0101f7 100644 --- a/dkms.conf +++ b/dkms.conf @@ -1,6 +1,6 @@ PACKAGE_NAME="intel-mipi-gmsl-dkms" PACKAGE_VERSION="#MODULE_VERSION#" -BUILD_EXCLUSIVE_KERNEL="^(6\.(1[7])\.)" +BUILD_EXCLUSIVE_KERNEL="^(6\.(1[2])\.)" VERSION_SUFFIX="${PACKAGE_VERSION}" KBASE=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build @@ -21,10 +21,10 @@ MAKE="make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX} make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core videodev.ko DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' &&\ mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/ && \ if [ -d ${KBASE}/drivers/media/pci/intel/ipu6 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu6 ; fi &&\ - make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 intel-ipu6-isys.ko intel-ipu6.ko DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ - mkdir -p 
${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/drivers/media/pci/intel/ipu6/" + make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ + mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/" -MAKE_MATCH[1]="^(6.1[7])" +MAKE_MATCH[1]="^(6.1[2])" CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && \ make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean clean && \ @@ -32,13 +32,62 @@ CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && AUTOINSTALL="yes" -#Patches for Kernel 6.17 -PATCH[0]="0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch" -PATCH[1]="0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" -PATCH[2]="0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch" -PATCH_MATCH[0]="^(6.1[7])" -PATCH_MATCH[1]="^(6.1[7])" -PATCH_MATCH[2]="^(6.1[7])" +# Add non-upstreamed ISYS patches for Kernel 6.12 +PATCH[0]="0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch" +PATCH[1]="0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch" +PATCH[2]="0003-media-intel-ipu6-remove-buttress-ish-structure.patch" +PATCH[3]="0004-upstream-Use-module-parameter-to-set-isys-freq.patch" +PATCH[4]="0005-media-pci-Enable-ISYS-reset.patch" +PATCH[5]="0006-media-intel-ipu6-remove-buttress-ish-structure.patch" +PATCH[6]="0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch" 
+PATCH[7]="0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch" +PATCH[8]="0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch" +PATCH[9]="0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch" +PATCH[10]="0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch" +PATCH[11]="0012-media-pci-Set-the-correct-SOF-for-different-stream.patch" +PATCH[12]="0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch" +PATCH[13]="0014-media-intel-ipu6-fix-coverity-issue.patch" +PATCH[14]="0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch" +PATCH[15]="0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch" +PATCH[16]="0017-media-pci-add-missing-if-for-PDATA.patch" +PATCH[17]="0018-media-pci-refine-PDATA-related-config.patch" +PATCH[18]="0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch" +PATCH_MATCH[0]="^(6.1[2])" +PATCH_MATCH[1]="^(6.1[2])" +PATCH_MATCH[2]="^(6.1[2])" +PATCH_MATCH[3]="^(6.1[2])" +PATCH_MATCH[4]="^(6.1[2])" +PATCH_MATCH[5]="^(6.1[2])" +PATCH_MATCH[6]="^(6.1[2])" +PATCH_MATCH[7]="^(6.1[2])" +PATCH_MATCH[8]="^(6.1[2])" +PATCH_MATCH[9]="^(6.1[2])" +PATCH_MATCH[10]="^(6.1[2])" +PATCH_MATCH[11]="^(6.1[2])" +PATCH_MATCH[12]="^(6.1[2])" +PATCH_MATCH[13]="^(6.1[2])" +PATCH_MATCH[14]="^(6.1[2])" +PATCH_MATCH[15]="^(6.1[2])" +PATCH_MATCH[16]="^(6.1[2])" +PATCH_MATCH[17]="^(6.1[2])" +PATCH_MATCH[18]="^(6.1[2])" +# Add out-of-tree PSYS codebase +PATCH[19]="0020-upstream-Use-module-parameter-to-set-psys-freq.patch" +PATCH[20]="0021-media-pci-update-IPU6-PSYS-driver.patch" +PATCH[21]="0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch" +PATCH_MATCH[19]="^(6.1[2])" +PATCH_MATCH[20]="^(6.1[2])" +PATCH_MATCH[21]="^(6.1[2])" +# Make IPU V4L2-CORE out-of-tree +PATCH[22]="0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch" +PATCH[23]="0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[22]="^(6.1[2])" +PATCH_MATCH[23]="^(6.1[2])" +# Make IPU ISYS+PSYS out-of-tree 
+PATCH[24]="0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch" +PATCH[25]="0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" +PATCH_MATCH[24]="^(6.1[2])" +PATCH_MATCH[25]="^(6.1[2])" i=0 @@ -58,6 +107,11 @@ BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu6" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no +BUILT_MODULE_NAME[$((++i))]="intel-ipu6-psys" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu6" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + BUILT_MODULE_NAME[$((++i))]="ipu-acpi" BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" DEST_MODULE_LOCATION[$i]="/updates" diff --git a/patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch b/patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch new file mode 100644 index 0000000..d2b21c4 --- /dev/null +++ b/patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch @@ -0,0 +1,49 @@ +From 61bd4d3d1182afbb7e658aef7c1e66d4f0c91205 Mon Sep 17 00:00:00 2001 +From: Stanislaw Gruszka +Date: Mon, 12 Jan 2026 05:06:31 -0700 +Subject: [PATCH 01/28] media: intel/ipu6: Fix dma mask for non-secure mode + +commit 0209916ebe2475079ce6d8dc4114afbc0ccad1c2 upstream. + +We use dma_get_mask() of auxdev device for calculate iova pfn limit. +This is always 32 bit mask as we do not initialize the mask (and we can +not do so, since dev->dev_mask is NULL anyways for auxdev). + +Since we need 31 bit mask for non-secure mode use mmu_info->aperture_end +which is properly initialized to correct mask for both modes. 
+ +Fixes: daabc5c64703 ("media: ipu6: not override the dma_ops of device in driver") +Cc: stable@vger.kernel.org +Signed-off-by: Stanislaw Gruszka +Signed-off-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Greg Kroah-Hartman +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c +index b71f66b..92d5136 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c +@@ -172,7 +172,7 @@ void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, + count = PHYS_PFN(size); + + iova = alloc_iova(&mmu->dmap->iovad, count, +- PHYS_PFN(dma_get_mask(dev)), 0); ++ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); + if (!iova) + goto out_kfree; + +@@ -398,7 +398,7 @@ int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + nents, npages); + + iova = alloc_iova(&mmu->dmap->iovad, npages, +- PHYS_PFN(dma_get_mask(dev)), 0); ++ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); + if (!iova) + return 0; + +-- +2.43.0 + diff --git a/patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch b/patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch new file mode 100644 index 0000000..5db8f87 --- /dev/null +++ b/patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch @@ -0,0 +1,45 @@ +From 94b81f0eec26ba7ee16abb58f895187f7b1b1cdf Mon Sep 17 00:00:00 2001 +From: Hao Yao +Date: Mon, 12 Jan 2026 05:00:33 -0700 +Subject: [PATCH 02/28] media: ipu6: Remove workaround for Meteor Lake ES2 + +commit d471fb06b21ae54bf76464731ae1dcb26ef1ca68 upstream. + +There was a hardware bug which need IPU6 driver to disable the ATS. This +workaround is not needed anymore as the bug was fixed in hardware level. + +Additionally, Arrow Lake has the same IPU6 PCI ID and x86 stepping but +does not have the bug. 
Removing the Meteor Lake workaround is also +required for the driver to function on Arrow Lake. + +Signed-off-by: Hao Yao +Reviewed-by: Stanislaw Gruszka +Fixes: 25fedc021985 ("media: intel/ipu6: add Intel IPU6 PCI device driver") +Cc: stable@vger.kernel.org +[Sakari Ailus: Added tags and explanation of what is fixed.] +Signed-off-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Greg Kroah-Hartman +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 91718ea..1c5c38a 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -463,11 +463,6 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) + { + int ret; + +- /* disable IPU6 PCI ATS on mtl ES2 */ +- if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && +- pci_ats_supported(dev)) +- pci_disable_ats(dev); +- + /* No PCI msi capability for IPU6EP */ + if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { + /* likely do nothing as msi not enabled by default */ +-- +2.43.0 + diff --git a/patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch b/patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch new file mode 100644 index 0000000..c1d5de9 --- /dev/null +++ b/patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch @@ -0,0 +1,125 @@ +From a611ad3710a6c3d65051b5fc0d84fde15d11c4c1 Mon Sep 17 00:00:00 2001 +From: Stanislaw Gruszka +Date: Mon, 12 Jan 2026 05:25:13 -0700 +Subject: [PATCH 03/28] media: intel/ipu6: remove buttress ish structure + +The buttress ipc ish structure is not effectively used on IPU6 - data +is nullified on init. Remove the ish structure and handing of related +interrupts to cleanup the code. 
+ +Signed-off-by: Stanislaw Gruszka +Signed-off-by: Sakari Ailus +Signed-off-by: Hans Verkuil +--- + .../media/pci/intel/ipu6/ipu6-buttress.c | 29 +++---------------- + 1 file changed, 4 insertions(+), 25 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index 1ee63ef..ffa6dff 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -215,20 +215,17 @@ static void ipu6_buttress_ipc_recv(struct ipu6_device *isp, + } + + static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp, +- enum ipu6_buttress_ipc_domain ipc_domain, + struct ipu6_ipc_buttress_bulk_msg *msgs, + u32 size) + { + unsigned long tx_timeout_jiffies, rx_timeout_jiffies; + unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; + struct ipu6_buttress *b = &isp->buttress; +- struct ipu6_buttress_ipc *ipc; ++ struct ipu6_buttress_ipc *ipc = &b->cse; + u32 val; + int ret; + int tout; + +- ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? 
&b->cse : &b->ish; +- + mutex_lock(&b->ipc_mutex); + + ret = ipu6_buttress_ipc_validity_open(isp, ipc); +@@ -306,7 +303,6 @@ out: + + static int + ipu6_buttress_ipc_send(struct ipu6_device *isp, +- enum ipu6_buttress_ipc_domain ipc_domain, + u32 ipc_msg, u32 size, bool require_resp, + u32 expected_resp) + { +@@ -317,7 +313,7 @@ ipu6_buttress_ipc_send(struct ipu6_device *isp, + .expected_resp = expected_resp, + }; + +- return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); ++ return ipu6_buttress_ipc_send_bulk(isp, &msg, 1); + } + + static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev) +@@ -386,25 +382,12 @@ irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr) + complete(&b->cse.recv_complete); + } + +- if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) { +- dev_dbg(&isp->pdev->dev, +- "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n"); +- ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data); +- complete(&b->ish.recv_complete); +- } +- + if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); + complete(&b->cse.send_complete); + } + +- if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) { +- dev_dbg(&isp->pdev->dev, +- "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); +- complete(&b->ish.send_complete); +- } +- + if (irq_status & BUTTRESS_ISR_SAI_VIOLATION && + ipu6_buttress_get_secure_mode(isp)) + dev_err(&isp->pdev->dev, +@@ -666,7 +649,7 @@ int ipu6_buttress_authenticate(struct ipu6_device *isp) + */ + dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); + +- ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE, ++ ret = ipu6_buttress_ipc_send(isp, + BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, + 1, true, + BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); +@@ -708,7 +691,7 @@ int ipu6_buttress_authenticate(struct ipu6_device *isp) + * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); +- ret = ipu6_buttress_ipc_send(isp, 
IPU6_BUTTRESS_IPC_CSE, ++ ret = ipu6_buttress_ipc_send(isp, + BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, + 1, true, + BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); +@@ -849,9 +832,7 @@ int ipu6_buttress_init(struct ipu6_device *isp) + mutex_init(&b->auth_mutex); + mutex_init(&b->cons_mutex); + mutex_init(&b->ipc_mutex); +- init_completion(&b->ish.send_complete); + init_completion(&b->cse.send_complete); +- init_completion(&b->ish.recv_complete); + init_completion(&b->cse.recv_complete); + + b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; +@@ -863,8 +844,6 @@ int ipu6_buttress_init(struct ipu6_device *isp) + b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; + b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; + +- /* no ISH on IPU6 */ +- memset(&b->ish, 0, sizeof(b->ish)); + INIT_LIST_HEAD(&b->constraints); + + isp->secure_mode = ipu6_buttress_get_secure_mode(isp); +-- +2.43.0 + diff --git a/patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch b/patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch new file mode 100644 index 0000000..cc8f7df --- /dev/null +++ b/patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch @@ -0,0 +1,45 @@ +From a3deded546b8560ada051dd94269c7afa24dd299 Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Mon, 12 Jan 2026 05:27:39 -0700 +Subject: [PATCH 04/28] upstream: Use module parameter to set isys freq + +Signed-off-by: Hongju Wang +Signed-off-by: Dongcheng Yan +Signed-off-by: zouxiaoh +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 1c5c38a..18f0dd2 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -33,6 +33,10 @@ + #include "ipu6-platform-isys-csi2-reg.h" + #include "ipu6-platform-regs.h" + ++static unsigned int isys_freq_override; ++module_param(isys_freq_override, uint, 0660); 
++MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); ++ + #define IPU6_PCI_BAR 0 + + struct ipu6_cell_program { +@@ -387,6 +391,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + pdata->base = base; + pdata->ipdata = ipdata; + ++ /* Override the isys freq */ ++ if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && ++ isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { ++ ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; ++ dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", ++ isys_freq_override); ++ } ++ + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_ISYS_NAME); + if (IS_ERR(isys_adev)) { +-- +2.43.0 + diff --git a/patches/0005-media-pci-Enable-ISYS-reset.patch b/patches/0005-media-pci-Enable-ISYS-reset.patch new file mode 100644 index 0000000..c2bb0b1 --- /dev/null +++ b/patches/0005-media-pci-Enable-ISYS-reset.patch @@ -0,0 +1,627 @@ +From 20f1e38fca4e59b674ae67763bdaca560d55004b Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Mon, 12 Jan 2026 05:30:26 -0700 +Subject: [PATCH 05/28] media: pci: Enable ISYS reset + +Signed-off-by: zouxiaoh +--- + 6.12.0/drivers/media/pci/intel/ipu6/Kconfig | 9 + + .../media/pci/intel/ipu6/ipu6-fw-isys.h | 3 + + .../media/pci/intel/ipu6/ipu6-isys-queue.c | 251 ++++++++++++++++++ + .../media/pci/intel/ipu6/ipu6-isys-queue.h | 3 + + .../media/pci/intel/ipu6/ipu6-isys-video.c | 82 +++++- + .../media/pci/intel/ipu6/ipu6-isys-video.h | 8 + + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 10 + + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 5 + + 8 files changed, 370 insertions(+), 1 deletion(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig +index cd1c545..f48b833 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig ++++ b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig +@@ -16,3 +16,12 @@ config VIDEO_INTEL_IPU6 + + To compile this driver, say Y here! 
It contains 2 modules - + intel_ipu6 and intel_ipu6_isys. ++ ++config VIDEO_INTEL_IPU6_ISYS_RESET ++ depends on VIDEO_INTEL_IPU6 ++ bool "Enable isys reset" ++ default n ++ help ++ Support ISYS reset in IPU6. ++ ++ To compile this driver, say Y here! +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +index b60f020..e6ee161 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +@@ -66,6 +66,9 @@ struct ipu6_isys; + #define IPU6_ISYS_OPEN_RETRY 2000 + #define IPU6_ISYS_CLOSE_RETRY 2000 + #define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#define IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET msecs_to_jiffies(200) ++#endif + + enum ipu6_fw_isys_resp_type { + IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +index bbb66b5..69fde84 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +@@ -10,6 +10,9 @@ + #include + #include + #include ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#include ++#endif + + #include + #include +@@ -21,6 +24,9 @@ + #include "ipu6-fw-isys.h" + #include "ipu6-isys.h" + #include "ipu6-isys-video.h" ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#include "ipu6-cpd.h" ++#endif + + static int ipu6_isys_buf_init(struct vb2_buffer *vb) + { +@@ -218,6 +224,16 @@ static int buffer_list_get(struct ipu6_isys_stream *stream, + ib = list_last_entry(&aq->incoming, + struct ipu6_isys_buffer, head); + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); ++ ++ if (av->skipframe) { ++ atomic_set(&ib->skipframe_flag, 1); ++ av->skipframe--; ++ } else { ++ atomic_set(&ib->skipframe_flag, 0); ++ } ++#endif + dev_dbg(dev, "buffer: %s: buffer 
%u\n", + ipu6_isys_queue_to_video(aq)->vdev.name, + ipu6_isys_buffer_to_vb2_buffer(ib)->index); +@@ -372,6 +388,19 @@ static void buf_queue(struct vb2_buffer *vb) + list_add(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_lock(&av->isys->reset_mutex); ++ while (av->isys->in_reset) { ++ mutex_unlock(&av->isys->reset_mutex); ++ dev_dbg(dev, "buffer: %s: wait for reset\n", av->vdev.name); ++ usleep_range(10000, 11000); ++ mutex_lock(&av->isys->reset_mutex); ++ } ++ mutex_unlock(&av->isys->reset_mutex); ++ /* ip may be cleared in ipu reset */ ++ stream = av->stream; ++ ++#endif + if (!media_pipe || !vb->vb2_queue->start_streaming_called) { + dev_dbg(dev, "media pipeline is not ready for %s\n", + av->vdev.name); +@@ -603,6 +632,9 @@ static int start_streaming(struct vb2_queue *q, unsigned int count) + + out: + mutex_unlock(&stream->mutex); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ av->start_streaming = 1; ++#endif + + return 0; + +@@ -624,12 +656,210 @@ out_return_buffers: + return ret; + } + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++static void reset_stop_streaming(struct ipu6_isys_video *av) ++{ ++ struct ipu6_isys_queue *aq = &av->aq; ++ struct ipu6_isys_stream *stream = av->stream; ++ ++ mutex_lock(&stream->mutex); ++ ++ ipu6_isys_update_stream_watermark(av, false); ++ ++ mutex_lock(&av->isys->stream_mutex); ++ if (stream->nr_streaming == stream->nr_queues && stream->streaming) ++ ipu6_isys_video_set_streaming(av, 0, NULL); ++ mutex_unlock(&av->isys->stream_mutex); ++ ++ stream->nr_streaming--; ++ list_del(&aq->node); ++ stream->streaming = 0; ++ mutex_unlock(&stream->mutex); ++ ++ ipu6_isys_stream_cleanup(av); ++ ++ return_buffers(aq, VB2_BUF_STATE_ERROR); ++ ++ ipu6_isys_fw_close(av->isys); ++} ++ ++static int reset_start_streaming(struct ipu6_isys_video *av) ++{ ++ struct ipu6_isys_queue *aq = &av->aq; ++ struct device *dev = &av->isys->adev->auxdev.dev; ++ unsigned long flags; 
++ int ret; ++ ++ dev_dbg(dev, "%s: start streaming\n", av->vdev.name); ++ ++ spin_lock_irqsave(&aq->lock, flags); ++ while (!list_empty(&aq->active)) { ++ struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, ++ struct ipu6_isys_buffer, head); ++ ++ list_del(&ib->head); ++ list_add_tail(&ib->head, &aq->incoming); ++ } ++ spin_unlock_irqrestore(&aq->lock, flags); ++ ++ av->skipframe = 1; ++ ret = start_streaming(&aq->vbq, 0); ++ if (ret) { ++ dev_dbg(dev, ++ "%s: start streaming failed in reset ! set av->start_streaming = 0.\n", ++ av->vdev.name); ++ av->start_streaming = 0; ++ } else ++ av->start_streaming = 1; ++ ++ return ret; ++} ++ ++static int ipu_isys_reset(struct ipu6_isys_video *self_av, ++ struct ipu6_isys_stream *self_stream) ++{ ++ struct ipu6_isys *isys = self_av->isys; ++ struct ipu6_bus_device *adev = isys->adev; ++ struct ipu6_device *isp = adev->isp; ++ struct ipu6_isys_video *av = NULL; ++ struct ipu6_isys_stream *stream = NULL; ++ struct device *dev = &adev->auxdev.dev; ++ int ret, i, j; ++ int has_streaming = 0; ++ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = ++ &isys->pdata->ipdata->csi2; ++ ++ ++ mutex_lock(&isys->reset_mutex); ++ if (isys->in_reset) { ++ mutex_unlock(&isys->reset_mutex); ++ return 0; ++ } ++ isys->in_reset = true; ++ ++ while (isys->in_stop_streaming) { ++ dev_dbg(dev, "isys reset: %s: wait for stop\n", ++ self_av->vdev.name); ++ mutex_unlock(&isys->reset_mutex); ++ usleep_range(10000, 11000); ++ mutex_lock(&isys->reset_mutex); ++ } ++ ++ mutex_unlock(&isys->reset_mutex); ++ ++ for (i = 0; i < csi2_pdata->nports; i++) { ++ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { ++ av = &isys->csi2[i].av[j]; ++ if (av == self_av) ++ continue; ++ ++ stream = av->stream; ++ if (!stream || stream == self_stream) ++ continue; ++ ++ if (!stream->streaming && !stream->nr_streaming) ++ continue; ++ ++ av->reset = true; ++ has_streaming = true; ++ reset_stop_streaming(av); ++ } ++ } ++ ++ if (!has_streaming) ++ goto 
end_of_reset; ++ ++ dev_dbg(dev, "ipu reset, power cycle\n"); ++ /* bus_pm_runtime_suspend() */ ++ /* isys_runtime_pm_suspend() */ ++ dev->bus->pm->runtime_suspend(dev); ++ ++ /* ipu_suspend */ ++ isp->pdev->driver->driver.pm->runtime_suspend(&isp->pdev->dev); ++ ++ /* ipu_runtime_resume */ ++ isp->pdev->driver->driver.pm->runtime_resume(&isp->pdev->dev); ++ ++ /* bus_pm_runtime_resume() */ ++ /* isys_runtime_pm_resume() */ ++ dev->bus->pm->runtime_resume(dev); ++ ++ ipu6_cleanup_fw_msg_bufs(isys); ++ ++ if (isys->fwcom) { ++ dev_err(dev, "Clearing old context\n"); ++ ipu6_fw_isys_cleanup(isys); ++ } ++ ++ ret = ipu6_fw_isys_init(av->isys, ++ isys->pdata->ipdata->num_parallel_streams); ++ if (ret < 0) ++ dev_err(dev, "ipu fw isys init failed\n"); ++ ++ dev_dbg(dev, "restart streams\n"); ++ ++ for (j = 0; j < csi2_pdata->nports; j++) { ++ for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { ++ av = &isys->csi2[j].av[i]; ++ if (!av->reset) ++ continue; ++ ++ av->reset = false; ++ reset_start_streaming(av); ++ } ++ } ++ ++end_of_reset: ++ mutex_lock(&isys->reset_mutex); ++ isys->in_reset = false; ++ mutex_unlock(&isys->reset_mutex); ++ dev_dbg(dev, "reset done\n"); ++ ++ return 0; ++} ++ ++#endif + static void stop_streaming(struct vb2_queue *q) + { + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_stream *stream = av->stream; + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ struct device *dev = &av->isys->adev->auxdev.dev; ++ ++ dev_dbg(dev, "stop: %s: enter\n", av->vdev.name); ++ ++ mutex_lock(&av->isys->reset_mutex); ++ while (av->isys->in_reset || av->isys->in_stop_streaming) { ++ mutex_unlock(&av->isys->reset_mutex); ++ dev_dbg(dev, "stop: %s: wait for in_reset = %d\n", ++ av->vdev.name, av->isys->in_reset); ++ dev_dbg(dev, "stop: %s: wait for in_stop = %d\n", ++ av->vdev.name, av->isys->in_stop_streaming); ++ usleep_range(10000, 11000); ++ mutex_lock(&av->isys->reset_mutex); ++ } ++ 
++ if (!av->start_streaming) { ++ mutex_unlock(&av->isys->reset_mutex); ++ return; ++ } ++ ++ av->isys->in_stop_streaming = true; ++ mutex_unlock(&av->isys->reset_mutex); ++ ++ stream = av->stream; ++ if (!stream) { ++ dev_err(dev, "stop: %s: ip cleard!\n", av->vdev.name); ++ return_buffers(aq, VB2_BUF_STATE_ERROR); ++ mutex_lock(&av->isys->reset_mutex); ++ av->isys->in_stop_streaming = false; ++ mutex_unlock(&av->isys->reset_mutex); ++ return; ++ } ++#endif ++ + mutex_lock(&stream->mutex); + + ipu6_isys_update_stream_watermark(av, false); +@@ -649,6 +879,22 @@ static void stop_streaming(struct vb2_queue *q) + return_buffers(aq, VB2_BUF_STATE_ERROR); + + ipu6_isys_fw_close(av->isys); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ ++ av->start_streaming = 0; ++ mutex_lock(&av->isys->reset_mutex); ++ av->isys->in_stop_streaming = false; ++ mutex_unlock(&av->isys->reset_mutex); ++ ++ if (av->isys->need_reset) { ++ if (!stream->nr_streaming) ++ ipu_isys_reset(av, stream); ++ else ++ av->isys->need_reset = 0; ++ } ++ ++ dev_dbg(dev, "stop: %s: exit\n", av->vdev.name); ++#endif + } + + static unsigned int +@@ -732,6 +978,11 @@ void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) + * to the userspace when it is de-queued + */ + atomic_set(&ib->str2mmio_flag, 0); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ } else if (atomic_read(&ib->skipframe_flag)) { ++ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); ++ atomic_set(&ib->skipframe_flag, 0); ++#endif + } else { + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h +index fe8fc79..78f3c2f 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h +@@ -33,6 +33,9 @@ struct ipu6_isys_queue { + struct ipu6_isys_buffer { + struct list_head head; + atomic_t str2mmio_flag; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ atomic_t skipframe_flag; 
++#endif + }; + + struct ipu6_isys_video_buffer { +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index 48388c0..e060bae 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -17,6 +17,9 @@ + #include + #include + #include ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#include ++#endif + + #include + #include +@@ -112,6 +115,26 @@ static int video_open(struct file *file) + return v4l2_fh_open(file); + } + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++static int video_release(struct file *file) ++{ ++ struct ipu6_isys_video *av = video_drvdata(file); ++ ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "release: %s: enter\n", av->vdev.name); ++ mutex_lock(&av->isys->reset_mutex); ++ while (av->isys->in_reset) { ++ mutex_unlock(&av->isys->reset_mutex); ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "release: %s: wait for reset\n", av->vdev.name); ++ usleep_range(10000, 11000); ++ mutex_lock(&av->isys->reset_mutex); ++ } ++ mutex_unlock(&av->isys->reset_mutex); ++ return vb2_fop_release(file); ++} ++ ++#endif + const struct ipu6_isys_pixelformat * + ipu6_isys_get_isys_format(u32 pixelformat, u32 type) + { +@@ -596,7 +619,11 @@ static int start_stream_firmware(struct ipu6_isys_video *av, + } + + reinit_completion(&stream->stream_start_completion); +- ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; ++ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, ++ send_type); ++#else + if (bl) { + send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + ipu6_fw_isys_dump_frame_buff_set(dev, buf, +@@ -609,6 +636,7 @@ static int start_stream_firmware(struct ipu6_isys_video *av, + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + send_type); + } ++#endif + + if (ret < 0) { + dev_err(dev, "can't start streaming (%d)\n", ret); +@@ -627,7 +655,25 @@ static int 
start_stream_firmware(struct ipu6_isys_video *av, + ret = -EIO; + goto out_stream_close; + } ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ if (bl) { ++ dev_dbg(dev, "start stream: capture\n"); ++ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; ++ ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); ++ ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, ++ buf, msg->dma_addr, ++ sizeof(*buf), ++ send_type); ++ ++ if (ret < 0) { ++ dev_err(dev, "can't queue buffers (%d)\n", ret); ++ goto out_stream_close; ++ } ++ } ++ ++#else + dev_dbg(dev, "start stream: complete\n"); ++#endif + + return 0; + +@@ -674,7 +720,11 @@ static void stop_streaming_firmware(struct ipu6_isys_video *av) + } + + tout = wait_for_completion_timeout(&stream->stream_stop_completion, ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); ++#else + IPU6_FW_CALL_TIMEOUT_JIFFIES); ++#endif + if (!tout) + dev_warn(dev, "stream stop time out\n"); + else if (stream->error) +@@ -699,7 +749,11 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) + } + + tout = wait_for_completion_timeout(&stream->stream_close_completion, ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); ++#else + IPU6_FW_CALL_TIMEOUT_JIFFIES); ++#endif + if (!tout) + dev_warn(dev, "stream close time out\n"); + else if (stream->error) +@@ -707,6 +761,12 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) + else + dev_dbg(dev, "close stream: complete\n"); + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ stream->last_sequence = atomic_read(&stream->sequence); ++ dev_dbg(dev, "IPU_ISYS_RESET: ip->last_sequence = %d\n", ++ stream->last_sequence); ++ ++#endif + put_stream_opened(av); + } + +@@ -721,7 +781,18 @@ int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, + return -EINVAL; + + stream->nr_queues = nr_queues; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ if (av->isys->in_reset) { ++ 
atomic_set(&stream->sequence, stream->last_sequence); ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "atomic_set : stream->last_sequence = %d\n", ++ stream->last_sequence); ++ } else { ++ atomic_set(&stream->sequence, 0); ++ } ++# else + atomic_set(&stream->sequence, 0); ++#endif + + stream->seq_index = 0; + memset(stream->seq, 0, sizeof(stream->seq)); +@@ -1089,7 +1160,11 @@ static const struct v4l2_file_operations isys_fops = { + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .open = video_open, ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ .release = video_release, ++#else + .release = vb2_fop_release, ++#endif + }; + + int ipu6_isys_fw_open(struct ipu6_isys *isys) +@@ -1306,6 +1381,11 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) + av->pix_fmt = format.fmt.pix; + __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); + av->meta_fmt = format_meta.fmt.meta; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ av->reset = false; ++ av->skipframe = 0; ++ av->start_streaming = 0; ++#endif + + set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); + video_set_drvdata(&av->vdev, av); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h +index 1d945be..0dfa477 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h +@@ -51,6 +51,9 @@ struct ipu6_isys_stream { + struct mutex mutex; + struct media_entity *source_entity; + atomic_t sequence; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ int last_sequence; ++#endif + unsigned int seq_index; + struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; + int stream_source; +@@ -101,6 +104,11 @@ struct ipu6_isys_video { + u32 source_stream; + u8 vc; + u8 dt; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ unsigned int reset; ++ unsigned int skipframe; ++ unsigned int start_streaming; ++#endif + }; + + #define ipu6_isys_queue_to_video(__aq) \ +diff --git 
a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 17bc8ca..4958a28 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1084,6 +1084,10 @@ static int isys_probe(struct auxiliary_device *auxdev, + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_init(&isys->reset_mutex); ++ isys->in_reset = false; ++#endif + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); +@@ -1126,6 +1130,9 @@ static int isys_probe(struct auxiliary_device *auxdev, + if (ret) + goto free_fw_msg_bufs; + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_destroy(&isys->reset_mutex); ++#endif + ipu6_mmu_hw_cleanup(adev->mmu); + + return 0; +@@ -1180,6 +1187,9 @@ static void isys_remove(struct auxiliary_device *auxdev) + isys_iwake_watermark_cleanup(isys); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_destroy(&isys->reset_mutex); ++#endif + } + + struct fwmsg { +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 86dfc2e..982eb5d 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -167,6 +167,11 @@ struct ipu6_isys { + struct list_head framebuflist_fw; + struct v4l2_async_notifier notifier; + struct isys_iwake_watermark iwake_watermark; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ struct mutex reset_mutex; ++ bool in_reset; ++ bool in_stop_streaming; ++#endif + }; + + struct isys_fw_msgs { +-- +2.43.0 + diff --git a/patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch b/patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch new file mode 100644 index 0000000..8a5c0fe --- /dev/null +++ b/patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch @@ 
-0,0 +1,38 @@ +From 394f85bc8316347626789f0e186403482da2c3da Mon Sep 17 00:00:00 2001 +From: Julian Chen +Date: Mon, 12 Jan 2026 05:32:38 -0700 +Subject: [PATCH 06/28] media: intel/ipu6: remove buttress ish structure + +missed content in remove buttress ish patch + +Signed-off-by: Julian Chen +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 9b6f569..482978c 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -46,18 +46,12 @@ struct ipu6_buttress_ipc { + struct ipu6_buttress { + struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; + struct ipu6_buttress_ipc cse; +- struct ipu6_buttress_ipc ish; + struct list_head constraints; + u32 wdt_cached_value; + bool force_suspend; + u32 ref_clk; + }; + +-enum ipu6_buttress_ipc_domain { +- IPU6_BUTTRESS_IPC_CSE, +- IPU6_BUTTRESS_IPC_ISH, +-}; +- + struct ipu6_ipc_buttress_bulk_msg { + u32 cmd; + u32 expected_resp; +-- +2.43.0 + diff --git a/patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch b/patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch new file mode 100644 index 0000000..c93ff76 --- /dev/null +++ b/patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch @@ -0,0 +1,42 @@ +From b1f999981313f65a89fa53539ef9d23bf380621b Mon Sep 17 00:00:00 2001 +From: Chen Meng J +Date: Mon, 12 Jan 2026 05:34:38 -0700 +Subject: [PATCH 07/28] media: intel-ipu6: use vc1 dma for MTL and ARL + +Signed-off-by: Chen Meng J +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h | 4 ++-- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +index e6ee161..545180c 100644 
+--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +@@ -54,8 +54,8 @@ struct ipu6_isys; + /* Max number of planes for frame formats supported by the FW */ + #define IPU6_PIN_PLANES_MAX 4 + +-#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 +-#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 ++#define IPU6_FW_ISYS_SENSOR_TYPE_START 1 ++#define IPU6_FW_ISYS_SENSOR_TYPE_END 10 + #define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 + #define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 + /* +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index e060bae..2d1391e 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -530,7 +530,7 @@ static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, + output_pin->csi_be_soc_pixel_remapping = + CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; + +- output_pin->snoopable = true; ++ output_pin->snoopable = false; + output_pin->error_handling_enable = false; + output_pin->sensor_type = isys->sensor_type++; + if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) +-- +2.43.0 + diff --git a/patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch b/patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch new file mode 100644 index 0000000..0c96ba2 --- /dev/null +++ b/patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch @@ -0,0 +1,42 @@ +From 8b60cd4f3bd091dee52e1e1d2a00c8a5326f09d9 Mon Sep 17 00:00:00 2001 +From: linya14x +Date: Mon, 12 Jan 2026 05:43:14 -0700 +Subject: [PATCH 08/28] media: ipu: Dma sync at buffer_prepare callback as DMA + is non-coherent + +Test Platform: +MTL ARL LT6911UXE + +Signed-off-by: linya14x +Signed-off-by: Bingbu Cao +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git 
a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +index 69fde84..b931c43 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +@@ -84,7 +84,9 @@ static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, + static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) + { + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); ++ struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); ++ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); + struct device *dev = &av->isys->adev->auxdev.dev; + u32 bytesperline = ipu6_isys_get_bytes_per_line(av); + u32 height = ipu6_isys_get_frame_height(av); +@@ -98,6 +100,9 @@ static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) + + vb2_set_plane_payload(vb, 0, bytesperline * height); + ++ /* assume IPU is not DMA coherent */ ++ ipu6_dma_sync_sgtable(isys->adev, sg); ++ + return 0; + } + +-- +2.43.0 + diff --git a/patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch b/patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch new file mode 100644 index 0000000..8d50a21 --- /dev/null +++ b/patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch @@ -0,0 +1,1432 @@ +From 592e4eb78f74948ad2b0e6929d7fb2650b4cb501 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Mon, 12 Jan 2026 05:36:07 -0700 +Subject: [PATCH 09/28] media: intel-ipu6: support IPU6 ISYS FW trace dump for + upstream driverxsy + +[media]pci]Support IPU6 ISYS FW trace dump for upstream drive +Signed-off-by: hepengpx +--- + 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 3 +- + .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 2 + + .../media/pci/intel/ipu6/ipu6-fw-com.c | 12 +- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 58 +- + .../media/pci/intel/ipu6/ipu6-platform-regs.h | 40 + + 
.../drivers/media/pci/intel/ipu6/ipu6-trace.c | 896 ++++++++++++++++++ + .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 147 +++ + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 48 + + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 5 + + 9 files changed, 1206 insertions(+), 5 deletions(-) + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +index a821b0a..e1e123b 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +@@ -6,7 +6,8 @@ intel-ipu6-y := ipu6.o \ + ipu6-mmu.o \ + ipu6-buttress.o \ + ipu6-cpd.o \ +- ipu6-fw-com.o ++ ipu6-fw-com.o \ ++ ipu6-trace.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +index bb4926d..c2a3f22 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +@@ -18,6 +18,7 @@ struct pci_dev; + #define IPU6_BUS_NAME IPU6_NAME "-bus" + + struct ipu6_buttress_ctrl; ++struct ipu_subsystem_trace_config; + + struct ipu6_bus_device { + struct auxiliary_device auxdev; +@@ -27,6 +28,7 @@ struct ipu6_bus_device { + void *pdata; + struct ipu6_mmu *mmu; + struct ipu6_device *isp; ++ struct ipu_subsystem_trace_config *trace_cfg; + struct ipu6_buttress_ctrl *ctrl; + u64 dma_mask; + const struct firmware *fw; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +index 7d3d931..45738f3 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +@@ -14,6 +14,7 @@ + #include "ipu6-bus.h" + #include "ipu6-dma.h" + #include "ipu6-fw-com.h" ++#include "ipu6-trace.h" + + /* + * FWCOM layer is a shared resource between FW 
and driver. It consist +@@ -265,8 +266,15 @@ EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6); + + int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) + { +- /* write magic pattern to disable the tunit trace */ +- writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); ++ dma_addr_t trace_buff = TUNIT_MAGIC_PATTERN; ++ ++ /* ++ * Write trace buff start addr to tunit cfg reg. ++ * This feature is used to enable tunit trace in secure mode. ++ */ ++ ipu_trace_buffer_dma_handle(&ctx->adev->auxdev.dev, &trace_buff); ++ writel(trace_buff, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); ++ + /* Check if SP is in valid state */ + if (!ctx->cell_ready(ctx->adev)) + return -EIO; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 4958a28..af03e2f 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -41,6 +41,7 @@ + #include "ipu6-platform-buttress-regs.h" + #include "ipu6-platform-isys-csi2-reg.h" + #include "ipu6-platform-regs.h" ++#include "ipu6-trace.h" + + #define IPU6_BUTTRESS_FABIC_CONTROL 0x68 + #define GDA_ENABLE_IWAKE_INDEX 2 +@@ -348,9 +349,11 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) + u32 status_sw, status_csi; + u32 ctrl0_status, ctrl0_clear; + ++ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); ++ dev_dbg(&adev->auxdev.dev, "%s exit, no power", __func__); + return IRQ_NONE; + } + +@@ -399,6 +402,7 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) + + spin_unlock(&isys->power_lock); + ++ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); + return IRQ_HANDLED; + } + +@@ -1040,6 +1044,46 @@ void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data) + spin_unlock_irqrestore(&isys->listlock, flags); + } + ++struct ipu_trace_block isys_trace_blocks[] = { ++ { ++ .offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE, ++ .type = 
IPU_TRACE_BLOCK_TUN, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_SP_EVQ_BASE, ++ .type = IPU_TRACE_BLOCK_TM, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_SP_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_ISL_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_MMU_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ /* Note! this covers all 8 blocks */ ++ .offset = IPU_TRACE_REG_CSI2_TM_BASE(0), ++ .type = IPU_TRACE_CSI2, ++ }, ++ { ++ /* Note! this covers all 11 blocks */ ++ .offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0), ++ .type = IPU_TRACE_SIG2CIOS, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, ++ .type = IPU_TRACE_TIMER_RST, ++ }, ++ { ++ .type = IPU_TRACE_BLOCK_END, ++ } ++}; ++ + static int isys_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) + { +@@ -1111,6 +1155,8 @@ static int isys_probe(struct auxiliary_device *auxdev, + goto remove_shared_buffer; + } + ++ ipu_trace_init(adev->isp, isys->pdata->base, &auxdev->dev, ++ isys_trace_blocks); + cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + ret = alloc_fw_msg_bufs(isys, 20); +@@ -1140,6 +1186,7 @@ static int isys_probe(struct auxiliary_device *auxdev, + free_fw_msg_bufs: + free_fw_msg_bufs(isys); + out_remove_pkg_dir_shared_buffer: ++ ipu_trace_uninit(&auxdev->dev); + cpu_latency_qos_remove_request(&isys->pm_qos); + if (!isp->secure_mode) + ipu6_cpd_free_pkg_dir(adev); +@@ -1185,6 +1232,7 @@ static void isys_remove(struct auxiliary_device *auxdev) + mutex_destroy(&isys->streams[i].mutex); + + isys_iwake_watermark_cleanup(isys); ++ ipu_trace_uninit(&auxdev->dev); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); + #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET +@@ -1237,12 +1285,17 @@ static int isys_isr_one(struct ipu6_bus_device *adev) + u32 index; + u64 ts; + +- if (!isys->fwcom) ++ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); ++ if 
(!isys->fwcom) { ++ dev_dbg(&adev->auxdev.dev, "%s exit, fwcom is null", __func__); + return 1; ++ } + + resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); +- if (!resp) ++ if (!resp) { ++ dev_dbg(&adev->auxdev.dev, "%s exit, resp is null", __func__); + return 1; ++ } + + ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; + +@@ -1351,6 +1404,7 @@ static int isys_isr_one(struct ipu6_bus_device *adev) + ipu6_isys_put_stream(stream); + leave: + ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); ++ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); + return 0; + } + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h +index b883385..a1744d2 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h +@@ -176,4 +176,44 @@ enum nci_ab_access_mode { + #define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) + #define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) + ++/* Trace unit related register definitions */ ++#define TRACE_REG_MAX_ISYS_OFFSET 0xfffff ++#define TRACE_REG_MAX_PSYS_OFFSET 0xfffff ++#define IPU_ISYS_OFFSET IPU6_ISYS_DMEM_OFFSET ++#define IPU_PSYS_OFFSET IPU6_PSYS_DMEM_OFFSET ++/* ISYS trace unit registers */ ++/* Trace unit base offset */ ++#define IPU_TRACE_REG_IS_TRACE_UNIT_BASE 0x27d000 ++/* Trace monitors */ ++#define IPU_TRACE_REG_IS_SP_EVQ_BASE 0x211000 ++/* GPC blocks */ ++#define IPU_TRACE_REG_IS_SP_GPC_BASE 0x210800 ++#define IPU_TRACE_REG_IS_ISL_GPC_BASE 0x2b0a00 ++#define IPU_TRACE_REG_IS_MMU_GPC_BASE 0x2e0f00 ++/* each CSI2 port has a dedicated trace monitor, index 0..7 */ ++#define IPU_TRACE_REG_CSI2_TM_BASE(port) (0x220400 + 0x1000 * (port)) ++ ++/* Trace timers */ ++#define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x27c410 ++#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) ++ ++/* SIG2CIO */ ++#define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port) \ ++ (0x220e00 + 
(port) * 0x1000) ++ ++/* PSYS trace unit registers */ ++/* Trace unit base offset */ ++#define IPU_TRACE_REG_PS_TRACE_UNIT_BASE 0x1b4000 ++/* Trace monitors */ ++#define IPU_TRACE_REG_PS_SPC_EVQ_BASE 0x119000 ++#define IPU_TRACE_REG_PS_SPP0_EVQ_BASE 0x139000 ++ ++/* GPC blocks */ ++#define IPU_TRACE_REG_PS_SPC_GPC_BASE 0x118800 ++#define IPU_TRACE_REG_PS_SPP0_GPC_BASE 0x138800 ++#define IPU_TRACE_REG_PS_MMU_GPC_BASE 0x1b1b00 ++ ++/* Trace timers */ ++#define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x1aa714 ++ + #endif /* IPU6_PLATFORM_REGS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +new file mode 100644 +index 0000000..adcee20 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +@@ -0,0 +1,896 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2014 - 2025 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ipu6.h" ++#include "ipu6-bus.h" ++#include "ipu6-platform-regs.h" ++#include "ipu6-dma.h" ++#include "ipu6-trace.h" ++ ++/* ++ * enabling ipu trace need a 96 MB buffer. ++ */ ++static bool ipu_trace_enable; ++module_param(ipu_trace_enable, bool, 0660); ++MODULE_PARM_DESC(ipu_trace_enable, "IPU trace enable"); ++ ++struct trace_register_range { ++ u32 start; ++ u32 end; ++}; ++ ++#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 96) ++#define TRACE_MESSAGE_SIZE 16 ++/* ++ * It looks that the trace unit sometimes writes outside the given buffer. ++ * To avoid memory corruption one extra page is reserved at the end ++ * of the buffer. Read also the extra area since it may contain valid data. 
++ */ ++#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE ++#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD ++#define MAX_TRACE_REGISTERS 200 ++#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) ++#define TRACE_CONF_DATA_MAX_LEN (1024 * 4) ++#define WPT_TRACE_CONF_DATA_MAX_LEN (1024 * 64) ++ ++struct config_value { ++ u32 reg; ++ u32 value; ++}; ++ ++struct ipu_trace_buffer { ++ dma_addr_t dma_handle; ++ void *memory_buffer; ++}; ++ ++struct ipu_subsystem_wptrace_config { ++ bool open; ++ char *conf_dump_buffer; ++ int size_conf_dump; ++ unsigned int fill_level; ++ struct config_value config[MAX_TRACE_REGISTERS]; ++}; ++ ++struct ipu_subsystem_trace_config { ++ u32 offset; ++ void __iomem *base; ++ struct ipu_trace_buffer memory; /* ring buffer */ ++ struct device *dev; ++ struct ipu_trace_block *blocks; ++ unsigned int fill_level; /* Nbr of regs in config table below */ ++ bool running; ++ /* Cached register values */ ++ struct config_value config[MAX_TRACE_REGISTERS]; ++ /* watchpoint trace info */ ++ struct ipu_subsystem_wptrace_config wpt; ++}; ++ ++struct ipu_trace { ++ struct mutex lock; /* Protect ipu trace operations */ ++ bool open; ++ char *conf_dump_buffer; ++ int size_conf_dump; ++ ++ struct ipu_subsystem_trace_config isys; ++ struct ipu_subsystem_trace_config psys; ++}; ++ ++static void __ipu_trace_restore(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu6_device *isp = adev->isp; ++ struct ipu_trace *trace = isp->trace; ++ struct config_value *config; ++ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; ++ struct ipu_trace_block *blocks; ++ u32 mapped_trace_buffer; ++ void __iomem *addr = NULL; ++ int i; ++ ++ if (trace->open) { ++ dev_info(dev, "Trace control file open. 
Skipping update\n"); ++ return; ++ } ++ ++ if (!sys) ++ return; ++ ++ /* leave if no trace configuration for this subsystem */ ++ if (sys->fill_level == 0) ++ return; ++ ++ /* Find trace unit base address */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_TUN) { ++ addr = sys->base + blocks->offset; ++ break; ++ } ++ blocks++; ++ } ++ if (!addr) ++ return; ++ ++ if (!sys->memory.memory_buffer) { ++ sys->memory.memory_buffer = ++ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, ++ &sys->memory.dma_handle, ++ GFP_KERNEL, 0); ++ } ++ ++ if (!sys->memory.memory_buffer) { ++ dev_err(dev, "No memory for tracing. Trace unit disabled\n"); ++ return; ++ } ++ ++ config = sys->config; ++ mapped_trace_buffer = sys->memory.dma_handle; ++ ++ /* ring buffer base */ ++ writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); ++ ++ /* ring buffer end */ ++ writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - ++ TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); ++ ++ /* Infobits for ddr trace */ ++ writel(IPU6_INFO_REQUEST_DESTINATION_PRIMARY, ++ addr + TRACE_REG_TUN_DDR_INFO_VAL); ++ ++ /* Find trace timer reset address */ ++ addr = NULL; ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_TIMER_RST) { ++ addr = sys->base + blocks->offset; ++ break; ++ } ++ blocks++; ++ } ++ if (!addr) { ++ dev_err(dev, "No trace reset addr\n"); ++ return; ++ } ++ ++ /* Remove reset from trace timers */ ++ writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); ++ ++ /* Register config received from userspace */ ++ for (i = 0; i < sys->fill_level; i++) { ++ dev_dbg(dev, ++ "Trace restore: reg 0x%08x, value 0x%08x\n", ++ config[i].reg, config[i].value); ++ writel(config[i].value, isp->base + config[i].reg); ++ } ++ ++ /* Register wpt config received from userspace, and only psys has wpt */ ++ config = sys->wpt.config; ++ for (i = 0; i < 
sys->wpt.fill_level; i++) { ++ dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n", ++ config[i].reg, config[i].value); ++ writel(config[i].value, isp->base + config[i].reg); ++ } ++ sys->running = true; ++} ++ ++void ipu_trace_restore(struct device *dev) ++{ ++ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; ++ ++ if (!trace) ++ return; ++ ++ mutex_lock(&trace->lock); ++ __ipu_trace_restore(dev); ++ mutex_unlock(&trace->lock); ++} ++EXPORT_SYMBOL_GPL(ipu_trace_restore); ++ ++static void __ipu_trace_stop(struct device *dev) ++{ ++ struct ipu_subsystem_trace_config *sys = ++ to_ipu6_bus_device(dev)->trace_cfg; ++ struct ipu_trace_block *blocks; ++ ++ if (!sys) ++ return; ++ ++ if (!sys->running) ++ return; ++ sys->running = false; ++ ++ /* Turn off all the gpc blocks */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_GPC) { ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_GPC_OVERALL_ENABLE); ++ } ++ blocks++; ++ } ++ ++ /* Turn off all the trace monitors */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_TM) { ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TM_TRACE_ENABLE_NPK); ++ ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TM_TRACE_ENABLE_DDR); ++ } ++ blocks++; ++ } ++ ++ /* Turn off trace units */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_TUN) { ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TUN_DDR_ENABLE); ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TUN_NPK_ENABLE); ++ } ++ blocks++; ++ } ++} ++ ++void ipu_trace_stop(struct device *dev) ++{ ++ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; ++ ++ if (!trace) ++ return; ++ ++ mutex_lock(&trace->lock); ++ __ipu_trace_stop(dev); ++ mutex_unlock(&trace->lock); ++} ++EXPORT_SYMBOL_GPL(ipu_trace_stop); ++ ++static int 
update_register_cache(struct ipu6_device *isp, u32 reg, u32 value) ++{ ++ struct ipu_trace *dctrl = isp->trace; ++ struct ipu_subsystem_trace_config *sys; ++ int rval = -EINVAL; ++ ++ if (dctrl->isys.offset == dctrl->psys.offset) { ++ /* For the IPU with uniform address space */ ++ if (reg >= IPU_ISYS_OFFSET && ++ reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) ++ sys = &dctrl->isys; ++ else if (reg >= IPU_PSYS_OFFSET && ++ reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) ++ sys = &dctrl->psys; ++ else ++ goto error; ++ } else { ++ if (dctrl->isys.offset && ++ reg >= dctrl->isys.offset && ++ reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) ++ sys = &dctrl->isys; ++ else if (dctrl->psys.offset && ++ reg >= dctrl->psys.offset && ++ reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) ++ sys = &dctrl->psys; ++ else ++ goto error; ++ } ++ ++ if (sys->fill_level < MAX_TRACE_REGISTERS) { ++ dev_dbg(sys->dev, ++ "Trace reg addr 0x%08x value 0x%08x\n", reg, value); ++ sys->config[sys->fill_level].reg = reg; ++ sys->config[sys->fill_level].value = value; ++ sys->fill_level++; ++ } else { ++ rval = -ENOMEM; ++ goto error; ++ } ++ return 0; ++error: ++ dev_info(&isp->pdev->dev, ++ "Trace register address 0x%08x ignored as invalid register\n", ++ reg); ++ return rval; ++} ++ ++static void traceconf_dump(struct ipu6_device *isp) ++{ ++ struct ipu_subsystem_trace_config *sys[2] = { ++ &isp->trace->isys, ++ &isp->trace->psys ++ }; ++ int i, j, rem_size; ++ char *out; ++ ++ isp->trace->size_conf_dump = 0; ++ out = isp->trace->conf_dump_buffer; ++ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; ++ ++ for (j = 0; j < ARRAY_SIZE(sys); j++) { ++ for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { ++ int bytes_print; ++ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", ++ sys[j]->config[i].reg, ++ sys[j]->config[i].value); ++ ++ bytes_print = min(n, rem_size - 1); ++ rem_size -= bytes_print; ++ out += bytes_print; ++ } ++ } ++ isp->trace->size_conf_dump = out - 
isp->trace->conf_dump_buffer; ++} ++ ++static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) ++{ ++ if (!sys || !sys->memory.memory_buffer) ++ return; ++ ++ memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_OVERREAD); ++ ++ dma_sync_single_for_device(sys->dev, ++ sys->memory.dma_handle, ++ MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); ++} ++ ++static int traceconf_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ struct ipu6_device *isp; ++ ++ if (!inode->i_private) ++ return -EACCES; ++ ++ isp = inode->i_private; ++ ++ ret = mutex_trylock(&isp->trace->lock); ++ if (!ret) ++ return -EBUSY; ++ ++ if (isp->trace->open) { ++ mutex_unlock(&isp->trace->lock); ++ return -EBUSY; ++ } ++ ++ file->private_data = isp; ++ isp->trace->open = 1; ++ if (file->f_mode & FMODE_WRITE) { ++ /* TBD: Allocate temp buffer for processing. ++ * Push validated buffer to active config ++ */ ++ ++ /* Forget old config if opened for write */ ++ isp->trace->isys.fill_level = 0; ++ isp->trace->psys.fill_level = 0; ++ isp->trace->psys.wpt.fill_level = 0; ++ } ++ ++ if (file->f_mode & FMODE_READ) { ++ isp->trace->conf_dump_buffer = ++ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); ++ if (!isp->trace->conf_dump_buffer) { ++ isp->trace->open = 0; ++ mutex_unlock(&isp->trace->lock); ++ return -ENOMEM; ++ } ++ traceconf_dump(isp); ++ } ++ mutex_unlock(&isp->trace->lock); ++ return 0; ++} ++ ++static ssize_t traceconf_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu6_device *isp = file->private_data; ++ ++ return simple_read_from_buffer(buf, len, ppos, ++ isp->trace->conf_dump_buffer, ++ isp->trace->size_conf_dump); ++} ++ ++static ssize_t traceconf_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ int i; ++ struct ipu6_device *isp = file->private_data; ++ ssize_t bytes = 0; ++ char *ipu_trace_buffer = NULL; ++ size_t buffer_size = 0; ++ u32 
ipu_trace_number = 0; ++ struct config_value *cfg_buffer = NULL; ++ ++ if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) || ++ (len < sizeof(ipu_trace_number))) { ++ dev_info(&isp->pdev->dev, ++ "length is error, len:%ld, loff:%lld\n", ++ len, *ppos); ++ return -EINVAL; ++ } ++ ++ ipu_trace_buffer = vzalloc(len); ++ if (!ipu_trace_buffer) ++ return -ENOMEM; ++ ++ bytes = copy_from_user(ipu_trace_buffer, buf, len); ++ if (bytes != 0) { ++ vfree(ipu_trace_buffer); ++ return -EFAULT; ++ } ++ ++ memcpy(&ipu_trace_number, ipu_trace_buffer, sizeof(u32)); ++ buffer_size = ipu_trace_number * sizeof(struct config_value); ++ if ((buffer_size + sizeof(ipu_trace_number)) != len) { ++ dev_info(&isp->pdev->dev, ++ "File size is not right, len:%ld, buffer_size:%zu\n", ++ len, buffer_size); ++ vfree(ipu_trace_buffer); ++ return -EFAULT; ++ } ++ ++ mutex_lock(&isp->trace->lock); ++ cfg_buffer = (struct config_value *)(ipu_trace_buffer + sizeof(u32)); ++ for (i = 0; i < ipu_trace_number; i++) { ++ update_register_cache(isp, cfg_buffer[i].reg, ++ cfg_buffer[i].value); ++ } ++ mutex_unlock(&isp->trace->lock); ++ vfree(ipu_trace_buffer); ++ ++ return len; ++} ++ ++static int traceconf_release(struct inode *inode, struct file *file) ++{ ++ struct ipu6_device *isp = file->private_data; ++ struct device *psys_dev = isp->psys ? &isp->psys->auxdev.dev : NULL; ++ struct device *isys_dev = isp->isys ? &isp->isys->auxdev.dev : NULL; ++ int isys_pm_rval = -EINVAL; ++ int psys_pm_rval = -EINVAL; ++ ++ /* ++ * Turn devices on outside trace->lock mutex. PM transition may ++ * cause call to function which tries to take the same lock. ++ * Also do this before trace->open is set back to 0 to avoid ++ * double restore (one here and one in pm transition). We can't ++ * rely purely on the restore done by pm call backs since trace ++ * configuration can occur in any phase compared to other activity. 
++ */ ++ ++ if (file->f_mode & FMODE_WRITE) { ++ if (isys_dev) ++ isys_pm_rval = pm_runtime_resume_and_get(isys_dev); ++ if (isys_pm_rval >= 0 && psys_dev) ++ psys_pm_rval = pm_runtime_resume_and_get(psys_dev); ++ } ++ ++ mutex_lock(&isp->trace->lock); ++ isp->trace->open = 0; ++ vfree(isp->trace->conf_dump_buffer); ++ isp->trace->conf_dump_buffer = NULL; ++ ++ /* Update new cfg to HW */ ++ if (isys_pm_rval >= 0) { ++ __ipu_trace_stop(isys_dev); ++ clear_trace_buffer(isp->isys->trace_cfg); ++ __ipu_trace_restore(isys_dev); ++ } ++ ++ if (psys_pm_rval >= 0) { ++ __ipu_trace_stop(psys_dev); ++ clear_trace_buffer(isp->psys->trace_cfg); ++ __ipu_trace_restore(psys_dev); ++ } ++ mutex_unlock(&isp->trace->lock); ++ ++ if (psys_pm_rval >= 0) ++ pm_runtime_put(psys_dev); ++ if (isys_pm_rval >= 0) ++ pm_runtime_put(isys_dev); ++ ++ return 0; ++} ++ ++static const struct file_operations ipu_traceconf_fops = { ++ .owner = THIS_MODULE, ++ .open = traceconf_open, ++ .release = traceconf_release, ++ .read = traceconf_read, ++ .write = traceconf_write, ++ .llseek = noop_llseek, ++}; ++ ++static void wptraceconf_dump(struct ipu6_device *isp) ++{ ++ struct ipu_subsystem_wptrace_config *sys = &isp->trace->psys.wpt; ++ int i, rem_size; ++ char *out; ++ ++ sys->size_conf_dump = 0; ++ out = sys->conf_dump_buffer; ++ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; ++ ++ for (i = 0; i < sys->fill_level && rem_size > 0; i++) { ++ int bytes_print; ++ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", ++ sys->config[i].reg, ++ sys->config[i].value); ++ ++ bytes_print = min(n, rem_size - 1); ++ rem_size -= bytes_print; ++ out += bytes_print; ++ } ++ sys->size_conf_dump = out - sys->conf_dump_buffer; ++} ++ ++static int wptraceconf_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ struct ipu6_device *isp; ++ ++ if (!inode->i_private) ++ return -EACCES; ++ ++ isp = inode->i_private; ++ ret = mutex_trylock(&isp->trace->lock); ++ if (!ret) ++ return -EBUSY; ++ ++ if 
(isp->trace->psys.wpt.open) { ++ mutex_unlock(&isp->trace->lock); ++ return -EBUSY; ++ } ++ ++ file->private_data = isp; ++ if (file->f_mode & FMODE_WRITE) { ++ /* TBD: Allocate temp buffer for processing. ++ * Push validated buffer to active config ++ */ ++ /* Forget old config if opened for write */ ++ isp->trace->psys.wpt.fill_level = 0; ++ } ++ ++ if (file->f_mode & FMODE_READ) { ++ isp->trace->psys.wpt.conf_dump_buffer = ++ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); ++ if (!isp->trace->psys.wpt.conf_dump_buffer) { ++ mutex_unlock(&isp->trace->lock); ++ return -ENOMEM; ++ } ++ wptraceconf_dump(isp); ++ } ++ mutex_unlock(&isp->trace->lock); ++ return 0; ++} ++ ++static ssize_t wptraceconf_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu6_device *isp = file->private_data; ++ ++ return simple_read_from_buffer(buf, len, ppos, ++ isp->trace->psys.wpt.conf_dump_buffer, ++ isp->trace->psys.wpt.size_conf_dump); ++} ++ ++static ssize_t wptraceconf_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ int i; ++ struct ipu6_device *isp = file->private_data; ++ ssize_t bytes = 0; ++ char *wpt_info_buffer = NULL; ++ size_t buffer_size = 0; ++ u32 wp_node_number = 0; ++ struct config_value *wpt_buffer = NULL; ++ struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt; ++ ++ if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) || ++ (len < sizeof(wp_node_number))) { ++ dev_info(&isp->pdev->dev, ++ "length is error, len:%ld, loff:%lld\n", ++ len, *ppos); ++ return -EINVAL; ++ } ++ ++ wpt_info_buffer = vzalloc(len); ++ if (!wpt_info_buffer) ++ return -ENOMEM; ++ ++ bytes = copy_from_user(wpt_info_buffer, buf, len); ++ if (bytes != 0) { ++ vfree(wpt_info_buffer); ++ return -EFAULT; ++ } ++ ++ memcpy(&wp_node_number, wpt_info_buffer, sizeof(u32)); ++ buffer_size = wp_node_number * sizeof(struct config_value); ++ if ((buffer_size + sizeof(wp_node_number)) != len) { ++ dev_info(&isp->pdev->dev, ++ "File 
size is not right, len:%ld, buffer_size:%zu\n", ++ len, buffer_size); ++ vfree(wpt_info_buffer); ++ return -EFAULT; ++ } ++ ++ mutex_lock(&isp->trace->lock); ++ wpt_buffer = (struct config_value *)(wpt_info_buffer + sizeof(u32)); ++ for (i = 0; i < wp_node_number; i++) { ++ if (wpt->fill_level < MAX_TRACE_REGISTERS) { ++ wpt->config[wpt->fill_level].reg = wpt_buffer[i].reg; ++ wpt->config[wpt->fill_level].value = ++ wpt_buffer[i].value; ++ wpt->fill_level++; ++ } else { ++ dev_info(&isp->pdev->dev, ++ "Address 0x%08x ignored as invalid register\n", ++ wpt_buffer[i].reg); ++ break; ++ } ++ } ++ mutex_unlock(&isp->trace->lock); ++ vfree(wpt_info_buffer); ++ ++ return len; ++} ++ ++static int wptraceconf_release(struct inode *inode, struct file *file) ++{ ++ struct ipu6_device *isp = file->private_data; ++ ++ mutex_lock(&isp->trace->lock); ++ isp->trace->open = 0; ++ vfree(isp->trace->psys.wpt.conf_dump_buffer); ++ isp->trace->psys.wpt.conf_dump_buffer = NULL; ++ mutex_unlock(&isp->trace->lock); ++ ++ return 0; ++} ++ ++static const struct file_operations ipu_wptraceconf_fops = { ++ .owner = THIS_MODULE, ++ .open = wptraceconf_open, ++ .release = wptraceconf_release, ++ .read = wptraceconf_read, ++ .write = wptraceconf_write, ++ .llseek = noop_llseek, ++}; ++ ++static int gettrace_open(struct inode *inode, struct file *file) ++{ ++ struct ipu_subsystem_trace_config *sys = inode->i_private; ++ ++ if (!sys) ++ return -EACCES; ++ ++ if (!sys->memory.memory_buffer) ++ return -EACCES; ++ ++ dma_sync_single_for_cpu(sys->dev, ++ sys->memory.dma_handle, ++ MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); ++ ++ file->private_data = sys; ++ return 0; ++}; ++ ++static ssize_t gettrace_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu_subsystem_trace_config *sys = file->private_data; ++ ++ return simple_read_from_buffer(buf, len, ppos, ++ sys->memory.memory_buffer, ++ MEMORY_RING_BUFFER_SIZE + ++ 
MEMORY_RING_BUFFER_OVERREAD); ++} ++ ++static ssize_t gettrace_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu_subsystem_trace_config *sys = file->private_data; ++ static const char str[] = "clear"; ++ char buffer[sizeof(str)] = { 0 }; ++ ssize_t ret; ++ ++ ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); ++ if (ret < 0) ++ return ret; ++ ++ if (ret < sizeof(str) - 1) ++ return -EINVAL; ++ ++ if (!strncmp(str, buffer, sizeof(str) - 1)) { ++ clear_trace_buffer(sys); ++ return len; ++ } ++ ++ return -EINVAL; ++} ++ ++static int gettrace_release(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static const struct file_operations ipu_gettrace_fops = { ++ .owner = THIS_MODULE, ++ .open = gettrace_open, ++ .release = gettrace_release, ++ .read = gettrace_read, ++ .write = gettrace_write, ++ .llseek = noop_llseek, ++}; ++ ++int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, ++ struct device *dev, struct ipu_trace_block *blocks) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_trace *trace = isp->trace; ++ struct ipu_subsystem_trace_config *sys; ++ int ret = 0; ++ ++ if (!isp->trace) ++ return 0; ++ ++ mutex_lock(&isp->trace->lock); ++ ++ if (dev == &isp->isys->auxdev.dev) { ++ sys = &trace->isys; ++ } else if (dev == &isp->psys->auxdev.dev) { ++ sys = &trace->psys; ++ } else { ++ ret = -EINVAL; ++ goto leave; ++ } ++ ++ adev->trace_cfg = sys; ++ sys->dev = dev; ++ sys->offset = base - isp->base; /* sub system offset */ ++ sys->base = base; ++ sys->blocks = blocks; ++ ++ sys->memory.memory_buffer = ++ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, ++ &sys->memory.dma_handle, ++ GFP_KERNEL, 0); ++ ++ if (!sys->memory.memory_buffer) ++ dev_err(dev, "failed alloc memory for tracing.\n"); ++ ++leave: ++ mutex_unlock(&isp->trace->lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(ipu_trace_init); ++ ++void 
ipu_trace_uninit(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu6_device *isp = adev->isp; ++ struct ipu_trace *trace = isp->trace; ++ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; ++ ++ if (!trace || !sys) ++ return; ++ ++ mutex_lock(&trace->lock); ++ ++ if (sys->memory.memory_buffer) ++ ipu6_dma_free( ++ adev, ++ MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, ++ sys->memory.memory_buffer, sys->memory.dma_handle, 0); ++ ++ sys->dev = NULL; ++ sys->memory.memory_buffer = NULL; ++ ++ mutex_unlock(&trace->lock); ++} ++EXPORT_SYMBOL_GPL(ipu_trace_uninit); ++ ++int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir) ++{ ++ struct dentry *files[4]; ++ int i = 0; ++ ++ if (!ipu_trace_enable) ++ return 0; ++ ++ files[i] = debugfs_create_file("traceconf", 0644, ++ dir, isp, &ipu_traceconf_fops); ++ if (!files[i]) ++ return -ENOMEM; ++ i++; ++ ++ files[i] = debugfs_create_file("wptraceconf", 0644, ++ dir, isp, &ipu_wptraceconf_fops); ++ if (!files[i]) ++ goto error; ++ i++; ++ ++ files[i] = debugfs_create_file("getisystrace", 0444, ++ dir, ++ &isp->trace->isys, &ipu_gettrace_fops); ++ ++ if (!files[i]) ++ goto error; ++ i++; ++ ++ files[i] = debugfs_create_file("getpsystrace", 0444, ++ dir, ++ &isp->trace->psys, &ipu_gettrace_fops); ++ if (!files[i]) ++ goto error; ++ ++ return 0; ++ ++error: ++ for (; i > 0; i--) ++ debugfs_remove(files[i - 1]); ++ return -ENOMEM; ++} ++ ++int ipu_trace_add(struct ipu6_device *isp) ++{ ++ if (!ipu_trace_enable) ++ return 0; ++ ++ isp->trace = devm_kzalloc(&isp->pdev->dev, ++ sizeof(struct ipu_trace), GFP_KERNEL); ++ if (!isp->trace) ++ return -ENOMEM; ++ ++ mutex_init(&isp->trace->lock); ++ ++ dev_dbg(&isp->pdev->dev, "ipu trace enabled!"); ++ ++ return 0; ++} ++ ++void ipu_trace_release(struct ipu6_device *isp) ++{ ++ if (!isp->trace) ++ return; ++ mutex_destroy(&isp->trace->lock); ++} ++ ++int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t 
*dma_handle) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; ++ ++ if (!ipu_trace_enable) ++ return -EACCES; ++ ++ if (!sys || !sys->memory.memory_buffer) ++ return -EACCES; ++ ++ *dma_handle = sys->memory.dma_handle; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); ++ ++MODULE_AUTHOR("Samu Onkalo "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Intel ipu trace support"); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +new file mode 100644 +index 0000000..f66d889 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +@@ -0,0 +1,147 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2014 - 2025 Intel Corporation */ ++ ++#ifndef IPU_TRACE_H ++#define IPU_TRACE_H ++#include ++ ++/* Trace unit register offsets */ ++#define TRACE_REG_TUN_DDR_ENABLE 0x000 ++#define TRACE_REG_TUN_NPK_ENABLE 0x004 ++#define TRACE_REG_TUN_DDR_INFO_VAL 0x008 ++#define TRACE_REG_TUN_NPK_ADDR 0x00C ++#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 ++#define TRACE_REG_TUN_DRAM_END_ADDR 0x014 ++#define TRACE_REG_TUN_LOCAL_TIMER0 0x018 ++#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C ++#define TRACE_REG_TUN_WR_PTR 0x020 ++#define TRACE_REG_TUN_RD_PTR 0x024 ++ ++/* ++ * Following registers are left out on purpose: ++ * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR ++ * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR ++ */ ++ ++/* Trace monitor register offsets */ ++#define TRACE_REG_TM_TRACE_ADDR_A 0x0900 ++#define TRACE_REG_TM_TRACE_ADDR_B 0x0904 ++#define TRACE_REG_TM_TRACE_ADDR_C 0x0908 ++#define TRACE_REG_TM_TRACE_ADDR_D 0x090c ++#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 ++#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 ++#define TRACE_REG_TM_TRACE_PER_PC 0x0918 ++#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c ++#define TRACE_REG_TM_TRACE_HEADER 0x0920 ++#define TRACE_REG_TM_TRACE_CFG 0x0924 ++#define 
TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 ++#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c ++#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 ++#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 ++#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 ++#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c ++#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 ++#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 ++#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 ++#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c ++#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 ++#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 ++#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 ++#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c ++#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 ++#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 ++#define TRACE_REG_TM_FWTRACE_LAST 0x0A08 ++ ++/* ++ * Following exists only in (I)SP address space: ++ * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST ++ */ ++ ++#define TRACE_REG_GPC_RESET 0x000 ++#define TRACE_REG_GPC_OVERALL_ENABLE 0x004 ++#define TRACE_REG_GPC_TRACE_HEADER 0x008 ++#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C ++#define TRACE_REG_GPC_TRACE_NPK_EN 0x010 ++#define TRACE_REG_GPC_TRACE_DDR_EN 0x014 ++#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 ++#define TRACE_REG_GPC_TRACE_LPKT 0x01C ++ ++#define TRACE_REG_GPC_ENABLE_ID0 0x020 ++#define TRACE_REG_GPC_ENABLE_ID1 0x024 ++#define TRACE_REG_GPC_ENABLE_ID2 0x028 ++#define TRACE_REG_GPC_ENABLE_ID3 0x02c ++ ++#define TRACE_REG_GPC_VALUE_ID0 0x030 ++#define TRACE_REG_GPC_VALUE_ID1 0x034 ++#define TRACE_REG_GPC_VALUE_ID2 0x038 ++#define TRACE_REG_GPC_VALUE_ID3 0x03c ++ ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c ++ ++#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 ++#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 ++#define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 ++#define 
TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c ++ ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c ++ ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c ++ ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c ++ ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c ++ ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac ++ ++#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 ++#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 ++#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 ++#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc ++ ++struct ipu_trace; ++struct ipu_subsystem_trace_config; ++ ++enum ipu_trace_block_type { ++ IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ ++ IPU_TRACE_BLOCK_TM, /* Trace monitor */ ++ IPU_TRACE_BLOCK_GPC, /* General purpose control */ ++ IPU_TRACE_CSI2, /* CSI2 legacy receiver */ ++ IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ ++ IPU_TRACE_SIG2CIOS, ++ IPU_TRACE_TIMER_RST, /* Trace reset control timer */ ++ IPU_TRACE_BLOCK_END /* End of list */ ++}; ++ ++struct ipu_trace_block { ++ u32 offset; /* Offset to block inside subsystem */ ++ enum ipu_trace_block_type type; ++}; ++ ++int ipu_trace_add(struct ipu6_device *isp); ++int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir); ++void 
ipu_trace_release(struct ipu6_device *isp); ++int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, ++ struct device *dev, struct ipu_trace_block *blocks); ++void ipu_trace_restore(struct device *dev); ++void ipu_trace_uninit(struct device *dev); ++void ipu_trace_stop(struct device *dev); ++int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); ++#endif +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 18f0dd2..e8ce0c9 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -32,6 +32,7 @@ + #include "ipu6-platform-buttress-regs.h" + #include "ipu6-platform-isys-csi2-reg.h" + #include "ipu6-platform-regs.h" ++#include "ipu6-trace.h" + + static unsigned int isys_freq_override; + module_param(isys_freq_override, uint, 0660); +@@ -489,6 +490,37 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) + return 0; + } + ++#ifdef CONFIG_DEBUG_FS ++static int ipu_init_debugfs(struct ipu6_device *isp) ++{ ++ struct dentry *dir; ++ ++ dir = debugfs_create_dir(IPU6_NAME, NULL); ++ if (!dir) ++ return -ENOMEM; ++ ++ if (ipu_trace_debugfs_add(isp, dir)) ++ goto err; ++ ++ isp->ipu_dir = dir; ++ ++ return 0; ++err: ++ debugfs_remove_recursive(dir); ++ return -ENOMEM; ++} ++ ++static void ipu_remove_debugfs(struct ipu6_device *isp) ++{ ++ /* ++ * Since isys and psys debugfs dir will be created under ipu root dir, ++ * mark its dentry to NULL to avoid duplicate removal. 
++ */ ++ debugfs_remove_recursive(isp->ipu_dir); ++ isp->ipu_dir = NULL; ++} ++#endif /* CONFIG_DEBUG_FS */ ++ + static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + { + u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); +@@ -597,6 +629,10 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto buttress_exit; + } + ++ ret = ipu_trace_add(isp); ++ if (ret) ++ dev_err(&isp->pdev->dev, "Trace support not available\n"); ++ + ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (ret) { +@@ -676,6 +712,13 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + ipu6_mmu_hw_cleanup(isp->psys->mmu); + pm_runtime_put(&isp->psys->auxdev.dev); + ++#ifdef CONFIG_DEBUG_FS ++ ret = ipu_init_debugfs(isp); ++ if (ret) ++ dev_err(&isp->pdev->dev, "Failed to initialize debugfs"); ++ ++#endif ++ + /* Configure the arbitration mechanisms for VC requests */ + ipu6_configure_vc_mechanism(isp); + +@@ -717,6 +760,11 @@ static void ipu6_pci_remove(struct pci_dev *pdev) + struct ipu6_mmu *isys_mmu = isp->isys->mmu; + struct ipu6_mmu *psys_mmu = isp->psys->mmu; + ++#ifdef CONFIG_DEBUG_FS ++ ipu_remove_debugfs(isp); ++#endif ++ ipu_trace_release(isp); ++ + devm_free_irq(&pdev->dev, pdev->irq, isp); + ipu6_cpd_free_pkg_dir(isp->psys); + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +index 92e3c34..d91a78e 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -82,10 +82,15 @@ struct ipu6_device { + u32 cpd_metadata_cmpnt_size; + + void __iomem *base; ++#ifdef CONFIG_DEBUG_FS ++ struct dentry *ipu_dir; ++#endif + bool need_ipc_reset; + bool secure_mode; + u8 hw_ver; + bool bus_ready_to_probe; ++ ++ struct ipu_trace *trace; + }; + + #define IPU6_ISYS_NAME "isys" +-- +2.43.0 + diff --git a/patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch 
b/patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch new file mode 100644 index 0000000..25b41d8 --- /dev/null +++ b/patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch @@ -0,0 +1,100 @@ +From dbe7da034781bfd23399a8f4f71a3c8c0a18ab10 Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Mon, 12 Jan 2026 05:46:03 -0700 +Subject: [PATCH 10/28] media: pci: The order of return buffers should be FIFO. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Change Description: +return_buffers : use "list_first_entry", will get these logs in hal: +“CamHAL[ERR] DeviceBase: dequeueBuffer, CamBuf index isn't same with +index used by kernel +CamHAL[ERR] CaptureUnit: Device:Generic grab frame failed:-22” + +The index order is changed from sequential to reverse after return_buffers. +The reason why the normal start&stop does not expose the problem is that +every Hal start will start the buffer index from 0 instead of continuing +to use the buffer index returned last stop. + +So need return_buffers from the list of tail, +and need use the "list_last_entry". + +Signed-off-by: linya14x +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-queue.c | 31 ++++++++++--------- + 1 file changed, 16 insertions(+), 15 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +index b931c43..14868b1 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +@@ -519,39 +519,40 @@ static void return_buffers(struct ipu6_isys_queue *aq, + unsigned long flags; + + spin_lock_irqsave(&aq->lock, flags); +- while (!list_empty(&aq->incoming)) { ++ ++ /* ++ * Something went wrong (FW crash / HW hang / not all buffers ++ * returned from isys) if there are still buffers queued in active ++ * queue. We have to clean up places a bit. 
++ */ ++ while (!list_empty(&aq->active)) { + struct vb2_buffer *vb; + +- ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, +- head); ++ ib = list_last_entry(&aq->active, struct ipu6_isys_buffer, ++ head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); ++ + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); ++ need_reset = true; + } + +- /* +- * Something went wrong (FW crash / HW hang / not all buffers +- * returned from isys) if there are still buffers queued in active +- * queue. We have to clean up places a bit. +- */ +- while (!list_empty(&aq->active)) { ++ while (!list_empty(&aq->incoming)) { + struct vb2_buffer *vb; + +- ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, +- head); ++ ib = list_last_entry(&aq->incoming, struct ipu6_isys_buffer, ++ head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); +- + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); +- need_reset = true; + } + + spin_unlock_irqrestore(&aq->lock, flags); +@@ -699,8 +700,8 @@ static int reset_start_streaming(struct ipu6_isys_video *av) + + spin_lock_irqsave(&aq->lock, flags); + while (!list_empty(&aq->active)) { +- struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, +- struct ipu6_isys_buffer, head); ++ struct ipu6_isys_buffer *ib = list_last_entry(&aq->active, ++ struct ipu6_isys_buffer, head); + + list_del(&ib->head); + list_add_tail(&ib->head, &aq->incoming); +-- +2.43.0 + diff --git a/patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch b/patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch new file mode 100644 index 0000000..55886a2 --- /dev/null +++ b/patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch @@ -0,0 +1,338 @@ +From b184c6f1f79f757ebdbf3b31098f7ed5f9336586 Mon Sep 17 00:00:00 2001 +From: Hongju Wang +Date: Mon, 12 Jan 2026 05:37:29 -0700 
+Subject: [PATCH 11/28] media: pci: Modify enble/disable stream in CSI2 + +Signed-off-by: Hongju Wang +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 105 ++++++++++++------ + .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 7 +- + .../media/pci/intel/ipu6/ipu6-isys-video.c | 78 +++++++------ + 3 files changed, 118 insertions(+), 72 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +index 051898c..7779c47 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +@@ -345,6 +345,12 @@ static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, + return ret; + } + ++/* ++ * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set ++ * of streams. ++ */ ++#define CSI2_SUBDEV_MAX_STREAM_ID 63 ++ + static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +@@ -352,54 +358,81 @@ static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct ipu6_isys_csi2_timing timing = { }; +- struct v4l2_subdev *remote_sd; +- struct media_pad *remote_pad; +- u64 sink_streams; +- int ret; +- +- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); +- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); ++ u32 sink_pad, sink_stream; ++ struct v4l2_subdev *r_sd; ++ struct media_pad *r_pad; ++ int ret, i; ++ ++ if (!csi2->stream_count) { ++ ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); ++ if (ret) ++ return ret; ++ ++ ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, ++ true); ++ if (ret) ++ return ret; ++ } + +- sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, +- CSI2_PAD_SINK, +- &streams_mask); ++ for (i = 0; i <= 
CSI2_SUBDEV_MAX_STREAM_ID; i++) { ++ if (streams_mask & BIT_ULL(i)) ++ break; ++ } + +- ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); ++ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, ++ &sink_pad, &sink_stream); + if (ret) + return ret; + +- ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); +- if (ret) +- return ret; ++ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); ++ r_sd = media_entity_to_v4l2_subdev(r_pad->entity); + +- ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, +- sink_streams); +- if (ret) { +- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); +- return ret; ++ ret = v4l2_subdev_enable_streams(r_sd, r_pad->index, ++ BIT_ULL(sink_stream)); ++ if (!ret) { ++ csi2->stream_count++; ++ return 0; + } + +- return 0; ++ if (!csi2->stream_count) ++ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); ++ ++ return ret; + } + + static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) + { +- struct v4l2_subdev *remote_sd; +- struct media_pad *remote_pad; +- u64 sink_streams; ++ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); ++ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); ++ u32 sink_pad, sink_stream; ++ struct v4l2_subdev *r_sd; ++ struct media_pad *r_pad; ++ int ret, i; + +- sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, +- CSI2_PAD_SINK, +- &streams_mask); ++ for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) { ++ if (streams_mask & BIT_ULL(i)) ++ break; ++ } + +- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); +- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); ++ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, ++ &sink_pad, &sink_stream); ++ if (ret) ++ return ret; + +- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); ++ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); ++ r_sd = 
media_entity_to_v4l2_subdev(r_pad->entity); + +- v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); ++ v4l2_subdev_disable_streams(r_sd, r_pad->index, BIT_ULL(sink_stream)); ++ ++ if (--csi2->stream_count) ++ return 0; ++ ++ dev_dbg(&csi2->isys->adev->auxdev.dev, ++ "stream off CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes); ++ ++ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); + + return 0; + } +@@ -598,7 +631,8 @@ void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) + int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, +- struct v4l2_mbus_frame_desc_entry *entry) ++ struct v4l2_mbus_frame_desc_entry *entry, ++ int *nr_queues) + { + struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; + struct device *dev = &csi2->isys->adev->auxdev.dev; +@@ -645,5 +679,14 @@ int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + + *entry = *desc_entry; + ++ for (i = 0; i < desc.num_entries; i++) { ++ if (desc_entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc) ++ (*nr_queues)++; ++ } ++ ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ csi2->is_multiple = true; ++ dev_dbg(dev, "set csi2->is_multiple is true.\n"); ++#endif + return 0; + } +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h +index bc8594c..ea609dd 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h +@@ -45,6 +45,10 @@ struct ipu6_isys_csi2 { + u32 receiver_errors; + unsigned int nlanes; + unsigned int port; ++ unsigned int stream_count; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ bool is_multiple; ++#endif + }; + + struct ipu6_isys_csi2_timing { +@@ -75,6 +79,7 @@ void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); + int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, +- struct 
v4l2_mbus_frame_desc_entry *entry); ++ struct v4l2_mbus_frame_desc_entry *entry, ++ int *nr_queues); + + #endif /* IPU6_ISYS_CSI2_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index 2d1391e..d1a0a72 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -1036,35 +1036,40 @@ ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) + return stream; + } + +-static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av) ++static u32 get_remote_pad_stream(struct media_pad *r_pad) + { +- struct media_pipeline *pipeline = +- media_entity_pipeline(&__av->vdev.entity); ++ struct v4l2_subdev_state *state; ++ struct v4l2_subdev *sd; ++ u32 stream_id = 0; + unsigned int i; +- u64 stream_mask = 0; + +- for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { +- struct ipu6_isys_video *av = &__av->csi2->av[i]; ++ sd = media_entity_to_v4l2_subdev(r_pad->entity); ++ state = v4l2_subdev_lock_and_get_active_state(sd); ++ if (!state) ++ return 0; + +- if (pipeline == media_entity_pipeline(&av->vdev.entity)) +- stream_mask |= BIT_ULL(av->source_stream); ++ for (i = 0; i < state->stream_configs.num_configs; i++) { ++ struct v4l2_subdev_stream_config *cfg = ++ &state->stream_configs.configs[i]; ++ if (cfg->pad == r_pad->index) { ++ stream_id = cfg->stream; ++ break; ++ } + } + +- return stream_mask; ++ v4l2_subdev_unlock_state(state); ++ ++ return stream_id; + } + + int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + struct ipu6_isys_buffer_list *bl) + { +- struct v4l2_subdev_krouting *routing; + struct ipu6_isys_stream *stream = av->stream; +- struct v4l2_subdev_state *subdev_state; + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_subdev *sd; + struct media_pad *r_pad; +- u32 sink_pad, sink_stream; +- u64 r_stream; +- u64 stream_mask = 0; ++ u32 r_stream = 0; + int ret = 0; + 
+ dev_dbg(dev, "set stream: %d\n", state); +@@ -1074,31 +1079,22 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + + sd = &stream->asd->sd; + r_pad = media_pad_remote_pad_first(&av->pad); +- r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); +- +- subdev_state = v4l2_subdev_lock_and_get_active_state(sd); +- routing = &subdev_state->routing; +- ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, +- r_stream, &sink_pad, +- &sink_stream); +- v4l2_subdev_unlock_state(subdev_state); +- if (ret) +- return ret; +- +- stream_mask = get_stream_mask_by_pipeline(av); ++ r_stream = get_remote_pad_stream(r_pad); + if (!state) { + stop_streaming_firmware(av); + + /* stop sub-device which connects with video */ +- dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", +- sd->name, r_pad->index, stream_mask); ++ dev_dbg(dev, "disable streams %s pad:%d mask:0x%llx for %s\n", ++ sd->name, r_pad->index, BIT_ULL(r_stream), ++ stream->source_entity->name); + ret = v4l2_subdev_disable_streams(sd, r_pad->index, +- stream_mask); ++ BIT_ULL(r_stream)); + if (ret) { +- dev_err(dev, "stream off %s failed with %d\n", sd->name, +- ret); ++ dev_err(dev, "disable streams %s failed with %d\n", ++ sd->name, ret); + return ret; + } ++ + close_streaming_firmware(av); + } else { + ret = start_stream_firmware(av, bl); +@@ -1108,12 +1104,14 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + } + + /* start sub-device which connects with video */ +- dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, +- r_pad->index, stream_mask); +- ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); ++ dev_dbg(dev, "enable streams %s pad: %d mask:0x%llx for %s\n", ++ sd->name, r_pad->index, BIT_ULL(r_stream), ++ stream->source_entity->name); ++ ret = v4l2_subdev_enable_streams(sd, r_pad->index, ++ BIT_ULL(r_stream)); + if (ret) { +- dev_err(dev, "stream on %s failed with %d\n", sd->name, +- ret); ++ dev_err(dev, 
"enable streams %s failed with %d\n", ++ sd->name, ret); + goto out_media_entity_stop_streaming_firmware; + } + } +@@ -1277,8 +1275,6 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, + /* Find the root */ + state = v4l2_subdev_lock_and_get_active_state(remote_sd); + for_each_active_route(&state->routing, r) { +- (*nr_queues)++; +- + if (r->source_pad == remote_pad->index) + route = r; + } +@@ -1293,11 +1289,13 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, + + ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, + to_ipu6_isys_csi2(asd), +- *source_entity, &entry); ++ *source_entity, &entry, ++ nr_queues); + if (ret == -ENOIOCTLCMD) { + av->vc = 0; + av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); +- } else if (!ret) { ++ *nr_queues = 1; ++ } else if (*nr_queues && !ret) { + dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", + entry.stream, entry.length, entry.bus.csi2.vc, + entry.bus.csi2.dt); +-- +2.43.0 + diff --git a/patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch b/patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch new file mode 100644 index 0000000..3f494da --- /dev/null +++ b/patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch @@ -0,0 +1,24 @@ +From fdf4f8d8cda8254118d673d053534ad59488c932 Mon Sep 17 00:00:00 2001 +From: Hongju Wang +Date: Mon, 12 Jan 2026 07:28:11 -0700 +Subject: [PATCH 12/28] media: pci: Set the correct SOF for different stream + +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +index 7779c47..70aaf8f 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +@@ -611,6 +611,7 @@ void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) + .type = V4L2_EVENT_FRAME_SYNC, + }; + ++ ev.id = 
stream->vc; + ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); + v4l2_event_queue(vdev, &ev); + +-- +2.43.0 + diff --git a/patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch b/patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch new file mode 100644 index 0000000..31e8f46 --- /dev/null +++ b/patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch @@ -0,0 +1,981 @@ +From 8e9a0b679a7a1dfef27eb9d9db8da768ae09e1b8 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Mon, 12 Jan 2026 07:30:57 -0700 +Subject: [PATCH 13/28] media: intel-ipu6: support imx390 for 6.11.0-rc3. + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-buttress.c | 42 +++ + .../media/pci/intel/ipu6/ipu6-buttress.h | 1 + + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 77 +++++ + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 324 +++++++++++++++++- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 85 +++++ + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 123 ++++++- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 15 +- + 7 files changed, 656 insertions(+), 11 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index ffa6dff..0cf42e4 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + + #include "ipu6.h" + #include "ipu6-bus.h" +@@ -776,6 +777,47 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + } + EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); + ++/* ++ * The dev_id was hard code in platform data, as i2c bus number ++ * may change dynamiclly, we need to update this bus id ++ * accordingly. 
++ * ++ * @adapter_id: hardware i2c adapter id, this was fixed in platform data ++ * return: i2c bus id registered in system ++ */ ++int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) ++{ ++ struct i2c_adapter *adapter; ++ char name[32]; ++ int i = 0; ++ ++ if (adapter_bdf) { ++ while ((adapter = i2c_get_adapter(i)) != NULL) { ++ struct device *parent = adapter->dev.parent; ++ struct device *pp = parent->parent; ++ ++ if (pp && !strncmp(adapter_bdf, dev_name(pp), bdf_len)) ++ return i; ++ i++; ++ } ++ } ++ ++ i = 0; ++ snprintf(name, sizeof(name), "i2c_designware.%d", adapter_id); ++ while ((adapter = i2c_get_adapter(i)) != NULL) { ++ struct device *parent = adapter->dev.parent; ++ ++ if (parent && !strncmp(name, dev_name(parent), sizeof(name))) ++ return i; ++ i++; ++ } ++ ++ /* Not found, should never happen! */ ++ WARN_ON_ONCE(1); ++ return -1; ++} ++EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, INTEL_IPU6); ++ + void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) + { + u32 tsc_hi_1, tsc_hi_2, tsc_lo; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 482978c..18d6e0b 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -83,4 +83,5 @@ void ipu6_buttress_exit(struct ipu6_device *isp); + void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); + void ipu6_buttress_restore(struct ipu6_device *isp); ++int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); + #endif /* IPU6_BUTTRESS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index 71aa500..f18e6fa 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -567,6 +567,27 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys 
*isys, u8 id) + return ret; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++{ ++ unsigned int phy_id; ++ void __iomem *phy_base; ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct ipu6_device *isp = adev->isp; ++ void __iomem *isp_base = isp->base; ++ unsigned int i; ++ ++ phy_id = cfg->port / 4; ++ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); ++ ++ for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { ++ writel(common_init_regs[i].val, ++ phy_base + common_init_regs[i].reg); ++ } ++ ++ return 0; ++} ++#else + static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) + { + struct ipu6_bus_device *adev = isys->adev; +@@ -588,6 +609,7 @@ static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) + phy_base + common_init_regs[i].reg); + } + } ++#endif + + static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + { +@@ -619,6 +641,55 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + return ret; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++{ ++ unsigned int phy_port, phy_id; ++ void __iomem *phy_base; ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct ipu6_device *isp = adev->isp; ++ void __iomem *isp_base = isp->base; ++ const struct phy_reg **phy_config_regs; ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ int i; ++ ++ if (!spdata) { ++ dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); ++ return -EINVAL; ++ } ++ ++ phy_id = cfg->port / 4; ++ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); ++ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { ++ sd_info = *subdevs; ++ if 
(!sd_info->csi2) ++ continue; ++ ++ phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); ++ if (phy_port < 0) { ++ dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", ++ cfg->port, cfg->nlanes); ++ return -ENXIO; ++ } ++ ++ if ((sd_info->csi2->port / 4) != phy_id) ++ continue; ++ ++ dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", ++ phy_port, phy_id, cfg->nlanes); ++ ++ phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; ++ ++ for (i = 0; phy_config_regs[phy_port][i].reg; i++) { ++ writel(phy_config_regs[phy_port][i].val, ++ phy_base + phy_config_regs[phy_port][i].reg); ++ } ++ } ++ ++ return 0; ++} ++#else + static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) + { + struct device *dev = &isys->adev->auxdev.dev; +@@ -658,6 +729,7 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) + + return 0; + } ++#endif + + #define CSI_MCD_PHY_NUM 2 + static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; +@@ -698,9 +770,14 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 0); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ ipu6_isys_mcd_phy_common_init(isys, cfg); ++ ret = ipu6_isys_mcd_phy_config(isys, cfg); ++#else + ipu6_isys_mcd_phy_common_init(isys); + + ret = ipu6_isys_mcd_phy_config(isys); ++#endif + if (ret) + return ret; + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index af03e2f..05ced8b 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -28,9 +28,14 @@ + #include + #include + #include +-#include +-#include ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #include ++#include ++#include ++#include ++#include ++#include ++#endif + + #include "ipu6-bus.h" + #include "ipu6-cpd.h" +@@ -100,6 +105,57 @@ enum ltr_did_type { + }; + + #define ISYS_PM_QOS_VALUE 300 ++#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++/* ++ * The param was passed from module to indicate if port ++ * could be optimized. ++ */ ++static bool csi2_port_optimized = true; ++module_param(csi2_port_optimized, bool, 0660); ++MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); ++#endif ++ ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++struct isys_i2c_test { ++ u8 bus_nr; ++ u16 addr; ++ struct i2c_client *client; ++}; ++ ++static int isys_i2c_test(struct device *dev, void *priv) ++{ ++ struct i2c_client *client = i2c_verify_client(dev); ++ struct isys_i2c_test *test = priv; ++ ++ if (!client) ++ return 0; ++ ++ if (i2c_adapter_id(client->adapter) != test->bus_nr || ++ client->addr != test->addr) ++ return 0; ++ ++ test->client = client; ++ ++ return 0; ++} ++ ++static struct ++i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, ++ struct ipu6_isys_subdev_info *sd_info) ++{ ++ struct i2c_board_info *info = &sd_info->i2c.board_info; ++ struct isys_i2c_test test = { ++ .bus_nr = i2c_adapter_id(adapter), ++ .addr = info->addr, ++ }; ++ int rval; ++ ++ rval = i2c_for_each_dev(&test, isys_i2c_test); ++ if (rval || !test.client) ++ return NULL; ++ return test.client; ++} ++#endif + + static int isys_isr_one(struct ipu6_bus_device *adev); + +@@ -142,6 +198,153 @@ unregister_subdev: + return ret; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++static int isys_register_ext_subdev(struct ipu6_isys *isys, ++ struct ipu6_isys_subdev_info *sd_info) ++{ ++ struct i2c_adapter *adapter; ++ struct v4l2_subdev *sd; ++ struct i2c_client *client; ++ int rval; ++ int bus; ++ ++ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf, ++ sizeof(sd_info->i2c.i2c_adapter_bdf)); ++ if (bus < 0) { ++ dev_err(&isys->adev->auxdev.dev, ++ "getting i2c bus id for adapter %d (bdf %s) failed", ++ sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf); ++ return -ENOENT; ++ } ++ 
dev_info(&isys->adev->auxdev.dev, ++ "got i2c bus id %d for adapter %d (bdf %s)", bus, ++ sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf); ++ adapter = i2c_get_adapter(bus); ++ if (!adapter) { ++ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); ++ return -ENOENT; ++ } ++ ++ dev_info(&isys->adev->auxdev.dev, ++ "creating new i2c subdev for %s (address %2.2x, bus %d)", ++ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, ++ bus); ++ ++ if (sd_info->csi2) { ++ dev_info(&isys->adev->auxdev.dev, "sensor device on CSI port: %d\n", ++ sd_info->csi2->port); ++ if (sd_info->csi2->port >= isys->pdata->ipdata->csi2.nports || ++ !isys->csi2[sd_info->csi2->port].isys) { ++ dev_warn(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", ++ sd_info->csi2->port); ++ rval = -EINVAL; ++ goto skip_put_adapter; ++ } ++ } else { ++ dev_info(&isys->adev->auxdev.dev, "non camera subdevice\n"); ++ } ++ ++ client = isys_find_i2c_subdev(adapter, sd_info); ++ if (client) { ++ dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); ++ rval = 0; ++ goto skip_put_adapter; ++ } ++ ++ sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, ++ &sd_info->i2c.board_info, NULL); ++ if (!sd) { ++ dev_warn(&isys->adev->auxdev.dev, "can't create new i2c subdev\n"); ++ rval = -EINVAL; ++ goto skip_put_adapter; ++ } ++ ++ if (!sd_info->csi2) ++ return 0; ++ ++ return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); ++ ++skip_put_adapter: ++ i2c_put_adapter(adapter); ++ ++ return rval; ++} ++ ++static int isys_unregister_ext_subdev(struct ipu6_isys *isys, ++ struct ipu6_isys_subdev_info *sd_info) ++{ ++ struct i2c_adapter *adapter; ++ struct i2c_client *client; ++ int bus; ++ ++ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf, ++ sizeof(sd_info->i2c.i2c_adapter_bdf)); ++ if (bus < 0) { ++ dev_err(&isys->adev->auxdev.dev, ++ "getting i2c bus id for adapter %d (bdf %s) failed\n", ++ sd_info->i2c.i2c_adapter_id, ++ 
sd_info->i2c.i2c_adapter_bdf); ++ return -ENOENT; ++ } ++ dev_dbg(&isys->adev->auxdev.dev, ++ "got i2c bus id %d for adapter %d (bdf %s)\n", bus, ++ sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf); ++ adapter = i2c_get_adapter(bus); ++ if (!adapter) { ++ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); ++ return -ENOENT; ++ } ++ ++ dev_dbg(&isys->adev->auxdev.dev, ++ "unregister i2c subdev for %s (address %2.2x, bus %d)\n", ++ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, ++ bus); ++ ++ client = isys_find_i2c_subdev(adapter, sd_info); ++ if (!client) { ++ dev_dbg(&isys->adev->auxdev.dev, "Device not exists\n"); ++ goto skip_put_adapter; ++ } ++ ++ i2c_unregister_device(client); ++ ++skip_put_adapter: ++ i2c_put_adapter(adapter); ++ ++ return 0; ++} ++ ++static void isys_register_ext_subdevs(struct ipu6_isys *isys) ++{ ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ ++ if (!spdata) { ++ dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); ++ return; ++ } ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) ++ isys_register_ext_subdev(isys, *sd_info); ++} ++ ++static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) ++{ ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ ++ if (!spdata) ++ return; ++ ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) ++ isys_unregister_ext_subdev(isys, *sd_info); ++} ++#endif ++ + static void isys_stream_init(struct ipu6_isys *isys) + { + u32 i; +@@ -173,10 +376,43 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ DECLARE_BITMAP(csi2_enable, 32); ++#endif + unsigned int i; + 
int ret; + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ /* ++ * Here is somewhat a workaround, let each platform decide ++ * if csi2 port can be optimized, which means only registered ++ * port from pdata would be enabled. ++ */ ++ if (csi2_port_optimized && spdata) { ++ bitmap_zero(csi2_enable, 32); ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { ++ if ((*sd_info)->csi2) { ++ i = (*sd_info)->csi2->port; ++ if (i >= csi2_pdata->nports) { ++ dev_warn(&isys->adev->auxdev.dev, ++ "invalid csi2 port %u\n", i); ++ continue; ++ } ++ bitmap_set(csi2_enable, i, 1); ++ } ++ } ++ } else { ++ bitmap_fill(csi2_enable, 32); ++ } ++#endif ++ + for (i = 0; i < csi2_pdata->nports; i++) { ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ if (!test_bit(i, csi2_enable)) ++ continue; ++#endif + ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, + isys->pdata->base + + CSI_REG_PORT_BASE(i), i); +@@ -200,10 +436,43 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ DECLARE_BITMAP(csi2_enable, 32); ++#endif + unsigned int i, j; + int ret; + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ /* ++ * Here is somewhat a workaround, let each platform decide ++ * if csi2 port can be optimized, which means only registered ++ * port from pdata would be enabled. 
++ */ ++ if (csi2_port_optimized && spdata) { ++ bitmap_zero(csi2_enable, 32); ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { ++ if ((*sd_info)->csi2) { ++ i = (*sd_info)->csi2->port; ++ if (i >= csi2_pdata->nports) { ++ dev_warn(&isys->adev->auxdev.dev, ++ "invalid csi2 port %u\n", i); ++ continue; ++ } ++ bitmap_set(csi2_enable, i, 1); ++ } ++ } ++ } else { ++ bitmap_fill(csi2_enable, 32); ++ } ++#endif ++ + for (i = 0; i < csi2_pdata->nports; i++) { ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ if (!test_bit(i, csi2_enable)) ++ continue; ++#endif + struct media_entity *sd = &isys->csi2[i].asd.sd.entity; + + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { +@@ -238,10 +507,43 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ DECLARE_BITMAP(csi2_enable, 32); ++#endif + unsigned int i, j; + int ret; + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ /* ++ * Here is somewhat a workaround, let each platform decide ++ * if csi2 port can be optimized, which means only registered ++ * port from pdata would be enabled. 
++ */ ++ if (csi2_port_optimized && spdata) { ++ bitmap_zero(csi2_enable, 32); ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { ++ if ((*sd_info)->csi2) { ++ i = (*sd_info)->csi2->port; ++ if (i >= csi2_pdata->nports) { ++ dev_warn(&isys->adev->auxdev.dev, ++ "invalid csi2 port %u\n", i); ++ continue; ++ } ++ bitmap_set(csi2_enable, i, 1); ++ } ++ } ++ } else { ++ bitmap_fill(csi2_enable, 32); ++ } ++#endif ++ + for (i = 0; i < csi2_pdata->nports; i++) { ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ if (!test_bit(i, csi2_enable)) ++ continue; ++#endif + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { + struct ipu6_isys_video *av = &isys->csi2[i].av[j]; + +@@ -672,6 +974,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) + mutex_destroy(&iwake_watermark->mutex); + } + ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, +@@ -783,6 +1086,7 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) + v4l2_async_nf_unregister(&isys->notifier); + v4l2_async_nf_cleanup(&isys->notifier); + } ++#endif + + static int isys_register_devices(struct ipu6_isys *isys) + { +@@ -820,12 +1124,23 @@ static int isys_register_devices(struct ipu6_isys *isys) + if (ret) + goto out_isys_unregister_subdevices; + ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + ret = isys_notifier_init(isys); + if (ret) + goto out_isys_unregister_subdevices; ++#else ++ isys_register_ext_subdevs(isys); ++#endif ++ ++ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); ++ if (ret) ++ goto out_isys_unregister_ext_subdevs; + + return 0; + ++out_isys_unregister_ext_subdevs: ++ isys_unregister_ext_subdevs(isys); ++ + out_isys_unregister_subdevices: + isys_csi2_unregister_subdevices(isys); + +@@ -848,6 +1163,9 @@ static void isys_unregister_devices(struct ipu6_isys *isys) + { + 
isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ isys_unregister_ext_subdevs(isys); ++#endif + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); + media_device_cleanup(&isys->media_dev); +@@ -1218,7 +1536,9 @@ static void isys_remove(struct auxiliary_device *auxdev) + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + isys_notifier_cleanup(isys); ++#endif + + cpu_latency_qos_remove_request(&isys->pm_qos); + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 982eb5d..a956045 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -62,6 +63,15 @@ struct ipu6_bus_device; + #define IPU6EP_MTL_LTR_VALUE 1023 + #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc + ++#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ ++ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++#define IPU6_SPDATA_NAME_LEN 20 ++#define IPU6_SPDATA_BDF_LEN 32 ++#define IPU6_SPDATA_GPIO_NUM 4 ++#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#endif ++ + struct ltr_did { + union { + u32 value; +@@ -165,7 +175,9 @@ struct ipu6_isys { + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + struct v4l2_async_notifier notifier; ++#endif + struct isys_iwake_watermark iwake_watermark; + #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET + struct mutex reset_mutex; +@@ -184,6 +196,79 @@ struct isys_fw_msgs { + dma_addr_t dma_addr; + }; + ++struct ipu6_isys_subdev_i2c_info { ++ struct i2c_board_info board_info; ++ int i2c_adapter_id; ++ char 
i2c_adapter_bdf[32]; ++}; ++ ++#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ ++ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++#define IPU6_SPDATA_NAME_LEN 20 ++#define IPU6_SPDATA_BDF_LEN 32 ++#define IPU6_SPDATA_GPIO_NUM 4 ++#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#endif ++ ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++/** ++ * struct ipu6_spdata_rep - override subdev platform data ++ * ++ * @name: i2c_board_info.type ++ * @i2c_adapter_bdf_o: old i2c adapter bdf ++ * @slave_addr_o: old i2c slave address ++ * @i2c_adapter_bdf_n: new i2c adapter bdf ++ * @slave_addr_n: new i2c slave address ++ * ++ * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and ++ * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n ++ */ ++struct ipu6_spdata_rep { ++ /* i2c old information */ ++ char name[IPU6_SPDATA_NAME_LEN]; ++ unsigned int port_o; ++ char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; ++ uint32_t slave_addr_o; ++ ++ /* i2c new information */ ++ unsigned int port_n; ++ char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; ++ uint32_t slave_addr_n; ++ ++ /* sensor_platform */ ++ unsigned int lanes; ++ int gpios[IPU6_SPDATA_GPIO_NUM]; ++ int irq_pin; ++ unsigned int irq_pin_flags; ++ char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; ++ char suffix; ++}; ++#endif ++ ++struct ipu6_isys_subdev_info { ++ struct ipu6_isys_csi2_config *csi2; ++ struct ipu6_isys_subdev_i2c_info i2c; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ void (*fixup_spdata)(const void *spdata_rep, void *spdata); ++#endif ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ char *acpi_hid; ++#endif ++}; ++ ++struct ipu6_isys_clk_mapping { ++ struct clk_lookup clkdev_data; ++ char *platform_clock_name; ++}; ++ ++struct ipu6_isys_subdev_pdata { ++ struct 
ipu6_isys_subdev_info **subdevs; ++ struct ipu6_isys_clk_mapping *clk_map; ++}; ++ + struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); + void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data); + void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index e8ce0c9..6019705 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -34,6 +34,19 @@ + #include "ipu6-platform-regs.h" + #include "ipu6-trace.h" + ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++#include ++#endif ++ ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++static int isys_init_acpi_add_device(struct device *dev, void *priv, ++ struct ipu6_isys_csi2_config *csi2, ++ bool reprobe) ++{ ++ return 0; ++} ++#endif ++ + static unsigned int isys_freq_override; + module_param(isys_freq_override, uint, 0660); + MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); +@@ -372,18 +385,25 @@ static void ipu6_internal_pdata_init(struct ipu6_device *isp) + static struct ipu6_bus_device * + ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, +- const struct ipu6_isys_internal_pdata *ipdata) ++ const struct ipu6_isys_internal_pdata *ipdata, ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata ++#endif ++ ) + { + struct device *dev = &pdev->dev; + struct ipu6_bus_device *isys_adev; + struct ipu6_isys_pdata *pdata; ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ struct ipu6_isys_subdev_pdata *acpi_pdata; ++#endif + int ret; + +- ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); +- if (ret) { +- dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); +- return ERR_PTR(ret); +- } ++ // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); ++ // if (ret) { ++ // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); ++ // return ERR_PTR(ret); 
++ // } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) +@@ -391,7 +411,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + + pdata->base = base; + pdata->ipdata = ipdata; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ pdata->spdata = spdata; ++#endif + /* Override the isys freq */ + if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && + isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { +@@ -408,6 +430,19 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + "ipu6_bus_initialize_device isys failed\n"); + } + ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ if (!spdata) { ++ dev_dbg(&pdev->dev, "No subdevice info provided"); ++ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, ++ isys_init_acpi_add_device); ++ pdata->spdata = acpi_pdata; ++ } else { ++ dev_dbg(&pdev->dev, "Subdevice info found"); ++ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, ++ isys_init_acpi_add_device); ++ } ++#endif ++ + isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(isys_adev->mmu)) { +@@ -538,6 +573,59 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++static inline int match_spdata(struct ipu6_isys_subdev_info *sd, ++ const struct ipu6_spdata_rep *rep) ++{ ++ if (strcmp(sd->i2c.board_info.type, rep->name)) ++ return 0; ++ ++ if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) ++ return 0; ++ ++ if (sd->i2c.board_info.addr != rep->slave_addr_o) ++ return 0; ++ ++ if (sd->csi2->port != rep->port_o) ++ return 0; ++ ++ return 1; ++} ++ ++static void fixup_spdata(const void *spdata_rep, ++ struct ipu6_isys_subdev_pdata *spdata) ++{ ++ const struct ipu6_spdata_rep *rep = spdata_rep; ++ struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ 
++ if (!spdata) ++ return; ++ ++ for (; rep->name[0]; rep++) { ++ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { ++ sd_info = *subdevs; ++ ++ if (!sd_info->csi2) ++ continue; ++ ++ if (match_spdata(sd_info, rep)) { ++ strcpy(sd_info->i2c.i2c_adapter_bdf, ++ rep->i2c_adapter_bdf_n); ++ sd_info->i2c.board_info.addr = ++ rep->slave_addr_n; ++ sd_info->csi2->port = rep->port_n; ++ ++ if (sd_info->fixup_spdata) ++ sd_info->fixup_spdata(rep, ++ sd_info->i2c.board_info.platform_data); ++ } ++ } ++ } ++} ++#endif ++#endif ++ + static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; +@@ -641,6 +729,16 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto out_ipu6_bus_del_devices; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); ++ if (rval) ++ dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); ++ else ++ fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); ++#endif ++#endif ++ + isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, + sizeof(isys_buttress_ctrl), GFP_KERNEL); + if (!isys_ctrl) { +@@ -649,7 +747,11 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + } + + isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, +- &isys_ipdata); ++ &isys_ipdata, ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ pdev->dev.platform_data ++#endif ++ ); + if (IS_ERR(isp->isys)) { + ret = PTR_ERR(isp->isys); + goto out_ipu6_bus_del_devices; +@@ -748,6 +850,11 @@ out_ipu6_bus_del_devices: + ipu6_mmu_cleanup(isp->isys->mmu); + ipu6_bus_del_devices(pdev); + release_firmware(isp->cpd_fw); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ 
release_firmware(isp->spdata_fw); ++#endif ++#endif + buttress_exit: + ipu6_buttress_exit(isp); + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +index d91a78e..71d8cdf 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -23,6 +23,12 @@ struct ipu6_bus_device; + #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" + #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++/* array of struct ipu6_spdata_rep terminated by NULL */ ++#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" ++#endif ++ + enum ipu6_version { + IPU6_VER_INVALID = 0, + IPU6_VER_6 = 1, +@@ -80,7 +86,11 @@ struct ipu6_device { + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u32 cpd_metadata_cmpnt_size; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ const struct firmware *spdata_fw; ++#endif ++#endif + void __iomem *base; + #ifdef CONFIG_DEBUG_FS + struct dentry *ipu_dir; +@@ -328,6 +338,9 @@ struct ipu6_isys_internal_pdata { + struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata; ++#endif + }; + + struct ipu6_psys_internal_pdata { +-- +2.43.0 + diff --git a/patches/0014-media-intel-ipu6-fix-coverity-issue.patch b/patches/0014-media-intel-ipu6-fix-coverity-issue.patch new file mode 100644 index 0000000..10c5e2c --- /dev/null +++ b/patches/0014-media-intel-ipu6-fix-coverity-issue.patch @@ -0,0 +1,31 @@ +From d17dcc21b65a37c86913dfed523184be691e4362 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Mon, 12 Jan 2026 07:33:57 -0700 +Subject: [PATCH 14/28] media: intel-ipu6: fix coverity issue + +check return value of 
v4l2_ctrl_handler_init; +change the type of phy_port to int; + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index f18e6fa..ff29130 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -644,7 +644,8 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) + { +- unsigned int phy_port, phy_id; ++ unsigned int phy_id; ++ int phy_port; + void __iomem *phy_base; + struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); + struct ipu6_device *isp = adev->isp; +-- +2.43.0 + diff --git a/patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch b/patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch new file mode 100644 index 0000000..025ac56 --- /dev/null +++ b/patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch @@ -0,0 +1,253 @@ +From 1d70488bb65f48cf6196bef78d8f546b28ee1738 Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Mon, 12 Jan 2026 07:37:10 -0700 +Subject: [PATCH 15/28] media: intel-ipu6: move ipu-acpi module to + linux-drivers + +The ipu-acpi module is responsible for obtaining ACPI device platform +data (pdata) and is universally applicable to both IPU6/7 and +upstream/non-upstream. To better align with its purpose and usage, the +ipu-acpi module is being moved out of the linux_kernel and into the +linux-drivers[android/platform/main]. 
+ +Specific changes include: + +- Struct ipu_isys_subdev_pdata and its internal structs + ipu_isys_subdev_info/ipu_isys_clk_mapping: + solely related to ACPI pdata, not tied to any specific IPU. +- IPU_SPDATA_NAME_LEN and related macros: + used exclusively for ACPI device definition and configuration. + +Signed-off-by: Dongcheng Yan +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 4 +-- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 26 +++++++++---------- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 26 +++++++++---------- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 10 +++---- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 2 +- + 5 files changed, 34 insertions(+), 34 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index ff29130..7ab02b7 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -651,8 +651,8 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; + const struct phy_reg **phy_config_regs; +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **subdevs, *sd_info; + int i; + + if (!spdata) { +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 05ced8b..bc40325 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -141,7 +141,7 @@ static int isys_i2c_test(struct device *dev, void *priv) + + static struct + i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, +- struct ipu6_isys_subdev_info *sd_info) ++ struct ipu_isys_subdev_info *sd_info) + { + 
struct i2c_board_info *info = &sd_info->i2c.board_info; + struct isys_i2c_test test = { +@@ -200,7 +200,7 @@ unregister_subdev: + + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + static int isys_register_ext_subdev(struct ipu6_isys *isys, +- struct ipu6_isys_subdev_info *sd_info) ++ struct ipu_isys_subdev_info *sd_info) + { + struct i2c_adapter *adapter; + struct v4l2_subdev *sd; +@@ -274,7 +274,7 @@ skip_put_adapter: + } + + static int isys_unregister_ext_subdev(struct ipu6_isys *isys, +- struct ipu6_isys_subdev_info *sd_info) ++ struct ipu_isys_subdev_info *sd_info) + { + struct i2c_adapter *adapter; + struct i2c_client *client; +@@ -321,8 +321,8 @@ skip_put_adapter: + + static void isys_register_ext_subdevs(struct ipu6_isys *isys) + { +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + + if (!spdata) { + dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); +@@ -334,8 +334,8 @@ static void isys_register_ext_subdevs(struct ipu6_isys *isys) + + static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) + { +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + + if (!spdata) + return; +@@ -377,8 +377,8 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + #endif + unsigned int i; +@@ -437,8 +437,8 @@ static int 
isys_csi2_create_media_links(struct ipu6_isys *isys) + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + #endif + unsigned int i, j; +@@ -508,8 +508,8 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + #endif + unsigned int i, j; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index a956045..a897650 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -66,10 +66,10 @@ struct ipu6_bus_device; + #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ + && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ + || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU6_SPDATA_NAME_LEN 20 +-#define IPU6_SPDATA_BDF_LEN 32 +-#define IPU6_SPDATA_GPIO_NUM 4 +-#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#define IPU_SPDATA_NAME_LEN 20 ++#define IPU_SPDATA_BDF_LEN 32 ++#define IPU_SPDATA_GPIO_NUM 4 ++#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif + + struct ltr_did { +@@ -205,10 +205,10 @@ struct ipu6_isys_subdev_i2c_info { + #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ + && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ + || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define 
IPU6_SPDATA_NAME_LEN 20 +-#define IPU6_SPDATA_BDF_LEN 32 +-#define IPU6_SPDATA_GPIO_NUM 4 +-#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#define IPU_SPDATA_NAME_LEN 20 ++#define IPU_SPDATA_BDF_LEN 32 ++#define IPU_SPDATA_GPIO_NUM 4 ++#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif + + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +@@ -247,7 +247,7 @@ struct ipu6_spdata_rep { + }; + #endif + +-struct ipu6_isys_subdev_info { ++struct ipu_isys_subdev_info { + struct ipu6_isys_csi2_config *csi2; + struct ipu6_isys_subdev_i2c_info i2c; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +@@ -259,14 +259,14 @@ struct ipu6_isys_subdev_info { + #endif + }; + +-struct ipu6_isys_clk_mapping { ++struct ipu_isys_clk_mapping { + struct clk_lookup clkdev_data; + char *platform_clock_name; + }; + +-struct ipu6_isys_subdev_pdata { +- struct ipu6_isys_subdev_info **subdevs; +- struct ipu6_isys_clk_mapping *clk_map; ++struct ipu_isys_subdev_pdata { ++ struct ipu_isys_subdev_info **subdevs; ++ struct ipu_isys_clk_mapping *clk_map; + }; + + struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 6019705..5456801 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -387,7 +387,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_isys_internal_pdata *ipdata, + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata ++ struct ipu_isys_subdev_pdata *spdata + #endif + ) + { +@@ -395,7 +395,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_bus_device *isys_adev; + struct ipu6_isys_pdata *pdata; + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- struct ipu6_isys_subdev_pdata *acpi_pdata; ++ struct ipu_isys_subdev_pdata *acpi_pdata; + #endif + 
int ret; + +@@ -575,7 +575,7 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-static inline int match_spdata(struct ipu6_isys_subdev_info *sd, ++static inline int match_spdata(struct ipu_isys_subdev_info *sd, + const struct ipu6_spdata_rep *rep) + { + if (strcmp(sd->i2c.board_info.type, rep->name)) +@@ -594,10 +594,10 @@ static inline int match_spdata(struct ipu6_isys_subdev_info *sd, + } + + static void fixup_spdata(const void *spdata_rep, +- struct ipu6_isys_subdev_pdata *spdata) ++ struct ipu_isys_subdev_pdata *spdata) + { + const struct ipu6_spdata_rep *rep = spdata_rep; +- struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ struct ipu_isys_subdev_info **subdevs, *sd_info; + + if (!spdata) + return; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +index 71d8cdf..786494d 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -339,7 +339,7 @@ struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata; ++ struct ipu_isys_subdev_pdata *spdata; + #endif + }; + +-- +2.43.0 + diff --git a/patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch b/patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch new file mode 100644 index 0000000..391eff2 --- /dev/null +++ b/patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch @@ -0,0 +1,58 @@ +From 03e22286960e89231418c7ad45676b991a3655af Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Mon, 12 Jan 2026 07:40:01 -0700 +Subject: [PATCH 16/28] media: pci: unregister i2c device to complete + ext_subdev_register + +Signed-off-by: Dongcheng Yan +Signed-off-by: zouxiaoh + +u# Please enter the 
commit message for your changes. Lines starting +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 4 ++-- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 7 ++++--- + 2 files changed, 6 insertions(+), 5 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index bc40325..e61b5b4 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -250,8 +250,8 @@ static int isys_register_ext_subdev(struct ipu6_isys *isys, + client = isys_find_i2c_subdev(adapter, sd_info); + if (client) { + dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); +- rval = 0; +- goto skip_put_adapter; ++ i2c_unregister_device(client); ++ dev_dbg(&isys->adev->auxdev.dev, "Unregister device"); + } + + sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 5456801..406ff21 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -433,16 +433,17 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!spdata) { + dev_dbg(&pdev->dev, "No subdevice info provided"); +- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, + isys_init_acpi_add_device); + pdata->spdata = acpi_pdata; + } else { + dev_dbg(&pdev->dev, "Subdevice info found"); +- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, + isys_init_acpi_add_device); + } ++ if (ret) ++ return ERR_PTR(ret); + #endif +- + isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(isys_adev->mmu)) { +-- +2.43.0 + diff --git 
a/patches/0017-media-pci-add-missing-if-for-PDATA.patch b/patches/0017-media-pci-add-missing-if-for-PDATA.patch new file mode 100644 index 0000000..ccfda66 --- /dev/null +++ b/patches/0017-media-pci-add-missing-if-for-PDATA.patch @@ -0,0 +1,116 @@ +From 5b761df0eab492439ec7045320ea2e28016073b1 Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Mon, 12 Jan 2026 07:46:33 -0700 +Subject: [PATCH 17/28] media: pci: add missing #if for PDATA + +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c | 7 ++++--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 2 ++ + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 8 ++++---- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 7 ++++--- + 4 files changed, 14 insertions(+), 10 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index 0cf42e4..0ba908c 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -21,8 +21,9 @@ + #include + #include + #include ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #include +- ++#endif + #include "ipu6.h" + #include "ipu6-bus.h" + #include "ipu6-dma.h" +@@ -776,7 +777,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + return -ETIMEDOUT; + } + EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + /* + * The dev_id was hard code in platform data, as i2c bus number + * may change dynamiclly, we need to update this bus id +@@ -817,7 +818,7 @@ int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) + return -1; + } + EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, INTEL_IPU6); +- ++#endif + void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) + { + u32 tsc_hi_1, tsc_hi_2, tsc_lo; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 
18d6e0b..99fa266 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -83,5 +83,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); + void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); + void ipu6_buttress_restore(struct ipu6_device *isp); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); ++#endif + #endif /* IPU6_BUTTRESS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index e61b5b4..a74a3a0 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1131,16 +1131,16 @@ static int isys_register_devices(struct ipu6_isys *isys) + #else + isys_register_ext_subdevs(isys); + #endif +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); + if (ret) + goto out_isys_unregister_ext_subdevs; +- ++#endif + return 0; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + out_isys_unregister_ext_subdevs: + isys_unregister_ext_subdevs(isys); +- ++#endif + out_isys_unregister_subdevices: + isys_csi2_unregister_subdevices(isys); + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index a897650..80e1a0f 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -10,8 +10,9 @@ + #include + #include + #include ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #include +- ++#endif + #include + #include + #include +@@ -195,13 +196,13 @@ struct isys_fw_msgs { + struct list_head head; + dma_addr_t dma_addr; + }; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + struct ipu6_isys_subdev_i2c_info { + struct i2c_board_info board_info; + int 
i2c_adapter_id; + char i2c_adapter_bdf[32]; + }; +- ++#endif + #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ + && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ + || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-- +2.43.0 + diff --git a/patches/0018-media-pci-refine-PDATA-related-config.patch b/patches/0018-media-pci-refine-PDATA-related-config.patch new file mode 100644 index 0000000..3de098d --- /dev/null +++ b/patches/0018-media-pci-refine-PDATA-related-config.patch @@ -0,0 +1,526 @@ +From 99ab4097d616470ca9a5410d89345f9f931a4b30 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Mon, 12 Jan 2026 07:51:32 -0700 +Subject: [PATCH 18/28] media: pci: refine PDATA related config + +remove dynamic pdata related code; +replace CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA +with CONFIG_INTEL_IPU6_ACPI; + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-buttress.c | 4 +- + .../media/pci/intel/ipu6/ipu6-buttress.h | 2 +- + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 6 +- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 38 +++++----- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 60 ++------------- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 74 +------------------ + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 13 +--- + 7 files changed, 34 insertions(+), 163 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index 0ba908c..32fa979 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -21,7 +21,7 @@ + #include + #include + #include +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #endif + #include "ipu6.h" +@@ -777,7 +777,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + return -ETIMEDOUT; + } + EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); +-#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * The dev_id was hard code in platform data, as i2c bus number + * may change dynamiclly, we need to update this bus id +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 99fa266..34c0da0 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -83,7 +83,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); + void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); + void ipu6_buttress_restore(struct ipu6_device *isp); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); + #endif + #endif /* IPU6_BUTTRESS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index 7ab02b7..1cf33b5 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -567,7 +567,7 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) + return ret; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) + { + unsigned int phy_id; +@@ -641,7 +641,7 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + return ret; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) + { + unsigned int phy_id; +@@ -771,7 +771,7 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + return ret; + + 
ipu6_isys_mcd_phy_reset(isys, phy_id, 0); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + ipu6_isys_mcd_phy_common_init(isys, cfg); + ret = ipu6_isys_mcd_phy_config(isys, cfg); + #else +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index a74a3a0..eebcffc 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -28,7 +28,7 @@ + #include + #include + #include +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #include + #include +@@ -105,7 +105,7 @@ enum ltr_did_type { + }; + + #define ISYS_PM_QOS_VALUE 300 +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * The param was passed from module to indicate if port + * could be optimized. +@@ -113,9 +113,7 @@ enum ltr_did_type { + static bool csi2_port_optimized = true; + module_param(csi2_port_optimized, bool, 0660); + MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); +-#endif + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + struct isys_i2c_test { + u8 bus_nr; + u16 addr; +@@ -198,7 +196,7 @@ unregister_subdev: + return ret; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int isys_register_ext_subdev(struct ipu6_isys *isys, + struct ipu_isys_subdev_info *sd_info) + { +@@ -376,7 +374,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); +@@ -384,7 +382,7 @@ static int 
isys_csi2_register_subdevices(struct ipu6_isys *isys) + unsigned int i; + int ret; + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered +@@ -409,7 +407,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + #endif + + for (i = 0; i < csi2_pdata->nports; i++) { +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!test_bit(i, csi2_enable)) + continue; + #endif +@@ -436,7 +434,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); +@@ -444,7 +442,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + unsigned int i, j; + int ret; + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered +@@ -469,7 +467,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + #endif + + for (i = 0; i < csi2_pdata->nports; i++) { +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!test_bit(i, csi2_enable)) + continue; + #endif +@@ -507,7 +505,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct 
ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); +@@ -515,7 +513,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + unsigned int i, j; + int ret; + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered +@@ -540,7 +538,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + #endif + + for (i = 0; i < csi2_pdata->nports; i++) { +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!test_bit(i, csi2_enable)) + continue; + #endif +@@ -974,7 +972,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) + mutex_destroy(&iwake_watermark->mutex); + } + +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, +@@ -1124,20 +1122,20 @@ static int isys_register_devices(struct ipu6_isys *isys) + if (ret) + goto out_isys_unregister_subdevices; + +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + ret = isys_notifier_init(isys); + if (ret) + goto out_isys_unregister_subdevices; + #else + isys_register_ext_subdevs(isys); + #endif +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); + if (ret) + goto out_isys_unregister_ext_subdevs; + #endif + return 0; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + out_isys_unregister_ext_subdevs: + isys_unregister_ext_subdevs(isys); + #endif +@@ -1163,7 +1161,7 @@ static void 
isys_unregister_devices(struct ipu6_isys *isys) + { + isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + isys_unregister_ext_subdevs(isys); + #endif + v4l2_device_unregister(&isys->v4l2_dev); +@@ -1536,7 +1534,7 @@ static void isys_remove(struct auxiliary_device *auxdev) + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + isys_notifier_cleanup(isys); + #endif + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 80e1a0f..1db940a 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -10,7 +10,7 @@ + #include + #include + #include +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #endif + #include +@@ -64,11 +64,7 @@ struct ipu6_bus_device; + #define IPU6EP_MTL_LTR_VALUE 1023 + #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc + +-#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ +- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU_SPDATA_NAME_LEN 20 +-#define IPU_SPDATA_BDF_LEN 32 ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #define IPU_SPDATA_GPIO_NUM 4 + #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif +@@ -176,7 +172,7 @@ struct ipu6_isys { + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct v4l2_async_notifier notifier; + #endif + struct isys_iwake_watermark iwake_watermark; +@@ -196,66 +192,22 @@ struct isys_fw_msgs { + struct list_head head; + dma_addr_t dma_addr; + }; +-#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu6_isys_subdev_i2c_info { + struct i2c_board_info board_info; + int i2c_adapter_id; + char i2c_adapter_bdf[32]; + }; + #endif +-#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ +- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU_SPDATA_NAME_LEN 20 +-#define IPU_SPDATA_BDF_LEN 32 ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #define IPU_SPDATA_GPIO_NUM 4 + #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-/** +- * struct ipu6_spdata_rep - override subdev platform data +- * +- * @name: i2c_board_info.type +- * @i2c_adapter_bdf_o: old i2c adapter bdf +- * @slave_addr_o: old i2c slave address +- * @i2c_adapter_bdf_n: new i2c adapter bdf +- * @slave_addr_n: new i2c slave address +- * +- * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and +- * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n +- */ +-struct ipu6_spdata_rep { +- /* i2c old information */ +- char name[IPU6_SPDATA_NAME_LEN]; +- unsigned int port_o; +- char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; +- uint32_t slave_addr_o; +- +- /* i2c new information */ +- unsigned int port_n; +- char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; +- uint32_t slave_addr_n; +- +- /* sensor_platform */ +- unsigned int lanes; +- int gpios[IPU6_SPDATA_GPIO_NUM]; +- int irq_pin; +- unsigned int irq_pin_flags; +- char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; +- char suffix; +-}; +-#endif +- + struct ipu_isys_subdev_info { + struct ipu6_isys_csi2_config *csi2; +- struct ipu6_isys_subdev_i2c_info i2c; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- void (*fixup_spdata)(const void *spdata_rep, void *spdata); +-#endif + #if 
IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ struct ipu6_isys_subdev_i2c_info i2c; + char *acpi_hid; + #endif + }; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 406ff21..57ab6a5 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -386,7 +386,7 @@ static struct ipu6_bus_device * + ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_isys_internal_pdata *ipdata, +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata + #endif + ) +@@ -411,7 +411,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + + pdata->base = base; + pdata->ipdata = ipdata; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + pdata->spdata = spdata; + #endif + /* Override the isys freq */ +@@ -574,59 +574,6 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-static inline int match_spdata(struct ipu_isys_subdev_info *sd, +- const struct ipu6_spdata_rep *rep) +-{ +- if (strcmp(sd->i2c.board_info.type, rep->name)) +- return 0; +- +- if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) +- return 0; +- +- if (sd->i2c.board_info.addr != rep->slave_addr_o) +- return 0; +- +- if (sd->csi2->port != rep->port_o) +- return 0; +- +- return 1; +-} +- +-static void fixup_spdata(const void *spdata_rep, +- struct ipu_isys_subdev_pdata *spdata) +-{ +- const struct ipu6_spdata_rep *rep = spdata_rep; +- struct ipu_isys_subdev_info **subdevs, *sd_info; +- +- if (!spdata) +- return; +- +- for (; rep->name[0]; rep++) { +- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { +- 
sd_info = *subdevs; +- +- if (!sd_info->csi2) +- continue; +- +- if (match_spdata(sd_info, rep)) { +- strcpy(sd_info->i2c.i2c_adapter_bdf, +- rep->i2c_adapter_bdf_n); +- sd_info->i2c.board_info.addr = +- rep->slave_addr_n; +- sd_info->csi2->port = rep->port_n; +- +- if (sd_info->fixup_spdata) +- sd_info->fixup_spdata(rep, +- sd_info->i2c.board_info.platform_data); +- } +- } +- } +-} +-#endif +-#endif +- + static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; +@@ -730,16 +677,6 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto out_ipu6_bus_del_devices; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); +- if (rval) +- dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); +- else +- fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); +-#endif +-#endif +- + isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, + sizeof(isys_buttress_ctrl), GFP_KERNEL); + if (!isys_ctrl) { +@@ -749,7 +686,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, + &isys_ipdata, +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + pdev->dev.platform_data + #endif + ); +@@ -851,11 +788,6 @@ out_ipu6_bus_del_devices: + ipu6_mmu_cleanup(isp->isys->mmu); + ipu6_bus_del_devices(pdev); + release_firmware(isp->cpd_fw); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- release_firmware(isp->spdata_fw); +-#endif +-#endif + buttress_exit: + ipu6_buttress_exit(isp); + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +index 
786494d..f464a7b 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -23,12 +23,6 @@ struct ipu6_bus_device; + #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" + #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-/* array of struct ipu6_spdata_rep terminated by NULL */ +-#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" +-#endif +- + enum ipu6_version { + IPU6_VER_INVALID = 0, + IPU6_VER_6 = 1, +@@ -86,11 +80,6 @@ struct ipu6_device { + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u32 cpd_metadata_cmpnt_size; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- const struct firmware *spdata_fw; +-#endif +-#endif + void __iomem *base; + #ifdef CONFIG_DEBUG_FS + struct dentry *ipu_dir; +@@ -338,7 +327,7 @@ struct ipu6_isys_internal_pdata { + struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata; + #endif + }; +-- +2.43.0 + diff --git a/patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch b/patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch new file mode 100644 index 0000000..84ae2d8 --- /dev/null +++ b/patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch @@ -0,0 +1,385 @@ +From a7d31333a164a84a02df565a3f0f9620a9a0098b Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Mon, 12 Jan 2026 07:53:37 -0700 +Subject: [PATCH 19/28] media: intel-ipu6: align ACPI PDATA and ACPI fwnode + build for ECG + +* align ACPI PDATA and ACPI fwnode build for ECG + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 170 
++++++++---------- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 29 ++- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 4 - + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 14 +- + 4 files changed, 96 insertions(+), 121 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index 1cf33b5..1be7d75 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -567,49 +567,46 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) + return ret; + } + +-#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++static void ipu6_isys_write_mcd_phy_common_init_regs(struct ipu6_isys *isys, ++ u32 port) + { +- unsigned int phy_id; +- void __iomem *phy_base; +- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct ipu6_bus_device *adev = isys->adev; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; ++ void __iomem *phy_base; + unsigned int i; ++ u8 phy_id; + +- phy_id = cfg->port / 4; ++ phy_id = port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); + +- for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { ++ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) + writel(common_init_regs[i].val, +- phy_base + common_init_regs[i].reg); +- } +- +- return 0; ++ phy_base + common_init_regs[i].reg); + } +-#else +-static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) ++ ++static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ , struct ipu6_isys_csi2_config *csi2_cfg ++#endif ++) + { +- struct ipu6_bus_device *adev = isys->adev; +- struct ipu6_device *isp = adev->isp; +- void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct v4l2_async_connection *asc; +- void __iomem 
*phy_base; +- unsigned int i; +- u8 phy_id; + +- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { +- s_asd = container_of(asc, struct sensor_async_sd, asc); +- phy_id = s_asd->csi2.port / 4; +- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); +- +- for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) +- writel(common_init_regs[i].val, +- phy_base + common_init_regs[i].reg); ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ if (isys->pdata->spdata) { ++ ipu6_isys_write_mcd_phy_common_init_regs(isys, csi2_cfg->port); ++ } else { ++#endif ++ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { ++ s_asd = container_of(asc, struct sensor_async_sd, asc); ++ ipu6_isys_write_mcd_phy_common_init_regs(isys, s_asd->csi2.port); ++ } ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + } +-} + #endif ++} + + static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + { +@@ -641,96 +638,79 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + return ret; + } + +-#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++static int ipu6_isys_write_mcd_phy_config_regs(struct ipu6_isys *isys, ++ struct ipu6_isys_csi2_config *cfg) + { +- unsigned int phy_id; +- int phy_port; +- void __iomem *phy_base; +- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct device *dev = &isys->adev->auxdev.dev; ++ struct ipu6_bus_device *adev = isys->adev; ++ const struct phy_reg **phy_config_regs; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; +- const struct phy_reg **phy_config_regs; +- struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu_isys_subdev_info **subdevs, *sd_info; +- int i; ++ void __iomem *phy_base; ++ int phy_port, phy_id; ++ unsigned int i; + +- if (!spdata) { +- dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); +- return -EINVAL; ++ 
phy_port = ipu6_isys_driver_port_to_phy_port(cfg); ++ if (phy_port < 0) { ++ dev_err(dev, "invalid port %d for lane %d", cfg->port, ++ cfg->nlanes); ++ return -ENXIO; + } + + phy_id = cfg->port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); +- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { +- sd_info = *subdevs; +- if (!sd_info->csi2) +- continue; +- +- phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); +- if (phy_port < 0) { +- dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", +- cfg->port, cfg->nlanes); +- return -ENXIO; +- } +- +- if ((sd_info->csi2->port / 4) != phy_id) +- continue; +- +- dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", +- phy_port, phy_id, cfg->nlanes); ++ dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg->port, phy_id, cfg->nlanes); + +- phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; +- +- for (i = 0; phy_config_regs[phy_port][i].reg; i++) { +- writel(phy_config_regs[phy_port][i].val, +- phy_base + phy_config_regs[phy_port][i].reg); +- } +- } ++ phy_config_regs = config_regs[cfg->nlanes / 2]; ++ for (i = 0; phy_config_regs[phy_port][i].reg; i++) ++ writel(phy_config_regs[phy_port][i].val, ++ phy_base + phy_config_regs[phy_port][i].reg); + + return 0; + } +-#else +-static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) ++ ++static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ , struct ipu6_isys_csi2_config *csi2_cfg ++#endif ++) + { +- struct device *dev = &isys->adev->auxdev.dev; +- struct ipu6_bus_device *adev = isys->adev; +- const struct phy_reg **phy_config_regs; +- struct ipu6_device *isp = adev->isp; +- void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct ipu6_isys_csi2_config cfg; + struct v4l2_async_connection *asc; +- int phy_port, phy_id; +- unsigned int i; +- void __iomem *phy_base; ++ int ret = 0; + +- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { +- s_asd = 
container_of(asc, struct sensor_async_sd, asc); +- cfg.port = s_asd->csi2.port; +- cfg.nlanes = s_asd->csi2.nlanes; +- phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); +- if (phy_port < 0) { +- dev_err(dev, "invalid port %d for lane %d", cfg.port, +- cfg.nlanes); +- return -ENXIO; +- } ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **subdevs, *sd_info; + +- phy_id = cfg.port / 4; +- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); +- dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, +- cfg.nlanes); ++ if (spdata) { ++ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { ++ sd_info = *subdevs; ++ if (!sd_info->csi2 || ++ (sd_info->csi2->port / 4) != (csi2_cfg->port / 4)) ++ continue; + +- phy_config_regs = config_regs[cfg.nlanes / 2]; +- cfg.port = phy_port; +- for (i = 0; phy_config_regs[cfg.port][i].reg; i++) +- writel(phy_config_regs[cfg.port][i].val, +- phy_base + phy_config_regs[cfg.port][i].reg); ++ ret = ipu6_isys_write_mcd_phy_config_regs(isys, sd_info->csi2); ++ if (ret) ++ return ret; ++ } ++ } else { ++#endif ++ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { ++ s_asd = container_of(asc, struct sensor_async_sd, asc); ++ cfg.port = s_asd->csi2.port; ++ cfg.nlanes = s_asd->csi2.nlanes; ++ ret = ipu6_isys_write_mcd_phy_config_regs(isys, &cfg); ++ if (ret) ++ return ret; ++ } ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + } ++#endif + + return 0; + } +-#endif + + #define CSI_MCD_PHY_NUM 2 + static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index eebcffc..caf29f7 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -28,14 +28,12 @@ + #include + #include + #include +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #include + #include + #include + #include 
+ #include +-#endif + + #include "ipu6-bus.h" + #include "ipu6-cpd.h" +@@ -972,7 +970,6 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) + mutex_destroy(&iwake_watermark->mutex); + } + +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, +@@ -1084,7 +1081,6 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) + v4l2_async_nf_unregister(&isys->notifier); + v4l2_async_nf_cleanup(&isys->notifier); + } +-#endif + + static int isys_register_devices(struct ipu6_isys *isys) + { +@@ -1122,17 +1118,19 @@ static int isys_register_devices(struct ipu6_isys *isys) + if (ret) + goto out_isys_unregister_subdevices; + +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- ret = isys_notifier_init(isys); +- if (ret) +- goto out_isys_unregister_subdevices; +-#else +- isys_register_ext_subdevs(isys); ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ if (!isys->pdata->spdata) { + #endif ++ ret = isys_notifier_init(isys); ++ if (ret) ++ goto out_isys_unregister_subdevices; + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); +- if (ret) +- goto out_isys_unregister_ext_subdevs; ++ } else { ++ isys_register_ext_subdevs(isys); ++ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); ++ if (ret) ++ goto out_isys_unregister_ext_subdevs; ++ } + #endif + return 0; + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +@@ -1162,7 +1160,8 @@ static void isys_unregister_devices(struct ipu6_isys *isys) + isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- isys_unregister_ext_subdevs(isys); ++ if (isys->pdata->spdata) ++ isys_unregister_ext_subdevs(isys); + #endif + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); +@@ -1534,9 +1533,7 @@ static void isys_remove(struct auxiliary_device *auxdev) + 
free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + isys_notifier_cleanup(isys); +-#endif + + cpu_latency_qos_remove_request(&isys->pm_qos); + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 1db940a..57df01c 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -172,9 +172,7 @@ struct ipu6_isys { + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct v4l2_async_notifier notifier; +-#endif + struct isys_iwake_watermark iwake_watermark; + #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET + struct mutex reset_mutex; +@@ -198,8 +196,6 @@ struct ipu6_isys_subdev_i2c_info { + int i2c_adapter_id; + char i2c_adapter_bdf[32]; + }; +-#endif +-#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #define IPU_SPDATA_GPIO_NUM 4 + #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 57ab6a5..5684dac 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -399,11 +399,11 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + #endif + int ret; + +- // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); +- // if (ret) { +- // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); +- // return ERR_PTR(ret); +- // } ++ ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); ++ if (ret) { ++ dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); ++ return ERR_PTR(ret); ++ } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) +@@ -435,7 +435,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + dev_dbg(&pdev->dev, "No subdevice info provided"); + ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, 
+ isys_init_acpi_add_device); +- pdata->spdata = acpi_pdata; ++ if (acpi_pdata && (*acpi_pdata->subdevs)) { ++ pdata->spdata = acpi_pdata; ++ } + } else { + dev_dbg(&pdev->dev, "Subdevice info found"); + ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, +-- +2.43.0 + diff --git a/patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch b/patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch new file mode 100644 index 0000000..43fa627 --- /dev/null +++ b/patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch @@ -0,0 +1,43 @@ +From ea56f3f2c08dc6ef45ff26221f92f45562b149b9 Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Mon, 12 Jan 2026 08:06:36 -0700 +Subject: [PATCH 20/28] upstream: Use module parameter to set psys freq + +--- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 5684dac..c9513c5 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -51,6 +51,10 @@ static unsigned int isys_freq_override; + module_param(isys_freq_override, uint, 0660); + MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); + ++static unsigned int psys_freq_override; ++module_param(psys_freq_override, uint, 0660); ++MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); ++ + #define IPU6_PCI_BAR 0 + + struct ipu6_cell_program { +@@ -482,6 +486,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, + pdata->base = base; + pdata->ipdata = ipdata; + ++ /* Override the isys freq */ ++ if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && ++ psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { ++ ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; ++ ctrl->qos_floor = psys_freq_override; ++ dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", ++ psys_freq_override); ++ 
} ++ + psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_PSYS_NAME); + if (IS_ERR(psys_adev)) { +-- +2.43.0 + diff --git a/patches/0021-media-pci-update-IPU6-PSYS-driver.patch b/patches/0021-media-pci-update-IPU6-PSYS-driver.patch new file mode 100644 index 0000000..67a0d9e --- /dev/null +++ b/patches/0021-media-pci-update-IPU6-PSYS-driver.patch @@ -0,0 +1,8335 @@ +From d55eab67d68fc04677229cf8d410f344b2d60b04 Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Mon, 12 Jan 2026 08:09:05 -0700 +Subject: [PATCH 21/28] media: pci: update IPU6 PSYS driver + +Signed-off-by: zouxiaoh +Signed-off-by: Florent Pirou +--- + 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 1 + + .../media/pci/intel/ipu6/psys/Makefile | 26 + + .../media/pci/intel/ipu6/psys/ipu-fw-psys.c | 433 +++++ + .../media/pci/intel/ipu6/psys/ipu-fw-psys.h | 382 ++++ + .../pci/intel/ipu6/psys/ipu-fw-resources.c | 103 ++ + .../pci/intel/ipu6/psys/ipu-platform-psys.h | 78 + + .../intel/ipu6/psys/ipu-platform-resources.h | 103 ++ + .../pci/intel/ipu6/psys/ipu-psys-compat32.c | 222 +++ + .../media/pci/intel/ipu6/psys/ipu-psys.c | 1599 +++++++++++++++++ + .../media/pci/intel/ipu6/psys/ipu-psys.h | 324 ++++ + .../media/pci/intel/ipu6/psys/ipu-resources.c | 861 +++++++++ + .../pci/intel/ipu6/psys/ipu6-fw-resources.c | 607 +++++++ + .../pci/intel/ipu6/psys/ipu6-l-scheduler.c | 621 +++++++ + .../intel/ipu6/psys/ipu6-platform-resources.h | 194 ++ + .../media/pci/intel/ipu6/psys/ipu6-ppg.c | 556 ++++++ + .../media/pci/intel/ipu6/psys/ipu6-ppg.h | 38 + + .../media/pci/intel/ipu6/psys/ipu6-psys-gpc.c | 209 +++ + .../media/pci/intel/ipu6/psys/ipu6-psys.c | 1056 +++++++++++ + .../pci/intel/ipu6/psys/ipu6ep-fw-resources.c | 393 ++++ + .../ipu6/psys/ipu6ep-platform-resources.h | 42 + + .../pci/intel/ipu6/psys/ipu6se-fw-resources.c | 194 ++ + .../ipu6/psys/ipu6se-platform-resources.h | 103 ++ + 22 files changed, 8145 insertions(+) + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile + 
create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c + create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +index e1e123b..67c13c9 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +@@ -22,3 +22,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-dwc-phy.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o 
++obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile +new file mode 100644 +index 0000000..299cb04 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile +@@ -0,0 +1,26 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ++ifeq ($(is_kernel_lt_6_10), 1) ++ifneq ($(EXTERNAL_BUILD), 1) ++src := $(srctree)/$(src) ++endif ++endif ++ ++intel-ipu6-psys-objs += ipu-psys.o \ ++ ipu-fw-psys.o \ ++ ipu-fw-resources.o \ ++ ipu-resources.o \ ++ ipu6-psys.o \ ++ ipu6-ppg.o \ ++ ipu6-l-scheduler.o \ ++ ipu6-fw-resources.o \ ++ ipu6se-fw-resources.o \ ++ ipu6ep-fw-resources.o ++ ++obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-psys.o ++ ++ifeq ($(is_kernel_lt_6_10), 1) ++ccflags-y += -I$(src)/../ipu6/ ++endif ++ccflags-y += -I$(src)/../ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c +new file mode 100644 +index 0000000..c89cd0c +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c +@@ -0,0 +1,433 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2016 - 2024 Intel Corporation ++ ++#include ++ ++#include ++ ++#include ++#include "ipu6-fw-com.h" ++#include "ipu-fw-psys.h" ++#include "ipu-psys.h" ++ ++int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) ++{ ++ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED; ++ return 0; ++} ++ ++int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_fw_psys_cmd *psys_cmd; ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ int ret = 0; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = 
IPU_FW_PSYS_PROCESS_GROUP_CMD_START; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ int ret = 0; ++ ++ /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 1); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 1); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ int ret = 0; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ int ret = 0; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = 
kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) ++{ ++ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; ++ return 0; ++} ++ ++int ipu_fw_psys_rcv_event(struct ipu_psys *psys, ++ struct ipu_fw_psys_event *event) ++{ ++ void *rcv; ++ ++ rcv = ipu6_recv_get_token(psys->fwcom, 0); ++ if (!rcv) ++ return 0; ++ ++ memcpy(event, rcv, sizeof(*event)); ++ ipu6_recv_put_token(psys->fwcom, 0); ++ return 1; ++} ++ ++int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, ++ struct ipu_psys_kcmd *kcmd, ++ u32 buffer, unsigned int size) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ u32 type; ++ u32 buffer_state; ++ ++ type = terminal->terminal_type; ++ ++ switch (type) { ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: ++ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: ++ buffer_state = IPU_FW_PSYS_BUFFER_FULL; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: ++ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; ++ break; ++ default: ++ dev_err(dev, "unknown terminal type: 0x%x\n", type); ++ return -EAGAIN; ++ } ++ ++ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || ++ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { ++ struct ipu_fw_psys_data_terminal *dterminal = ++ (struct ipu_fw_psys_data_terminal *)terminal; ++ dterminal->connection_type = 
IPU_FW_PSYS_CONNECTION_MEMORY; ++ dterminal->frame.data_bytes = size; ++ if (!ipu_fw_psys_pg_get_protocol(kcmd)) ++ dterminal->frame.data = buffer; ++ else ++ dterminal->frame.data_index = terminal_idx; ++ dterminal->frame.buffer_state = buffer_state; ++ } else { ++ struct ipu_fw_psys_param_terminal *pterminal = ++ (struct ipu_fw_psys_param_terminal *)terminal; ++ if (!ipu_fw_psys_pg_get_protocol(kcmd)) ++ pterminal->param_payload.buffer = buffer; ++ else ++ pterminal->param_payload.terminal_index = terminal_idx; ++ } ++ return 0; ++} ++ ++void ipu_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note) ++{ ++ ipu6_fw_psys_pg_dump(psys, kcmd, note); ++} ++ ++int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->ID; ++} ++ ++int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->terminal_count; ++} ++ ++int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->size; ++} ++ ++int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, ++ dma_addr_t vaddress) ++{ ++ kcmd->kpg->pg->ipu_virtual_address = vaddress; ++ return 0; ++} ++ ++struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd ++ *kcmd, int index) ++{ ++ struct ipu_fw_psys_terminal *terminal; ++ u16 *terminal_offset_table; ++ ++ terminal_offset_table = ++ (uint16_t *)((char *)kcmd->kpg->pg + ++ kcmd->kpg->pg->terminals_offset); ++ terminal = (struct ipu_fw_psys_terminal *) ++ ((char *)kcmd->kpg->pg + terminal_offset_table[index]); ++ return terminal; ++} ++ ++void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) ++{ ++ kcmd->kpg->pg->token = (u64)token; ++} ++ ++u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->token; ++} ++ ++int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->protocol_version; ++} ++ ++int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, ++ 
struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, u32 buffer) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set; ++ u32 buffer_state; ++ u32 *buffer_ptr; ++ u32 type; ++ ++ type = terminal->terminal_type; ++ ++ switch (type) { ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: ++ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: ++ buffer_state = IPU_FW_PSYS_BUFFER_FULL; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: ++ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; ++ break; ++ default: ++ dev_err(dev, "unknown terminal type: 0x%x\n", type); ++ return -EAGAIN; ++ } ++ ++ buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) + ++ terminal_idx * sizeof(*buffer_ptr)); ++ ++ *buffer_ptr = buffer; ++ ++ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || ++ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { ++ struct ipu_fw_psys_data_terminal *dterminal = ++ (struct ipu_fw_psys_data_terminal *)terminal; ++ dterminal->frame.buffer_state = buffer_state; ++ } ++ ++ return 0; ++} ++ ++size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd) ++{ ++ return (sizeof(struct ipu_fw_psys_buffer_set) + ++ kcmd->kpg->pg->terminal_count * sizeof(u32)); ++} ++ ++int ++ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 vaddress) ++{ ++ buf_set->ipu_virtual_address = vaddress; ++ return 0; ++} ++ ++int 
ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 *kernel_enable_bitmap) ++{ ++ memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap, ++ sizeof(buf_set->kernel_enable_bitmap)); ++ return 0; ++} ++ ++struct ipu_fw_psys_buffer_set * ++ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, ++ void *kaddr, u32 frame_counter) ++{ ++ struct ipu_fw_psys_buffer_set *buffer_set = NULL; ++ unsigned int i; ++ ++ buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr; ++ ++ /* ++ * Set base struct members ++ */ ++ buffer_set->ipu_virtual_address = 0; ++ buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address; ++ buffer_set->frame_counter = frame_counter; ++ buffer_set->terminal_count = kcmd->kpg->pg->terminal_count; ++ ++ /* ++ * Initialize adjacent buffer addresses ++ */ ++ for (i = 0; i < buffer_set->terminal_count; i++) { ++ u32 *buffer = ++ (u32 *)((char *)buffer_set + ++ sizeof(*buffer_set) + sizeof(u32) * i); ++ ++ *buffer = 0; ++ } ++ ++ return buffer_set; ++} ++ ++int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ unsigned int queue_id; ++ int ret = 0; ++ unsigned int size; ++ ++ if (ipu_ver == IPU6_VER_6SE) ++ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ else ++ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ queue_id = kcmd->kpg->pg->base_queue_id; ++ ++ if (queue_id >= size) ++ return -EINVAL; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, queue_id); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ return -ENODATA; ++ } ++ ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address; ++ ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, queue_id); ++ ++ return ret; ++} ++ ++u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd) ++{ 
++ return kcmd->kpg->pg->base_queue_id; ++} ++ ++void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id) ++{ ++ kcmd->kpg->pg->base_queue_id = queue_id; ++} ++ ++int ipu_fw_psys_open(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int retry = IPU_PSYS_OPEN_RETRY, retval; ++ ++ retval = ipu6_fw_com_open(psys->fwcom); ++ if (retval) { ++ dev_err(dev, "fw com open failed.\n"); ++ return retval; ++ } ++ ++ do { ++ usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, ++ IPU_PSYS_OPEN_TIMEOUT_US + 10); ++ retval = ipu6_fw_com_ready(psys->fwcom); ++ if (retval) { ++ dev_dbg(dev, "psys port open ready!\n"); ++ break; ++ } ++ retry--; ++ } while (retry > 0); ++ ++ if (!retry) { ++ dev_err(dev, "psys port open ready failed\n"); ++ ipu6_fw_com_close(psys->fwcom); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++int ipu_fw_psys_close(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int retval; ++ ++ retval = ipu6_fw_com_close(psys->fwcom); ++ if (retval) { ++ dev_err(dev, "fw com close failed.\n"); ++ return retval; ++ } ++ return retval; ++} +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h +new file mode 100644 +index 0000000..492a9ea +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h +@@ -0,0 +1,382 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2016 - 2024 Intel Corporation */ ++ ++#ifndef IPU_FW_PSYS_H ++#define IPU_FW_PSYS_H ++ ++#include "ipu6-platform-resources.h" ++#include "ipu6se-platform-resources.h" ++#include "ipu6ep-platform-resources.h" ++ ++#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20 ++#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40 ++ ++#define IPU_FW_PSYS_CMD_BITS 64 ++#define IPU_FW_PSYS_EVENT_BITS 128 ++ ++enum { ++ IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0, ++ IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, ++ 
IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13 ++}; ++ ++enum { ++ IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID, ++ IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID ++}; ++ ++enum { ++ IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0, ++ IPU_FW_PSYS_PROCESS_GROUP_CREATED, ++ IPU_FW_PSYS_PROCESS_GROUP_READY, ++ IPU_FW_PSYS_PROCESS_GROUP_BLOCKED, ++ IPU_FW_PSYS_PROCESS_GROUP_STARTED, ++ IPU_FW_PSYS_PROCESS_GROUP_RUNNING, ++ IPU_FW_PSYS_PROCESS_GROUP_STALLED, ++ IPU_FW_PSYS_PROCESS_GROUP_STOPPED, ++ IPU_FW_PSYS_N_PROCESS_GROUP_STATES ++}; ++ ++enum { ++ IPU_FW_PSYS_CONNECTION_MEMORY = 0, ++ IPU_FW_PSYS_CONNECTION_MEMORY_STREAM, ++ IPU_FW_PSYS_CONNECTION_STREAM, ++ IPU_FW_PSYS_N_CONNECTION_TYPES ++}; ++ ++enum { ++ IPU_FW_PSYS_BUFFER_NULL = 0, ++ IPU_FW_PSYS_BUFFER_UNDEFINED, ++ IPU_FW_PSYS_BUFFER_EMPTY, ++ IPU_FW_PSYS_BUFFER_NONEMPTY, ++ IPU_FW_PSYS_BUFFER_FULL, ++ IPU_FW_PSYS_N_BUFFER_STATES ++}; ++ ++enum { ++ IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0, ++ IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM, ++ 
IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, ++ IPU_FW_PSYS_N_TERMINAL_TYPES ++}; ++ ++enum { ++ IPU_FW_PSYS_COL_DIMENSION = 0, ++ IPU_FW_PSYS_ROW_DIMENSION = 1, ++ IPU_FW_PSYS_N_DATA_DIMENSION = 2 ++}; ++ ++enum { ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_START, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, ++ IPU_FW_PSYS_N_PROCESS_GROUP_CMDS ++}; ++ ++enum { ++ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, ++ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG, ++ IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS ++}; ++ ++struct __packed ipu_fw_psys_process_group { ++ u64 token; ++ u64 private_token; ++ u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; ++ u32 kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS]; ++ u32 size; ++ u32 psys_server_init_cycles; ++ u32 pg_load_start_ts; ++ u32 pg_load_cycles; ++ u32 pg_init_cycles; ++ u32 pg_processing_cycles; ++ u32 pg_next_frame_init_cycles; ++ u32 pg_complete_cycles; ++ u32 ID; ++ u32 state; ++ u32 ipu_virtual_address; ++ u32 resource_bitmap; ++ u16 fragment_count; ++ u16 fragment_state; ++ u16 fragment_limit; ++ u16 processes_offset; ++ u16 terminals_offset; ++ u8 process_count; ++ u8 terminal_count; ++ u8 subgraph_count; ++ u8 protocol_version; ++ u8 base_queue_id; ++ u8 num_queues; ++ u8 mask_irq; ++ u8 error_handling_enable; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; ++}; ++ ++struct ipu_fw_psys_srv_init { ++ void *host_ddr_pkg_dir; ++ u32 ddr_pkg_dir_address; ++ u32 pkg_dir_size; ++ ++ u32 icache_prefetch_sp; ++ u32 icache_prefetch_isp; ++}; ++ ++struct __packed ipu_fw_psys_cmd { ++ u16 command; ++ u16 msg; ++ u32 context_handle; ++}; ++ 
++struct __packed ipu_fw_psys_event { ++ u16 status; ++ u16 command; ++ u32 context_handle; ++ u64 token; ++}; ++ ++struct ipu_fw_psys_terminal { ++ u32 terminal_type; ++ s16 parent_offset; ++ u16 size; ++ u16 tm_index; ++ u8 ID; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; ++}; ++ ++struct ipu_fw_psys_param_payload { ++ u64 host_buffer; ++ u32 buffer; ++ u32 terminal_index; ++}; ++ ++struct ipu_fw_psys_param_terminal { ++ struct ipu_fw_psys_terminal base; ++ struct ipu_fw_psys_param_payload param_payload; ++ u16 param_section_desc_offset; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; ++}; ++ ++struct ipu_fw_psys_frame { ++ u32 buffer_state; ++ u32 access_type; ++ u32 pointer_state; ++ u32 access_scope; ++ u32 data; ++ u32 data_index; ++ u32 data_bytes; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; ++}; ++ ++struct ipu_fw_psys_frame_descriptor { ++ u32 frame_format_type; ++ u32 plane_count; ++ u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; ++ u32 stride[1]; ++ u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; ++ u16 dimension[2]; ++ u16 size; ++ u8 bpp; ++ u8 bpe; ++ u8 is_compressed; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; ++}; ++ ++struct ipu_fw_psys_stream { ++ u64 dummy; ++}; ++ ++struct ipu_fw_psys_data_terminal { ++ struct ipu_fw_psys_terminal base; ++ struct ipu_fw_psys_frame_descriptor frame_descriptor; ++ struct ipu_fw_psys_frame frame; ++ struct ipu_fw_psys_stream stream; ++ u32 reserved; ++ u32 connection_type; ++ u16 fragment_descriptor_offset; ++ u8 kernel_id; ++ u8 subgraph_id; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; ++}; ++ ++struct ipu_fw_psys_buffer_set { ++ u64 token; ++ u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS]; ++ u32 ipu_virtual_address; ++ u32 
process_group_handle; ++ u16 terminal_count; ++ u8 frame_counter; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; ++}; ++ ++struct ipu_fw_psys_pgm { ++ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 ID; ++ u16 program_manifest_offset; ++ u16 terminal_manifest_offset; ++ u16 private_data_offset; ++ u16 rbm_manifest_offset; ++ u16 size; ++ u8 alignment; ++ u8 kernel_count; ++ u8 program_count; ++ u8 terminal_count; ++ u8 subgraph_count; ++ u8 reserved[5]; ++}; ++ ++struct ipu_fw_generic_program_manifest { ++ u16 *dev_chn_size; ++ u16 *dev_chn_offset; ++ u16 *ext_mem_size; ++ u16 *ext_mem_offset; ++ u8 cell_id; ++ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; ++ u8 cell_type_id; ++ u8 *is_dfm_relocatable; ++ u32 *dfm_port_bitmap; ++ u32 *dfm_active_port_bitmap; ++}; ++ ++struct ipu_fw_resource_definitions { ++ u32 num_cells; ++ u32 num_cells_type; ++ const u8 *cells; ++ u32 num_dev_channels; ++ const u16 *dev_channels; ++ ++ u32 num_ext_mem_types; ++ u32 num_ext_mem_ids; ++ const u16 *ext_mem_ids; ++ ++ u32 num_dfm_ids; ++ const u16 *dfms; ++ ++ u32 cell_mem_row; ++ const u8 *cell_mem; ++}; ++ ++struct ipu6_psys_hw_res_variant { ++ unsigned int queue_num; ++ unsigned int cell_num; ++ int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value); ++ int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, u32 active_bitmap); ++ int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset); ++ int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process); ++}; ++ ++struct ipu_psys_kcmd; ++struct ipu_psys; ++int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_ppg_suspend(struct 
ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_rcv_event(struct ipu_psys *psys, ++ struct ipu_fw_psys_event *event); ++int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, ++ struct ipu_psys_kcmd *kcmd, ++ u32 buffer, unsigned int size); ++void ipu_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note); ++int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, ++ dma_addr_t vaddress); ++struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd ++ *kcmd, int index); ++void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); ++u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, ++ struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, u32 buffer); ++size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd); ++int ++ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 vaddress); ++int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 *kernel_enable_bitmap); ++struct ipu_fw_psys_buffer_set * ++ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, ++ void *kaddr, u32 frame_counter); ++int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd); ++u8 
ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd); ++void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id); ++int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_open(struct ipu_psys *psys); ++int ipu_fw_psys_close(struct ipu_psys *psys); ++ ++/* common resource interface for both abi and api mode */ ++int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, ++ u8 value); ++u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); ++int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); ++int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value); ++int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset); ++int ++ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process); ++int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value); ++int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap); ++int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset); ++int ++ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process); ++void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note); ++void ipu6_psys_hw_res_variant_init(void); ++#endif /* IPU_FW_PSYS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c +new file mode 100644 +index 0000000..b6af0b7 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c +@@ -0,0 +1,103 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// 
Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6-platform-resources.h" ++#include "ipu6se-platform-resources.h" ++ ++/********** Generic resource handling **********/ ++ ++/* ++ * Extension library gives byte offsets to its internal structures. ++ * use those offsets to update fields. Without extension lib access ++ * structures directly. ++ */ ++static const struct ipu6_psys_hw_res_variant *var = &hw_var; ++ ++int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, ++ u8 value) ++{ ++ struct ipu_fw_psys_process_group *parent = ++ (struct ipu_fw_psys_process_group *)((char *)ptr + ++ ptr->parent_offset); ++ ++ ptr->cells[index] = value; ++ parent->resource_bitmap |= 1 << value; ++ ++ return 0; ++} ++ ++u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) ++{ ++ return ptr->cells[index]; ++} ++ ++int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) ++{ ++ struct ipu_fw_psys_process_group *parent; ++ u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); ++ int retval = -1; ++ u8 value; ++ ++ parent = (struct ipu_fw_psys_process_group *)((char *)ptr + ++ ptr->parent_offset); ++ ++ value = var->cell_num; ++ if ((1 << cell_id) != 0 && ++ ((1 << cell_id) & parent->resource_bitmap)) { ++ ipu_fw_psys_set_process_cell_id(ptr, 0, value); ++ parent->resource_bitmap &= ~(1 << cell_id); ++ retval = 0; ++ } ++ ++ return retval; ++} ++ ++int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value) ++{ ++ if (var->set_proc_dev_chn) ++ return var->set_proc_dev_chn(ptr, offset, value); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ ++int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap) ++{ ++ if (var->set_proc_dfm_bitmap) ++ return var->set_proc_dfm_bitmap(ptr, id, bitmap, ++ active_bitmap); ++ ++ 
WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ ++int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset) ++{ ++ if (var->set_proc_ext_mem) ++ return var->set_proc_ext_mem(ptr, type_id, mem_id, offset); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ ++int ++ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process) ++{ ++ if (var->get_pgm_by_proc) ++ return var->get_pgm_by_proc(gen_pm, pg_manifest, process); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h +new file mode 100644 +index 0000000..dc2cae3 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h +@@ -0,0 +1,78 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2020 - 2024 Intel Corporation */ ++ ++#ifndef IPU_PLATFORM_PSYS_H ++#define IPU_PLATFORM_PSYS_H ++ ++#include "ipu-psys.h" ++#include ++ ++#define IPU_PSYS_BUF_SET_POOL_SIZE 8 ++#define IPU_PSYS_BUF_SET_MAX_SIZE 1024 ++ ++struct ipu_fw_psys_buffer_set; ++ ++enum ipu_psys_cmd_state { ++ KCMD_STATE_PPG_NEW, ++ KCMD_STATE_PPG_START, ++ KCMD_STATE_PPG_ENQUEUE, ++ KCMD_STATE_PPG_STOP, ++ KCMD_STATE_PPG_COMPLETE ++}; ++ ++struct ipu_psys_scheduler { ++ struct list_head ppgs; ++ struct mutex bs_mutex; /* Protects buf_set field */ ++ struct list_head buf_sets; ++}; ++ ++enum ipu_psys_ppg_state { ++ PPG_STATE_START = (1 << 0), ++ PPG_STATE_STARTING = (1 << 1), ++ PPG_STATE_STARTED = (1 << 2), ++ PPG_STATE_RUNNING = (1 << 3), ++ PPG_STATE_SUSPEND = (1 << 4), ++ PPG_STATE_SUSPENDING = (1 << 5), ++ PPG_STATE_SUSPENDED = (1 << 6), ++ PPG_STATE_RESUME = (1 << 7), ++ PPG_STATE_RESUMING = (1 << 8), ++ PPG_STATE_RESUMED = (1 << 
9), ++ PPG_STATE_STOP = (1 << 10), ++ PPG_STATE_STOPPING = (1 << 11), ++ PPG_STATE_STOPPED = (1 << 12), ++}; ++ ++struct ipu_psys_ppg { ++ struct ipu_psys_pg *kpg; ++ struct ipu_psys_fh *fh; ++ struct list_head list; ++ struct list_head sched_list; ++ u64 token; ++ void *manifest; ++ struct mutex mutex; /* Protects kcmd and ppg state field */ ++ struct list_head kcmds_new_list; ++ struct list_head kcmds_processing_list; ++ struct list_head kcmds_finished_list; ++ enum ipu_psys_ppg_state state; ++ u32 pri_base; ++ int pri_dynamic; ++}; ++ ++struct ipu_psys_buffer_set { ++ struct list_head list; ++ struct ipu_fw_psys_buffer_set *buf_set; ++ size_t size; ++ size_t buf_set_size; ++ dma_addr_t dma_addr; ++ void *kaddr; ++ struct ipu_psys_kcmd *kcmd; ++}; ++ ++int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd); ++void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, ++ struct ipu_psys_kcmd *kcmd, ++ int error); ++int ipu_psys_fh_init(struct ipu_psys_fh *fh); ++int ipu_psys_fh_deinit(struct ipu_psys_fh *fh); ++ ++#endif /* IPU_PLATFORM_PSYS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h +new file mode 100644 +index 0000000..1f064dc +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2018 - 2024 Intel Corporation */ ++ ++#ifndef IPU_PLATFORM_RESOURCES_COMMON_H ++#define IPU_PLATFORM_RESOURCES_COMMON_H ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 0 ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 2 ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT 2 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 ++ ++#define 
IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 ++#define IPU_FW_PSYS_N_FRAME_PLANES 6 ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 1 ++ ++#define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES 4 ++#define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES 4 ++ ++#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 ++#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 4 ++#define IPU_FW_PSYS_RBM_NOF_ELEMS 5 ++#define IPU_FW_PSYS_KBM_NOF_ELEMS 4 ++ ++struct ipu_fw_psys_process { ++ s16 parent_offset; ++ u8 size; ++ u8 cell_dependencies_offset; ++ u8 terminal_dependencies_offset; ++ u8 process_extension_offset; ++ u8 ID; ++ u8 program_idx; ++ u8 state; ++ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; ++ u8 cell_dependency_count; ++ u8 terminal_dependency_count; ++}; ++ ++struct ipu_fw_psys_program_manifest { ++ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ s16 parent_offset; ++ u8 program_dependency_offset; ++ u8 terminal_dependency_offset; ++ u8 size; ++ u8 program_extension_offset; ++ u8 program_type; ++ u8 ID; ++ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; ++ u8 cell_type_id; ++ u8 program_dependency_count; ++ u8 terminal_dependency_count; ++}; ++ ++/* platform specific resource interface */ ++struct ipu_psys_resource_pool; ++struct ipu_psys_resource_alloc; ++struct ipu_fw_psys_process_group; ++int ipu_psys_allocate_resources(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *pool); ++int ipu_psys_move_resources(const struct device *dev, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *source_pool, ++ struct ipu_psys_resource_pool *target_pool); ++ ++void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, ++ struct ipu_psys_resource_pool *dest); ++ ++int ipu_psys_try_allocate_resources(struct device *dev, ++ struct 
ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_pool *pool); ++ ++void ipu_psys_reset_process_cell(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ int process_count); ++void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *pool); ++ ++int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap); ++ ++int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool); ++void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, ++ u8 queue_id); ++ ++extern const struct ipu_fw_resource_definitions *ipu6_res_defs; ++extern const struct ipu_fw_resource_definitions *ipu6se_res_defs; ++extern const struct ipu_fw_resource_definitions *ipu6ep_res_defs; ++extern struct ipu6_psys_hw_res_variant hw_var; ++#endif /* IPU_PLATFORM_RESOURCES_COMMON_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c +new file mode 100644 +index 0000000..32350ca +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c +@@ -0,0 +1,222 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2013 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "ipu-psys.h" ++ ++static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ long ret = -ENOTTY; ++ ++ if (file->f_op->unlocked_ioctl) ++ ret = file->f_op->unlocked_ioctl(file, cmd, arg); ++ ++ return ret; ++} ++ ++struct ipu_psys_buffer32 { ++ u64 len; ++ union { ++ int fd; ++ compat_uptr_t userptr; ++ u64 reserved; ++ } base; ++ u32 data_offset; ++ u32 bytes_used; ++ u32 flags; ++ u32 reserved[2]; ++} __packed; ++ ++struct ipu_psys_command32 { ++ u64 issue_id; ++ u64 user_token; ++ u32 priority; ++ compat_uptr_t pg_manifest; ++ compat_uptr_t buffers; ++ int pg; ++ u32 
pg_manifest_size; ++ u32 bufcount; ++ u32 min_psys_freq; ++ u32 frame_counter; ++ u32 reserved[2]; ++} __packed; ++ ++struct ipu_psys_manifest32 { ++ u32 index; ++ u32 size; ++ compat_uptr_t manifest; ++ u32 reserved[5]; ++} __packed; ++ ++static int ++get_ipu_psys_command32(struct ipu_psys_command *kp, ++ struct ipu_psys_command32 __user *up) ++{ ++ compat_uptr_t pgm, bufs; ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_command32)); ++ if (!access_ok || get_user(kp->issue_id, &up->issue_id) || ++ get_user(kp->user_token, &up->user_token) || ++ get_user(kp->priority, &up->priority) || ++ get_user(pgm, &up->pg_manifest) || ++ get_user(bufs, &up->buffers) || ++ get_user(kp->pg, &up->pg) || ++ get_user(kp->pg_manifest_size, &up->pg_manifest_size) || ++ get_user(kp->bufcount, &up->bufcount) || ++ get_user(kp->min_psys_freq, &up->min_psys_freq) || ++ get_user(kp->frame_counter, &up->frame_counter) ++ ) ++ return -EFAULT; ++ ++ kp->pg_manifest = compat_ptr(pgm); ++ kp->buffers = compat_ptr(bufs); ++ ++ return 0; ++} ++ ++static int ++get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, ++ struct ipu_psys_buffer32 __user *up) ++{ ++ compat_uptr_t ptr; ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); ++ if (!access_ok || get_user(kp->len, &up->len) || ++ get_user(ptr, &up->base.userptr) || ++ get_user(kp->data_offset, &up->data_offset) || ++ get_user(kp->bytes_used, &up->bytes_used) || ++ get_user(kp->flags, &up->flags)) ++ return -EFAULT; ++ ++ kp->base.userptr = compat_ptr(ptr); ++ ++ return 0; ++} ++ ++static int ++put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, ++ struct ipu_psys_buffer32 __user *up) ++{ ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); ++ if (!access_ok || put_user(kp->len, &up->len) || ++ put_user(kp->base.fd, &up->base.fd) || ++ put_user(kp->data_offset, &up->data_offset) || ++ put_user(kp->bytes_used, &up->bytes_used) || ++ put_user(kp->flags, 
&up->flags)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int ++get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, ++ struct ipu_psys_manifest32 __user *up) ++{ ++ compat_uptr_t ptr; ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); ++ if (!access_ok || get_user(kp->index, &up->index) || ++ get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) ++ return -EFAULT; ++ ++ kp->manifest = compat_ptr(ptr); ++ ++ return 0; ++} ++ ++static int ++put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, ++ struct ipu_psys_manifest32 __user *up) ++{ ++ compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); ++ if (!access_ok || put_user(kp->index, &up->index) || ++ put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) ++#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) ++#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) ++#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) ++#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) ++ ++long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ union { ++ struct ipu_psys_buffer buf; ++ struct ipu_psys_command cmd; ++ struct ipu_psys_event ev; ++ struct ipu_psys_manifest m; ++ } karg; ++ int compatible_arg = 1; ++ int err = 0; ++ void __user *up = compat_ptr(arg); ++ ++ switch (cmd) { ++ case IPU_IOC_GETBUF32: ++ cmd = IPU_IOC_GETBUF; ++ break; ++ case IPU_IOC_PUTBUF32: ++ cmd = IPU_IOC_PUTBUF; ++ break; ++ case IPU_IOC_QCMD32: ++ cmd = IPU_IOC_QCMD; ++ break; ++ case IPU_IOC_GET_MANIFEST32: ++ cmd = IPU_IOC_GET_MANIFEST; ++ break; ++ } ++ ++ switch (cmd) { ++ case IPU_IOC_GETBUF: ++ case IPU_IOC_PUTBUF: ++ err = get_ipu_psys_buffer32(&karg.buf, up); ++ compatible_arg = 0; ++ 
break; ++ case IPU_IOC_QCMD: ++ err = get_ipu_psys_command32(&karg.cmd, up); ++ compatible_arg = 0; ++ break; ++ case IPU_IOC_GET_MANIFEST: ++ err = get_ipu_psys_manifest32(&karg.m, up); ++ compatible_arg = 0; ++ break; ++ } ++ if (err) ++ return err; ++ ++ if (compatible_arg) { ++ err = native_ioctl(file, cmd, (unsigned long)up); ++ } else { ++ err = native_ioctl(file, cmd, (unsigned long)&karg); ++ } ++ ++ if (err) ++ return err; ++ ++ switch (cmd) { ++ case IPU_IOC_GETBUF: ++ err = put_ipu_psys_buffer32(&karg.buf, up); ++ break; ++ case IPU_IOC_GET_MANIFEST: ++ err = put_ipu_psys_manifest32(&karg.m, up); ++ break; ++ } ++ return err; ++} +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +new file mode 100644 +index 0000000..9f36749 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -0,0 +1,1599 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2013 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ipu6.h" ++#include "ipu6-mmu.h" ++#include "ipu6-bus.h" ++#include "ipu6-buttress.h" ++#include "ipu6-cpd.h" ++#include "ipu-fw-psys.h" ++#include "ipu-psys.h" ++#include "ipu6-platform-regs.h" ++#include "ipu6-fw-com.h" ++ ++static bool async_fw_init; ++module_param(async_fw_init, bool, 0664); ++MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); ++ ++#define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7 ++ ++#define IPU_PSYS_NUM_DEVICES 4 ++ ++#define IPU_PSYS_MAX_NUM_DESCS 1024 ++#define IPU_PSYS_MAX_NUM_BUFS 1024 ++#define IPU_PSYS_MAX_NUM_BUFS_LRU 12 ++ ++static int psys_runtime_pm_resume(struct device *dev); ++static int psys_runtime_pm_suspend(struct device *dev); ++ ++static dev_t ipu_psys_dev_t; ++static 
DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); ++static DEFINE_MUTEX(ipu_psys_mutex); ++ ++static struct fw_init_task { ++ struct delayed_work work; ++ struct ipu_psys *psys; ++} fw_init_task; ++ ++static void ipu6_psys_remove(struct auxiliary_device *auxdev); ++ ++static const struct bus_type ipu_psys_bus = { ++ .name = "intel-ipu6-psys", ++}; ++#define PKG_DIR_ENT_LEN_FOR_PSYS 2 ++#define PKG_DIR_SIZE_MASK_FOR_PSYS GENMASK(23, 0) ++ ++enum ipu6_version ipu_ver; ++ ++static u32 ipu6_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) ++{ ++ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS]; ++} ++ ++static u32 ipu6_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) ++{ ++ return pkg_dir[1]; ++} ++ ++static u32 ipu6_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) ++{ ++ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] & ++ PKG_DIR_SIZE_MASK_FOR_PSYS; ++} ++ ++#define PKG_DIR_ID_SHIFT 48 ++#define PKG_DIR_ID_MASK 0x7f ++ ++static u32 ipu6_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) ++{ ++ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] >> ++ PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; ++} ++ ++/* ++ * These are some trivial wrappers that save us from open-coding some ++ * common patterns and also that's were we have some checking (for the ++ * time being) ++ */ ++static void ipu_desc_add(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) ++{ ++ fh->num_descs++; ++ ++ WARN_ON_ONCE(fh->num_descs >= IPU_PSYS_MAX_NUM_DESCS); ++ list_add(&desc->list, &fh->descs_list); ++} ++ ++static void ipu_desc_del(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) ++{ ++ fh->num_descs--; ++ list_del_init(&desc->list); ++} ++ ++static void ipu_buffer_add(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs++; ++ ++ WARN_ON_ONCE(fh->num_bufs >= IPU_PSYS_MAX_NUM_BUFS); ++ list_add(&kbuf->list, &fh->bufs_list); ++} ++ ++static void ipu_buffer_del(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer 
*kbuf) ++{ ++ fh->num_bufs--; ++ list_del_init(&kbuf->list); ++} ++ ++static void ipu_buffer_lru_add(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs_lru++; ++ list_add_tail(&kbuf->list, &fh->bufs_lru); ++} ++ ++static void ipu_buffer_lru_del(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs_lru--; ++ list_del_init(&kbuf->list); ++} ++ ++static struct ipu_psys_kbuffer *ipu_psys_kbuffer_alloc(void) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); ++ if (!kbuf) ++ return NULL; ++ ++ atomic_set(&kbuf->map_count, 0); ++ INIT_LIST_HEAD(&kbuf->list); ++ return kbuf; ++} ++ ++static struct ipu_psys_desc *ipu_psys_desc_alloc(int fd) ++{ ++ struct ipu_psys_desc *desc; ++ ++ desc = kzalloc(sizeof(*desc), GFP_KERNEL); ++ if (!desc) ++ return NULL; ++ ++ desc->fd = fd; ++ INIT_LIST_HEAD(&desc->list); ++ return desc; ++} ++ ++struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) ++{ ++ struct ipu_psys_pg *kpg; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ list_for_each_entry(kpg, &psys->pgs, list) { ++ if (!kpg->pg_size && kpg->size >= pg_size) { ++ kpg->pg_size = pg_size; ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ return kpg; ++ } ++ } ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ /* no big enough buffer available, allocate new one */ ++ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); ++ if (!kpg) ++ return NULL; ++ ++ kpg->pg = ipu6_dma_alloc(psys->adev, pg_size, &kpg->pg_dma_addr, ++ GFP_KERNEL, 0); ++ if (!kpg->pg) { ++ kfree(kpg); ++ return NULL; ++ } ++ ++ kpg->pg_size = pg_size; ++ kpg->size = pg_size; ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ list_add(&kpg->list, &psys->pgs); ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ ++ return kpg; ++} ++ ++static struct ipu_psys_desc *psys_desc_lookup(struct ipu_psys_fh *fh, int fd) ++{ ++ struct ipu_psys_desc *desc; ++ ++ list_for_each_entry(desc, &fh->descs_list, 
list) { ++ if (desc->fd == fd) ++ return desc; ++ } ++ ++ return NULL; ++} ++ ++static bool dmabuf_cmp(struct dma_buf *lb, struct dma_buf *rb) ++{ ++ return lb == rb && lb->size == rb->size; ++} ++ ++static struct ipu_psys_kbuffer *psys_buf_lookup(struct ipu_psys_fh *fh, int fd) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ struct dma_buf *dma_buf; ++ ++ dma_buf = dma_buf_get(fd); ++ if (IS_ERR(dma_buf)) ++ return NULL; ++ ++ /* ++ * First lookup so-called `active` list, that is the list of ++ * referenced buffers ++ */ ++ list_for_each_entry(kbuf, &fh->bufs_list, list) { ++ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { ++ dma_buf_put(dma_buf); ++ return kbuf; ++ } ++ } ++ ++ /* ++ * We didn't find anything on the `active` list, try the LRU list ++ * (list of unreferenced buffers) and possibly resurrect a buffer ++ */ ++ list_for_each_entry(kbuf, &fh->bufs_lru, list) { ++ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { ++ dma_buf_put(dma_buf); ++ ipu_buffer_lru_del(fh, kbuf); ++ ipu_buffer_add(fh, kbuf); ++ return kbuf; ++ } ++ } ++ ++ dma_buf_put(dma_buf); ++ return NULL; ++} ++ ++struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) ++{ ++ struct ipu_psys_desc *desc; ++ ++ desc = psys_desc_lookup(fh, fd); ++ if (!desc) ++ return NULL; ++ ++ return desc->kbuf; ++} ++ ++struct ipu_psys_kbuffer * ++ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) ++{ ++ struct ipu_psys_kbuffer *kbuffer; ++ ++ list_for_each_entry(kbuffer, &fh->bufs_list, list) { ++ if (kbuffer->kaddr == kaddr) ++ return kbuffer; ++ } ++ ++ return NULL; ++} ++ ++static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) ++{ ++ struct vm_area_struct *vma; ++ unsigned long start, end; ++ int npages, array_size; ++ struct page **pages; ++ struct sg_table *sgt; ++ int ret = -ENOMEM; ++ int nr = 0; ++ u32 flags; ++ ++ start = attach->userptr; ++ end = PAGE_ALIGN(start + attach->len); ++ npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; ++ array_size = npages * 
sizeof(struct page *); ++ ++ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); ++ if (!sgt) ++ return -ENOMEM; ++ ++ WARN_ON_ONCE(attach->npages); ++ ++ pages = kvzalloc(array_size, GFP_KERNEL); ++ if (!pages) ++ goto free_sgt; ++ ++ mmap_read_lock(current->mm); ++ vma = vma_lookup(current->mm, start); ++ if (unlikely(!vma)) { ++ ret = -EFAULT; ++ goto error_up_read; ++ } ++ mmap_read_unlock(current->mm); ++ ++ flags = FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM; ++ nr = pin_user_pages_fast(start & PAGE_MASK, npages, ++ flags, pages); ++ if (nr < npages) ++ goto error; ++ ++ attach->pages = pages; ++ attach->npages = npages; ++ ++ ret = sg_alloc_table_from_pages(sgt, pages, npages, ++ start & ~PAGE_MASK, attach->len, ++ GFP_KERNEL); ++ if (ret < 0) ++ goto error; ++ ++ attach->sgt = sgt; ++ ++ return 0; ++ ++error_up_read: ++ mmap_read_unlock(current->mm); ++error: ++ if (nr) ++ unpin_user_pages(pages, nr); ++ kvfree(pages); ++free_sgt: ++ kfree(sgt); ++ ++ pr_err("failed to get userpages:%d\n", ret); ++ ++ return ret; ++} ++ ++static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) ++{ ++ if (!attach || !attach->userptr || !attach->sgt) ++ return; ++ ++ unpin_user_pages(attach->pages, attach->npages); ++ kvfree(attach->pages); ++ ++ sg_free_table(attach->sgt); ++ kfree(attach->sgt); ++ attach->sgt = NULL; ++} ++ ++static int ipu_dma_buf_attach(struct dma_buf *dbuf, ++ struct dma_buf_attachment *attach) ++{ ++ struct ipu_psys_kbuffer *kbuf = dbuf->priv; ++ struct ipu_dma_buf_attach *ipu_attach; ++ int ret; ++ ++ ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); ++ if (!ipu_attach) ++ return -ENOMEM; ++ ++ ipu_attach->len = kbuf->len; ++ ipu_attach->userptr = kbuf->userptr; ++ ++ ret = ipu_psys_get_userpages(ipu_attach); ++ if (ret) { ++ kfree(ipu_attach); ++ return ret; ++ } ++ ++ attach->priv = ipu_attach; ++ return 0; ++} ++ ++static void ipu_dma_buf_detach(struct dma_buf *dbuf, ++ struct dma_buf_attachment *attach) ++{ ++ struct ipu_dma_buf_attach 
*ipu_attach = attach->priv; ++ ++ ipu_psys_put_userpages(ipu_attach); ++ kfree(ipu_attach); ++ attach->priv = NULL; ++} ++ ++static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, ++ enum dma_data_direction dir) ++{ ++ struct ipu_dma_buf_attach *ipu_attach = attach->priv; ++ struct pci_dev *pdev = to_pci_dev(attach->dev); ++ struct ipu6_device *isp = pci_get_drvdata(pdev); ++ struct ipu6_bus_device *adev = isp->psys; ++ unsigned long attrs; ++ int ret; ++ ++ attrs = DMA_ATTR_SKIP_CPU_SYNC; ++ ret = dma_map_sgtable(&pdev->dev, ipu_attach->sgt, dir, attrs); ++ if (ret) { ++ dev_err(attach->dev, "pci buf map failed\n"); ++ return ERR_PTR(-EIO); ++ } ++ ++ dma_sync_sgtable_for_device(&pdev->dev, ipu_attach->sgt, dir); ++ ++ ret = ipu6_dma_map_sgtable(adev, ipu_attach->sgt, dir, 0); ++ if (ret) { ++ dev_err(attach->dev, "ipu6 buf map failed\n"); ++ return ERR_PTR(-EIO); ++ } ++ ++ ipu6_dma_sync_sgtable(adev, ipu_attach->sgt); ++ ++ return ipu_attach->sgt; ++} ++ ++static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, ++ struct sg_table *sgt, enum dma_data_direction dir) ++{ ++ struct pci_dev *pdev = to_pci_dev(attach->dev); ++ struct ipu6_device *isp = pci_get_drvdata(pdev); ++ struct ipu6_bus_device *adev = isp->psys; ++ ++ ipu6_dma_unmap_sgtable(adev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); ++ dma_unmap_sgtable(&pdev->dev, sgt, dir, 0); ++} ++ ++static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) ++{ ++ return -ENOTTY; ++} ++ ++static void ipu_dma_buf_release(struct dma_buf *buf) ++{ ++ struct ipu_psys_kbuffer *kbuf = buf->priv; ++ ++ if (!kbuf) ++ return; ++ ++ if (kbuf->db_attach) ++ ipu_psys_put_userpages(kbuf->db_attach->priv); ++ ++ kfree(kbuf); ++} ++ ++static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, ++ enum dma_data_direction dir) ++{ ++ return -ENOTTY; ++} ++ ++static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) ++{ ++ struct dma_buf_attachment *attach; ++ struct 
ipu_dma_buf_attach *ipu_attach; ++ ++ if (list_empty(&dmabuf->attachments)) ++ return -EINVAL; ++ ++ attach = list_last_entry(&dmabuf->attachments, ++ struct dma_buf_attachment, node); ++ ipu_attach = attach->priv; ++ ++ if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) ++ return -EINVAL; ++ ++ map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); ++ map->is_iomem = false; ++ if (!map->vaddr) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) ++{ ++ struct dma_buf_attachment *attach; ++ struct ipu_dma_buf_attach *ipu_attach; ++ ++ if (WARN_ON(list_empty(&dmabuf->attachments))) ++ return; ++ ++ attach = list_last_entry(&dmabuf->attachments, ++ struct dma_buf_attachment, node); ++ ipu_attach = attach->priv; ++ ++ if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) ++ return; ++ ++ vm_unmap_ram(map->vaddr, ipu_attach->npages); ++} ++ ++static const struct dma_buf_ops ipu_dma_buf_ops = { ++ .attach = ipu_dma_buf_attach, ++ .detach = ipu_dma_buf_detach, ++ .map_dma_buf = ipu_dma_buf_map, ++ .unmap_dma_buf = ipu_dma_buf_unmap, ++ .release = ipu_dma_buf_release, ++ .begin_cpu_access = ipu_dma_buf_begin_cpu_access, ++ .mmap = ipu_dma_buf_mmap, ++ .vmap = ipu_dma_buf_vmap, ++ .vunmap = ipu_dma_buf_vunmap, ++}; ++ ++static int ipu_psys_open(struct inode *inode, struct file *file) ++{ ++ struct ipu_psys *psys = inode_to_ipu_psys(inode); ++ struct ipu_psys_fh *fh; ++ int rval; ++ ++ fh = kzalloc(sizeof(*fh), GFP_KERNEL); ++ if (!fh) ++ return -ENOMEM; ++ ++ fh->psys = psys; ++ ++ file->private_data = fh; ++ ++ mutex_init(&fh->mutex); ++ INIT_LIST_HEAD(&fh->bufs_list); ++ INIT_LIST_HEAD(&fh->descs_list); ++ INIT_LIST_HEAD(&fh->bufs_lru); ++ init_waitqueue_head(&fh->wait); ++ ++ rval = ipu_psys_fh_init(fh); ++ if (rval) ++ goto open_failed; ++ ++ mutex_lock(&psys->mutex); ++ list_add_tail(&fh->list, &psys->fhs); ++ mutex_unlock(&psys->mutex); ++ ++ return 0; ++ 
++open_failed: ++ mutex_destroy(&fh->mutex); ++ kfree(fh); ++ return rval; ++} ++ ++static inline void ipu_psys_kbuf_unmap(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ if (!kbuf) ++ return; ++ ++ kbuf->valid = false; ++ if (kbuf->kaddr) { ++ struct iosys_map dmap; ++ ++ iosys_map_set_vaddr(&dmap, kbuf->kaddr); ++ dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); ++ } ++ ++ if (!kbuf->userptr) ++ ipu6_dma_unmap_sgtable(fh->psys->adev, kbuf->sgt, ++ DMA_BIDIRECTIONAL, 0); ++ ++ if (!IS_ERR_OR_NULL(kbuf->sgt)) ++ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, ++ kbuf->sgt, ++ DMA_BIDIRECTIONAL); ++ if (!IS_ERR_OR_NULL(kbuf->db_attach)) ++ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); ++ dma_buf_put(kbuf->dbuf); ++ ++ kbuf->db_attach = NULL; ++ kbuf->dbuf = NULL; ++ kbuf->sgt = NULL; ++} ++ ++static void __ipu_psys_unmapbuf(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ /* From now on it is not safe to use this kbuffer */ ++ ipu_psys_kbuf_unmap(fh, kbuf); ++ ipu_buffer_del(fh, kbuf); ++ if (!kbuf->userptr) ++ kfree(kbuf); ++} ++ ++static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kbuffer *kbuf; ++ struct ipu_psys_desc *desc; ++ ++ desc = psys_desc_lookup(fh, fd); ++ if (WARN_ON_ONCE(!desc)) { ++ dev_err(dev, "descriptor not found: %d\n", fd); ++ return -EINVAL; ++ } ++ ++ kbuf = desc->kbuf; ++ /* descriptor is gone now */ ++ ipu_desc_del(fh, desc); ++ kfree(desc); ++ ++ if (WARN_ON_ONCE(!kbuf || !kbuf->dbuf)) { ++ dev_err(dev, ++ "descriptor with no buffer: %d\n", fd); ++ return -EINVAL; ++ } ++ ++ /* Wait for final UNMAP */ ++ if (!atomic_dec_and_test(&kbuf->map_count)) ++ return 0; ++ ++ __ipu_psys_unmapbuf(fh, kbuf); ++ ++ return 0; ++} ++ ++static int ipu_psys_release(struct inode *inode, struct file *file) ++{ ++ struct ipu_psys *psys = inode_to_ipu_psys(inode); ++ struct ipu_psys_fh *fh = 
file->private_data; ++ ++ mutex_lock(&fh->mutex); ++ while (!list_empty(&fh->descs_list)) { ++ struct ipu_psys_desc *desc; ++ ++ desc = list_first_entry(&fh->descs_list, ++ struct ipu_psys_desc, ++ list); ++ ++ ipu_desc_del(fh, desc); ++ kfree(desc); ++ } ++ ++ while (!list_empty(&fh->bufs_lru)) { ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = list_first_entry(&fh->bufs_lru, ++ struct ipu_psys_kbuffer, ++ list); ++ ++ ipu_buffer_lru_del(fh, kbuf); ++ __ipu_psys_unmapbuf(fh, kbuf); ++ } ++ ++ while (!list_empty(&fh->bufs_list)) { ++ struct dma_buf_attachment *db_attach; ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = list_first_entry(&fh->bufs_list, ++ struct ipu_psys_kbuffer, ++ list); ++ ++ ipu_buffer_del(fh, kbuf); ++ db_attach = kbuf->db_attach; ++ ++ /* Unmap and release buffers */ ++ if (kbuf->dbuf && db_attach) { ++ ipu_psys_kbuf_unmap(fh, kbuf); ++ } else { ++ if (db_attach) ++ ipu_psys_put_userpages(db_attach->priv); ++ kfree(kbuf); ++ } ++ } ++ mutex_unlock(&fh->mutex); ++ ++ mutex_lock(&psys->mutex); ++ list_del(&fh->list); ++ ++ mutex_unlock(&psys->mutex); ++ ipu_psys_fh_deinit(fh); ++ ++ mutex_lock(&psys->mutex); ++ if (list_empty(&psys->fhs)) ++ psys->power_gating = 0; ++ mutex_unlock(&psys->mutex); ++ mutex_destroy(&fh->mutex); ++ kfree(fh); ++ ++ return 0; ++} ++ ++static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_desc *desc; ++ DEFINE_DMA_BUF_EXPORT_INFO(exp_info); ++ struct dma_buf *dbuf; ++ int ret; ++ ++ if (!buf->base.userptr) { ++ dev_err(dev, "Buffer allocation not supported\n"); ++ return -EINVAL; ++ } ++ ++ kbuf = ipu_psys_kbuffer_alloc(); ++ if (!kbuf) ++ return -ENOMEM; ++ ++ kbuf->len = buf->len; ++ kbuf->userptr = (unsigned long)buf->base.userptr; ++ kbuf->flags = buf->flags; ++ ++ exp_info.ops = &ipu_dma_buf_ops; ++ exp_info.size = kbuf->len; ++ exp_info.flags = 
O_RDWR; ++ exp_info.priv = kbuf; ++ ++ dbuf = dma_buf_export(&exp_info); ++ if (IS_ERR(dbuf)) { ++ kfree(kbuf); ++ return PTR_ERR(dbuf); ++ } ++ ++ ret = dma_buf_fd(dbuf, 0); ++ if (ret < 0) { ++ dma_buf_put(dbuf); ++ return ret; ++ } ++ ++ buf->base.fd = ret; ++ buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; ++ buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; ++ kbuf->flags = buf->flags; ++ ++ desc = ipu_psys_desc_alloc(ret); ++ if (!desc) { ++ dma_buf_put(dbuf); ++ return -ENOMEM; ++ } ++ ++ kbuf->dbuf = dbuf; ++ ++ mutex_lock(&fh->mutex); ++ ipu_desc_add(fh, desc); ++ ipu_buffer_add(fh, kbuf); ++ mutex_unlock(&fh->mutex); ++ ++ dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", ++ buf->base.userptr, buf->len, buf->base.fd); ++ ++ return 0; ++} ++ ++static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) ++{ ++ return 0; ++} ++ ++static void ipu_psys_kbuffer_lru(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ ipu_buffer_del(fh, kbuf); ++ ipu_buffer_lru_add(fh, kbuf); ++ ++ while (fh->num_bufs_lru > IPU_PSYS_MAX_NUM_BUFS_LRU) { ++ kbuf = list_first_entry(&fh->bufs_lru, ++ struct ipu_psys_kbuffer, ++ list); ++ ++ ipu_buffer_lru_del(fh, kbuf); ++ __ipu_psys_unmapbuf(fh, kbuf); ++ } ++} ++ ++struct ipu_psys_kbuffer *ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->isp->pdev->dev; ++ int ret; ++ struct ipu_psys_kbuffer *kbuf; ++ struct ipu_psys_desc *desc; ++ struct dma_buf *dbuf; ++ struct iosys_map dmap; ++ ++ dbuf = dma_buf_get(fd); ++ if (IS_ERR(dbuf)) ++ return NULL; ++ ++ desc = psys_desc_lookup(fh, fd); ++ if (!desc) { ++ desc = ipu_psys_desc_alloc(fd); ++ if (!desc) ++ goto desc_alloc_fail; ++ ipu_desc_add(fh, desc); ++ } ++ ++ kbuf = psys_buf_lookup(fh, fd); ++ if (!kbuf) { ++ kbuf = ipu_psys_kbuffer_alloc(); ++ if (!kbuf) ++ goto buf_alloc_fail; ++ ipu_buffer_add(fh, kbuf); ++ } ++ ++ /* If this descriptor references no buffer or new buffer 
*/ ++ if (desc->kbuf != kbuf) { ++ if (desc->kbuf) { ++ /* ++ * Un-reference old buffer and possibly put it on ++ * the LRU list ++ */ ++ if (atomic_dec_and_test(&desc->kbuf->map_count)) ++ ipu_psys_kbuffer_lru(fh, desc->kbuf); ++ } ++ ++ /* Grab reference of the new buffer */ ++ atomic_inc(&kbuf->map_count); ++ } ++ ++ desc->kbuf = kbuf; ++ ++ if (kbuf->sgt) { ++ dev_dbg(dev, "fd %d has been mapped!\n", fd); ++ dma_buf_put(dbuf); ++ goto mapbuf_end; ++ } ++ ++ kbuf->dbuf = dbuf; ++ ++ if (kbuf->len == 0) ++ kbuf->len = kbuf->dbuf->size; ++ ++ kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); ++ if (IS_ERR(kbuf->db_attach)) { ++ dev_dbg(dev, "dma buf attach failed\n"); ++ goto attach_fail; ++ } ++ ++ kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, ++ DMA_BIDIRECTIONAL); ++ if (IS_ERR_OR_NULL(kbuf->sgt)) { ++ kbuf->sgt = NULL; ++ dev_dbg(dev, "dma buf map attachment failed\n"); ++ goto kbuf_map_fail; ++ } ++ ++ if (!kbuf->userptr) { ++ ret = ipu6_dma_map_sgtable(psys->adev, kbuf->sgt, ++ DMA_BIDIRECTIONAL, 0); ++ if (ret) { ++ dev_dbg(dev, "ipu6 buf map failed\n"); ++ goto kbuf_map_fail; ++ } ++ } ++ ++ kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); ++ ++ dmap.is_iomem = false; ++ if (dma_buf_vmap_unlocked(kbuf->dbuf, &dmap)) { ++ dev_dbg(dev, "dma buf vmap failed\n"); ++ goto kbuf_map_fail; ++ } ++ kbuf->kaddr = dmap.vaddr; ++ ++mapbuf_end: ++ dev_dbg(dev, "%s %s kbuf %p fd %d with len %llu mapped\n", ++ __func__, kbuf->userptr ? 
"private" : "imported", kbuf, fd, ++ kbuf->len); ++ ++ kbuf->valid = true; ++ return kbuf; ++ ++kbuf_map_fail: ++ ipu_buffer_del(fh, kbuf); ++ if (!IS_ERR_OR_NULL(kbuf->sgt)) { ++ if (!kbuf->userptr) ++ ipu6_dma_unmap_sgtable(psys->adev, kbuf->sgt, ++ DMA_BIDIRECTIONAL, 0); ++ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, ++ DMA_BIDIRECTIONAL); ++ } ++ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); ++attach_fail: ++ ipu_buffer_del(fh, kbuf); ++ dbuf = ERR_PTR(-EINVAL); ++ if (!kbuf->userptr) ++ kfree(kbuf); ++ ++buf_alloc_fail: ++ ipu_desc_del(fh, desc); ++ kfree(desc); ++ ++desc_alloc_fail: ++ if (!IS_ERR(dbuf)) ++ dma_buf_put(dbuf); ++ return NULL; ++} ++ ++static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ ++ dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF\n"); ++ ++ mutex_lock(&fh->mutex); ++ kbuf = ipu_psys_mapbuf_locked(fd, fh); ++ mutex_unlock(&fh->mutex); ++ ++ return kbuf ? 0 : -EINVAL; ++} ++ ++static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) ++{ ++ struct device *dev = &fh->psys->adev->auxdev.dev; ++ long ret; ++ ++ dev_dbg(dev, "IOC_UNMAPBUF\n"); ++ ++ mutex_lock(&fh->mutex); ++ ret = ipu_psys_unmapbuf_locked(fd, fh); ++ mutex_unlock(&fh->mutex); ++ ++ return ret; ++} ++ ++static __poll_t ipu_psys_poll(struct file *file, ++ struct poll_table_struct *wait) ++{ ++ struct ipu_psys_fh *fh = file->private_data; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ __poll_t ret = 0; ++ ++ dev_dbg(dev, "ipu psys poll\n"); ++ ++ poll_wait(file, &fh->wait, wait); ++ ++ if (ipu_get_completed_kcmd(fh)) ++ ret = POLLIN; ++ ++ dev_dbg(dev, "ipu psys poll ret %u\n", ret); ++ ++ return ret; ++} ++ ++static long ipu_get_manifest(struct ipu_psys_manifest *manifest, ++ struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu6_bus_device *adev = psys->adev; ++ struct ipu6_device *isp = 
adev->isp; ++ struct ipu6_cpd_client_pkg_hdr *client_pkg; ++ u32 entries; ++ void *host_fw_data; ++ dma_addr_t dma_fw_data; ++ u32 client_pkg_offset; ++ ++ host_fw_data = (void *)isp->cpd_fw->data; ++ dma_fw_data = sg_dma_address(adev->fw_sgt.sgl); ++ entries = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); ++ if (!manifest || manifest->index > entries - 1) { ++ dev_err(dev, "invalid argument\n"); ++ return -EINVAL; ++ } ++ ++ if (!ipu6_cpd_pkg_dir_get_size(adev->pkg_dir, manifest->index) || ++ ipu6_cpd_pkg_dir_get_type(adev->pkg_dir, manifest->index) < ++ IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE) { ++ dev_dbg(dev, "invalid pkg dir entry\n"); ++ return -ENOENT; ++ } ++ ++ client_pkg_offset = ipu6_cpd_pkg_dir_get_address(adev->pkg_dir, ++ manifest->index); ++ client_pkg_offset -= dma_fw_data; ++ ++ client_pkg = host_fw_data + client_pkg_offset; ++ manifest->size = client_pkg->pg_manifest_size; ++ ++ if (!manifest->manifest) ++ return 0; ++ ++ if (copy_to_user(manifest->manifest, ++ (uint8_t *)client_pkg + client_pkg->pg_manifest_offs, ++ manifest->size)) { ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static long ipu_psys_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ union { ++ struct ipu_psys_buffer buf; ++ struct ipu_psys_command cmd; ++ struct ipu_psys_event ev; ++ struct ipu_psys_capability caps; ++ struct ipu_psys_manifest m; ++ } karg; ++ struct ipu_psys_fh *fh = file->private_data; ++ long err = 0; ++ void __user *up = (void __user *)arg; ++ bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); ++ ++ if (copy) { ++ if (_IOC_SIZE(cmd) > sizeof(karg)) ++ return -ENOTTY; ++ ++ if (_IOC_DIR(cmd) & _IOC_WRITE) { ++ err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); ++ if (err) ++ return -EFAULT; ++ } ++ } ++ ++ switch (cmd) { ++ case IPU_IOC_MAPBUF: ++ err = ipu_psys_mapbuf(arg, fh); ++ break; ++ case IPU_IOC_UNMAPBUF: ++ err = ipu_psys_unmapbuf(arg, fh); ++ break; ++ case IPU_IOC_QUERYCAP: ++ karg.caps = fh->psys->caps; ++ break; ++ 
case IPU_IOC_GETBUF: ++ err = ipu_psys_getbuf(&karg.buf, fh); ++ break; ++ case IPU_IOC_PUTBUF: ++ err = ipu_psys_putbuf(&karg.buf, fh); ++ break; ++ case IPU_IOC_QCMD: ++ err = ipu_psys_kcmd_new(&karg.cmd, fh); ++ break; ++ case IPU_IOC_DQEVENT: ++ err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); ++ break; ++ case IPU_IOC_GET_MANIFEST: ++ err = ipu_get_manifest(&karg.m, fh); ++ break; ++ default: ++ err = -ENOTTY; ++ break; ++ } ++ ++ if (err) ++ return err; ++ ++ if (copy && _IOC_DIR(cmd) & _IOC_READ) ++ if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static const struct file_operations ipu_psys_fops = { ++ .open = ipu_psys_open, ++ .release = ipu_psys_release, ++ .unlocked_ioctl = ipu_psys_ioctl, ++ .poll = ipu_psys_poll, ++ .owner = THIS_MODULE, ++}; ++ ++static void ipu_psys_dev_release(struct device *dev) ++{ ++} ++ ++static int psys_runtime_pm_resume(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ unsigned long flags; ++ int retval; ++ ++ if (!psys) ++ return 0; ++ ++ spin_lock_irqsave(&psys->ready_lock, flags); ++ if (psys->ready) { ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ return 0; ++ } ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ ++ retval = ipu6_mmu_hw_init(adev->mmu); ++ if (retval) ++ return retval; ++ ++ if (async_fw_init && !psys->fwcom) { ++ dev_err(dev, "async firmware init not finished, skipping\n"); ++ return 0; ++ } ++ ++ if (!ipu6_buttress_auth_done(adev->isp)) { ++ dev_dbg(dev, "fw not yet authenticated, skipping\n"); ++ return 0; ++ } ++ ++ ipu_psys_setup_hw(psys); ++ ++ ipu_psys_subdomains_power(psys, 1); ++ ipu6_configure_spc(adev->isp, ++ &psys->pdata->ipdata->hw_variant, ++ IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX, ++ psys->pdata->base, adev->pkg_dir, ++ adev->pkg_dir_dma_addr); ++ ++ retval = ipu_fw_psys_open(psys); ++ if (retval) { ++ dev_err(dev, "Failed to open abi.\n"); ++ return 
retval; ++ } ++ ++ spin_lock_irqsave(&psys->ready_lock, flags); ++ psys->ready = 1; ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ ++ return 0; ++} ++ ++static int psys_runtime_pm_suspend(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ unsigned long flags; ++ int rval; ++ ++ if (!psys) ++ return 0; ++ ++ if (!psys->ready) ++ return 0; ++ ++ spin_lock_irqsave(&psys->ready_lock, flags); ++ psys->ready = 0; ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ ++ /* ++ * We can trace failure but better to not return an error. ++ * At suspend we are progressing towards psys power gated state. ++ * Any hang / failure inside psys will be forgotten soon. ++ */ ++ rval = ipu_fw_psys_close(psys); ++ if (rval) ++ dev_err(dev, "Device close failure: %d\n", rval); ++ ++ ipu_psys_subdomains_power(psys, 0); ++ ++ ipu6_mmu_hw_cleanup(adev->mmu); ++ ++ return 0; ++} ++ ++/* The following PM callbacks are needed to enable runtime PM in IPU PCI ++ * device resume, otherwise, runtime PM can't work in PCI resume from ++ * S3 state. 
++ */ ++static int psys_resume(struct device *dev) ++{ ++ return 0; ++} ++ ++static int psys_suspend(struct device *dev) ++{ ++ return 0; ++} ++ ++static const struct dev_pm_ops psys_pm_ops = { ++ .runtime_suspend = psys_runtime_pm_suspend, ++ .runtime_resume = psys_runtime_pm_resume, ++ .suspend = psys_suspend, ++ .resume = psys_resume, ++}; ++ ++static int ipu_psys_sched_cmd(void *ptr) ++{ ++ struct ipu_psys *psys = ptr; ++ size_t pending = 0; ++ ++ while (1) { ++ wait_event_interruptible(psys->sched_cmd_wq, ++ (kthread_should_stop() || ++ (pending = ++ atomic_read(&psys->wakeup_count)))); ++ ++ if (kthread_should_stop()) ++ break; ++ ++ if (pending == 0) ++ continue; ++ ++ mutex_lock(&psys->mutex); ++ atomic_set(&psys->wakeup_count, 0); ++ ipu_psys_run_next(psys); ++ mutex_unlock(&psys->mutex); ++ } ++ ++ return 0; ++} ++ ++static void start_sp(struct ipu6_bus_device *adev) ++{ ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ void __iomem *spc_regs_base = psys->pdata->base + ++ psys->pdata->ipdata->hw_variant.spc_offset; ++ u32 val = 0; ++ ++ val |= IPU6_PSYS_SPC_STATUS_START | ++ IPU6_PSYS_SPC_STATUS_RUN | ++ IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; ++ val |= psys->icache_prefetch_sp ? 
++ IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; ++ writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); ++} ++ ++static int query_sp(struct ipu6_bus_device *adev) ++{ ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ void __iomem *spc_regs_base = psys->pdata->base + ++ psys->pdata->ipdata->hw_variant.spc_offset; ++ u32 val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); ++ ++ /* return true when READY == 1, START == 0 */ ++ val &= IPU6_PSYS_SPC_STATUS_READY | IPU6_PSYS_SPC_STATUS_START; ++ ++ return val == IPU6_PSYS_SPC_STATUS_READY; ++} ++ ++static int ipu_psys_fw_init(struct ipu_psys *psys) ++{ ++ struct ipu6_fw_syscom_queue_config *queue_cfg; ++ struct device *dev = &psys->adev->auxdev.dev; ++ unsigned int size; ++ struct ipu6_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { ++ { ++ IPU_FW_PSYS_EVENT_QUEUE_SIZE, ++ sizeof(struct ipu_fw_psys_event) ++ } ++ }; ++ struct ipu_fw_psys_srv_init server_init = { ++ .ddr_pkg_dir_address = 0, ++ .host_ddr_pkg_dir = NULL, ++ .pkg_dir_size = 0, ++ .icache_prefetch_sp = psys->icache_prefetch_sp, ++ .icache_prefetch_isp = psys->icache_prefetch_isp, ++ }; ++ struct ipu6_fw_com_cfg fwcom = { ++ .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, ++ .output = fw_psys_event_queue_cfg, ++ .specific_addr = &server_init, ++ .specific_size = sizeof(server_init), ++ .cell_start = start_sp, ++ .cell_ready = query_sp, ++ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET, ++ }; ++ int i; ++ ++ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || ++ ipu_ver == IPU6_VER_6EP_MTL) ++ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ ++ queue_cfg = devm_kzalloc(dev, sizeof(*queue_cfg) * size, ++ GFP_KERNEL); ++ if (!queue_cfg) ++ return -ENOMEM; ++ ++ for (i = 0; i < size; i++) { ++ queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE; ++ queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd); ++ } ++ ++ fwcom.input = queue_cfg; ++ fwcom.num_input_queues = 
size; ++ fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; ++ ++ psys->fwcom = ipu6_fw_com_prepare(&fwcom, psys->adev, ++ psys->pdata->base); ++ if (!psys->fwcom) { ++ dev_err(dev, "psys fw com prepare failed\n"); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static void run_fw_init_work(struct work_struct *work) ++{ ++ struct fw_init_task *task = (struct fw_init_task *)work; ++ struct ipu_psys *psys = task->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ int rval; ++ ++ rval = ipu_psys_fw_init(psys); ++ ++ if (rval) { ++ dev_err(dev, "FW init failed(%d)\n", rval); ++ ipu6_psys_remove(&psys->adev->auxdev); ++ } else { ++ dev_info(dev, "FW init done\n"); ++ } ++} ++ ++static int ipu6_psys_probe(struct auxiliary_device *auxdev, ++ const struct auxiliary_device_id *auxdev_id) ++{ ++ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); ++ struct device *dev = &auxdev->dev; ++ struct ipu_psys_pg *kpg, *kpg0; ++ struct ipu_psys *psys; ++ unsigned int minor; ++ int i, rval = -E2BIG; ++ ++ if (!adev->isp->bus_ready_to_probe) ++ return -EPROBE_DEFER; ++ ++ if (!adev->pkg_dir) ++ return -EPROBE_DEFER; ++ ++ ipu_ver = adev->isp->hw_ver; ++ ++ rval = ipu6_mmu_hw_init(adev->mmu); ++ if (rval) ++ return rval; ++ ++ mutex_lock(&ipu_psys_mutex); ++ ++ minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); ++ if (minor == IPU_PSYS_NUM_DEVICES) { ++ dev_err(dev, "too many devices\n"); ++ goto out_unlock; ++ } ++ ++ psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); ++ if (!psys) { ++ rval = -ENOMEM; ++ goto out_unlock; ++ } ++ ++ adev->auxdrv_data = ++ (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; ++ adev->auxdrv = to_auxiliary_drv(dev->driver); ++ ++ psys->adev = adev; ++ psys->pdata = adev->pdata; ++ psys->icache_prefetch_sp = 0; ++ ++ psys->power_gating = 0; ++ ++ spin_lock_init(&psys->ready_lock); ++ spin_lock_init(&psys->pgs_lock); ++ psys->ready = 0; ++ psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; ++ ++ 
mutex_init(&psys->mutex); ++ INIT_LIST_HEAD(&psys->fhs); ++ INIT_LIST_HEAD(&psys->pgs); ++ INIT_LIST_HEAD(&psys->started_kcmds_list); ++ ++ init_waitqueue_head(&psys->sched_cmd_wq); ++ atomic_set(&psys->wakeup_count, 0); ++ /* ++ * Create a thread to schedule commands sent to IPU firmware. ++ * The thread reduces the coupling between the command scheduler ++ * and queueing commands from the user to driver. ++ */ ++ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, ++ "psys_sched_cmd"); ++ ++ if (IS_ERR(psys->sched_cmd_thread)) { ++ psys->sched_cmd_thread = NULL; ++ mutex_destroy(&psys->mutex); ++ goto out_unlock; ++ } ++ ++ dev_set_drvdata(dev, psys); ++ ++ rval = ipu_psys_res_pool_init(&psys->res_pool_running); ++ if (rval < 0) { ++ dev_err(&psys->dev, ++ "unable to alloc process group resources\n"); ++ goto out_mutex_destroy; ++ } ++ ++ ipu6_psys_hw_res_variant_init(); ++ ++ /* allocate and map memory for process groups */ ++ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { ++ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); ++ if (!kpg) ++ goto out_free_pgs; ++ kpg->pg = ipu6_dma_alloc(adev, IPU_PSYS_PG_MAX_SIZE, ++ &kpg->pg_dma_addr, ++ GFP_KERNEL, 0); ++ if (!kpg->pg) { ++ kfree(kpg); ++ goto out_free_pgs; ++ } ++ kpg->size = IPU_PSYS_PG_MAX_SIZE; ++ list_add(&kpg->list, &psys->pgs); ++ } ++ ++ psys->caps.pg_count = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); ++ ++ dev_info(dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); ++ if (async_fw_init) { ++ INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, ++ run_fw_init_work); ++ fw_init_task.psys = psys; ++ schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); ++ } else { ++ rval = ipu_psys_fw_init(psys); ++ } ++ ++ if (rval) { ++ dev_err(dev, "FW init failed(%d)\n", rval); ++ goto out_free_pgs; ++ } ++ ++ psys->dev.bus = &ipu_psys_bus; ++ psys->dev.parent = dev; ++ psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); ++ psys->dev.release = ipu_psys_dev_release; ++ 
dev_set_name(&psys->dev, "ipu-psys%d", minor); ++ device_initialize(&psys->dev); ++ ++ cdev_init(&psys->cdev, &ipu_psys_fops); ++ psys->cdev.owner = ipu_psys_fops.owner; ++ ++ rval = cdev_device_add(&psys->cdev, &psys->dev); ++ if (rval < 0) { ++ dev_err(dev, "psys device_register failed\n"); ++ goto out_release_fw_com; ++ } ++ ++ set_bit(minor, ipu_psys_devices); ++ ++ /* Add the hw stepping information to caps */ ++ strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, ++ sizeof(psys->caps.dev_model)); ++ ++ mutex_unlock(&ipu_psys_mutex); ++ ++ dev_info(dev, "psys probe minor: %d\n", minor); ++ ++ ipu6_mmu_hw_cleanup(adev->mmu); ++ ++ return 0; ++ ++out_release_fw_com: ++ ipu6_fw_com_release(psys->fwcom, 1); ++out_free_pgs: ++ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { ++ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); ++ kfree(kpg); ++ } ++ ++ ipu_psys_res_pool_cleanup(&psys->res_pool_running); ++out_mutex_destroy: ++ mutex_destroy(&psys->mutex); ++ if (psys->sched_cmd_thread) { ++ kthread_stop(psys->sched_cmd_thread); ++ psys->sched_cmd_thread = NULL; ++ } ++out_unlock: ++ /* Safe to call even if the init is not called */ ++ mutex_unlock(&ipu_psys_mutex); ++ ipu6_mmu_hw_cleanup(adev->mmu); ++ return rval; ++} ++ ++static void ipu6_psys_remove(struct auxiliary_device *auxdev) ++{ ++ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); ++ struct device *dev = &auxdev->dev; ++ struct ipu_psys *psys = dev_get_drvdata(&auxdev->dev); ++ struct ipu_psys_pg *kpg, *kpg0; ++ ++ if (psys->sched_cmd_thread) { ++ kthread_stop(psys->sched_cmd_thread); ++ psys->sched_cmd_thread = NULL; ++ } ++ ++ mutex_lock(&ipu_psys_mutex); ++ ++ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { ++ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); ++ kfree(kpg); ++ } ++ ++ if (psys->fwcom && ipu6_fw_com_release(psys->fwcom, 1)) ++ dev_err(dev, "fw com release failed.\n"); ++ ++ kfree(psys->server_init); ++ kfree(psys->syscom_config); ++ ++ 
ipu_psys_res_pool_cleanup(&psys->res_pool_running); ++ ++ cdev_device_del(&psys->cdev, &psys->dev); ++ ++ clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); ++ ++ mutex_unlock(&ipu_psys_mutex); ++ ++ mutex_destroy(&psys->mutex); ++ ++ dev_info(dev, "removed\n"); ++} ++ ++static irqreturn_t psys_isr_threaded(struct ipu6_bus_device *adev) ++{ ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ struct device *dev = &psys->adev->auxdev.dev; ++ void __iomem *base = psys->pdata->base; ++ u32 status; ++ int r; ++ ++ mutex_lock(&psys->mutex); ++ r = pm_runtime_get_if_in_use(dev); ++ if (!r || WARN_ON_ONCE(r < 0)) { ++ mutex_unlock(&psys->mutex); ++ return IRQ_NONE; ++ } ++ ++ status = readl(base + IPU6_REG_PSYS_GPDEV_IRQ_STATUS); ++ writel(status, base + IPU6_REG_PSYS_GPDEV_IRQ_CLEAR); ++ ++ if (status & IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU6_PSYS_GPDEV_FWIRQ0)) { ++ writel(0, base + IPU6_REG_PSYS_GPDEV_FWIRQ(0)); ++ ipu_psys_handle_events(psys); ++ } ++ ++ pm_runtime_put(dev); ++ mutex_unlock(&psys->mutex); ++ ++ return status ? 
IRQ_HANDLED : IRQ_NONE; ++} ++ ++static const struct ipu6_auxdrv_data ipu6_psys_auxdrv_data = { ++ .isr_threaded = psys_isr_threaded, ++ .wake_isr_thread = true, ++}; ++ ++static const struct auxiliary_device_id ipu6_psys_id_table[] = { ++ { ++ .name = "intel_ipu6.psys", ++ .driver_data = (kernel_ulong_t)&ipu6_psys_auxdrv_data, ++ }, ++ { } ++}; ++MODULE_DEVICE_TABLE(auxiliary, ipu6_psys_id_table); ++ ++static struct auxiliary_driver ipu6_psys_aux_driver = { ++ .name = IPU6_PSYS_NAME, ++ .probe = ipu6_psys_probe, ++ .remove = ipu6_psys_remove, ++ .id_table = ipu6_psys_id_table, ++ .driver = { ++ .pm = &psys_pm_ops, ++ }, ++}; ++ ++static int __init ipu_psys_init(void) ++{ ++ int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, ++ IPU_PSYS_NUM_DEVICES, ipu_psys_bus.name); ++ if (rval) { ++ pr_err("can't alloc psys chrdev region (%d)\n", rval); ++ return rval; ++ } ++ ++ rval = bus_register(&ipu_psys_bus); ++ if (rval) { ++ pr_err("can't register psys bus (%d)\n", rval); ++ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); ++ return rval; ++ } ++ ++ auxiliary_driver_register(&ipu6_psys_aux_driver); ++ return 0; ++} ++module_init(ipu_psys_init); ++ ++static void __exit ipu_psys_exit(void) ++{ ++ auxiliary_driver_unregister(&ipu6_psys_aux_driver); ++ bus_unregister(&ipu_psys_bus); ++ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); ++} ++module_exit(ipu_psys_exit); ++ ++MODULE_AUTHOR("Bingbu Cao "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Intel IPU6 processing system driver"); ++MODULE_IMPORT_NS(DMA_BUF); ++MODULE_IMPORT_NS(INTEL_IPU6); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h +new file mode 100644 +index 0000000..8af6551 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h +@@ -0,0 +1,324 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2013 - 2024 Intel Corporation */ ++ ++#ifndef IPU_PSYS_H ++#define IPU_PSYS_H ++ 
++#include ++#include ++ ++#include ++#include "ipu6.h" ++#include "ipu6-bus.h" ++#include "ipu6-dma.h" ++#include "ipu-fw-psys.h" ++#include "ipu-platform-psys.h" ++ ++/* PSYS Info bits*/ ++#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2c + ((a) * 12)) ++#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5c + ((a) * 12)) ++ ++#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 ++#define IPU_PSYS_REG_SPC_START_PC 0x4 ++#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 ++#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 ++ ++#define IPU_PSYS_SPC_STATUS_START BIT(1) ++#define IPU_PSYS_SPC_STATUS_RUN BIT(3) ++#define IPU_PSYS_SPC_STATUS_READY BIT(5) ++#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) ++#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) ++ ++#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 ++ ++#define IPU_INFO_ENABLE_SNOOP BIT(0) ++#define IPU_INFO_DEC_FORCE_FLUSH BIT(1) ++#define IPU_INFO_DEC_PASS_THRU BIT(2) ++#define IPU_INFO_ZLW BIT(3) ++#define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1f) << 4) ++#define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9) ++#define IPU_INFO_IMR_BASE BIT(10) ++#define IPU_INFO_IMR_DESTINED BIT(11) ++ ++#define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF ++ ++#define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 ++#define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 ++#define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) ++#define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) ++#define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) ++ ++enum ipu_device_ab_group1_target_id { ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, ++ 
IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, ++}; ++ ++/* IRQ-related registers in PSYS */ ++#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 ++#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 ++#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 ++#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c ++#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 ++#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 ++/* There are 8 FW interrupts, n = 0..7 */ ++#define IPU_PSYS_GPDEV_FWIRQ0 5 ++#define IPU_PSYS_GPDEV_FWIRQ1 6 ++#define IPU_PSYS_GPDEV_FWIRQ2 7 ++#define IPU_PSYS_GPDEV_FWIRQ3 8 ++#define IPU_PSYS_GPDEV_FWIRQ4 9 ++#define IPU_PSYS_GPDEV_FWIRQ5 10 ++#define IPU_PSYS_GPDEV_FWIRQ6 11 ++#define IPU_PSYS_GPDEV_FWIRQ7 12 ++#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n)) ++#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) ++ ++/* ++ * psys subdomains power request regs ++ */ ++enum ipu_device_buttress_psys_domain_pos { ++ IPU_PSYS_SUBDOMAIN_ISA = 0, ++ IPU_PSYS_SUBDOMAIN_PSA = 1, ++ IPU_PSYS_SUBDOMAIN_BB = 2, ++ IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */ ++ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */ ++}; ++ ++#define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \ ++ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \ ++ BIT(IPU_PSYS_SUBDOMAIN_BB)) ++ ++#define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0 ++#define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4 ++ ++#define IPU_PSYS_CMD_TIMEOUT_MS 2000 ++#define IPU_PSYS_OPEN_TIMEOUT_US 50 ++#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) ++ ++#define IPU_PSYS_PG_POOL_SIZE 16 
++#define IPU_PSYS_PG_MAX_SIZE 8192 ++#define IPU_MAX_PSYS_CMD_BUFFERS 32 ++#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS ++#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS ++#define IPU_PSYS_CLOSE_TIMEOUT_US 50 ++#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US) ++#define IPU_MAX_RESOURCES 128 ++ ++extern enum ipu6_version ipu_ver; ++ ++/* Opaque structure. Do not access fields. */ ++struct ipu_resource { ++ u32 id; ++ int elements; /* Number of elements available to allocation */ ++ unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ ++}; ++ ++enum ipu_resource_type { ++ IPU_RESOURCE_DEV_CHN = 0, ++ IPU_RESOURCE_EXT_MEM, ++ IPU_RESOURCE_DFM ++}; ++ ++/* Allocation of resource(s) */ ++/* Opaque structure. Do not access fields. */ ++struct ipu_resource_alloc { ++ enum ipu_resource_type type; ++ struct ipu_resource *resource; ++ int elements; ++ int pos; ++}; ++ ++/* ++ * This struct represents all of the currently allocated ++ * resources from IPU model. It is used also for allocating ++ * resources for the next set of PGs to be run on IPU ++ * (ie. those PGs which are not yet being run and which don't ++ * yet reserve real IPU resources). ++ * Use larger array to cover existing resource quantity ++ */ ++ ++/* resource size may need expand for new resource model */ ++struct ipu_psys_resource_pool { ++ u32 cells; /* Bitmask of cells allocated */ ++ struct ipu_resource dev_channels[16]; ++ struct ipu_resource ext_memory[32]; ++ struct ipu_resource dfms[16]; ++ DECLARE_BITMAP(cmd_queues, 32); ++ /* Protects cmd_queues bitmap */ ++ spinlock_t queues_lock; ++}; ++ ++/* ++ * This struct keeps book of the resources allocated for a specific PG. ++ * It is used for freeing up resources from struct ipu_psys_resources ++ * when the PG is released from IPU (or model of IPU). 
++ */ ++struct ipu_psys_resource_alloc { ++ u32 cells; /* Bitmask of cells needed */ ++ struct ipu_resource_alloc ++ resource_alloc[IPU_MAX_RESOURCES]; ++ int resources; ++}; ++ ++struct task_struct; ++struct ipu_psys { ++ struct ipu_psys_capability caps; ++ struct cdev cdev; ++ struct device dev; ++ ++ struct mutex mutex; /* Psys various */ ++ int ready; /* psys fw status */ ++ bool icache_prefetch_sp; ++ bool icache_prefetch_isp; ++ spinlock_t ready_lock; /* protect psys firmware state */ ++ spinlock_t pgs_lock; /* Protect pgs list access */ ++ struct list_head fhs; ++ struct list_head pgs; ++ struct list_head started_kcmds_list; ++ struct ipu6_psys_pdata *pdata; ++ struct ipu6_bus_device *adev; ++ struct ia_css_syscom_context *dev_ctx; ++ struct ia_css_syscom_config *syscom_config; ++ struct ia_css_psys_server_init *server_init; ++ struct task_struct *sched_cmd_thread; ++ wait_queue_head_t sched_cmd_wq; ++ atomic_t wakeup_count; /* Psys schedule thread wakeup count */ ++ ++ /* Resources needed to be managed for process groups */ ++ struct ipu_psys_resource_pool res_pool_running; ++ ++ const struct firmware *fw; ++ struct sg_table fw_sgt; ++ u64 *pkg_dir; ++ dma_addr_t pkg_dir_dma_addr; ++ unsigned int pkg_dir_size; ++ unsigned long timeout; ++ ++ int active_kcmds, started_kcmds; ++ void *fwcom; ++ ++ int power_gating; ++}; ++ ++struct ipu_psys_fh { ++ struct ipu_psys *psys; ++ struct mutex mutex;/* Protects bufs_list & kcmds fields */ ++ struct list_head list; ++ /* Holds all buffers that this fh owns */ ++ struct list_head bufs_list; ++ /* Holds all descriptors (fd:kbuffer associations) */ ++ struct list_head descs_list; ++ struct list_head bufs_lru; ++ wait_queue_head_t wait; ++ struct ipu_psys_scheduler sched; ++ ++ u32 num_bufs; ++ u32 num_descs; ++ u32 num_bufs_lru; ++}; ++ ++struct ipu_psys_pg { ++ struct ipu_fw_psys_process_group *pg; ++ size_t size; ++ size_t pg_size; ++ dma_addr_t pg_dma_addr; ++ struct list_head list; ++ struct ipu_psys_resource_alloc 
resource_alloc; ++}; ++ ++struct ipu6_psys_constraint { ++ struct list_head list; ++ unsigned int min_freq; ++}; ++ ++struct ipu_psys_kcmd { ++ struct ipu_psys_fh *fh; ++ struct list_head list; ++ struct ipu_psys_buffer_set *kbuf_set; ++ enum ipu_psys_cmd_state state; ++ void *pg_manifest; ++ size_t pg_manifest_size; ++ struct ipu_psys_kbuffer **kbufs; ++ struct ipu_psys_buffer *buffers; ++ size_t nbuffers; ++ struct ipu_fw_psys_process_group *pg_user; ++ struct ipu_psys_pg *kpg; ++ u64 user_token; ++ u64 issue_id; ++ u32 priority; ++ u32 kernel_enable_bitmap[4]; ++ u32 terminal_enable_bitmap[4]; ++ u32 routing_enable_bitmap[4]; ++ u32 rbm[5]; ++ struct ipu6_psys_constraint constraint; ++ struct ipu_psys_event ev; ++ struct timer_list watchdog; ++}; ++ ++struct ipu_dma_buf_attach { ++ struct device *dev; ++ u64 len; ++ unsigned long userptr; ++ struct sg_table *sgt; ++ struct page **pages; ++ size_t npages; ++}; ++ ++struct ipu_psys_kbuffer { ++ u64 len; ++ unsigned long userptr; ++ void *kaddr; ++ struct list_head list; ++ dma_addr_t dma_addr; ++ struct sg_table *sgt; ++ struct dma_buf_attachment *db_attach; ++ struct dma_buf *dbuf; ++ u32 flags; ++ atomic_t map_count; /* The number of times this buffer is mapped */ ++ bool valid; /* True when buffer is usable */ ++}; ++ ++struct ipu_psys_desc { ++ struct ipu_psys_kbuffer *kbuf; ++ struct list_head list; ++ u32 fd; ++}; ++ ++#define inode_to_ipu_psys(inode) \ ++ container_of((inode)->i_cdev, struct ipu_psys, cdev) ++ ++void ipu_psys_setup_hw(struct ipu_psys *psys); ++void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on); ++void ipu_psys_handle_events(struct ipu_psys *psys); ++int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); ++void ipu_psys_run_next(struct ipu_psys *psys); ++struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); ++struct ipu_psys_kbuffer * ++ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); ++struct ipu_psys_kbuffer * 
++ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh); ++struct ipu_psys_kbuffer * ++ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); ++int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool); ++void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool); ++struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh); ++long ipu_ioctl_dqevent(struct ipu_psys_event *event, ++ struct ipu_psys_fh *fh, unsigned int f_flags); ++ ++#endif /* IPU_PSYS_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c +new file mode 100644 +index 0000000..4068e34 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c +@@ -0,0 +1,861 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ipu-fw-psys.h" ++#include "ipu-psys.h" ++ ++struct ipu6_psys_hw_res_variant hw_var; ++void ipu6_psys_hw_res_variant_init(void) ++{ ++ if (ipu_ver == IPU6_VER_6SE) { ++ hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID; ++ } else if (ipu_ver == IPU6_VER_6) { ++ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID; ++ } else if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) { ++ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID; ++ } else { ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ } ++ ++ hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn; ++ hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap; ++ hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem; ++ hw_var.get_pgm_by_proc = ipu6_fw_psys_get_pgm_by_process; ++} ++ ++static const struct ipu_fw_resource_definitions *get_res(void) ++{ ++ if (ipu_ver == IPU6_VER_6SE) ++ return 
ipu6se_res_defs; ++ ++ if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) ++ return ipu6ep_res_defs; ++ ++ return ipu6_res_defs; ++} ++ ++static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) ++{ ++ if (elements <= 0) { ++ res->bitmap = NULL; ++ return 0; ++ } ++ ++ res->bitmap = bitmap_zalloc(elements, GFP_KERNEL); ++ if (!res->bitmap) ++ return -ENOMEM; ++ res->elements = elements; ++ res->id = id; ++ return 0; ++} ++ ++static unsigned long ++ipu_resource_alloc_with_pos(struct ipu_resource *res, int n, ++ int pos, ++ struct ipu_resource_alloc *alloc, ++ enum ipu_resource_type type) ++{ ++ unsigned long p; ++ ++ if (n <= 0) { ++ alloc->elements = 0; ++ return 0; ++ } ++ ++ if (!res->bitmap || pos >= res->elements) ++ return (unsigned long)(-ENOSPC); ++ ++ p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0); ++ alloc->resource = NULL; ++ ++ if (p != pos) ++ return (unsigned long)(-ENOSPC); ++ bitmap_set(res->bitmap, p, n); ++ alloc->resource = res; ++ alloc->elements = n; ++ alloc->pos = p; ++ alloc->type = type; ++ ++ return pos; ++} ++ ++static unsigned long ++ipu_resource_alloc(struct ipu_resource *res, int n, ++ struct ipu_resource_alloc *alloc, ++ enum ipu_resource_type type) ++{ ++ unsigned long p; ++ ++ if (n <= 0) { ++ alloc->elements = 0; ++ return 0; ++ } ++ ++ if (!res->bitmap) ++ return (unsigned long)(-ENOSPC); ++ ++ p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); ++ alloc->resource = NULL; ++ ++ if (p >= res->elements) ++ return (unsigned long)(-ENOSPC); ++ bitmap_set(res->bitmap, p, n); ++ alloc->resource = res; ++ alloc->elements = n; ++ alloc->pos = p; ++ alloc->type = type; ++ ++ return p; ++} ++ ++static void ipu_resource_free(struct ipu_resource_alloc *alloc) ++{ ++ if (alloc->elements <= 0) ++ return; ++ ++ if (alloc->type == IPU_RESOURCE_DFM) ++ *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); ++ else ++ bitmap_clear(alloc->resource->bitmap, alloc->pos, ++ 
alloc->elements); ++ alloc->resource = NULL; ++} ++ ++static void ipu_resource_cleanup(struct ipu_resource *res) ++{ ++ bitmap_free(res->bitmap); ++ res->bitmap = NULL; ++} ++ ++/********** IPU PSYS-specific resource handling **********/ ++int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool) ++{ ++ int i, j, k, ret; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ res_defs = get_res(); ++ ++ spin_lock_init(&pool->queues_lock); ++ pool->cells = 0; ++ ++ for (i = 0; i < res_defs->num_dev_channels; i++) { ++ ret = ipu_resource_init(&pool->dev_channels[i], i, ++ res_defs->dev_channels[i]); ++ if (ret) ++ goto error; ++ } ++ ++ for (j = 0; j < res_defs->num_ext_mem_ids; j++) { ++ ret = ipu_resource_init(&pool->ext_memory[j], j, ++ res_defs->ext_mem_ids[j]); ++ if (ret) ++ goto memory_error; ++ } ++ ++ for (k = 0; k < res_defs->num_dfm_ids; k++) { ++ ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); ++ if (ret) ++ goto dfm_error; ++ } ++ ++ spin_lock(&pool->queues_lock); ++ if (ipu_ver == IPU6_VER_6SE) ++ bitmap_zero(pool->cmd_queues, ++ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID); ++ else ++ bitmap_zero(pool->cmd_queues, ++ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID); ++ spin_unlock(&pool->queues_lock); ++ ++ return 0; ++ ++dfm_error: ++ for (k--; k >= 0; k--) ++ ipu_resource_cleanup(&pool->dfms[k]); ++ ++memory_error: ++ for (j--; j >= 0; j--) ++ ipu_resource_cleanup(&pool->ext_memory[j]); ++ ++error: ++ for (i--; i >= 0; i--) ++ ipu_resource_cleanup(&pool->dev_channels[i]); ++ return ret; ++} ++ ++void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, ++ struct ipu_psys_resource_pool *dest) ++{ ++ int i; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ res_defs = get_res(); ++ ++ dest->cells = src->cells; ++ for (i = 0; i < res_defs->num_dev_channels; i++) ++ *dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap; ++ ++ for (i = 0; i < res_defs->num_ext_mem_ids; i++) ++ *dest->ext_memory[i].bitmap = 
*src->ext_memory[i].bitmap; ++ ++ for (i = 0; i < res_defs->num_dfm_ids; i++) ++ *dest->dfms[i].bitmap = *src->dfms[i].bitmap; ++} ++ ++void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool) ++{ ++ u32 i; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ res_defs = get_res(); ++ for (i = 0; i < res_defs->num_dev_channels; i++) ++ ipu_resource_cleanup(&pool->dev_channels[i]); ++ ++ for (i = 0; i < res_defs->num_ext_mem_ids; i++) ++ ipu_resource_cleanup(&pool->ext_memory[i]); ++ ++ for (i = 0; i < res_defs->num_dfm_ids; i++) ++ ipu_resource_cleanup(&pool->dfms[i]); ++} ++ ++static int __alloc_one_resrc(const struct device *dev, ++ struct ipu_fw_psys_process *process, ++ struct ipu_resource *resource, ++ struct ipu_fw_generic_program_manifest *pm, ++ u32 resource_id, ++ struct ipu_psys_resource_alloc *alloc) ++{ ++ const u16 resource_req = pm->dev_chn_size[resource_id]; ++ const u16 resource_offset_req = pm->dev_chn_offset[resource_id]; ++ unsigned long retl; ++ ++ if (!resource_req) ++ return -ENXIO; ++ ++ if (alloc->resources >= IPU_MAX_RESOURCES) { ++ dev_err(dev, "out of resource handles\n"); ++ return -ENOSPC; ++ } ++ if (resource_offset_req != (u16)(-1)) ++ retl = ipu_resource_alloc_with_pos ++ (resource, ++ resource_req, ++ resource_offset_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_DEV_CHN); ++ else ++ retl = ipu_resource_alloc ++ (resource, resource_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_DEV_CHN); ++ if (IS_ERR_VALUE(retl)) { ++ dev_dbg(dev, "out of device channel resources\n"); ++ return (int)retl; ++ } ++ alloc->resources++; ++ ++ return 0; ++} ++ ++static int ipu_psys_allocate_one_dfm(const struct device *dev, ++ struct ipu_fw_psys_process *process, ++ struct ipu_resource *resource, ++ struct ipu_fw_generic_program_manifest *pm, ++ u32 resource_id, ++ struct ipu_psys_resource_alloc *alloc) ++{ ++ u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id]; ++ u32 active_dfm_bitmap_req = 
pm->dfm_active_port_bitmap[resource_id]; ++ const u8 is_relocatable = pm->is_dfm_relocatable[resource_id]; ++ struct ipu_resource_alloc *alloc_resource; ++ unsigned long p = 0; ++ ++ if (!dfm_bitmap_req) ++ return -ENXIO; ++ ++ if (alloc->resources >= IPU_MAX_RESOURCES) { ++ dev_err(dev, "out of resource handles\n"); ++ return -ENOSPC; ++ } ++ ++ if (!resource->bitmap) ++ return -ENOSPC; ++ ++ if (!is_relocatable) { ++ if (*resource->bitmap & dfm_bitmap_req) { ++ dev_warn(dev, ++ "out of dfm resources, req 0x%x, get 0x%lx\n", ++ dfm_bitmap_req, *resource->bitmap); ++ return -ENOSPC; ++ } ++ *resource->bitmap |= dfm_bitmap_req; ++ } else { ++ unsigned int n = hweight32(dfm_bitmap_req); ++ ++ p = bitmap_find_next_zero_area(resource->bitmap, ++ resource->elements, 0, n, 0); ++ ++ if (p >= resource->elements) ++ return -ENOSPC; ++ ++ bitmap_set(resource->bitmap, p, n); ++ dfm_bitmap_req = dfm_bitmap_req << p; ++ active_dfm_bitmap_req = active_dfm_bitmap_req << p; ++ } ++ ++ alloc_resource = &alloc->resource_alloc[alloc->resources]; ++ alloc_resource->resource = resource; ++ /* Using elements to indicate the bitmap */ ++ alloc_resource->elements = dfm_bitmap_req; ++ alloc_resource->pos = p; ++ alloc_resource->type = IPU_RESOURCE_DFM; ++ ++ alloc->resources++; ++ ++ return 0; ++} ++ ++/* ++ * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM) ++ * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.) 
++ */ ++static int __alloc_mem_resrc(const struct device *dev, ++ struct ipu_fw_psys_process *process, ++ struct ipu_resource *resource, ++ struct ipu_fw_generic_program_manifest *pm, ++ u32 ext_mem_type_id, u32 ext_mem_bank_id, ++ struct ipu_psys_resource_alloc *alloc) ++{ ++ const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id]; ++ const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id]; ++ ++ unsigned long retl; ++ ++ if (!memory_resource_req) ++ return -ENXIO; ++ ++ if (alloc->resources >= IPU_MAX_RESOURCES) { ++ dev_err(dev, "out of resource handles\n"); ++ return -ENOSPC; ++ } ++ if (memory_offset_req != (u16)(-1)) ++ retl = ipu_resource_alloc_with_pos ++ (resource, ++ memory_resource_req, memory_offset_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_EXT_MEM); ++ else ++ retl = ipu_resource_alloc ++ (resource, memory_resource_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_EXT_MEM); ++ if (IS_ERR_VALUE(retl)) { ++ dev_dbg(dev, "out of memory resources\n"); ++ return (int)retl; ++ } ++ ++ alloc->resources++; ++ ++ return 0; ++} ++ ++int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool) ++{ ++ unsigned long p; ++ int size, start; ++ ++ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; ++ ++ if (ipu_ver == IPU6_VER_6SE) { ++ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; ++ } ++ ++ spin_lock(&pool->queues_lock); ++ /* find available cmd queue from ppg0_cmd_id */ ++ p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0); ++ ++ if (p >= size) { ++ spin_unlock(&pool->queues_lock); ++ return -ENOSPC; ++ } ++ ++ bitmap_set(pool->cmd_queues, p, 1); ++ spin_unlock(&pool->queues_lock); ++ ++ return p; ++} ++ ++void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, ++ u8 queue_id) ++{ ++ spin_lock(&pool->queues_lock); ++ bitmap_clear(pool->cmd_queues, queue_id, 1); ++ 
spin_unlock(&pool->queues_lock); ++} ++ ++int ipu_psys_try_allocate_resources(struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_pool *pool) ++{ ++ u32 id, idx; ++ u32 mem_type_id; ++ int ret, i; ++ u16 *process_offset_table; ++ u8 processes; ++ u32 cells = 0; ++ struct ipu_psys_resource_alloc *alloc; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ if (!pg) ++ return -EINVAL; ++ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); ++ processes = pg->process_count; ++ ++ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); ++ if (!alloc) ++ return -ENOMEM; ++ ++ res_defs = get_res(); ++ for (i = 0; i < processes; i++) { ++ u32 cell; ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[i]); ++ struct ipu_fw_generic_program_manifest pm; ++ ++ memset(&pm, 0, sizeof(pm)); ++ ++ if (!process) { ++ dev_err(dev, "can not get process\n"); ++ ret = -ENOENT; ++ goto free_out; ++ } ++ ++ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); ++ if (ret < 0) { ++ dev_err(dev, "can not get manifest\n"); ++ goto free_out; ++ } ++ ++ if (pm.cell_id == res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type) { ++ cell = res_defs->num_cells; ++ } else if ((pm.cell_id != res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type)) { ++ cell = pm.cell_id; ++ } else { ++ /* Find a free cell of desired type */ ++ u32 type = pm.cell_type_id; ++ ++ for (cell = 0; cell < res_defs->num_cells; cell++) ++ if (res_defs->cells[cell] == type && ++ ((pool->cells | cells) & (1 << cell)) == 0) ++ break; ++ if (cell >= res_defs->num_cells) { ++ dev_dbg(dev, "no free cells of right type\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ } ++ if (cell < res_defs->num_cells) ++ cells |= 1 << cell; ++ if (pool->cells & cells) { ++ dev_dbg(dev, "out of cell resources\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ ++ if 
(pm.dev_chn_size) { ++ for (id = 0; id < res_defs->num_dev_channels; id++) { ++ ret = __alloc_one_resrc(dev, process, ++ &pool->dev_channels[id], ++ &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.dfm_port_bitmap) { ++ for (id = 0; id < res_defs->num_dfm_ids; id++) { ++ ret = ipu_psys_allocate_one_dfm ++ (dev, process, ++ &pool->dfms[id], &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.ext_mem_size) { ++ for (mem_type_id = 0; ++ mem_type_id < res_defs->num_ext_mem_types; ++ mem_type_id++) { ++ u32 bank = res_defs->num_ext_mem_ids; ++ ++ if (cell != res_defs->num_cells) { ++ idx = res_defs->cell_mem_row * cell + ++ mem_type_id; ++ bank = res_defs->cell_mem[idx]; ++ } ++ ++ if (bank == res_defs->num_ext_mem_ids) ++ continue; ++ ++ ret = __alloc_mem_resrc(dev, process, ++ &pool->ext_memory[bank], ++ &pm, mem_type_id, bank, ++ alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ } ++ } ++ } ++ ++ pool->cells |= cells; ++ ++ kfree(alloc); ++ return 0; ++ ++free_out: ++ dev_dbg(dev, "failed to try_allocate resource\n"); ++ kfree(alloc); ++ return ret; ++} ++ ++/* ++ * Allocate resources for pg from `pool'. Mark the allocated ++ * resources into `alloc'. Returns 0 on success, -ENOSPC ++ * if there are no enough resources, in which cases resources ++ * are not allocated at all, or some other error on other conditions. 
++ */ ++int ipu_psys_allocate_resources(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *pool) ++{ ++ u32 id; ++ u32 mem_type_id; ++ int ret, i; ++ u16 *process_offset_table; ++ u8 processes; ++ u32 cells = 0; ++ int p, idx; ++ u32 bmp, a_bmp; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ if (!pg) ++ return -EINVAL; ++ ++ res_defs = get_res(); ++ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); ++ processes = pg->process_count; ++ ++ for (i = 0; i < processes; i++) { ++ u32 cell; ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[i]); ++ struct ipu_fw_generic_program_manifest pm; ++ ++ memset(&pm, 0, sizeof(pm)); ++ if (!process) { ++ dev_err(dev, "can not get process\n"); ++ ret = -ENOENT; ++ goto free_out; ++ } ++ ++ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); ++ if (ret < 0) { ++ dev_err(dev, "can not get manifest\n"); ++ goto free_out; ++ } ++ ++ if (pm.cell_id == res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type) { ++ cell = res_defs->num_cells; ++ } else if ((pm.cell_id != res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type)) { ++ cell = pm.cell_id; ++ } else { ++ /* Find a free cell of desired type */ ++ u32 type = pm.cell_type_id; ++ ++ for (cell = 0; cell < res_defs->num_cells; cell++) ++ if (res_defs->cells[cell] == type && ++ ((pool->cells | cells) & (1 << cell)) == 0) ++ break; ++ if (cell >= res_defs->num_cells) { ++ dev_dbg(dev, "no free cells of right type\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); ++ if (ret) ++ goto free_out; ++ } ++ if (cell < res_defs->num_cells) ++ cells |= 1 << cell; ++ if (pool->cells & cells) { ++ dev_dbg(dev, "out of cell resources\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ ++ if 
(pm.dev_chn_size) { ++ for (id = 0; id < res_defs->num_dev_channels; id++) { ++ ret = __alloc_one_resrc(dev, process, ++ &pool->dev_channels[id], ++ &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ ++ idx = alloc->resources - 1; ++ p = alloc->resource_alloc[idx].pos; ++ ret = ipu_fw_psys_set_proc_dev_chn(process, id, ++ p); ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.dfm_port_bitmap) { ++ for (id = 0; id < res_defs->num_dfm_ids; id++) { ++ ret = ipu_psys_allocate_one_dfm(dev, process, ++ &pool->dfms[id], ++ &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ ++ idx = alloc->resources - 1; ++ p = alloc->resource_alloc[idx].pos; ++ bmp = pm.dfm_port_bitmap[id]; ++ bmp = bmp << p; ++ a_bmp = pm.dfm_active_port_bitmap[id]; ++ a_bmp = a_bmp << p; ++ ret = ipu_fw_psys_set_proc_dfm_bitmap(process, ++ id, bmp, ++ a_bmp); ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.ext_mem_size) { ++ for (mem_type_id = 0; ++ mem_type_id < res_defs->num_ext_mem_types; ++ mem_type_id++) { ++ u32 bank = res_defs->num_ext_mem_ids; ++ ++ if (cell != res_defs->num_cells) { ++ idx = res_defs->cell_mem_row * cell + ++ mem_type_id; ++ bank = res_defs->cell_mem[idx]; ++ } ++ if (bank == res_defs->num_ext_mem_ids) ++ continue; ++ ++ ret = __alloc_mem_resrc(dev, process, ++ &pool->ext_memory[bank], ++ &pm, mem_type_id, ++ bank, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ ++ /* no return value check here because fw api ++ * will do some checks, and would return ++ * non-zero except mem_type_id == 0. ++ * This maybe caused by that above flow of ++ * allocating mem_bank_id is improper. 
++ */ ++ idx = alloc->resources - 1; ++ p = alloc->resource_alloc[idx].pos; ++ ipu_fw_psys_set_process_ext_mem(process, ++ mem_type_id, ++ bank, p); ++ } ++ } ++ } ++ alloc->cells |= cells; ++ pool->cells |= cells; ++ return 0; ++ ++free_out: ++ dev_err(dev, "failed to allocate resources, ret %d\n", ret); ++ ipu_psys_reset_process_cell(dev, pg, pg_manifest, i + 1); ++ ipu_psys_free_resources(alloc, pool); ++ return ret; ++} ++ ++int ipu_psys_move_resources(const struct device *dev, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool ++ *source_pool, struct ipu_psys_resource_pool ++ *target_pool) ++{ ++ int i; ++ ++ if (target_pool->cells & alloc->cells) { ++ dev_dbg(dev, "out of cell resources\n"); ++ return -ENOSPC; ++ } ++ ++ for (i = 0; i < alloc->resources; i++) { ++ unsigned long bitmap = 0; ++ unsigned int id = alloc->resource_alloc[i].resource->id; ++ unsigned long fbit, end; ++ ++ switch (alloc->resource_alloc[i].type) { ++ case IPU_RESOURCE_DEV_CHN: ++ bitmap_set(&bitmap, alloc->resource_alloc[i].pos, ++ alloc->resource_alloc[i].elements); ++ if (*target_pool->dev_channels[id].bitmap & bitmap) ++ return -ENOSPC; ++ break; ++ case IPU_RESOURCE_EXT_MEM: ++ end = alloc->resource_alloc[i].elements + ++ alloc->resource_alloc[i].pos; ++ ++ fbit = find_next_bit(target_pool->ext_memory[id].bitmap, ++ end, alloc->resource_alloc[i].pos); ++ /* if find_next_bit returns "end" it didn't find 1bit */ ++ if (end != fbit) ++ return -ENOSPC; ++ break; ++ case IPU_RESOURCE_DFM: ++ bitmap = alloc->resource_alloc[i].elements; ++ if (*target_pool->dfms[id].bitmap & bitmap) ++ return -ENOSPC; ++ break; ++ default: ++ dev_err(dev, "Illegal resource type\n"); ++ return -EINVAL; ++ } ++ } ++ ++ for (i = 0; i < alloc->resources; i++) { ++ u32 id = alloc->resource_alloc[i].resource->id; ++ ++ switch (alloc->resource_alloc[i].type) { ++ case IPU_RESOURCE_DEV_CHN: ++ bitmap_set(target_pool->dev_channels[id].bitmap, ++ alloc->resource_alloc[i].pos, ++ 
alloc->resource_alloc[i].elements); ++ ipu_resource_free(&alloc->resource_alloc[i]); ++ alloc->resource_alloc[i].resource = ++ &target_pool->dev_channels[id]; ++ break; ++ case IPU_RESOURCE_EXT_MEM: ++ bitmap_set(target_pool->ext_memory[id].bitmap, ++ alloc->resource_alloc[i].pos, ++ alloc->resource_alloc[i].elements); ++ ipu_resource_free(&alloc->resource_alloc[i]); ++ alloc->resource_alloc[i].resource = ++ &target_pool->ext_memory[id]; ++ break; ++ case IPU_RESOURCE_DFM: ++ *target_pool->dfms[id].bitmap |= ++ alloc->resource_alloc[i].elements; ++ *alloc->resource_alloc[i].resource->bitmap &= ++ ~(alloc->resource_alloc[i].elements); ++ alloc->resource_alloc[i].resource = ++ &target_pool->dfms[id]; ++ break; ++ default: ++ /* ++ * Just keep compiler happy. This case failed already ++ * in above loop. ++ */ ++ break; ++ } ++ } ++ ++ target_pool->cells |= alloc->cells; ++ source_pool->cells &= ~alloc->cells; ++ ++ return 0; ++} ++ ++void ipu_psys_reset_process_cell(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ int process_count) ++{ ++ int i; ++ u16 *process_offset_table; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ if (!pg) ++ return; ++ ++ res_defs = get_res(); ++ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); ++ for (i = 0; i < process_count; i++) { ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[i]); ++ struct ipu_fw_generic_program_manifest pm; ++ int ret; ++ ++ if (!process) ++ break; ++ ++ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); ++ if (ret < 0) { ++ dev_err(dev, "can not get manifest\n"); ++ break; ++ } ++ if ((pm.cell_id != res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type)) ++ continue; ++ /* no return value check here because if finding free cell ++ * failed, process cell would not set then calling clear_cell ++ * will return non-zero. 
++ */ ++ ipu_fw_psys_clear_process_cell(process); ++ } ++} ++ ++/* Free resources marked in `alloc' from `resources' */ ++void ipu_psys_free_resources(struct ipu_psys_resource_alloc ++ *alloc, struct ipu_psys_resource_pool *pool) ++{ ++ unsigned int i; ++ ++ pool->cells &= ~alloc->cells; ++ alloc->cells = 0; ++ for (i = 0; i < alloc->resources; i++) ++ ipu_resource_free(&alloc->resource_alloc[i]); ++ alloc->resources = 0; ++} +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c +new file mode 100644 +index 0000000..fe65a0a +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c +@@ -0,0 +1,607 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6-platform-resources.h" ++ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++static const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = { ++ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, ++ IPU6_FW_PSYS_VP_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* 
ICA_MEDIUM */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ ++ IPU6_FW_PSYS_GDC_TYPE_ID, ++ IPU6_FW_PSYS_TNR_TYPE_ID, ++}; ++ ++static const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, ++}; ++ ++static const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { ++ IPU6_FW_PSYS_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, ++ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM2_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM3_MAX_SIZE, ++ IPU6_FW_PSYS_PMEM0_MAX_SIZE ++}; ++ ++static const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { ++ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, ++}; ++ ++static const u8 ++ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { ++ { ++ /* IPU6_FW_PSYS_SP0_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM0_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_SP1_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_VP0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_DMEM3_ID, ++ 
IPU6_FW_PSYS_VMEM0_ID, ++ IPU6_FW_PSYS_BAMEM0_ID, ++ IPU6_FW_PSYS_PMEM0_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC1_ID BNLM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC2_ID DM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC3_ID ACM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC9_ID GLTM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC10_ID XNR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_ICA_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_LSC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_DPC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, 
++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AWB_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AE_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_DOL_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_PAF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ } ++}; ++ ++static const struct ipu_fw_resource_definitions ipu6_defs = { ++ .cells = ipu6_fw_psys_cell_types, ++ .num_cells = IPU6_FW_PSYS_N_CELL_ID, ++ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, ++ ++ .dev_channels = ipu6_fw_num_dev_channels, ++ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, ++ ++ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, ++ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, ++ .ext_mem_ids = ipu6_fw_psys_mem_size, ++ ++ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, ++ ++ .dfms = ipu6_fw_psys_dfms, ++ ++ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, ++ .cell_mem = &ipu6_fw_psys_c_mem[0][0], ++}; ++ ++const struct ipu_fw_resource_definitions 
*ipu6_res_defs = &ipu6_defs; ++ ++/********** Generic resource handling **********/ ++ ++int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value) ++{ ++ struct ipu6_fw_psys_process_ext *pm_ext; ++ u8 ps_ext_offset; ++ ++ ps_ext_offset = ptr->process_extension_offset; ++ if (!ps_ext_offset) ++ return -EINVAL; ++ ++ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); ++ ++ pm_ext->dev_chn_offset[offset] = value; ++ ++ return 0; ++} ++ ++int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap) ++{ ++ struct ipu6_fw_psys_process_ext *pm_ext; ++ u8 ps_ext_offset; ++ ++ ps_ext_offset = ptr->process_extension_offset; ++ if (!ps_ext_offset) ++ return -EINVAL; ++ ++ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); ++ ++ pm_ext->dfm_port_bitmap[id] = bitmap; ++ pm_ext->dfm_active_port_bitmap[id] = active_bitmap; ++ ++ return 0; ++} ++ ++int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset) ++{ ++ struct ipu6_fw_psys_process_ext *pm_ext; ++ u8 ps_ext_offset; ++ ++ ps_ext_offset = ptr->process_extension_offset; ++ if (!ps_ext_offset) ++ return -EINVAL; ++ ++ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); ++ ++ pm_ext->ext_mem_offset[type_id] = offset; ++ pm_ext->ext_mem_id[type_id] = mem_id; ++ ++ return 0; ++} ++ ++static struct ipu_fw_psys_program_manifest * ++get_program_manifest(const struct ipu_fw_psys_pgm *manifest, ++ const unsigned int program_index) ++{ ++ struct ipu_fw_psys_program_manifest *prg_manifest_base; ++ u8 *program_manifest = NULL; ++ u8 program_count; ++ unsigned int i; ++ ++ program_count = manifest->program_count; ++ ++ prg_manifest_base = (struct ipu_fw_psys_program_manifest *) ++ ((char *)manifest + manifest->program_manifest_offset); ++ if (program_index < program_count) { ++ program_manifest = (u8 *)prg_manifest_base; ++ for (i = 0; 
i < program_index; i++) ++ program_manifest += ++ ((struct ipu_fw_psys_program_manifest *) ++ program_manifest)->size; ++ } ++ ++ return (struct ipu_fw_psys_program_manifest *)program_manifest; ++} ++ ++int ++ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process) ++{ ++ u32 program_id = process->program_idx; ++ struct ipu_fw_psys_program_manifest *pm; ++ struct ipu6_fw_psys_program_manifest_ext *pm_ext; ++ ++ pm = get_program_manifest(pg_manifest, program_id); ++ ++ if (!pm) ++ return -ENOENT; ++ ++ if (pm->program_extension_offset) { ++ pm_ext = (struct ipu6_fw_psys_program_manifest_ext *) ++ ((u8 *)pm + pm->program_extension_offset); ++ ++ gen_pm->dev_chn_size = pm_ext->dev_chn_size; ++ gen_pm->dev_chn_offset = pm_ext->dev_chn_offset; ++ gen_pm->ext_mem_size = pm_ext->ext_mem_size; ++ gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset; ++ gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable; ++ gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap; ++ gen_pm->dfm_active_port_bitmap = ++ pm_ext->dfm_active_port_bitmap; ++ } ++ ++ memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells)); ++ gen_pm->cell_id = pm->cells[0]; ++ gen_pm->cell_type_id = pm->cell_type_id; ++ ++ return 0; ++} ++ ++#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \ ++ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) ++void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note) ++{ ++ struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; ++ struct device *dev = &psys->adev->auxdev.dev; ++ u32 pgid = pg->ID; ++ u8 processes = pg->process_count; ++ u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); ++ unsigned int p, chn, mem, mem_id; ++ unsigned int mem_type, max_mem_id, dev_chn; ++ ++ if (ipu_ver == IPU6_VER_6SE) { ++ mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID; ++ max_mem_id = 
IPU6SE_FW_PSYS_N_MEM_ID; ++ dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID; ++ } else if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || ++ ipu_ver == IPU6_VER_6EP_MTL) { ++ mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID; ++ max_mem_id = IPU6_FW_PSYS_N_MEM_ID; ++ dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID; ++ } else { ++ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); ++ return; ++ } ++ ++ dev_dbg(dev, "%s %s pgid %i has %i processes:\n", ++ __func__, note, pgid, processes); ++ ++ for (p = 0; p < processes; p++) { ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[p]); ++ struct ipu6_fw_psys_process_ext *pm_ext = ++ (struct ipu6_fw_psys_process_ext *)((u8 *)process ++ + process->process_extension_offset); ++ dev_dbg(dev, "\t process %i size=%u", p, process->size); ++ if (!process->process_extension_offset) ++ continue; ++ ++ for (mem = 0; mem < mem_type; mem++) { ++ mem_id = pm_ext->ext_mem_id[mem]; ++ if (mem_id != max_mem_id) ++ dev_dbg(dev, "\t mem type %u id %d offset=0x%x", ++ mem, mem_id, ++ pm_ext->ext_mem_offset[mem]); ++ } ++ for (chn = 0; chn < dev_chn; chn++) { ++ if (pm_ext->dev_chn_offset[chn] != (u16)(-1)) ++ dev_dbg(dev, "\t dev_chn[%u]=0x%x\n", ++ chn, pm_ext->dev_chn_offset[chn]); ++ } ++ } ++} ++#else ++void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note) ++{ ++ if (ipu_ver == IPU6_VER_6SE || ipu_ver == IPU6_VER_6 || ++ ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) ++ return; ++ ++ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); ++} ++#endif +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c +new file mode 100644 +index 0000000..5d95843 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c +@@ -0,0 +1,621 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ 
++#include "ipu-psys.h" ++#include "ipu6-ppg.h" ++ ++extern bool enable_power_gating; ++ ++struct sched_list { ++ struct list_head list; ++ /* to protect the list */ ++ struct mutex lock; ++}; ++ ++static struct sched_list start_list = { ++ .list = LIST_HEAD_INIT(start_list.list), ++ .lock = __MUTEX_INITIALIZER(start_list.lock), ++}; ++ ++static struct sched_list stop_list = { ++ .list = LIST_HEAD_INIT(stop_list.list), ++ .lock = __MUTEX_INITIALIZER(stop_list.lock), ++}; ++ ++static struct sched_list *get_sc_list(enum SCHED_LIST type) ++{ ++ /* for debug purposes */ ++ WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST); ++ ++ if (type == SCHED_START_LIST) ++ return &start_list; ++ return &stop_list; ++} ++ ++static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head) ++{ ++ struct ipu_psys_ppg *tmp; ++ ++ list_for_each_entry(tmp, head, sched_list) { ++ if (kppg == tmp) ++ return true; ++ } ++ ++ return false; ++} ++ ++void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type) ++{ ++ struct sched_list *sc_list = get_sc_list(type); ++ struct ipu_psys_ppg *tmp0, *tmp1; ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ ++ mutex_lock(&sc_list->lock); ++ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { ++ if (tmp0 == kppg) { ++ dev_dbg(dev, "remove from %s list, kppg(%d 0x%p) state %d\n", ++ type == SCHED_START_LIST ? 
"start" : "stop", ++ kppg->kpg->pg->ID, kppg, kppg->state); ++ list_del_init(&kppg->sched_list); ++ } ++ } ++ mutex_unlock(&sc_list->lock); ++} ++ ++void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type) ++{ ++ int cur_pri = kppg->pri_base + kppg->pri_dynamic; ++ struct sched_list *sc_list = get_sc_list(type); ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *tmp0, *tmp1; ++ ++ dev_dbg(dev, ++ "add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n", ++ type == SCHED_START_LIST ? "start" : "stop", ++ kppg->kpg->pg->ID, kppg, kppg->state, ++ kppg->pri_base, kppg->pri_dynamic, kppg->fh); ++ ++ mutex_lock(&sc_list->lock); ++ if (list_empty(&sc_list->list)) { ++ list_add(&kppg->sched_list, &sc_list->list); ++ goto out; ++ } ++ ++ if (is_kppg_in_list(kppg, &sc_list->list)) { ++ dev_dbg(dev, "kppg already in list\n"); ++ goto out; ++ } ++ ++ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { ++ int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic; ++ ++ dev_dbg(dev, ++ "found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n", ++ tmp0->kpg->pg->ID, tmp0, tmp0->state, ++ tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh); ++ ++ if (type == SCHED_START_LIST && tmp_pri > cur_pri) { ++ list_add(&kppg->sched_list, tmp0->sched_list.prev); ++ goto out; ++ } else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) { ++ list_add(&kppg->sched_list, tmp0->sched_list.prev); ++ goto out; ++ } ++ } ++ ++ list_add_tail(&kppg->sched_list, &sc_list->list); ++out: ++ mutex_unlock(&sc_list->lock); ++} ++ ++static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_resource_pool *try_res_pool; ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ int ret = 0; ++ int state; ++ ++ try_res_pool = kzalloc(sizeof(*try_res_pool), GFP_KERNEL); ++ if (IS_ERR_OR_NULL(try_res_pool)) ++ return -ENOMEM; ++ ++ 
mutex_lock(&kppg->mutex); ++ state = kppg->state; ++ mutex_unlock(&kppg->mutex); ++ if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING || ++ state == PPG_STATE_RESUMED) ++ goto exit; ++ ++ ret = ipu_psys_res_pool_init(try_res_pool); ++ if (ret < 0) { ++ dev_err(dev, "unable to alloc pg resources\n"); ++ WARN_ON(1); ++ goto exit; ++ } ++ ++ ipu_psys_resource_copy(&psys->res_pool_running, try_res_pool); ++ ret = ipu_psys_try_allocate_resources(dev, kppg->kpg->pg, ++ kppg->manifest, ++ try_res_pool); ++ ++ ipu_psys_res_pool_cleanup(try_res_pool); ++exit: ++ kfree(try_res_pool); ++ ++ return ret; ++} ++ ++static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping) ++{ ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_START || ++ kppg->state == PPG_STATE_RESUME) { ++ ipu_psys_scheduler_add_kppg(kppg, ++ SCHED_START_LIST); ++ } else if (kppg->state == PPG_STATE_RUNNING) { ++ ipu_psys_scheduler_add_kppg(kppg, ++ SCHED_STOP_LIST); ++ } else if (kppg->state == PPG_STATE_SUSPENDING || ++ kppg->state == PPG_STATE_STOPPING) { ++ /* there are some suspending/stopping ppgs */ ++ *stopping = true; ++ } else if (kppg->state == PPG_STATE_RESUMING || ++ kppg->state == PPG_STATE_STARTING) { ++ /* how about kppg are resuming/starting? 
*/ ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} ++ ++static void ipu_psys_scheduler_update_start_ppg_priority(void) ++{ ++ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); ++ struct ipu_psys_ppg *kppg, *tmp; ++ ++ mutex_lock(&sc_list->lock); ++ if (!list_empty(&sc_list->list)) ++ list_for_each_entry_safe(kppg, tmp, &sc_list->list, sched_list) ++ kppg->pri_dynamic--; ++ mutex_unlock(&sc_list->lock); ++} ++ ++static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys) ++{ ++ struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST); ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg; ++ bool resched = false; ++ ++ mutex_lock(&sc_list->lock); ++ if (list_empty(&sc_list->list)) { ++ /* some ppgs are RESUMING/STARTING */ ++ dev_dbg(dev, "no candidated stop ppg\n"); ++ mutex_unlock(&sc_list->lock); ++ return false; ++ } ++ kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg, ++ sched_list); ++ mutex_unlock(&sc_list->lock); ++ ++ mutex_lock(&kppg->mutex); ++ if (!(kppg->state & PPG_STATE_STOP)) { ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, kppg->state, PPG_STATE_SUSPEND); ++ kppg->state = PPG_STATE_SUSPEND; ++ resched = true; ++ } ++ mutex_unlock(&kppg->mutex); ++ ++ return resched; ++} ++ ++/* ++ * search all kppgs and sort them into start_list and stop_list, always start ++ * first kppg(high priority) in start_list; ++ * if there is resource contention, it would switch kppgs in stop_list ++ * to suspend state one by one ++ */ ++static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys) ++{ ++ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg, *kppg0; ++ bool stopping_existed = false; ++ int ret; ++ ++ ipu_psys_scheduler_ppg_sort(psys, &stopping_existed); ++ ++ mutex_lock(&sc_list->lock); ++ if (list_empty(&sc_list->list)) { ++ dev_dbg(dev, "no ppg to start\n"); ++ 
mutex_unlock(&sc_list->lock); ++ return false; ++ } ++ ++ list_for_each_entry_safe(kppg, kppg0, ++ &sc_list->list, sched_list) { ++ mutex_unlock(&sc_list->lock); ++ ++ ret = ipu_psys_detect_resource_contention(kppg); ++ if (ret < 0) { ++ dev_dbg(dev, "ppg %d resource detect failed(%d)\n", ++ kppg->kpg->pg->ID, ret); ++ /* ++ * switch out other ppg in 2 cases: ++ * 1. resource contention ++ * 2. no suspending/stopping ppg ++ */ ++ if (ret == -ENOSPC) { ++ if (!stopping_existed && ++ ipu_psys_scheduler_switch_ppg(psys)) { ++ return true; ++ } ++ dev_dbg(dev, "ppg is suspending/stopping\n"); ++ } else { ++ dev_err(dev, "detect resource error %d\n", ret); ++ } ++ } else { ++ kppg->pri_dynamic = 0; ++ ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_START) ++ ipu_psys_ppg_start(kppg); ++ else ++ ipu_psys_ppg_resume(kppg); ++ mutex_unlock(&kppg->mutex); ++ ++ ipu_psys_scheduler_remove_kppg(kppg, ++ SCHED_START_LIST); ++ ipu_psys_scheduler_update_start_ppg_priority(); ++ } ++ mutex_lock(&sc_list->lock); ++ } ++ mutex_unlock(&sc_list->lock); ++ ++ return false; ++} ++ ++static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_fh *fh; ++ bool resched = false; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry(kppg, &sched->ppgs, list) { ++ if (ipu_psys_ppg_enqueue_bufsets(kppg)) ++ resched = true; ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return resched; ++} ++ ++/* ++ * This function will check all kppgs within fhs, and if kppg state ++ * is STOP or SUSPEND, l-scheduler will call ppg function to stop ++ * or suspend it and update stop list ++ */ ++ ++static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct 
ipu_psys_fh *fh; ++ bool stopping_exit = false; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state & PPG_STATE_STOP) { ++ ipu_psys_ppg_stop(kppg); ++ ipu_psys_scheduler_remove_kppg(kppg, ++ SCHED_STOP_LIST); ++ } else if (kppg->state == PPG_STATE_SUSPEND && ++ list_empty(&kppg->kcmds_processing_list)) { ++ ipu_psys_ppg_suspend(kppg); ++ ipu_psys_scheduler_remove_kppg(kppg, ++ SCHED_STOP_LIST); ++ } else if (kppg->state == PPG_STATE_SUSPENDING || ++ kppg->state == PPG_STATE_STOPPING) { ++ stopping_exit = true; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ return stopping_exit; ++} ++ ++static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys, ++ struct ipu_psys_ppg *kppg, ++ struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int old_ppg_state = kppg->state; ++ ++ /* ++ * Respond kcmd when ppg is in stable state: ++ * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED ++ */ ++ if (kppg->state == PPG_STATE_STARTED || ++ kppg->state == PPG_STATE_RESUMED || ++ kppg->state == PPG_STATE_RUNNING) { ++ if (kcmd->state == KCMD_STATE_PPG_START) ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ else if (kcmd->state == KCMD_STATE_PPG_STOP) ++ kppg->state = PPG_STATE_STOP; ++ } else if (kppg->state == PPG_STATE_SUSPENDED) { ++ if (kcmd->state == KCMD_STATE_PPG_START) ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ else if (kcmd->state == KCMD_STATE_PPG_STOP) ++ /* ++ * Record the previous state ++ * because here need resume at first ++ */ ++ kppg->state |= PPG_STATE_STOP; ++ else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) ++ kppg->state = PPG_STATE_RESUME; ++ } else if (kppg->state == PPG_STATE_STOPPED) { ++ if (kcmd->state == KCMD_STATE_PPG_START) { ++ kppg->state = 
PPG_STATE_START; ++ } else if (kcmd->state == KCMD_STATE_PPG_STOP) { ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ } else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) { ++ dev_err(dev, "ppg %p stopped!\n", kppg); ++ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); ++ } ++ } ++ ++ if (old_ppg_state != kppg->state) ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, old_ppg_state, kppg->state); ++} ++ ++static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys) ++{ ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (list_empty(&kppg->kcmds_new_list)) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ }; ++ ++ kcmd = list_first_entry(&kppg->kcmds_new_list, ++ struct ipu_psys_kcmd, list); ++ ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd); ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} ++ ++static bool is_ready_to_enter_power_gating(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (!list_empty(&kppg->kcmds_new_list) || ++ !list_empty(&kppg->kcmds_processing_list)) { ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return false; ++ } ++ if (!(kppg->state == PPG_STATE_RUNNING || ++ kppg->state == PPG_STATE_STOPPED || ++ kppg->state == PPG_STATE_SUSPENDED)) { ++ mutex_unlock(&kppg->mutex); ++ 
mutex_unlock(&fh->mutex); ++ return false; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return true; ++} ++ ++static bool has_pending_kcmd(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (!list_empty(&kppg->kcmds_new_list) || ++ !list_empty(&kppg->kcmds_processing_list)) { ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return true; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return false; ++} ++ ++static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ ++ /* Assume power gating process can be aborted directly during START */ ++ if (psys->power_gating == PSYS_POWER_GATED) { ++ dev_dbg(dev, "powergating: exit ---\n"); ++ ipu_psys_exit_power_gating(psys); ++ } ++ psys->power_gating = PSYS_POWER_NORMAL; ++ return false; ++} ++ ++static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ if (!enable_power_gating) ++ return false; ++ ++ if (psys->power_gating == PSYS_POWER_NORMAL && ++ is_ready_to_enter_power_gating(psys)) { ++ /* Enter power gating */ ++ dev_dbg(dev, "powergating: enter +++\n"); ++ psys->power_gating = PSYS_POWER_GATING; ++ } ++ ++ if (psys->power_gating != PSYS_POWER_GATING) ++ return false; ++ ++ /* Suspend ppgs one by one */ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ 
mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_RUNNING) { ++ kppg->state = PPG_STATE_SUSPEND; ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return true; ++ } ++ ++ if (kppg->state != PPG_STATE_SUSPENDED && ++ kppg->state != PPG_STATE_STOPPED) { ++ /* Can't enter power gating */ ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ /* Need re-run l-scheduler to suspend ppg? */ ++ return (kppg->state & PPG_STATE_STOP || ++ kppg->state == PPG_STATE_SUSPEND); ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ psys->power_gating = PSYS_POWER_GATED; ++ ipu_psys_enter_power_gating(psys); ++ ++ return false; ++} ++ ++void ipu_psys_run_next(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ /* Wake up scheduler due to unfinished work */ ++ bool need_trigger = false; ++ /* Wait FW callback if there are stopping/suspending/running ppg */ ++ bool wait_fw_finish = false; ++ /* ++ * Code below will crash if fhs is empty. Normally this ++ * shouldn't happen. 
++ */ ++ if (list_empty(&psys->fhs)) { ++ WARN_ON(1); ++ return; ++ } ++ ++ /* Abort power gating process */ ++ if (psys->power_gating != PSYS_POWER_NORMAL && ++ has_pending_kcmd(psys)) ++ need_trigger = ipu_psys_scheduler_exit_power_gating(psys); ++ ++ /* Handle kcmd and related ppg switch */ ++ if (psys->power_gating == PSYS_POWER_NORMAL) { ++ ipu_psys_scheduler_kcmd_set(psys); ++ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); ++ need_trigger |= ipu_psys_scheduler_ppg_start(psys); ++ need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys); ++ } ++ if (!(need_trigger || wait_fw_finish)) { ++ /* Nothing to do, enter power gating */ ++ need_trigger = ipu_psys_scheduler_enter_power_gating(psys); ++ if (psys->power_gating == PSYS_POWER_GATING) ++ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); ++ } ++ ++ if (need_trigger && !wait_fw_finish) { ++ dev_dbg(dev, "scheduler: wake up\n"); ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ } ++} +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h +new file mode 100644 +index 0000000..1b67956 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h +@@ -0,0 +1,194 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2018 - 2024 Intel Corporation */ ++ ++#ifndef IPU6_PLATFORM_RESOURCES_H ++#define IPU6_PLATFORM_RESOURCES_H ++ ++#include "ipu-platform-resources.h" ++ ++#define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 0 ++ ++enum { ++ IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, ++ IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID, ++ 
IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID, ++ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID, ++ IPU6_FW_PSYS_LB_VMEM_TYPE_ID, ++ IPU6_FW_PSYS_DMEM_TYPE_ID, ++ IPU6_FW_PSYS_VMEM_TYPE_ID, ++ IPU6_FW_PSYS_BAMEM_TYPE_ID, ++ IPU6_FW_PSYS_PMEM_TYPE_ID, ++ IPU6_FW_PSYS_N_MEM_TYPE_ID ++}; ++ ++enum ipu6_mem_id { ++ IPU6_FW_PSYS_VMEM0_ID = 0, /* ISP0 VMEM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, /* TRANSFER VMEM 0 */ ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, /* TRANSFER VMEM 1 */ ++ IPU6_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ ++ IPU6_FW_PSYS_BAMEM0_ID, /* ISP0 BAMEM */ ++ IPU6_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ ++ IPU6_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ ++ IPU6_FW_PSYS_DMEM2_ID, /* SPP1 Dmem */ ++ IPU6_FW_PSYS_DMEM3_ID, /* ISP0 Dmem */ ++ IPU6_FW_PSYS_PMEM0_ID, /* ISP0 PMEM */ ++ IPU6_FW_PSYS_N_MEM_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, ++ 
IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, ++ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID, ++ IPU6_FW_PSYS_N_DEV_CHN_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0, ++ IPU6_FW_PSYS_SP_SERVER_TYPE_ID, ++ IPU6_FW_PSYS_VP_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_GDC_TYPE_ID, ++ IPU6_FW_PSYS_TNR_TYPE_ID, ++ IPU6_FW_PSYS_N_CELL_TYPE_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_SP0_ID = 0, ++ IPU6_FW_PSYS_VP0_ID, ++ IPU6_FW_PSYS_PSA_ACC_BNLM_ID, ++ IPU6_FW_PSYS_PSA_ACC_DM_ID, ++ IPU6_FW_PSYS_PSA_ACC_ACM_ID, ++ IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID, ++ IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, ++ IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, ++ IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, ++ IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID, ++ IPU6_FW_PSYS_PSA_ACC_GLTM_ID, ++ IPU6_FW_PSYS_PSA_ACC_XNR_ID, ++ IPU6_FW_PSYS_PSA_VCSC_ID, /* VCSC */ ++ IPU6_FW_PSYS_ISA_ICA_ID, ++ IPU6_FW_PSYS_ISA_LSC_ID, ++ IPU6_FW_PSYS_ISA_DPC_ID, ++ IPU6_FW_PSYS_ISA_SIS_A_ID, ++ IPU6_FW_PSYS_ISA_SIS_B_ID, ++ IPU6_FW_PSYS_ISA_B2B_ID, ++ IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID, ++ IPU6_FW_PSYS_ISA_R2I_DS_A_ID, ++ IPU6_FW_PSYS_ISA_R2I_DS_B_ID, ++ IPU6_FW_PSYS_ISA_AWB_ID, ++ IPU6_FW_PSYS_ISA_AE_ID, ++ IPU6_FW_PSYS_ISA_AF_ID, ++ IPU6_FW_PSYS_ISA_DOL_ID, ++ IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID, ++ IPU6_FW_PSYS_ISA_X2B_MD_ID, ++ IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, ++ IPU6_FW_PSYS_ISA_PAF_ID, ++ IPU6_FW_PSYS_BB_ACC_GDC0_ID, ++ IPU6_FW_PSYS_BB_ACC_TNR_ID, ++ IPU6_FW_PSYS_N_CELL_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0, ++ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID, ++}; ++ ++/* Excluding PMEM */ ++#define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6_FW_PSYS_N_MEM_TYPE_ID - 1) ++#define IPU6_FW_PSYS_N_DEV_DFM_ID \ ++ (IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1) ++ ++#define IPU6_FW_PSYS_VMEM0_MAX_SIZE 0x0800 ++/* 
Transfer VMEM0 words, ref HAS Transfer*/ ++#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 ++/* Transfer VMEM1 words, ref HAS Transfer*/ ++#define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE 0x0800 ++#define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ ++#define IPU6_FW_PSYS_BAMEM0_MAX_SIZE 0x0800 ++#define IPU6_FW_PSYS_DMEM0_MAX_SIZE 0x4000 ++#define IPU6_FW_PSYS_DMEM1_MAX_SIZE 0x1000 ++#define IPU6_FW_PSYS_DMEM2_MAX_SIZE 0x1000 ++#define IPU6_FW_PSYS_DMEM3_MAX_SIZE 0x1000 ++#define IPU6_FW_PSYS_PMEM0_MAX_SIZE 0x0500 ++ ++#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 ++#define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 0 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 43 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 8 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 ++ ++#define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 ++ ++struct ipu6_fw_psys_program_manifest_ext { ++ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++ u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID]; ++ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; ++ u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; ++ u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; ++ u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; ++ u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; ++ u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT]; ++}; ++ ++struct ipu6_fw_psys_process_ext { ++ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; ++ u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++}; ++ ++#endif /* IPU6_PLATFORM_RESOURCES_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c +new file mode 100644 +index 0000000..b6a90f8 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c +@@ -0,0 +1,556 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++ ++#include "ipu6-dma.h" ++#include "ipu6-ppg.h" ++ ++static bool enable_suspend_resume; ++module_param(enable_suspend_resume, bool, 0664); ++MODULE_PARM_DESC(enable_suspend_resume, "enable fw ppg suspend/resume api"); ++ ++static struct ipu_psys_kcmd * ++ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state) ++{ ++ struct ipu_psys_kcmd *kcmd; ++ ++ if (list_empty(&kppg->kcmds_new_list)) ++ return NULL; ++ ++ list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) { ++ if (kcmd->state == state) ++ return kcmd; ++ } ++ ++ return NULL; ++} ++ ++struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_kcmd *kcmd; ++ ++ WARN(!mutex_is_locked(&kppg->mutex), "ppg locking error"); ++ ++ if (list_empty(&kppg->kcmds_processing_list)) ++ return NULL; ++ ++ list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) { ++ if (kcmd->state == KCMD_STATE_PPG_STOP) ++ return kcmd; ++ } ++ ++ return NULL; ++} ++ ++static struct ipu_psys_buffer_set * ++__get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size) ++{ ++ struct ipu6_bus_device *adev = fh->psys->adev; ++ struct ipu_psys_buffer_set *kbuf_set; ++ struct 
ipu_psys_scheduler *sched = &fh->sched; ++ ++ mutex_lock(&sched->bs_mutex); ++ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { ++ if (!kbuf_set->buf_set_size && ++ kbuf_set->size >= buf_set_size) { ++ kbuf_set->buf_set_size = buf_set_size; ++ mutex_unlock(&sched->bs_mutex); ++ return kbuf_set; ++ } ++ } ++ ++ mutex_unlock(&sched->bs_mutex); ++ /* no suitable buffer available, allocate new one */ ++ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); ++ if (!kbuf_set) ++ return NULL; ++ ++ kbuf_set->kaddr = ipu6_dma_alloc(adev, buf_set_size, ++ &kbuf_set->dma_addr, GFP_KERNEL, 0); ++ if (!kbuf_set->kaddr) { ++ kfree(kbuf_set); ++ return NULL; ++ } ++ ++ kbuf_set->buf_set_size = buf_set_size; ++ kbuf_set->size = buf_set_size; ++ mutex_lock(&sched->bs_mutex); ++ list_add(&kbuf_set->list, &sched->buf_sets); ++ mutex_unlock(&sched->bs_mutex); ++ ++ return kbuf_set; ++} ++ ++static struct ipu_psys_buffer_set * ++ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd, ++ struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_buffer_set *kbuf_set; ++ size_t buf_set_size; ++ u32 *keb; ++ ++ buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd); ++ ++ kbuf_set = __get_buf_set(fh, buf_set_size); ++ if (!kbuf_set) { ++ dev_err(dev, "failed to create buffer set\n"); ++ return NULL; ++ } ++ ++ kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd, ++ kbuf_set->kaddr, ++ 0); ++ ++ ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set, ++ kbuf_set->dma_addr); ++ keb = kcmd->kernel_enable_bitmap; ++ ipu_fw_psys_ppg_buffer_set_set_keb(kbuf_set->buf_set, keb); ++ ipu6_dma_sync_single(psys->adev, kbuf_set->dma_addr, buf_set_size); ++ ++ return kbuf_set; ++} ++ ++int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, ++ struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct 
ipu_psys_buffer_set *kbuf_set; ++ unsigned int i; ++ int ret; ++ ++ kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg); ++ if (!kbuf_set) { ++ ret = -EINVAL; ++ goto error; ++ } ++ kcmd->kbuf_set = kbuf_set; ++ kbuf_set->kcmd = kcmd; ++ ++ for (i = 0; i < kcmd->nbuffers; i++) { ++ struct ipu_fw_psys_terminal *terminal; ++ u32 buffer; ++ ++ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); ++ if (!terminal) ++ continue; ++ ++ buffer = (u32)kcmd->kbufs[i]->dma_addr + ++ kcmd->buffers[i].data_offset; ++ ++ ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer); ++ if (ret) { ++ dev_err(dev, "Unable to set bufset\n"); ++ goto error; ++ } ++ } ++ ++ return 0; ++ ++error: ++ dev_err(dev, "failed to get buffer set\n"); ++ return ret; ++} ++ ++void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg) ++{ ++ struct device *dev; ++ u8 queue_id; ++ int old_ppg_state; ++ ++ if (!psys || !kppg) ++ return; ++ ++ dev = &psys->adev->auxdev.dev; ++ ++ mutex_lock(&kppg->mutex); ++ old_ppg_state = kppg->state; ++ if (kppg->state == PPG_STATE_STOPPING) { ++ struct ipu_psys_kcmd tmp_kcmd = { ++ .kpg = kppg->kpg, ++ }; ++ ++ kppg->state = PPG_STATE_STOPPED; ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd); ++ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, queue_id); ++ pm_runtime_put(dev); ++ } else { ++ if (kppg->state == PPG_STATE_SUSPENDING) { ++ kppg->state = PPG_STATE_SUSPENDED; ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ } else if (kppg->state == PPG_STATE_STARTED || ++ kppg->state == PPG_STATE_RESUMED) { ++ kppg->state = PPG_STATE_RUNNING; ++ } ++ ++ /* Kick l-scheduler thread for FW callback, ++ * also for checking if need to enter power gating ++ */ ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ } ++ if (old_ppg_state != kppg->state) ++ dev_dbg(dev, "s_change:%s: %p %d 
-> %d\n", __func__, kppg, ++ old_ppg_state, kppg->state); ++ ++ mutex_unlock(&kppg->mutex); ++} ++ ++int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, ++ KCMD_STATE_PPG_START); ++ unsigned int i; ++ int ret; ++ ++ if (!kcmd) { ++ dev_err(dev, "failed to find start kcmd!\n"); ++ return -EINVAL; ++ } ++ ++ dev_dbg(dev, "start ppg id %d, addr 0x%p\n", ++ ipu_fw_psys_pg_get_id(kcmd), kppg); ++ ++ kppg->state = PPG_STATE_STARTING; ++ for (i = 0; i < kcmd->nbuffers; i++) { ++ struct ipu_fw_psys_terminal *terminal; ++ ++ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); ++ if (!terminal) ++ continue; ++ ++ ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0, ++ kcmd->buffers[i].len); ++ if (ret) { ++ dev_err(dev, "Unable to set terminal\n"); ++ return ret; ++ } ++ } ++ ++ ret = ipu_fw_psys_pg_submit(kcmd); ++ if (ret) { ++ dev_err(dev, "failed to submit kcmd!\n"); ++ return ret; ++ } ++ ++ ret = ipu_psys_allocate_resources(dev, kcmd->kpg->pg, kcmd->pg_manifest, ++ &kcmd->kpg->resource_alloc, ++ &psys->res_pool_running); ++ if (ret) { ++ dev_err(dev, "alloc resources failed!\n"); ++ return ret; ++ } ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "failed to power on psys\n"); ++ goto error; ++ } ++ ++ ret = ipu_psys_kcmd_start(psys, kcmd); ++ if (ret) { ++ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); ++ goto error; ++ } ++ ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, kppg->state, PPG_STATE_STARTED); ++ kppg->state = PPG_STATE_STARTED; ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ ++ return 0; ++ ++error: ++ pm_runtime_put_noidle(dev); ++ ipu_psys_reset_process_cell(dev, kcmd->kpg->pg, kcmd->pg_manifest, ++ kcmd->kpg->pg->process_count); ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ ++ dev_err(dev, "failed to start ppg\n"); ++ return ret; ++} 
++ ++int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd tmp_kcmd = { ++ .kpg = kppg->kpg, ++ .fh = kppg->fh, ++ }; ++ int ret; ++ ++ dev_dbg(dev, "resume ppg id %d, addr 0x%p\n", ++ ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg); ++ ++ kppg->state = PPG_STATE_RESUMING; ++ if (enable_suspend_resume) { ++ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, ++ kppg->manifest, ++ &kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ if (ret) { ++ dev_err(dev, "failed to allocate res\n"); ++ return -EIO; ++ } ++ ++ ret = ipu_fw_psys_ppg_resume(&tmp_kcmd); ++ if (ret) { ++ dev_err(dev, "failed to resume ppg\n"); ++ goto error; ++ } ++ } else { ++ kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY; ++ ret = ipu_fw_psys_pg_submit(&tmp_kcmd); ++ if (ret) { ++ dev_err(dev, "failed to submit kcmd!\n"); ++ return ret; ++ } ++ ++ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, ++ kppg->manifest, ++ &kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ if (ret) { ++ dev_err(dev, "failed to allocate res\n"); ++ return ret; ++ } ++ ++ ret = ipu_psys_kcmd_start(psys, &tmp_kcmd); ++ if (ret) { ++ dev_err(dev, "failed to start kcmd!\n"); ++ goto error; ++ } ++ } ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, kppg->state, PPG_STATE_RESUMED); ++ kppg->state = PPG_STATE_RESUMED; ++ ++ return 0; ++ ++error: ++ ipu_psys_reset_process_cell(dev, kppg->kpg->pg, kppg->manifest, ++ kppg->kpg->pg->process_count); ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ ++ return ret; ++} ++ ++int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, ++ KCMD_STATE_PPG_STOP); ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd kcmd_temp; ++ int ppg_id, ret = 0; ++ ++ if (kcmd) { ++ 
list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); ++ } else { ++ dev_dbg(dev, "Exceptional stop happened!\n"); ++ kcmd_temp.kpg = kppg->kpg; ++ kcmd_temp.fh = kppg->fh; ++ kcmd = &kcmd_temp; ++ /* delete kppg in stop list to avoid this ppg resuming */ ++ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); ++ } ++ ++ ppg_id = ipu_fw_psys_pg_get_id(kcmd); ++ dev_dbg(dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg); ++ ++ if (kppg->state & PPG_STATE_SUSPENDED) { ++ if (enable_suspend_resume) { ++ dev_dbg(dev, "need resume before stop!\n"); ++ kcmd_temp.kpg = kppg->kpg; ++ kcmd_temp.fh = kppg->fh; ++ ret = ipu_fw_psys_ppg_resume(&kcmd_temp); ++ if (ret) ++ dev_err(dev, "ppg(%d) failed to resume\n", ++ ppg_id); ++ } else if (kcmd != &kcmd_temp) { ++ u8 queue_id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); ++ ++ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, ++ queue_id); ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, ++ kppg, kppg->state, PPG_STATE_STOPPED); ++ pm_runtime_put(dev); ++ kppg->state = PPG_STATE_STOPPED; ++ ++ return 0; ++ } else { ++ return 0; ++ } ++ } ++ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, ++ PPG_STATE_STOPPING); ++ kppg->state = PPG_STATE_STOPPING; ++ ret = ipu_fw_psys_pg_abort(kcmd); ++ if (ret) ++ dev_err(dev, "ppg(%d) failed to abort\n", ppg_id); ++ ++ return ret; ++} ++ ++int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd tmp_kcmd = { ++ .kpg = kppg->kpg, ++ .fh = kppg->fh, ++ }; ++ int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd); ++ int ret = 0; ++ ++ dev_dbg(dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg); ++ ++ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, ++ PPG_STATE_SUSPENDING); ++ kppg->state = PPG_STATE_SUSPENDING; ++ if (enable_suspend_resume) ++ ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd); ++ else ++ 
ret = ipu_fw_psys_pg_abort(&tmp_kcmd); ++ if (ret) ++ dev_err(dev, "failed to %s ppg(%d)\n", ++ enable_suspend_resume ? "suspend" : "stop", ret); ++ ++ return ret; ++} ++ ++static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg) ++{ ++ return !list_empty(&kppg->kcmds_new_list); ++} ++ ++/* ++ * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware ++ * Sometimes, if the ppg is at suspended state, this function will return true ++ * to reschedule and let the resume command scheduled before the buffer sets ++ * enqueuing. ++ */ ++bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_kcmd *kcmd, *kcmd0; ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ bool need_resume = false; ++ ++ mutex_lock(&kppg->mutex); ++ ++ if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED | ++ PPG_STATE_RUNNING)) { ++ if (ipu_psys_ppg_is_bufset_existing(kppg)) { ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_new_list, list) { ++ int ret; ++ ++ if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) { ++ need_resume = true; ++ break; ++ } ++ ++ ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd); ++ if (ret) { ++ dev_err(dev, ++ "kppg 0x%p fail to qbufset %d", ++ kppg, ret); ++ break; ++ } ++ list_move_tail(&kcmd->list, ++ &kppg->kcmds_processing_list); ++ dev_dbg(dev, ++ "kppg %d %p queue kcmd 0x%p fh 0x%p\n", ++ ipu_fw_psys_pg_get_id(kcmd), ++ kppg, kcmd, kcmd->fh); ++ } ++ } ++ } ++ ++ mutex_unlock(&kppg->mutex); ++ return need_resume; ++} ++ ++void ipu_psys_enter_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ 
mutex_lock(&kppg->mutex); ++ /* ++ * Only for SUSPENDED kppgs, STOPPED kppgs has already ++ * power down and new kppgs might come now. ++ */ ++ if (kppg->state != PPG_STATE_SUSPENDED) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ } ++ pm_runtime_put(dev); ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} ++ ++void ipu_psys_exit_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ int ret = 0; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ /* Only for SUSPENDED kppgs */ ++ if (kppg->state != PPG_STATE_SUSPENDED) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ } ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "failed to power gating\n"); ++ pm_runtime_put_noidle(dev); ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h +new file mode 100644 +index 0000000..676a4ea +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h +@@ -0,0 +1,38 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2020 - 2024 Intel Corporation ++ */ ++ ++#ifndef IPU6_PPG_H ++#define IPU6_PPG_H ++ ++#include "ipu-psys.h" ++/* starting from '2' in case of someone passes true or false */ ++enum SCHED_LIST { ++ SCHED_START_LIST = 2, ++ SCHED_STOP_LIST ++}; ++ ++enum ipu_psys_power_gating_state { ++ PSYS_POWER_NORMAL = 0, ++ PSYS_POWER_GATING, ++ PSYS_POWER_GATED ++}; ++ ++int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, ++ struct ipu_psys_ppg *kppg); ++struct ipu_psys_kcmd 
*ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg); ++void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type); ++void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type); ++int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg); ++int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg); ++int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg); ++int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg); ++void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg); ++bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg); ++void ipu_psys_enter_power_gating(struct ipu_psys *psys); ++void ipu_psys_exit_power_gating(struct ipu_psys *psys); ++ ++#endif /* IPU6_PPG_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c +new file mode 100644 +index 0000000..ff19256 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c +@@ -0,0 +1,209 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#ifdef CONFIG_DEBUG_FS ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-platform-regs.h" ++ ++/* ++ * GPC (Gerneral Performance Counters) ++ */ ++#define IPU_PSYS_GPC_NUM 16 ++ ++#ifndef CONFIG_PM ++#define pm_runtime_get_sync(d) 0 ++#define pm_runtime_put(d) 0 ++#endif ++ ++struct ipu_psys_gpc { ++ bool enable; ++ unsigned int route; ++ unsigned int source; ++ unsigned int sense; ++ unsigned int gpcindex; ++ void *prit; ++}; ++ ++struct ipu_psys_gpcs { ++ bool gpc_enable; ++ struct ipu_psys_gpc gpc[IPU_PSYS_GPC_NUM]; ++ void *prit; ++}; ++ ++static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val) ++{ ++ struct ipu_psys_gpcs *psys_gpcs = data; ++ struct ipu_psys *psys = psys_gpcs->prit; ++ ++ mutex_lock(&psys->mutex); ++ ++ *val = psys_gpcs->gpc_enable; ++ ++ mutex_unlock(&psys->mutex); ++ return 0; ++} ++ ++static int ipu6_psys_gpc_global_enable_set(void 
*data, u64 val) ++{ ++ struct ipu_psys_gpcs *psys_gpcs = data; ++ struct ipu_psys *psys = psys_gpcs->prit; ++ void __iomem *base; ++ int idx, res; ++ ++ if (val != 0 && val != 1) ++ return -EINVAL; ++ ++ if (!psys || !psys->pdata || !psys->pdata->base) ++ return -EINVAL; ++ ++ mutex_lock(&psys->mutex); ++ ++ base = psys->pdata->base + IPU_GPC_BASE; ++ ++ res = pm_runtime_get_sync(&psys->adev->dev); ++ if (res < 0) { ++ pm_runtime_put(&psys->adev->dev); ++ mutex_unlock(&psys->mutex); ++ return res; ++ } ++ ++ if (val == 0) { ++ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); ++ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); ++ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); ++ psys_gpcs->gpc_enable = false; ++ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { ++ psys_gpcs->gpc[idx].enable = 0; ++ psys_gpcs->gpc[idx].sense = 0; ++ psys_gpcs->gpc[idx].route = 0; ++ psys_gpcs->gpc[idx].source = 0; ++ } ++ pm_runtime_put(&psys->adev->dev); ++ } else { ++ /* Set gpc reg and start all gpc here. ++ * RST free running local timer. ++ */ ++ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); ++ writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST); ++ ++ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { ++ /* Enable */ ++ writel(psys_gpcs->gpc[idx].enable, ++ base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx); ++ /* Setting (route/source/sense) */ ++ writel((psys_gpcs->gpc[idx].sense ++ << IPU_GPC_SENSE_OFFSET) ++ + (psys_gpcs->gpc[idx].route ++ << IPU_GPC_ROUTE_OFFSET) ++ + (psys_gpcs->gpc[idx].source ++ << IPU_GPC_SOURCE_OFFSET), ++ base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx); ++ } ++ ++ /* Soft reset and Overall Enable. 
*/ ++ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); ++ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); ++ writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); ++ ++ psys_gpcs->gpc_enable = true; ++ } ++ ++ mutex_unlock(&psys->mutex); ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_globe_enable_fops, ++ ipu6_psys_gpc_global_enable_get, ++ ipu6_psys_gpc_global_enable_set, "%llu\n"); ++ ++static int ipu6_psys_gpc_count_get(void *data, u64 *val) ++{ ++ struct ipu_psys_gpc *psys_gpc = data; ++ struct ipu_psys *psys = psys_gpc->prit; ++ void __iomem *base; ++ int res; ++ ++ if (!psys || !psys->pdata || !psys->pdata->base) ++ return -EINVAL; ++ ++ mutex_lock(&psys->mutex); ++ ++ base = psys->pdata->base + IPU_GPC_BASE; ++ ++ res = pm_runtime_get_sync(&psys->adev->dev); ++ if (res < 0) { ++ pm_runtime_put(&psys->adev->dev); ++ mutex_unlock(&psys->mutex); ++ return res; ++ } ++ ++ *val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex); ++ ++ mutex_unlock(&psys->mutex); ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops, ++ ipu6_psys_gpc_count_get, ++ NULL, "%llu\n"); ++ ++int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys) ++{ ++ struct dentry *gpcdir; ++ struct dentry *dir; ++ struct dentry *file; ++ int idx; ++ char gpcname[10]; ++ struct ipu_psys_gpcs *psys_gpcs; ++ ++ psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL); ++ if (!psys_gpcs) ++ return -ENOMEM; ++ ++ gpcdir = debugfs_create_dir("gpc", psys->debugfsdir); ++ if (IS_ERR(gpcdir)) ++ return -ENOMEM; ++ ++ psys_gpcs->prit = psys; ++ file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs, ++ &psys_gpc_globe_enable_fops); ++ if (IS_ERR(file)) ++ goto err; ++ ++ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { ++ sprintf(gpcname, "gpc%d", idx); ++ dir = debugfs_create_dir(gpcname, gpcdir); ++ if (IS_ERR(dir)) ++ goto err; ++ ++ debugfs_create_bool("enable", 0600, dir, ++ &psys_gpcs->gpc[idx].enable); ++ ++ debugfs_create_u32("source", 0600, dir, ++ 
&psys_gpcs->gpc[idx].source); ++ ++ debugfs_create_u32("route", 0600, dir, ++ &psys_gpcs->gpc[idx].route); ++ ++ debugfs_create_u32("sense", 0600, dir, ++ &psys_gpcs->gpc[idx].sense); ++ ++ psys_gpcs->gpc[idx].gpcindex = idx; ++ psys_gpcs->gpc[idx].prit = psys; ++ file = debugfs_create_file("count", 0400, dir, ++ &psys_gpcs->gpc[idx], ++ &psys_gpc_count_fops); ++ if (IS_ERR(file)) ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ debugfs_remove_recursive(gpcdir); ++ return -ENOMEM; ++} ++#endif +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c +new file mode 100644 +index 0000000..46c6bc8 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c +@@ -0,0 +1,1056 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ipu6.h" ++#include "ipu6-dma.h" ++#include "ipu-psys.h" ++#include "ipu6-ppg.h" ++#include "ipu6-platform-regs.h" ++#include "ipu6-platform-buttress-regs.h" ++ ++MODULE_IMPORT_NS(DMA_BUF); ++ ++static bool early_pg_transfer; ++module_param(early_pg_transfer, bool, 0664); ++MODULE_PARM_DESC(early_pg_transfer, ++ "Copy PGs back to user after resource allocation"); ++ ++bool enable_power_gating = true; ++module_param(enable_power_gating, bool, 0664); ++MODULE_PARM_DESC(enable_power_gating, "enable power gating"); ++ ++static void ipu6_set_sp_info_bits(void __iomem *base) ++{ ++ int i; ++ ++ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, ++ base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); ++ ++ for (i = 0; i < 4; i++) ++ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, ++ base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); ++ for (i = 0; i < 4; i++) ++ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, ++ base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); ++} ++ ++#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000 
++void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ unsigned int i; ++ u32 val; ++ ++ /* power domain req */ ++ dev_dbg(dev, "power %s psys sub-domains", on ? "UP" : "DOWN"); ++ if (on) ++ writel(IPU_PSYS_SUBDOMAINS_POWER_MASK, ++ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); ++ else ++ writel(0x0, ++ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); ++ ++ i = 0; ++ do { ++ usleep_range(10, 20); ++ val = readl(psys->adev->isp->base + ++ IPU_PSYS_SUBDOMAINS_POWER_STATUS); ++ if (!(val & BIT(31))) { ++ dev_dbg(dev, "PS sub-domains req done with status 0x%x", ++ val); ++ break; ++ } ++ i++; ++ } while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT); ++ ++ if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT) ++ dev_warn(dev, "Psys sub-domains %s req timeout!", ++ on ? "UP" : "DOWN"); ++} ++ ++void ipu_psys_setup_hw(struct ipu_psys *psys) ++{ ++ void __iomem *base = psys->pdata->base; ++ void __iomem *spc_regs_base = ++ base + psys->pdata->ipdata->hw_variant.spc_offset; ++ void __iomem *psys_iommu0_ctrl; ++ u32 irqs; ++ const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG; ++ const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR; ++ const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL; ++ ++ if (!psys->adev->isp->secure_mode) { ++ /* configure access blocker for non-secure mode */ ++ writel(NCI_AB_ACCESS_MODE_RW, ++ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + ++ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3)); ++ writel(NCI_AB_ACCESS_MODE_RW, ++ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + ++ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4)); ++ writel(NCI_AB_ACCESS_MODE_RW, ++ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + ++ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5)); ++ } ++ psys_iommu0_ctrl = base + ++ psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + ++ IPU6_MMU_INFO_OFFSET; ++ writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl); ++ ++ 
ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); ++ ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); ++ ++ /* Enable FW interrupt #0 */ ++ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); ++ irqs = IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); ++ writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); ++} ++ ++static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_scheduler *sched = &kcmd->fh->sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ ++ mutex_lock(&kcmd->fh->mutex); ++ if (list_empty(&sched->ppgs)) ++ goto not_found; ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ if (ipu_fw_psys_pg_get_token(kcmd) ++ != kppg->token) ++ continue; ++ mutex_unlock(&kcmd->fh->mutex); ++ return kppg; ++ } ++ ++not_found: ++ mutex_unlock(&kcmd->fh->mutex); ++ return NULL; ++} ++ ++/* ++ * Called to free up all resources associated with a kcmd. ++ * After this the kcmd doesn't anymore exist in the driver. 
++ */ ++static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_scheduler *sched; ++ ++ if (!kcmd) ++ return; ++ ++ kppg = ipu_psys_identify_kppg(kcmd); ++ sched = &kcmd->fh->sched; ++ ++ if (kcmd->kbuf_set) { ++ mutex_lock(&sched->bs_mutex); ++ kcmd->kbuf_set->buf_set_size = 0; ++ mutex_unlock(&sched->bs_mutex); ++ kcmd->kbuf_set = NULL; ++ } ++ ++ if (kppg) { ++ mutex_lock(&kppg->mutex); ++ if (!list_empty(&kcmd->list)) ++ list_del(&kcmd->list); ++ mutex_unlock(&kppg->mutex); ++ } ++ ++ kfree(kcmd->pg_manifest); ++ kfree(kcmd->kbufs); ++ kfree(kcmd->buffers); ++ kfree(kcmd); ++} ++ ++static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, ++ struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_psys_kbuffer *kpgbuf; ++ unsigned int i; ++ int ret, prevfd, fd; ++ ++ fd = -1; ++ prevfd = -1; ++ ++ if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) ++ return NULL; ++ ++ if (!cmd->pg_manifest_size) ++ return NULL; ++ ++ kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); ++ if (!kcmd) ++ return NULL; ++ ++ kcmd->state = KCMD_STATE_PPG_NEW; ++ kcmd->fh = fh; ++ INIT_LIST_HEAD(&kcmd->list); ++ ++ mutex_lock(&fh->mutex); ++ fd = cmd->pg; ++ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); ++ if (!kpgbuf || !kpgbuf->sgt) { ++ dev_err(dev, "%s kbuf %p with fd %d not found.\n", ++ __func__, kpgbuf, fd); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ ++ /* check and remap if possible */ ++ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); ++ if (!kpgbuf || !kpgbuf->sgt) { ++ dev_err(dev, "%s remap failed\n", __func__); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ mutex_unlock(&fh->mutex); ++ ++ kcmd->pg_user = kpgbuf->kaddr; ++ kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); ++ if (!kcmd->kpg) ++ goto error; ++ ++ memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); ++ ++ kcmd->pg_manifest = 
kzalloc(cmd->pg_manifest_size, GFP_KERNEL); ++ if (!kcmd->pg_manifest) ++ goto error; ++ ++ ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, ++ cmd->pg_manifest_size); ++ if (ret) ++ goto error; ++ ++ kcmd->pg_manifest_size = cmd->pg_manifest_size; ++ ++ kcmd->user_token = cmd->user_token; ++ kcmd->issue_id = cmd->issue_id; ++ kcmd->priority = cmd->priority; ++ if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) ++ goto error; ++ ++ /* ++ * Kernel enable bitmap be used only. ++ */ ++ memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap, ++ sizeof(cmd->kernel_enable_bitmap)); ++ ++ kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); ++ kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), ++ GFP_KERNEL); ++ if (!kcmd->buffers) ++ goto error; ++ ++ kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), ++ GFP_KERNEL); ++ if (!kcmd->kbufs) ++ goto error; ++ ++ /* should be stop cmd for ppg */ ++ if (!cmd->buffers) { ++ kcmd->state = KCMD_STATE_PPG_STOP; ++ return kcmd; ++ } ++ ++ if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) ++ goto error; ++ ++ ret = copy_from_user(kcmd->buffers, cmd->buffers, ++ kcmd->nbuffers * sizeof(*kcmd->buffers)); ++ if (ret) ++ goto error; ++ ++ for (i = 0; i < kcmd->nbuffers; i++) { ++ struct ipu_fw_psys_terminal *terminal; ++ ++ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); ++ if (!terminal) ++ continue; ++ ++ if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) { ++ kcmd->state = KCMD_STATE_PPG_START; ++ continue; ++ } ++ if (kcmd->state == KCMD_STATE_PPG_START) { ++ dev_err(dev, "buffer.flags & DMA_HANDLE must be 0\n"); ++ goto error; ++ } ++ ++ mutex_lock(&fh->mutex); ++ fd = kcmd->buffers[i].base.fd; ++ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); ++ if (!kpgbuf || !kpgbuf->sgt) { ++ dev_err(dev, ++ "%s kcmd->buffers[%d] %p fd %d not found.\n", ++ __func__, i, kpgbuf, fd); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ ++ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); ++ if (!kpgbuf || 
!kpgbuf->sgt) { ++ dev_err(dev, "%s remap failed\n", ++ __func__); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ ++ mutex_unlock(&fh->mutex); ++ kcmd->kbufs[i] = kpgbuf; ++ if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || ++ kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) ++ goto error; ++ ++ if ((kcmd->kbufs[i]->flags & IPU_BUFFER_FLAG_NO_FLUSH) || ++ (kcmd->buffers[i].flags & IPU_BUFFER_FLAG_NO_FLUSH) || ++ prevfd == kcmd->buffers[i].base.fd) ++ continue; ++ ++ prevfd = kcmd->buffers[i].base.fd; ++ ++ /* ++ * TODO: remove exported buffer sync here as the cache ++ * coherency should be done by the exporter ++ */ ++ if (kcmd->kbufs[i]->kaddr) ++ clflush_cache_range(kcmd->kbufs[i]->kaddr, ++ kcmd->kbufs[i]->len); ++ } ++ ++ if (kcmd->state != KCMD_STATE_PPG_START) ++ kcmd->state = KCMD_STATE_PPG_ENQUEUE; ++ ++ return kcmd; ++error: ++ ipu_psys_kcmd_free(kcmd); ++ ++ dev_dbg(dev, "failed to copy cmd\n"); ++ ++ return NULL; ++} ++ ++static struct ipu_psys_buffer_set * ++ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr) ++{ ++ struct ipu_psys_fh *fh; ++ struct ipu_psys_buffer_set *kbuf_set; ++ struct ipu_psys_scheduler *sched; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ sched = &fh->sched; ++ mutex_lock(&sched->bs_mutex); ++ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { ++ if (kbuf_set->buf_set && ++ kbuf_set->buf_set->ipu_virtual_address == addr) { ++ mutex_unlock(&sched->bs_mutex); ++ return kbuf_set; ++ } ++ } ++ mutex_unlock(&sched->bs_mutex); ++ } ++ ++ return NULL; ++} ++ ++static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys, ++ dma_addr_t pg_addr) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ sched = &fh->sched; ++ mutex_lock(&fh->mutex); ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ if (pg_addr != 
kppg->kpg->pg_dma_addr) ++ continue; ++ mutex_unlock(&fh->mutex); ++ return kppg; ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return NULL; ++} ++ ++#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 ++static void ipu_buttress_set_psys_ratio(struct ipu6_device *isp, ++ unsigned int psys_divisor, ++ unsigned int psys_qos_floor) ++{ ++ struct ipu6_buttress_ctrl *ctrl = isp->psys->ctrl; ++ ++ mutex_lock(&isp->buttress.power_mutex); ++ ++ if (ctrl->ratio == psys_divisor && ctrl->qos_floor == psys_qos_floor) ++ goto out_mutex_unlock; ++ ++ ctrl->ratio = psys_divisor; ++ ctrl->qos_floor = psys_qos_floor; ++ ++ if (ctrl->started) { ++ /* ++ * According to documentation driver initiates DVFS ++ * transition by writing wanted ratio, floor ratio and start ++ * bit. No need to stop PS first ++ */ ++ writel(BUTTRESS_FREQ_CTL_START | ++ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | ++ psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); ++ } ++ ++out_mutex_unlock: ++ mutex_unlock(&isp->buttress.power_mutex); ++} ++ ++static void ipu_buttress_set_psys_freq(struct ipu6_device *isp, ++ unsigned int freq) ++{ ++ unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; ++ ++ dev_dbg(&isp->psys->auxdev.dev, "freq:%u\n", freq); ++ ++ ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); ++} ++ ++static void ++ipu_buttress_add_psys_constraint(struct ipu6_device *isp, ++ struct ipu6_psys_constraint *constraint) ++{ ++ struct ipu6_buttress *b = &isp->buttress; ++ ++ mutex_lock(&b->cons_mutex); ++ list_add(&constraint->list, &b->constraints); ++ mutex_unlock(&b->cons_mutex); ++} ++ ++static void ++ipu_buttress_remove_psys_constraint(struct ipu6_device *isp, ++ struct ipu6_psys_constraint *constraint) ++{ ++ struct ipu6_buttress *b = &isp->buttress; ++ struct ipu6_psys_constraint *c; ++ unsigned int min_freq = 0; ++ ++ mutex_lock(&b->cons_mutex); ++ list_del(&constraint->list); ++ ++ list_for_each_entry(c, &b->constraints, list) ++ if (c->min_freq > min_freq) ++ min_freq = 
c->min_freq; ++ ++ ipu_buttress_set_psys_freq(isp, min_freq); ++ mutex_unlock(&b->cons_mutex); ++} ++ ++/* ++ * Move kcmd into completed state (due to running finished or failure). ++ * Fill up the event struct and notify waiters. ++ */ ++void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, ++ struct ipu_psys_kcmd *kcmd, int error) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ ++ kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; ++ kcmd->ev.user_token = kcmd->user_token; ++ kcmd->ev.issue_id = kcmd->issue_id; ++ kcmd->ev.error = error; ++ list_move_tail(&kcmd->list, &kppg->kcmds_finished_list); ++ ++ if (kcmd->constraint.min_freq) ++ ipu_buttress_remove_psys_constraint(psys->adev->isp, ++ &kcmd->constraint); ++ ++ if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, ++ kcmd->pg_user); ++ if (kbuf && kbuf->valid) ++ memcpy(kcmd->pg_user, ++ kcmd->kpg->pg, kcmd->kpg->pg_size); ++ else ++ dev_dbg(dev, "Skipping unmapped buffer\n"); ++ } ++ ++ kcmd->state = KCMD_STATE_PPG_COMPLETE; ++ wake_up_interruptible(&fh->wait); ++} ++ ++/* ++ * Submit kcmd into psys queue. If running fails, complete the kcmd ++ * with an error. ++ * ++ * Found a runnable PG. Move queue to the list tail for round-robin ++ * scheduling and run the PG. Start the watchdog timer if the PG was ++ * started successfully. Enable PSYS power if requested. 
++ */ ++int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int ret; ++ ++ if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) ++ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); ++ ++ ret = ipu_fw_psys_pg_start(kcmd); ++ if (ret) { ++ dev_err(dev, "failed to start kcmd!\n"); ++ return ret; ++ } ++ ++ ipu_fw_psys_pg_dump(psys, kcmd, "run"); ++ ++ ret = ipu_fw_psys_pg_disown(kcmd); ++ if (ret) { ++ dev_err(dev, "failed to start kcmd!\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_resource_pool *rpr; ++ int queue_id; ++ int ret; ++ ++ rpr = &psys->res_pool_running; ++ ++ kppg = kzalloc(sizeof(*kppg), GFP_KERNEL); ++ if (!kppg) ++ return -ENOMEM; ++ ++ kppg->fh = fh; ++ kppg->kpg = kcmd->kpg; ++ kppg->state = PPG_STATE_START; ++ kppg->pri_base = kcmd->priority; ++ kppg->pri_dynamic = 0; ++ INIT_LIST_HEAD(&kppg->list); ++ ++ mutex_init(&kppg->mutex); ++ INIT_LIST_HEAD(&kppg->kcmds_new_list); ++ INIT_LIST_HEAD(&kppg->kcmds_processing_list); ++ INIT_LIST_HEAD(&kppg->kcmds_finished_list); ++ INIT_LIST_HEAD(&kppg->sched_list); ++ ++ kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL); ++ if (!kppg->manifest) { ++ kfree(kppg); ++ return -ENOMEM; ++ } ++ memcpy(kppg->manifest, kcmd->pg_manifest, ++ kcmd->pg_manifest_size); ++ ++ queue_id = ipu_psys_allocate_cmd_queue_res(rpr); ++ if (queue_id == -ENOSPC) { ++ dev_err(dev, "no available queue\n"); ++ kfree(kppg->manifest); ++ kfree(kppg); ++ mutex_unlock(&psys->mutex); ++ return -ENOMEM; ++ } ++ ++ /* ++ * set token as start cmd will immediately be followed by a ++ * enqueue cmd so that kppg could be retrieved. 
++ */ ++ kppg->token = (u64)kcmd->kpg; ++ ipu_fw_psys_pg_set_token(kcmd, kppg->token); ++ ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id); ++ ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, ++ kcmd->kpg->pg_dma_addr); ++ if (ret) { ++ ipu_psys_free_cmd_queue_res(rpr, queue_id); ++ ++ kfree(kppg->manifest); ++ kfree(kppg); ++ return -EIO; ++ } ++ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); ++ ++ mutex_lock(&fh->mutex); ++ list_add_tail(&kppg->list, &sched->ppgs); ++ mutex_unlock(&fh->mutex); ++ ++ mutex_lock(&kppg->mutex); ++ list_add(&kcmd->list, &kppg->kcmds_new_list); ++ mutex_unlock(&kppg->mutex); ++ ++ dev_dbg(dev, "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n", ++ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id); ++ ++ /* Kick l-scheduler thread */ ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ ++ return 0; ++} ++ ++static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_resource_pool *rpr; ++ unsigned long flags; ++ u8 id; ++ bool resche = true; ++ ++ rpr = &psys->res_pool_running; ++ if (kcmd->state == KCMD_STATE_PPG_START) ++ return ipu_psys_kcmd_send_to_ppg_start(kcmd); ++ ++ kppg = ipu_psys_identify_kppg(kcmd); ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ kcmd->kpg->pg_size = 0; ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ if (!kppg) { ++ dev_err(dev, "token not match\n"); ++ return -EINVAL; ++ } ++ ++ kcmd->kpg = kppg->kpg; ++ ++ dev_dbg(dev, "%s ppg(%d, 0x%p) kcmd %p\n", ++ (kcmd->state == KCMD_STATE_PPG_STOP) ? 
"STOP" : "ENQUEUE", ++ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd); ++ ++ if (kcmd->state == KCMD_STATE_PPG_STOP) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_STOPPED) { ++ dev_dbg(dev, "kppg 0x%p stopped!\n", kppg); ++ id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); ++ ipu_psys_free_cmd_queue_res(rpr, id); ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ pm_runtime_put(dev); ++ resche = false; ++ } else { ++ list_add(&kcmd->list, &kppg->kcmds_new_list); ++ } ++ mutex_unlock(&kppg->mutex); ++ } else { ++ int ret; ++ ++ ret = ipu_psys_ppg_get_bufset(kcmd, kppg); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&kppg->mutex); ++ list_add_tail(&kcmd->list, &kppg->kcmds_new_list); ++ mutex_unlock(&kppg->mutex); ++ } ++ ++ if (resche) { ++ /* Kick l-scheduler thread */ ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ } ++ return 0; ++} ++ ++int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ size_t pg_size; ++ int ret; ++ ++ kcmd = ipu_psys_copy_cmd(cmd, fh); ++ if (!kcmd) ++ return -EINVAL; ++ ++ pg_size = ipu_fw_psys_pg_get_size(kcmd); ++ if (pg_size > kcmd->kpg->pg_size) { ++ dev_dbg(dev, "pg size mismatch %lu %lu\n", pg_size, ++ kcmd->kpg->pg_size); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ if (ipu_fw_psys_pg_get_protocol(kcmd) != ++ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG) { ++ dev_err(dev, "No support legacy pg now\n"); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ if (cmd->min_psys_freq) { ++ kcmd->constraint.min_freq = cmd->min_psys_freq; ++ ipu_buttress_add_psys_constraint(psys->adev->isp, ++ &kcmd->constraint); ++ } ++ ++ ret = ipu_psys_kcmd_send_to_ppg(kcmd); ++ if (ret) ++ goto error; ++ ++ dev_dbg(dev, "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", ++ cmd->user_token, cmd->issue_id, cmd->priority); ++ ++ return 0; ++ ++error: ++ ipu_psys_kcmd_free(kcmd); ++ ++ 
return ret; ++} ++ ++static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_fh *fh; ++ struct ipu_psys_kcmd *kcmd0; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_scheduler *sched; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ sched = &fh->sched; ++ mutex_lock(&fh->mutex); ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ list_for_each_entry(kcmd0, ++ &kppg->kcmds_processing_list, ++ list) { ++ if (kcmd0 == kcmd) { ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return true; ++ } ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return false; ++} ++ ++void ipu_psys_handle_events(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_fw_psys_event event; ++ struct ipu_psys_ppg *kppg; ++ bool error; ++ u32 hdl; ++ u16 cmd, status; ++ int res; ++ ++ do { ++ memset(&event, 0, sizeof(event)); ++ if (!ipu_fw_psys_rcv_event(psys, &event)) ++ break; ++ ++ if (!event.context_handle) ++ break; ++ ++ dev_dbg(dev, "ppg event: 0x%x, %d, status %d\n", ++ event.context_handle, event.command, event.status); ++ ++ error = false; ++ /* ++ * event.command == CMD_RUN shows this is fw processing frame ++ * done as pPG mode, and event.context_handle should be pointer ++ * of buffer set; so we make use of this pointer to lookup ++ * kbuffer_set and kcmd ++ */ ++ hdl = event.context_handle; ++ cmd = event.command; ++ status = event.status; ++ ++ kppg = NULL; ++ kcmd = NULL; ++ if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) { ++ struct ipu_psys_buffer_set *kbuf_set; ++ /* ++ * Need change ppg state when the 1st running is done ++ * (after PPG started/resumed) ++ */ ++ kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl); ++ if (kbuf_set) ++ kcmd = kbuf_set->kcmd; ++ if (!kbuf_set || !kcmd) 
++ error = true; ++ else ++ kppg = ipu_psys_identify_kppg(kcmd); ++ } else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP || ++ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND || ++ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) { ++ /* ++ * STOP/SUSPEND/RESUME cmd event would run this branch; ++ * only stop cmd queued by user has stop_kcmd and need ++ * to notify user to dequeue. ++ */ ++ kppg = ipu_psys_lookup_ppg(psys, hdl); ++ if (kppg) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_STOPPING) { ++ kcmd = ipu_psys_ppg_get_stop_kcmd(kppg); ++ if (!kcmd) ++ error = true; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ } else { ++ dev_err(dev, "invalid event\n"); ++ continue; ++ } ++ ++ if (error || !kppg) { ++ dev_err(dev, "event error, command %d\n", cmd); ++ break; ++ } ++ ++ dev_dbg(dev, "event to kppg 0x%p, kcmd 0x%p\n", kppg, kcmd); ++ ++ ipu_psys_ppg_complete(psys, kppg); ++ ++ if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) { ++ res = (status == IPU_PSYS_EVENT_CMD_COMPLETE || ++ status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ? 
++ 0 : -EIO; ++ mutex_lock(&kppg->mutex); ++ ipu_psys_kcmd_complete(kppg, kcmd, res); ++ mutex_unlock(&kppg->mutex); ++ } ++ } while (1); ++} ++ ++int ipu_psys_fh_init(struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp; ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ int i; ++ ++ mutex_init(&sched->bs_mutex); ++ INIT_LIST_HEAD(&sched->buf_sets); ++ INIT_LIST_HEAD(&sched->ppgs); ++ ++ /* allocate and map memory for buf_sets */ ++ for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) { ++ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); ++ if (!kbuf_set) ++ goto out_free_buf_sets; ++ kbuf_set->kaddr = ipu6_dma_alloc(psys->adev, ++ IPU_PSYS_BUF_SET_MAX_SIZE, ++ &kbuf_set->dma_addr, ++ GFP_KERNEL, 0); ++ if (!kbuf_set->kaddr) { ++ kfree(kbuf_set); ++ goto out_free_buf_sets; ++ } ++ kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE; ++ list_add(&kbuf_set->list, &sched->buf_sets); ++ } ++ ++ return 0; ++ ++out_free_buf_sets: ++ list_for_each_entry_safe(kbuf_set, kbuf_set_tmp, ++ &sched->buf_sets, list) { ++ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, ++ kbuf_set->dma_addr, 0); ++ list_del(&kbuf_set->list); ++ kfree(kbuf_set); ++ } ++ mutex_destroy(&sched->bs_mutex); ++ ++ return -ENOMEM; ++} ++ ++int ipu_psys_fh_deinit(struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg, *kppg0; ++ struct ipu_psys_kcmd *kcmd, *kcmd0; ++ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0; ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ struct ipu_psys_resource_pool *rpr; ++ struct ipu_psys_resource_alloc *alloc; ++ u8 id; ++ ++ mutex_lock(&fh->mutex); ++ if (!list_empty(&sched->ppgs)) { ++ list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) { ++ unsigned long flags; ++ ++ mutex_lock(&kppg->mutex); ++ if (!(kppg->state & ++ (PPG_STATE_STOPPED | ++ PPG_STATE_STOPPING))) { ++ struct ipu_psys_kcmd tmp = { ++ .kpg = 
kppg->kpg, ++ }; ++ ++ rpr = &psys->res_pool_running; ++ alloc = &kppg->kpg->resource_alloc; ++ id = ipu_fw_psys_ppg_get_base_queue_id(&tmp); ++ ipu_psys_ppg_stop(kppg); ++ ipu_psys_free_resources(alloc, rpr); ++ ipu_psys_free_cmd_queue_res(rpr, id); ++ dev_dbg(dev, ++ "s_change:%s %p %d -> %d\n", ++ __func__, kppg, kppg->state, ++ PPG_STATE_STOPPED); ++ kppg->state = PPG_STATE_STOPPED; ++ if (psys->power_gating != PSYS_POWER_GATED) ++ pm_runtime_put(dev); ++ } ++ list_del(&kppg->list); ++ mutex_unlock(&kppg->mutex); ++ ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_new_list, list) { ++ kcmd->pg_user = NULL; ++ mutex_unlock(&fh->mutex); ++ ipu_psys_kcmd_free(kcmd); ++ mutex_lock(&fh->mutex); ++ } ++ ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_processing_list, ++ list) { ++ kcmd->pg_user = NULL; ++ mutex_unlock(&fh->mutex); ++ ipu_psys_kcmd_free(kcmd); ++ mutex_lock(&fh->mutex); ++ } ++ ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_finished_list, ++ list) { ++ kcmd->pg_user = NULL; ++ mutex_unlock(&fh->mutex); ++ ipu_psys_kcmd_free(kcmd); ++ mutex_lock(&fh->mutex); ++ } ++ ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ kppg->kpg->pg_size = 0; ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ ++ mutex_destroy(&kppg->mutex); ++ kfree(kppg->manifest); ++ kfree(kppg); ++ } ++ } ++ mutex_unlock(&fh->mutex); ++ ++ mutex_lock(&sched->bs_mutex); ++ list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) { ++ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, ++ kbuf_set->dma_addr, 0); ++ list_del(&kbuf_set->list); ++ kfree(kbuf_set); ++ } ++ mutex_unlock(&sched->bs_mutex); ++ mutex_destroy(&sched->bs_mutex); ++ ++ return 0; ++} ++ ++struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ struct device *dev = &fh->psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_psys_ppg *kppg; ++ ++ mutex_lock(&fh->mutex); ++ if 
(list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ return NULL; ++ } ++ ++ list_for_each_entry(kppg, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (list_empty(&kppg->kcmds_finished_list)) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ } ++ ++ kcmd = list_first_entry(&kppg->kcmds_finished_list, ++ struct ipu_psys_kcmd, list); ++ mutex_unlock(&fh->mutex); ++ mutex_unlock(&kppg->mutex); ++ dev_dbg(dev, "get completed kcmd 0x%p\n", kcmd); ++ return kcmd; ++ } ++ mutex_unlock(&fh->mutex); ++ ++ return NULL; ++} ++ ++long ipu_ioctl_dqevent(struct ipu_psys_event *event, ++ struct ipu_psys_fh *fh, unsigned int f_flags) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd = NULL; ++ int rval; ++ ++ dev_dbg(dev, "IOC_DQEVENT\n"); ++ ++ if (!(f_flags & O_NONBLOCK)) { ++ rval = wait_event_interruptible(fh->wait, ++ (kcmd = ++ ipu_get_completed_kcmd(fh))); ++ if (rval == -ERESTARTSYS) ++ return rval; ++ } ++ ++ if (!kcmd) { ++ kcmd = ipu_get_completed_kcmd(fh); ++ if (!kcmd) ++ return -ENODATA; ++ } ++ ++ *event = kcmd->ev; ++ ipu_psys_kcmd_free(kcmd); ++ ++ return 0; ++} +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c +new file mode 100644 +index 0000000..4a1a869 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c +@@ -0,0 +1,393 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6-platform-resources.h" ++#include "ipu6ep-platform-resources.h" ++ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++static const u8 ipu6ep_fw_psys_cell_types[IPU6EP_FW_PSYS_N_CELL_ID] = { ++ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, ++ IPU6_FW_PSYS_VP_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ 
IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* AF */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ ++ IPU6_FW_PSYS_GDC_TYPE_ID, ++ IPU6_FW_PSYS_TNR_TYPE_ID, ++}; ++ ++static const u16 ipu6ep_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, ++}; ++ ++static const u16 ipu6ep_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { ++ IPU6_FW_PSYS_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, ++ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM2_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM3_MAX_SIZE, ++ IPU6_FW_PSYS_PMEM0_MAX_SIZE ++}; ++ ++static const u16 ipu6ep_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { ++ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, ++}; ++ ++static 
const u8 ++ipu6ep_fw_psys_c_mem[IPU6EP_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { ++ { ++ /* IPU6_FW_PSYS_SP0_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM0_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_SP1_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_VP0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_DMEM3_ID, ++ IPU6_FW_PSYS_VMEM0_ID, ++ IPU6_FW_PSYS_BAMEM0_ID, ++ IPU6_FW_PSYS_PMEM0_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC1_ID BNLM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC2_ID DM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC3_ID ACM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC9_ID GLTM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC10_ID XNR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_ICA_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_LSC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_DPC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AWB_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AE_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_PAF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ } ++}; ++ ++static const struct ipu_fw_resource_definitions ipu6ep_defs = { ++ .cells = ipu6ep_fw_psys_cell_types, ++ .num_cells = IPU6EP_FW_PSYS_N_CELL_ID, ++ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, ++ ++ .dev_channels = ipu6ep_fw_num_dev_channels, ++ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, ++ ++ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, ++ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, ++ .ext_mem_ids = ipu6ep_fw_psys_mem_size, ++ ++ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, ++ ++ .dfms = ipu6ep_fw_psys_dfms, ++ ++ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, ++ .cell_mem = &ipu6ep_fw_psys_c_mem[0][0], ++}; ++ ++const struct ipu_fw_resource_definitions *ipu6ep_res_defs = &ipu6ep_defs; +diff --git 
a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h +new file mode 100644 +index 0000000..8420149 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h +@@ -0,0 +1,42 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2020 - 2024 Intel Corporation */ ++ ++#ifndef IPU6EP_PLATFORM_RESOURCES_H ++#define IPU6EP_PLATFORM_RESOURCES_H ++ ++#include ++#include ++ ++enum { ++ IPU6EP_FW_PSYS_SP0_ID = 0, ++ IPU6EP_FW_PSYS_VP0_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_BNLM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_DM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_ACM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_GTC_YUV1_ID, ++ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, ++ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, ++ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_GAMMASTAR_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_GLTM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_XNR_ID, ++ IPU6EP_FW_PSYS_PSA_VCSC_ID, /* VCSC */ ++ IPU6EP_FW_PSYS_ISA_ICA_ID, ++ IPU6EP_FW_PSYS_ISA_LSC_ID, ++ IPU6EP_FW_PSYS_ISA_DPC_ID, ++ IPU6EP_FW_PSYS_ISA_SIS_A_ID, ++ IPU6EP_FW_PSYS_ISA_SIS_B_ID, ++ IPU6EP_FW_PSYS_ISA_B2B_ID, ++ IPU6EP_FW_PSYS_ISA_B2R_R2I_SIE_ID, ++ IPU6EP_FW_PSYS_ISA_R2I_DS_A_ID, ++ IPU6EP_FW_PSYS_ISA_AWB_ID, ++ IPU6EP_FW_PSYS_ISA_AE_ID, ++ IPU6EP_FW_PSYS_ISA_AF_ID, ++ IPU6EP_FW_PSYS_ISA_X2B_MD_ID, ++ IPU6EP_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, ++ IPU6EP_FW_PSYS_ISA_PAF_ID, ++ IPU6EP_FW_PSYS_BB_ACC_GDC0_ID, ++ IPU6EP_FW_PSYS_BB_ACC_TNR_ID, ++ IPU6EP_FW_PSYS_N_CELL_ID ++}; ++#endif /* IPU6EP_PLATFORM_RESOURCES_H */ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c +new file mode 100644 +index 0000000..41b6ba6 +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c +@@ -0,0 +1,194 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include 
"ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6se-platform-resources.h" ++ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++static const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = { ++ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID, ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID*/ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID /* PAF */ ++}; ++ ++static const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = { ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, ++}; ++ ++static const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = { ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, ++ IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE, ++ IPU6SE_FW_PSYS_DMEM0_MAX_SIZE, ++ IPU6SE_FW_PSYS_DMEM1_MAX_SIZE ++}; ++ ++static const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = { ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE ++}; ++ ++static const u8 ++ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = { ++ { /* 
IPU6SE_FW_PSYS_SP0_ID */ ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_DMEM0_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_ICA_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_LSC_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_DPC_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_B2B_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ ++ { /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_DM_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* 
IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_AWB_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_AE_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_AF_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_PAF_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ } ++}; ++ ++static const struct ipu_fw_resource_definitions ipu6se_defs = { ++ .cells = ipu6se_fw_psys_cell_types, ++ .num_cells = IPU6SE_FW_PSYS_N_CELL_ID, ++ .num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID, ++ ++ .dev_channels = ipu6se_fw_num_dev_channels, ++ .num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID, ++ ++ .num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID, ++ .num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID, ++ .ext_mem_ids = ipu6se_fw_psys_mem_size, ++ ++ .num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID, ++ ++ .dfms = ipu6se_fw_psys_dfms, ++ ++ .cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID, ++ .cell_mem = &ipu6se_fw_psys_c_mem[0][0], ++}; ++ ++const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs; +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h +new file mode 
100644 +index 0000000..81c618d +--- /dev/null ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2018 - 2024 Intel Corporation */ ++ ++#ifndef IPU6SE_PLATFORM_RESOURCES_H ++#define IPU6SE_PLATFORM_RESOURCES_H ++ ++#include ++#include ++#include "ipu-platform-resources.h" ++ ++#define IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 1 ++ ++enum { ++ IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, ++ IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, ++ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, ++ IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_DMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_VMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_BAMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_PMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_N_MEM_TYPE_ID ++}; ++ ++enum ipu6se_mem_id { ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0, /* TRANSFER VMEM 0 */ ++ IPU6SE_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ ++ IPU6SE_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ ++ IPU6SE_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ ++ IPU6SE_FW_PSYS_N_MEM_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID, ++ IPU6SE_FW_PSYS_N_DEV_CHN_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0, ++ IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID, ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6SE_FW_PSYS_N_CELL_TYPE_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_SP0_ID = 0, ++ IPU6SE_FW_PSYS_ISA_ICA_ID, ++ IPU6SE_FW_PSYS_ISA_LSC_ID, ++ IPU6SE_FW_PSYS_ISA_DPC_ID, ++ IPU6SE_FW_PSYS_ISA_B2B_ID, ++ IPU6SE_FW_PSYS_ISA_BNLM_ID, ++ IPU6SE_FW_PSYS_ISA_DM_ID, ++ IPU6SE_FW_PSYS_ISA_R2I_SIE_ID, 
++ IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID, ++ IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID, ++ IPU6SE_FW_PSYS_ISA_AWB_ID, ++ IPU6SE_FW_PSYS_ISA_AE_ID, ++ IPU6SE_FW_PSYS_ISA_AF_ID, ++ IPU6SE_FW_PSYS_ISA_PAF_ID, ++ IPU6SE_FW_PSYS_N_CELL_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0, ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, ++}; ++ ++/* Excluding PMEM */ ++#define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1) ++#define IPU6SE_FW_PSYS_N_DEV_DFM_ID \ ++ (IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1) ++#define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE 0x0800 ++/* Transfer VMEM0 words, ref HAS Transfer*/ ++#define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 ++#define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ ++#define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE 0x4000 ++#define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE 0x1000 ++ ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 22 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 22 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 22 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 ++ ++#define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 ++#define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 ++ ++#endif /* IPU6SE_PLATFORM_RESOURCES_H */ +-- +2.43.0 + diff --git a/patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch b/patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch new file mode 100644 index 0000000..4b628f0 --- /dev/null +++ b/patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch @@ -0,0 +1,134 @@ +From 6521b164624d49c5e397ed3ec0759aab103c7570 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Mon, 12 Jan 2026 08:12:25 -0700 +Subject: [PATCH 22/28] media: intel-ipu6: support IPU6 PSYS FW trace dump for + upstream driver + +[media][pci]Support IPU6 PSYS 
FW trace dump for upstream driver +Signed-off-by: hepengpx +--- + .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 6 +++ + .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 1 + + .../media/pci/intel/ipu6/psys/ipu-psys.c | 44 ++++++++++++++++++- + 3 files changed, 50 insertions(+), 1 deletion(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +index adcee20..077f140 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +@@ -891,6 +891,12 @@ int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle) + } + EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); + ++bool is_ipu_trace_enable(void) ++{ ++ return ipu_trace_enable; ++} ++EXPORT_SYMBOL_GPL(is_ipu_trace_enable); ++ + MODULE_AUTHOR("Samu Onkalo "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel ipu trace support"); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +index f66d889..fe89d1b 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +@@ -144,4 +144,5 @@ void ipu_trace_restore(struct device *dev); + void ipu_trace_uninit(struct device *dev); + void ipu_trace_stop(struct device *dev); + int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); ++bool is_ipu_trace_enable(void); + #endif +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +index 9f36749..e8638a9 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -31,6 +31,7 @@ + #include "ipu-psys.h" + #include "ipu6-platform-regs.h" + #include "ipu6-fw-com.h" ++#include "ipu6-trace.h" + + static bool async_fw_init; + module_param(async_fw_init, bool, 0664); +@@ -1201,6 +1202,8 @@ static int ipu_psys_sched_cmd(void *ptr) + 
return 0; + } + ++#include "../ipu6-trace.h" ++ + static void start_sp(struct ipu6_bus_device *adev) + { + struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); +@@ -1211,7 +1214,7 @@ static void start_sp(struct ipu6_bus_device *adev) + val |= IPU6_PSYS_SPC_STATUS_START | + IPU6_PSYS_SPC_STATUS_RUN | + IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; +- val |= psys->icache_prefetch_sp ? ++ val |= (psys->icache_prefetch_sp || is_ipu_trace_enable()) ? + IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; + writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); + } +@@ -1304,6 +1307,40 @@ static void run_fw_init_work(struct work_struct *work) + } + } + ++struct ipu_trace_block psys_trace_blocks[] = { ++ { ++ .offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE, ++ .type = IPU_TRACE_BLOCK_TUN, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE, ++ .type = IPU_TRACE_BLOCK_TM, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE, ++ .type = IPU_TRACE_BLOCK_TM, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPC_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_MMU_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, ++ .type = IPU_TRACE_TIMER_RST, ++ }, ++ { ++ .type = IPU_TRACE_BLOCK_END, ++ } ++}; ++ + static int ipu6_psys_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) + { +@@ -1442,6 +1479,9 @@ static int ipu6_psys_probe(struct auxiliary_device *auxdev, + strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, + sizeof(psys->caps.dev_model)); + ++ ipu_trace_init(adev->isp, psys->pdata->base, &auxdev->dev, ++ psys_trace_blocks); ++ + mutex_unlock(&ipu_psys_mutex); + + dev_info(dev, "psys probe minor: %d\n", minor); +@@ -1503,6 +1543,8 @@ static void ipu6_psys_remove(struct auxiliary_device *auxdev) + + clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); + ++ 
ipu_trace_uninit(&auxdev->dev); ++ + mutex_unlock(&ipu_psys_mutex); + + mutex_destroy(&psys->mutex); +-- +2.43.0 + diff --git a/patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch b/patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch new file mode 100644 index 0000000..04a048c --- /dev/null +++ b/patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch @@ -0,0 +1,37 @@ +From 8fe1a4467308a5bc10777ecbc95f5b660f566763 Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Mon, 17 Nov 2025 07:44:33 -0700 +Subject: [PATCH 23/28] ipu7 dkms : v4l2-core makefile adaptation 6.12 + +Signed-off-by: florent pirou +--- + 6.12.0/drivers/media/v4l2-core/Makefile | 3 +++ + 6.12.0/drivers/media/v4l2-core/v4l2-dev.c | 1 + + 2 files changed, 4 insertions(+) + +diff --git a/6.12.0/drivers/media/v4l2-core/Makefile b/6.12.0/drivers/media/v4l2-core/Makefile +index 2177b9d..82c235f 100644 +--- a/6.12.0/drivers/media/v4l2-core/Makefile ++++ b/6.12.0/drivers/media/v4l2-core/Makefile +@@ -2,6 +2,9 @@ + # + # Makefile for the V4L2 core + # ++# ipu7-dkms appendix ++CC := ${CC} -I ${M}/../../../include-overrides -I ${M}/../tuners -I ${M}/../dvb-core -I ${M}/../dvb-frontends ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + + ccflags-y += -I$(srctree)/drivers/media/dvb-frontends + ccflags-y += -I$(srctree)/drivers/media/tuners +diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-dev.c b/6.12.0/drivers/media/v4l2-core/v4l2-dev.c +index 3d7711c..7b3e462 100644 +--- a/6.12.0/drivers/media/v4l2-core/v4l2-dev.c ++++ b/6.12.0/drivers/media/v4l2-core/v4l2-dev.c +@@ -1231,3 +1231,4 @@ MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, + MODULE_DESCRIPTION("Video4Linux2 core driver"); + MODULE_LICENSE("GPL"); + MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + diff --git a/patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch 
b/patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch new file mode 100644 index 0000000..b4c8be3 --- /dev/null +++ b/patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -0,0 +1,28 @@ +From 2e8f579bd4a1984f0ba6b0e7a259558ba0de5d09 Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Mon, 17 Nov 2025 07:45:23 -0700 +Subject: [PATCH 24/28] drivers: media: set v4l2_subdev_enable_streams_api=true + for WA 6.12 + +Signed-off-by: hepengpx +Signed-off-by: florent pirou +--- + 6.12.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c +index 3a4ba08..f535929 100644 +--- a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c +@@ -32,7 +32,7 @@ + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +-static bool v4l2_subdev_enable_streams_api; ++static bool v4l2_subdev_enable_streams_api = true; + #endif + + /* +-- +2.43.0 + diff --git a/patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch b/patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch new file mode 100644 index 0000000..7bf0c85 --- /dev/null +++ b/patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch @@ -0,0 +1,65 @@ +From 07afeb848cc7d9a2549b996924297fb09ca43ce3 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Mon, 12 Jan 2026 03:45:03 -0700 +Subject: [PATCH 25/29] ipu6-dkms: add isys makefile adaptation 6.12 + +*ipu6-isys: compile ISYS RESET and ACPI PDATA support +--- + 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 7 +++++++ + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 1 + + 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c | 1 + + 4 files changed, 10 insertions(+) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +index 
67c13c9..9a0a130 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++CC := ${CC} -I ${M}/../../../../../include-overrides -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ ++export CONFIG_VIDEO_INTEL_IPU6 = m ++ + intel-ipu6-y := ipu6.o \ + ipu6-bus.o \ + ipu6-dma.o \ +@@ -22,4 +27,6 @@ intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-dwc-phy.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o ++ ++subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index caf29f7..e9fbb9a 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1759,3 +1759,4 @@ MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 input system driver"); + MODULE_IMPORT_NS(INTEL_IPU6); + MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index c9513c5..3fa8980 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -958,3 +958,4 @@ MODULE_AUTHOR("Yunliang Ding "); + MODULE_AUTHOR("Hongju Wang "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 PCI driver"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +index e8638a9..4925c4d 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -1639,3 +1639,4 @@ MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 processing system 
driver"); + MODULE_IMPORT_NS(DMA_BUF); + MODULE_IMPORT_NS(INTEL_IPU6); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + diff --git a/patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch b/patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch new file mode 100644 index 0000000..920d8b9 --- /dev/null +++ b/patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch @@ -0,0 +1,80 @@ +From 60f5dbc2e602ad4e56fc2000448be4e01d4fdadf Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Mon, 12 Jan 2026 11:15:12 -0700 +Subject: [PATCH 26/29] ipu6-dkms: align ipu7 acpi pdata device parser on 6.12 + +Signed-off-by: Florent Pirou +--- + 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 5 ++++- + 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 1 + + 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 8 ++++---- + 3 files changed, 9 insertions(+), 5 deletions(-) + +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +index c3d8ffd..9355e3f 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile +@@ -1,8 +1,11 @@ + # SPDX-License-Identifier: GPL-2.0-only + +-CC := ${CC} -I ${M}/../../../../../include-overrides -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../include-overrides -DCONFIG_INTEL_IPU_ACPI -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET + ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + ++# Path to ipu_acpi module build directory ++KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers ++ + export CONFIG_VIDEO_INTEL_IPU6 = m + + intel-ipu6-y := ipu6.o \ +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 57df01c..f935223 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ 
b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -104,6 +104,7 @@ struct isys_iwake_watermark { + struct ipu6_isys_csi2_config { + u32 nlanes; + u32 port; ++ enum v4l2_mbus_type bus_type; + }; + + struct sensor_async_sd { +diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +index 3fa8980..0fb6b64 100644 +--- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -35,12 +35,12 @@ + #include "ipu6-trace.h" + + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#include ++#include + #endif + + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int isys_init_acpi_add_device(struct device *dev, void *priv, +- struct ipu6_isys_csi2_config *csi2, ++ void *csi2, + bool reprobe) + { + return 0; +@@ -437,14 +437,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!spdata) { + dev_dbg(&pdev->dev, "No subdevice info provided"); +- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, NULL, + isys_init_acpi_add_device); + if (acpi_pdata && (*acpi_pdata->subdevs)) { + pdata->spdata = acpi_pdata; + } + } else { + dev_dbg(&pdev->dev, "Subdevice info found"); +- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, (void **)&spdata, + isys_init_acpi_add_device); + } + if (ret) +-- +2.43.0 + From a1edd778a41b4be6f16c3d9bd9749b79ed897cb5 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 12:11:22 -0700 Subject: [PATCH 06/14] v4l2-core, ipu6-isys : add 6.18 v4l2_subdev_enable_streams_api support * ipu6-isys : add intel-lts v6.18.6 quilt ontop of upstream v6.18 * ipu6-psys : add intel-lts v6.18.6 quilt ontop of upstream v6.18 * v4l2-core : add v4l2_subdev_enable_streams_api support ontop of 
upstream v6.17 * v4l2-core : add v4l2_subdev_enable_streams_api support ontop of upstream v6.18 Signed-off-by: Florent Pirou --- .../drivers/media/pci/intel/ipu6/Kconfig | 0 .../drivers/media/pci/intel/ipu6/Makefile | 0 .../drivers/media/pci/intel/ipu6/ipu6-bus.c | 0 .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 0 .../media/pci/intel/ipu6/ipu6-buttress.c | 0 .../media/pci/intel/ipu6/ipu6-buttress.h | 0 .../drivers/media/pci/intel/ipu6/ipu6-cpd.c | 0 .../drivers/media/pci/intel/ipu6/ipu6-cpd.h | 0 .../drivers/media/pci/intel/ipu6/ipu6-dma.c | 0 .../drivers/media/pci/intel/ipu6/ipu6-dma.h | 0 .../media/pci/intel/ipu6/ipu6-fw-com.c | 0 .../media/pci/intel/ipu6/ipu6-fw-com.h | 0 .../media/pci/intel/ipu6/ipu6-fw-isys.c | 0 .../media/pci/intel/ipu6/ipu6-fw-isys.h | 0 .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 0 .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 0 .../media/pci/intel/ipu6/ipu6-isys-dwc-phy.c | 0 .../media/pci/intel/ipu6/ipu6-isys-jsl-phy.c | 0 .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 0 .../media/pci/intel/ipu6/ipu6-isys-queue.c | 0 .../media/pci/intel/ipu6/ipu6-isys-queue.h | 0 .../media/pci/intel/ipu6/ipu6-isys-subdev.c | 0 .../media/pci/intel/ipu6/ipu6-isys-subdev.h | 0 .../media/pci/intel/ipu6/ipu6-isys-video.c | 0 .../media/pci/intel/ipu6/ipu6-isys-video.h | 0 .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 0 .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 0 .../drivers/media/pci/intel/ipu6/ipu6-mmu.c | 0 .../drivers/media/pci/intel/ipu6/ipu6-mmu.h | 0 .../intel/ipu6/ipu6-platform-buttress-regs.h | 0 .../intel/ipu6/ipu6-platform-isys-csi2-reg.h | 0 .../media/pci/intel/ipu6/ipu6-platform-regs.h | 0 .../drivers/media/pci/intel/ipu6/ipu6.c | 0 .../drivers/media/pci/intel/ipu6/ipu6.h | 0 6.18.0/drivers/media/v4l2-core/Kconfig | 84 + 6.18.0/drivers/media/v4l2-core/Makefile | 37 + 6.18.0/drivers/media/v4l2-core/tuner-core.c | 1424 +++ 6.18.0/drivers/media/v4l2-core/v4l2-async.c | 976 ++ 6.18.0/drivers/media/v4l2-core/v4l2-cci.c | 203 + 
6.18.0/drivers/media/v4l2-core/v4l2-common.c | 779 ++ .../media/v4l2-core/v4l2-compat-ioctl32.c | 1207 +++ .../drivers/media/v4l2-core/v4l2-ctrls-api.c | 1361 +++ .../drivers/media/v4l2-core/v4l2-ctrls-core.c | 2721 ++++++ .../drivers/media/v4l2-core/v4l2-ctrls-defs.c | 1685 ++++ .../drivers/media/v4l2-core/v4l2-ctrls-priv.h | 95 + .../media/v4l2-core/v4l2-ctrls-request.c | 501 + 6.18.0/drivers/media/v4l2-core/v4l2-dev.c | 1258 +++ 6.18.0/drivers/media/v4l2-core/v4l2-device.c | 295 + .../drivers/media/v4l2-core/v4l2-dv-timings.c | 1275 +++ 6.18.0/drivers/media/v4l2-core/v4l2-event.c | 373 + 6.18.0/drivers/media/v4l2-core/v4l2-fh.c | 119 + .../media/v4l2-core/v4l2-flash-led-class.c | 746 ++ 6.18.0/drivers/media/v4l2-core/v4l2-fwnode.c | 1299 +++ 6.18.0/drivers/media/v4l2-core/v4l2-h264.c | 453 + 6.18.0/drivers/media/v4l2-core/v4l2-i2c.c | 185 + 6.18.0/drivers/media/v4l2-core/v4l2-ioctl.c | 3530 +++++++ 6.18.0/drivers/media/v4l2-core/v4l2-jpeg.c | 713 ++ 6.18.0/drivers/media/v4l2-core/v4l2-mc.c | 614 ++ 6.18.0/drivers/media/v4l2-core/v4l2-mem2mem.c | 1639 ++++ 6.18.0/drivers/media/v4l2-core/v4l2-spi.c | 78 + .../media/v4l2-core/v4l2-subdev-priv.h | 14 + 6.18.0/drivers/media/v4l2-core/v4l2-subdev.c | 2639 ++++++ 6.18.0/drivers/media/v4l2-core/v4l2-trace.c | 12 + 6.18.0/drivers/media/v4l2-core/v4l2-vp9.c | 1850 ++++ .../media/cadence/cdns-csi2rx.h | 19 + 6.18.0/include-overrides/media/cec-notifier.h | 166 + 6.18.0/include-overrides/media/cec-pin.h | 79 + 6.18.0/include-overrides/media/cec.h | 597 ++ .../media/davinci/vpfe_types.h | 38 + .../media/davinci/vpif_types.h | 78 + 6.18.0/include-overrides/media/demux.h | 600 ++ 6.18.0/include-overrides/media/dmxdev.h | 213 + .../media/drv-intf/cx2341x.h | 283 + .../media/drv-intf/cx25840.h | 262 + .../media/drv-intf/exynos-fimc.h | 157 + .../media/drv-intf/msp3400.h | 213 + .../media/drv-intf/renesas-ceu.h | 26 + .../media/drv-intf/s3c_camif.h | 38 + .../media/drv-intf/saa7146.h | 472 + .../media/drv-intf/saa7146_vv.h | 221 + 
.../include-overrides/media/drv-intf/sh_vou.h | 30 + .../include-overrides/media/drv-intf/si476x.h | 28 + .../media/drv-intf/tea575x.h | 70 + 6.18.0/include-overrides/media/dvb-usb-ids.h | 471 + .../include-overrides/media/dvb_ca_en50221.h | 142 + 6.18.0/include-overrides/media/dvb_demux.h | 354 + 6.18.0/include-overrides/media/dvb_frontend.h | 834 ++ 6.18.0/include-overrides/media/dvb_net.h | 95 + .../include-overrides/media/dvb_ringbuffer.h | 280 + 6.18.0/include-overrides/media/dvb_vb2.h | 280 + 6.18.0/include-overrides/media/dvbdev.h | 493 + 6.18.0/include-overrides/media/frame_vector.h | 47 + 6.18.0/include-overrides/media/i2c/adp1653.h | 114 + 6.18.0/include-overrides/media/i2c/adv7183.h | 35 + 6.18.0/include-overrides/media/i2c/adv7343.h | 55 + 6.18.0/include-overrides/media/i2c/adv7393.h | 20 + 6.18.0/include-overrides/media/i2c/adv7511.h | 33 + 6.18.0/include-overrides/media/i2c/adv7604.h | 157 + 6.18.0/include-overrides/media/i2c/adv7842.h | 227 + 6.18.0/include-overrides/media/i2c/ak881x.h | 22 + 6.18.0/include-overrides/media/i2c/bt819.h | 24 + 6.18.0/include-overrides/media/i2c/cs5345.h | 27 + 6.18.0/include-overrides/media/i2c/cs53l32a.h | 22 + .../include-overrides/media/i2c/ds90ub9xx.h | 22 + .../include-overrides/media/i2c/ir-kbd-i2c.h | 62 + 6.18.0/include-overrides/media/i2c/lm3560.h | 84 + 6.18.0/include-overrides/media/i2c/lm3646.h | 84 + 6.18.0/include-overrides/media/i2c/m52790.h | 81 + 6.18.0/include-overrides/media/i2c/mt9t112.h | 27 + 6.18.0/include-overrides/media/i2c/mt9v011.h | 14 + 6.18.0/include-overrides/media/i2c/ov2659.h | 22 + 6.18.0/include-overrides/media/i2c/ov7670.h | 20 + 6.18.0/include-overrides/media/i2c/ov772x.h | 58 + .../include-overrides/media/i2c/rj54n1cb0c.h | 16 + 6.18.0/include-overrides/media/i2c/saa6588.h | 31 + 6.18.0/include-overrides/media/i2c/saa7115.h | 128 + 6.18.0/include-overrides/media/i2c/saa7127.h | 28 + 6.18.0/include-overrides/media/i2c/tc358743.h | 117 + 6.18.0/include-overrides/media/i2c/tda1997x.h 
| 42 + 6.18.0/include-overrides/media/i2c/ths7303.h | 28 + 6.18.0/include-overrides/media/i2c/tvaudio.h | 52 + 6.18.0/include-overrides/media/i2c/tvp514x.h | 91 + 6.18.0/include-overrides/media/i2c/tvp7002.h | 41 + 6.18.0/include-overrides/media/i2c/tw9910.h | 40 + 6.18.0/include-overrides/media/i2c/uda1342.h | 16 + .../include-overrides/media/i2c/upd64031a.h | 27 + 6.18.0/include-overrides/media/i2c/upd64083.h | 45 + 6.18.0/include-overrides/media/i2c/wm8775.h | 32 + 6.18.0/include-overrides/media/imx.h | 11 + 6.18.0/include-overrides/media/ipu-bridge.h | 182 + .../include-overrides/media/ipu6-pci-table.h | 28 + 6.18.0/include-overrides/media/jpeg.h | 20 + .../media/media-dev-allocator.h | 63 + 6.18.0/include-overrides/media/media-device.h | 518 + .../include-overrides/media/media-devnode.h | 168 + 6.18.0/include-overrides/media/media-entity.h | 1450 +++ .../include-overrides/media/media-request.h | 442 + 6.18.0/include-overrides/media/mipi-csi2.h | 47 + 6.18.0/include-overrides/media/rc-core.h | 377 + 6.18.0/include-overrides/media/rc-map.h | 357 + 6.18.0/include-overrides/media/rcar-fcp.h | 43 + 6.18.0/include-overrides/media/tpg/v4l2-tpg.h | 668 ++ 6.18.0/include-overrides/media/tuner-types.h | 205 + 6.18.0/include-overrides/media/tuner.h | 229 + 6.18.0/include-overrides/media/tveeprom.h | 116 + 6.18.0/include-overrides/media/v4l2-async.h | 346 + 6.18.0/include-overrides/media/v4l2-cci.h | 141 + 6.18.0/include-overrides/media/v4l2-common.h | 737 ++ 6.18.0/include-overrides/media/v4l2-ctrls.h | 1633 ++++ 6.18.0/include-overrides/media/v4l2-dev.h | 665 ++ 6.18.0/include-overrides/media/v4l2-device.h | 569 ++ .../include-overrides/media/v4l2-dv-timings.h | 310 + 6.18.0/include-overrides/media/v4l2-event.h | 208 + 6.18.0/include-overrides/media/v4l2-fh.h | 181 + .../media/v4l2-flash-led-class.h | 186 + 6.18.0/include-overrides/media/v4l2-fwnode.h | 414 + 6.18.0/include-overrides/media/v4l2-h264.h | 89 + .../media/v4l2-image-sizes.h | 46 + 
6.18.0/include-overrides/media/v4l2-ioctl.h | 785 ++ 6.18.0/include-overrides/media/v4l2-jpeg.h | 180 + 6.18.0/include-overrides/media/v4l2-mc.h | 232 + .../include-overrides/media/v4l2-mediabus.h | 278 + 6.18.0/include-overrides/media/v4l2-mem2mem.h | 902 ++ 6.18.0/include-overrides/media/v4l2-rect.h | 207 + 6.18.0/include-overrides/media/v4l2-subdev.h | 1998 ++++ 6.18.0/include-overrides/media/v4l2-vp9.h | 233 + .../include-overrides/media/videobuf2-core.h | 1348 +++ .../media/videobuf2-dma-contig.h | 32 + .../media/videobuf2-dma-sg.h | 26 + .../include-overrides/media/videobuf2-dvb.h | 69 + .../media/videobuf2-memops.h | 41 + .../include-overrides/media/videobuf2-v4l2.h | 392 + .../media/videobuf2-vmalloc.h | 20 + 6.18.0/include-overrides/media/vsp1.h | 213 + dkms.conf | 69 +- include/uapi/linux/ipu-psys.h | 121 + ...Use-v4l2_ctrl_subdev_subscribe_event.patch | 34 + ...Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch | 32 + ...Set-embedded-data-type-correctly-for.patch | 35 + ...se-module-parameter-to-set-isys-freq.patch | 45 + .../0054-media-pci-Enable-ISYS-reset.patch | 626 ++ ...tel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch | 41 + ...nc-at-buffer_prepare-callback-as-DMA.patch | 42 + ...-Support-IPU6-ISYS-FW-trace-dump-for.patch | 1432 +++ ...der-of-return-buffers-should-be-FIFO.patch | 100 + ...-Modify-enble-disable-stream-in-CSI2.patch | 338 + ...the-correct-SOF-for-different-stream.patch | 29 + ...ia-pci-support-imx390-for-6.11.0-rc3.patch | 981 ++ patches/0062-i2c-media-fix-cov-issue.patch | 31 + ...add-ipu-acpi-module-to-linux-drivers.patch | 252 + ...ster-i2c-device-to-complete-ext_subd.patch | 56 + ...5-media-pci-add-missing-if-for-PDATA.patch | 114 + ...edia-pci-refine-PDATA-related-config.patch | 527 ++ ...I-PDATA-and-ACPI-fwnode-build-for-EC.patch | 382 + ...se-module-parameter-to-set-psys-freq.patch | 43 + .../0069-media-pci-add-IPU6-PSYS-driver.patch | 8335 +++++++++++++++++ ...-support-IPU6-PSYS-FW-trace-dump-for.patch | 134 + 
...el-ipu6-Constify-ipu6_buttress_ctrl-.patch | 84 + ...s-v4l2-core-makefile-adaptation-6.17.patch | 37 + ...t-v4l2_subdev_enable_streams_api-tru.patch | 28 + ...s-v4l2-core-makefile-adaptation-6.18.patch | 27 + ...t-v4l2_subdev_enable_streams_api-tru.patch | 27 + ...2-subdev-Make-struct-v4l2_subdev_str.patch | 95 + ...ms-add-isys-makefile-adaptation-6.17.patch | 48 + ...ipu7-acpi-pdata-device-parser-on-6.1.patch | 89 + 205 files changed, 68332 insertions(+), 2 deletions(-) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/Kconfig (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/Makefile (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-bus.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-bus.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-buttress.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-buttress.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-cpd.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-cpd.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-dma.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-dma.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-fw-com.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-fw-com.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c 
(100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-video.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys-video.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-isys.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-mmu.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-mmu.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6.c (100%) rename {6.17.0 => 6.18.0}/drivers/media/pci/intel/ipu6/ipu6.h (100%) create mode 100644 6.18.0/drivers/media/v4l2-core/Kconfig create mode 100644 6.18.0/drivers/media/v4l2-core/Makefile create mode 100644 6.18.0/drivers/media/v4l2-core/tuner-core.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-async.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-cci.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-common.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-ctrls-api.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-ctrls-core.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-ctrls-request.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-dev.c create mode 100644 
6.18.0/drivers/media/v4l2-core/v4l2-device.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-dv-timings.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-event.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-fh.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-flash-led-class.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-fwnode.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-h264.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-i2c.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-ioctl.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-jpeg.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-mc.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-mem2mem.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-spi.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-subdev-priv.h create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-subdev.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-trace.c create mode 100644 6.18.0/drivers/media/v4l2-core/v4l2-vp9.c create mode 100644 6.18.0/include-overrides/media/cadence/cdns-csi2rx.h create mode 100644 6.18.0/include-overrides/media/cec-notifier.h create mode 100644 6.18.0/include-overrides/media/cec-pin.h create mode 100644 6.18.0/include-overrides/media/cec.h create mode 100644 6.18.0/include-overrides/media/davinci/vpfe_types.h create mode 100644 6.18.0/include-overrides/media/davinci/vpif_types.h create mode 100644 6.18.0/include-overrides/media/demux.h create mode 100644 6.18.0/include-overrides/media/dmxdev.h create mode 100644 6.18.0/include-overrides/media/drv-intf/cx2341x.h create mode 100644 6.18.0/include-overrides/media/drv-intf/cx25840.h create mode 100644 6.18.0/include-overrides/media/drv-intf/exynos-fimc.h create mode 100644 6.18.0/include-overrides/media/drv-intf/msp3400.h create mode 100644 6.18.0/include-overrides/media/drv-intf/renesas-ceu.h create mode 100644 6.18.0/include-overrides/media/drv-intf/s3c_camif.h 
create mode 100644 6.18.0/include-overrides/media/drv-intf/saa7146.h create mode 100644 6.18.0/include-overrides/media/drv-intf/saa7146_vv.h create mode 100644 6.18.0/include-overrides/media/drv-intf/sh_vou.h create mode 100644 6.18.0/include-overrides/media/drv-intf/si476x.h create mode 100644 6.18.0/include-overrides/media/drv-intf/tea575x.h create mode 100644 6.18.0/include-overrides/media/dvb-usb-ids.h create mode 100644 6.18.0/include-overrides/media/dvb_ca_en50221.h create mode 100644 6.18.0/include-overrides/media/dvb_demux.h create mode 100644 6.18.0/include-overrides/media/dvb_frontend.h create mode 100644 6.18.0/include-overrides/media/dvb_net.h create mode 100644 6.18.0/include-overrides/media/dvb_ringbuffer.h create mode 100644 6.18.0/include-overrides/media/dvb_vb2.h create mode 100644 6.18.0/include-overrides/media/dvbdev.h create mode 100644 6.18.0/include-overrides/media/frame_vector.h create mode 100644 6.18.0/include-overrides/media/i2c/adp1653.h create mode 100644 6.18.0/include-overrides/media/i2c/adv7183.h create mode 100644 6.18.0/include-overrides/media/i2c/adv7343.h create mode 100644 6.18.0/include-overrides/media/i2c/adv7393.h create mode 100644 6.18.0/include-overrides/media/i2c/adv7511.h create mode 100644 6.18.0/include-overrides/media/i2c/adv7604.h create mode 100644 6.18.0/include-overrides/media/i2c/adv7842.h create mode 100644 6.18.0/include-overrides/media/i2c/ak881x.h create mode 100644 6.18.0/include-overrides/media/i2c/bt819.h create mode 100644 6.18.0/include-overrides/media/i2c/cs5345.h create mode 100644 6.18.0/include-overrides/media/i2c/cs53l32a.h create mode 100644 6.18.0/include-overrides/media/i2c/ds90ub9xx.h create mode 100644 6.18.0/include-overrides/media/i2c/ir-kbd-i2c.h create mode 100644 6.18.0/include-overrides/media/i2c/lm3560.h create mode 100644 6.18.0/include-overrides/media/i2c/lm3646.h create mode 100644 6.18.0/include-overrides/media/i2c/m52790.h create mode 100644 
6.18.0/include-overrides/media/i2c/mt9t112.h create mode 100644 6.18.0/include-overrides/media/i2c/mt9v011.h create mode 100644 6.18.0/include-overrides/media/i2c/ov2659.h create mode 100644 6.18.0/include-overrides/media/i2c/ov7670.h create mode 100644 6.18.0/include-overrides/media/i2c/ov772x.h create mode 100644 6.18.0/include-overrides/media/i2c/rj54n1cb0c.h create mode 100644 6.18.0/include-overrides/media/i2c/saa6588.h create mode 100644 6.18.0/include-overrides/media/i2c/saa7115.h create mode 100644 6.18.0/include-overrides/media/i2c/saa7127.h create mode 100644 6.18.0/include-overrides/media/i2c/tc358743.h create mode 100644 6.18.0/include-overrides/media/i2c/tda1997x.h create mode 100644 6.18.0/include-overrides/media/i2c/ths7303.h create mode 100644 6.18.0/include-overrides/media/i2c/tvaudio.h create mode 100644 6.18.0/include-overrides/media/i2c/tvp514x.h create mode 100644 6.18.0/include-overrides/media/i2c/tvp7002.h create mode 100644 6.18.0/include-overrides/media/i2c/tw9910.h create mode 100644 6.18.0/include-overrides/media/i2c/uda1342.h create mode 100644 6.18.0/include-overrides/media/i2c/upd64031a.h create mode 100644 6.18.0/include-overrides/media/i2c/upd64083.h create mode 100644 6.18.0/include-overrides/media/i2c/wm8775.h create mode 100644 6.18.0/include-overrides/media/imx.h create mode 100644 6.18.0/include-overrides/media/ipu-bridge.h create mode 100644 6.18.0/include-overrides/media/ipu6-pci-table.h create mode 100644 6.18.0/include-overrides/media/jpeg.h create mode 100644 6.18.0/include-overrides/media/media-dev-allocator.h create mode 100644 6.18.0/include-overrides/media/media-device.h create mode 100644 6.18.0/include-overrides/media/media-devnode.h create mode 100644 6.18.0/include-overrides/media/media-entity.h create mode 100644 6.18.0/include-overrides/media/media-request.h create mode 100644 6.18.0/include-overrides/media/mipi-csi2.h create mode 100644 6.18.0/include-overrides/media/rc-core.h create mode 100644 
6.18.0/include-overrides/media/rc-map.h create mode 100644 6.18.0/include-overrides/media/rcar-fcp.h create mode 100644 6.18.0/include-overrides/media/tpg/v4l2-tpg.h create mode 100644 6.18.0/include-overrides/media/tuner-types.h create mode 100644 6.18.0/include-overrides/media/tuner.h create mode 100644 6.18.0/include-overrides/media/tveeprom.h create mode 100644 6.18.0/include-overrides/media/v4l2-async.h create mode 100644 6.18.0/include-overrides/media/v4l2-cci.h create mode 100644 6.18.0/include-overrides/media/v4l2-common.h create mode 100644 6.18.0/include-overrides/media/v4l2-ctrls.h create mode 100644 6.18.0/include-overrides/media/v4l2-dev.h create mode 100644 6.18.0/include-overrides/media/v4l2-device.h create mode 100644 6.18.0/include-overrides/media/v4l2-dv-timings.h create mode 100644 6.18.0/include-overrides/media/v4l2-event.h create mode 100644 6.18.0/include-overrides/media/v4l2-fh.h create mode 100644 6.18.0/include-overrides/media/v4l2-flash-led-class.h create mode 100644 6.18.0/include-overrides/media/v4l2-fwnode.h create mode 100644 6.18.0/include-overrides/media/v4l2-h264.h create mode 100644 6.18.0/include-overrides/media/v4l2-image-sizes.h create mode 100644 6.18.0/include-overrides/media/v4l2-ioctl.h create mode 100644 6.18.0/include-overrides/media/v4l2-jpeg.h create mode 100644 6.18.0/include-overrides/media/v4l2-mc.h create mode 100644 6.18.0/include-overrides/media/v4l2-mediabus.h create mode 100644 6.18.0/include-overrides/media/v4l2-mem2mem.h create mode 100644 6.18.0/include-overrides/media/v4l2-rect.h create mode 100644 6.18.0/include-overrides/media/v4l2-subdev.h create mode 100644 6.18.0/include-overrides/media/v4l2-vp9.h create mode 100644 6.18.0/include-overrides/media/videobuf2-core.h create mode 100644 6.18.0/include-overrides/media/videobuf2-dma-contig.h create mode 100644 6.18.0/include-overrides/media/videobuf2-dma-sg.h create mode 100644 6.18.0/include-overrides/media/videobuf2-dvb.h create mode 100644 
6.18.0/include-overrides/media/videobuf2-memops.h create mode 100644 6.18.0/include-overrides/media/videobuf2-v4l2.h create mode 100644 6.18.0/include-overrides/media/videobuf2-vmalloc.h create mode 100644 6.18.0/include-overrides/media/vsp1.h create mode 100644 include/uapi/linux/ipu-psys.h create mode 100644 patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch create mode 100644 patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch create mode 100644 patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch create mode 100644 patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch create mode 100644 patches/0054-media-pci-Enable-ISYS-reset.patch create mode 100644 patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch create mode 100644 patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch create mode 100644 patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch create mode 100644 patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch create mode 100644 patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch create mode 100644 patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch create mode 100644 patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch create mode 100644 patches/0062-i2c-media-fix-cov-issue.patch create mode 100644 patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch create mode 100644 patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch create mode 100644 patches/0065-media-pci-add-missing-if-for-PDATA.patch create mode 100644 patches/0066-media-pci-refine-PDATA-related-config.patch create mode 100644 patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch create mode 100644 patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch create mode 100644 patches/0069-media-pci-add-IPU6-PSYS-driver.patch create mode 100644 
patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch create mode 100644 patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch create mode 100644 patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch create mode 100644 patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch create mode 100644 patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch create mode 100644 patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch create mode 100644 patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch create mode 100644 patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch create mode 100644 patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch diff --git a/6.17.0/drivers/media/pci/intel/ipu6/Kconfig b/6.18.0/drivers/media/pci/intel/ipu6/Kconfig similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/Kconfig rename to 6.18.0/drivers/media/pci/intel/ipu6/Kconfig diff --git a/6.17.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/Makefile rename to 6.18.0/drivers/media/pci/intel/ipu6/Makefile diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-bus.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c rename to 
6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-dma.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c similarity index 100% rename from 
6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c rename to 
6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6.c rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h similarity index 100% rename from 6.17.0/drivers/media/pci/intel/ipu6/ipu6.h rename to 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h diff --git a/6.18.0/drivers/media/v4l2-core/Kconfig b/6.18.0/drivers/media/v4l2-core/Kconfig new file mode 100644 index 0000000..331b8e5 --- /dev/null +++ 
b/6.18.0/drivers/media/v4l2-core/Kconfig @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Generic video config states +# + +config VIDEO_V4L2_I2C + bool + depends on I2C && VIDEO_DEV + default y + +config VIDEO_V4L2_SUBDEV_API + bool + depends on VIDEO_DEV && MEDIA_CONTROLLER + help + Enables the V4L2 sub-device pad-level userspace API used to configure + video format, size and frame rate between hardware blocks. + + This API is mostly used by camera interfaces in embedded platforms. + +config VIDEO_ADV_DEBUG + bool "Enable advanced debug functionality on V4L2 drivers" + help + Say Y here to enable advanced debugging functionality on some + V4L devices. + In doubt, say N. + +config VIDEO_FIXED_MINOR_RANGES + bool "Enable old-style fixed minor ranges on drivers/video devices" + help + Say Y here to enable the old-style fixed-range minor assignments. + Only useful if you rely on the old behavior and use mknod instead of udev. + + When in doubt, say N. + +# Used by drivers that need tuner.ko +config VIDEO_TUNER + tristate + +# Used by drivers that need v4l2-jpeg.ko +config V4L2_JPEG_HELPER + tristate + +# Used by drivers that need v4l2-h264.ko +config V4L2_H264 + tristate + +# Used by drivers that need v4l2-vp9.ko +config V4L2_VP9 + tristate + +# Used by drivers that need v4l2-mem2mem.ko +config V4L2_MEM2MEM_DEV + tristate + depends on VIDEOBUF2_CORE + +# Used by LED subsystem flash drivers +config V4L2_FLASH_LED_CLASS + tristate "V4L2 flash API for LED flash class devices" + depends on VIDEO_DEV + depends on LEDS_CLASS_FLASH + select MEDIA_CONTROLLER + select V4L2_ASYNC + select VIDEO_V4L2_SUBDEV_API + help + Say Y here to enable V4L2 flash API support for LED flash + class drivers. + + When in doubt, say N. 
+ +config V4L2_FWNODE + tristate + select V4L2_ASYNC + +config V4L2_ASYNC + tristate + +config V4L2_CCI + tristate + +config V4L2_CCI_I2C + tristate + depends on I2C + select REGMAP_I2C + select V4L2_CCI diff --git a/6.18.0/drivers/media/v4l2-core/Makefile b/6.18.0/drivers/media/v4l2-core/Makefile new file mode 100644 index 0000000..2177b9d --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the V4L2 core +# + +ccflags-y += -I$(srctree)/drivers/media/dvb-frontends +ccflags-y += -I$(srctree)/drivers/media/tuners + +tuner-objs := tuner-core.o + +videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \ + v4l2-event.o v4l2-subdev.o v4l2-common.o \ + v4l2-ctrls-core.o v4l2-ctrls-api.o \ + v4l2-ctrls-request.o v4l2-ctrls-defs.o + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. LC_ALL=C sort Makefile) +videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o +videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o +videodev-$(CONFIG_SPI) += v4l2-spi.o +videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o +videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. 
LC_ALL=C sort Makefile) + +obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o +obj-$(CONFIG_V4L2_CCI) += v4l2-cci.o +obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o +obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o +obj-$(CONFIG_V4L2_H264) += v4l2-h264.o +obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o +obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o +obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o + +obj-$(CONFIG_VIDEO_TUNER) += tuner.o +obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o diff --git a/6.18.0/drivers/media/v4l2-core/tuner-core.c b/6.18.0/drivers/media/v4l2-core/tuner-core.c new file mode 100644 index 0000000..5687089 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/tuner-core.c @@ -0,0 +1,1424 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * i2c tv tuner chip device driver + * core core, i.e. kernel interfaces, registering and so on + * + * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer + * + * Copyright(c) 2005-2011 by Mauro Carvalho Chehab + * - Added support for a separate Radio tuner + * - Major rework and cleanups at the code + * + * This driver supports many devices and the idea is to let the driver + * detect which device is present. So rather than listing all supported + * devices here, we pretend to support a single, fake device type that will + * handle both radio and analog TV tuning. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mt20xx.h" +#include "tda8290.h" +#include "tea5761.h" +#include "tea5767.h" +#include "xc2028.h" +#include "tuner-simple.h" +#include "tda9887.h" +#include "xc5000.h" +#include "tda18271.h" +#include "xc4000.h" + +#define UNSET (-1U) + +/* + * Driver modprobe parameters + */ + +/* insmod options used at init time => read/only */ +static unsigned int addr; +static unsigned int no_autodetect; +static unsigned int show_i2c; + +module_param(addr, int, 0444); +module_param(no_autodetect, int, 0444); +module_param(show_i2c, int, 0444); + +/* insmod options used at runtime => read/write */ +static int tuner_debug; +static unsigned int tv_range[2] = { 44, 958 }; +static unsigned int radio_range[2] = { 65, 108 }; +static char pal[] = "--"; +static char secam[] = "--"; +static char ntsc[] = "-"; + +module_param_named(debug, tuner_debug, int, 0644); +module_param_array(tv_range, int, NULL, 0644); +module_param_array(radio_range, int, NULL, 0644); +module_param_string(pal, pal, sizeof(pal), 0644); +module_param_string(secam, secam, sizeof(secam), 0644); +module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); + +/* + * Static vars + */ + +static LIST_HEAD(tuner_list); +static const struct v4l2_subdev_ops tuner_ops; + +/* + * Debug macros + */ + +#undef pr_fmt + +#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \ + i2c_adapter_id(t->i2c->adapter), t->i2c->addr + + +#define dprintk(fmt, arg...) do { \ + if (tuner_debug) \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \ +} while (0) + +/* + * Internal enums/struct used inside the driver + */ + +/** + * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER + * + * @TUNER_PAD_RF_INPUT: + * Radiofrequency (RF) sink pad, usually linked to a RF connector entity. + * @TUNER_PAD_OUTPUT: + * tuner video output source pad. 
Contains the video chrominance + * and luminance or the hole bandwidth of the signal converted to + * an Intermediate Frequency (IF) or to baseband (on zero-IF tuners). + * @TUNER_PAD_AUD_OUT: + * Tuner audio output source pad. Tuners used to decode analog TV + * signals have an extra pad for audio output. Old tuners use an + * analog stage with a saw filter for the audio IF frequency. The + * output of the pad is, in this case, the audio IF, with should be + * decoded either by the bridge chipset (that's the case of cx2388x + * chipsets) or may require an external IF sound processor, like + * msp34xx. On modern silicon tuners, the audio IF decoder is usually + * incorporated at the tuner. On such case, the output of this pad + * is an audio sampled data. + * @TUNER_NUM_PADS: + * Number of pads of the tuner. + */ +enum tuner_pad_index { + TUNER_PAD_RF_INPUT, + TUNER_PAD_OUTPUT, + TUNER_PAD_AUD_OUT, + TUNER_NUM_PADS +}; + +/** + * enum if_vid_dec_pad_index - video IF-PLL pad index + * for MEDIA_ENT_F_IF_VID_DECODER + * + * @IF_VID_DEC_PAD_IF_INPUT: + * video Intermediate Frequency (IF) sink pad + * @IF_VID_DEC_PAD_OUT: + * IF-PLL video output source pad. Contains the video chrominance + * and luminance IF signals. + * @IF_VID_DEC_PAD_NUM_PADS: + * Number of pads of the video IF-PLL. 
+ */ +enum if_vid_dec_pad_index { + IF_VID_DEC_PAD_IF_INPUT, + IF_VID_DEC_PAD_OUT, + IF_VID_DEC_PAD_NUM_PADS +}; + +struct tuner { + /* device */ + struct dvb_frontend fe; + struct i2c_client *i2c; + struct v4l2_subdev sd; + struct list_head list; + + /* keep track of the current settings */ + v4l2_std_id std; + unsigned int tv_freq; + unsigned int radio_freq; + unsigned int audmode; + + enum v4l2_tuner_type mode; + unsigned int mode_mask; /* Combination of allowable modes */ + + bool standby; /* Standby mode */ + + unsigned int type; /* chip type id */ + void *config; + const char *name; + +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_pad pad[TUNER_NUM_PADS]; +#endif +}; + +/* + * Function prototypes + */ + +static void set_tv_freq(struct i2c_client *c, unsigned int freq); +static void set_radio_freq(struct i2c_client *c, unsigned int freq); + +/* + * tuner attach/detach logic + */ + +/* This macro allows us to probe dynamically, avoiding static links */ +#ifdef CONFIG_MEDIA_ATTACH +#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \ + int __r = -EINVAL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (int) __a(ARGS); \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "TUNER: Unable to find " \ + "symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +static void tuner_detach(struct dvb_frontend *fe) +{ + if (fe->ops.tuner_ops.release) { + fe->ops.tuner_ops.release(fe); + symbol_put_addr(fe->ops.tuner_ops.release); + } + if (fe->ops.analog_ops.release) { + fe->ops.analog_ops.release(fe); + symbol_put_addr(fe->ops.analog_ops.release); + } +} +#else +#define tuner_symbol_probe(FUNCTION, ARGS...) 
({ \ + FUNCTION(ARGS); \ +}) + +static void tuner_detach(struct dvb_frontend *fe) +{ + if (fe->ops.tuner_ops.release) + fe->ops.tuner_ops.release(fe); + if (fe->ops.analog_ops.release) + fe->ops.analog_ops.release(fe); +} +#endif + + +static inline struct tuner *to_tuner(struct v4l2_subdev *sd) +{ + return container_of(sd, struct tuner, sd); +} + +/* + * struct analog_demod_ops callbacks + */ + +static void fe_set_params(struct dvb_frontend *fe, + struct analog_parameters *params) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct tuner *t = fe->analog_demod_priv; + + if (NULL == fe_tuner_ops->set_analog_params) { + pr_warn("Tuner frontend module has no way to set freq\n"); + return; + } + fe_tuner_ops->set_analog_params(fe, params); +} + +static void fe_standby(struct dvb_frontend *fe) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + + if (fe_tuner_ops->sleep) + fe_tuner_ops->sleep(fe); +} + +static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg) +{ + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct tuner *t = fe->analog_demod_priv; + + if (fe_tuner_ops->set_config) + return fe_tuner_ops->set_config(fe, priv_cfg); + + pr_warn("Tuner frontend module has no way to set config\n"); + + return 0; +} + +static void tuner_status(struct dvb_frontend *fe); + +static const struct analog_demod_ops tuner_analog_ops = { + .set_params = fe_set_params, + .standby = fe_standby, + .set_config = fe_set_config, + .tuner_status = tuner_status +}; + +/* + * Functions to select between radio and TV and tuner probe/remove functions + */ + +/** + * set_type - Sets the tuner type for a given device + * + * @c: i2c_client descriptor + * @type: type of the tuner (e. g. 
tuner number) + * @new_mode_mask: Indicates if tuner supports TV and/or Radio + * @new_config: an optional parameter used by a few tuners to adjust + * internal parameters, like LNA mode + * @tuner_callback: an optional function to be called when switching + * to analog mode + * + * This function applies the tuner config to tuner specified + * by tun_setup structure. It contains several per-tuner initialization "magic" + */ +static void set_type(struct i2c_client *c, unsigned int type, + unsigned int new_mode_mask, void *new_config, + int (*tuner_callback) (void *dev, int component, int cmd, int arg)) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + unsigned char buffer[4]; + int tune_now = 1; + + if (type == UNSET || type == TUNER_ABSENT) { + dprintk("tuner 0x%02x: Tuner type absent\n", c->addr); + return; + } + + t->type = type; + t->config = new_config; + if (tuner_callback != NULL) { + dprintk("defining GPIO callback\n"); + t->fe.callback = tuner_callback; + } + + /* discard private data, in case set_type() was previously called */ + tuner_detach(&t->fe); + t->fe.analog_demod_priv = NULL; + + switch (t->type) { + case TUNER_MT2032: + if (!dvb_attach(microtune_attach, + &t->fe, t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + break; + case TUNER_PHILIPS_TDA8290: + { + if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter, + t->i2c->addr, t->config)) + goto attach_failed; + break; + } + case TUNER_TEA5767: + if (!dvb_attach(tea5767_attach, &t->fe, + t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + t->mode_mask = T_RADIO; + break; + case TUNER_TEA5761: + if (!dvb_attach(tea5761_attach, &t->fe, + t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + t->mode_mask = T_RADIO; + break; + case TUNER_PHILIPS_FMD1216ME_MK3: + case TUNER_PHILIPS_FMD1216MEX_MK3: + buffer[0] = 0x0b; + buffer[1] = 0xdc; + buffer[2] = 0x9c; + 
buffer[3] = 0x60; + i2c_master_send(c, buffer, 4); + mdelay(1); + buffer[2] = 0x86; + buffer[3] = 0x54; + i2c_master_send(c, buffer, 4); + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + break; + case TUNER_PHILIPS_TD1316: + buffer[0] = 0x0b; + buffer[1] = 0xdc; + buffer[2] = 0x86; + buffer[3] = 0xa4; + i2c_master_send(c, buffer, 4); + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + break; + case TUNER_XC2028: + { + struct xc2028_config cfg = { + .i2c_adap = t->i2c->adapter, + .i2c_addr = t->i2c->addr, + }; + if (!dvb_attach(xc2028_attach, &t->fe, &cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_TDA9887: + if (!dvb_attach(tda9887_attach, + &t->fe, t->i2c->adapter, t->i2c->addr)) + goto attach_failed; + break; + case TUNER_XC5000: + { + struct xc5000_config xc5000_cfg = { + .i2c_address = t->i2c->addr, + /* if_khz will be set at dvb_attach() */ + .if_khz = 0, + }; + + if (!dvb_attach(xc5000_attach, + &t->fe, t->i2c->adapter, &xc5000_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_XC5000C: + { + struct xc5000_config xc5000c_cfg = { + .i2c_address = t->i2c->addr, + /* if_khz will be set at dvb_attach() */ + .if_khz = 0, + .chip_id = XC5000C, + }; + + if (!dvb_attach(xc5000_attach, + &t->fe, t->i2c->adapter, &xc5000c_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_NXP_TDA18271: + { + struct tda18271_config cfg = { + .small_i2c = TDA18271_03_BYTE_CHUNK_INIT, + }; + + if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr, + t->i2c->adapter, &cfg)) + goto attach_failed; + tune_now = 0; + break; + } + case TUNER_XC4000: + { + struct xc4000_config xc4000_cfg = { + .i2c_address = t->i2c->addr, + /* FIXME: the correct parameters will be set */ + /* only when the digital dvb_attach() occurs */ + .default_pm = 0, + .dvb_amplitude = 0, + .set_smoothedcvbs = 0, + .if_khz = 0 + }; + if 
(!dvb_attach(xc4000_attach, + &t->fe, t->i2c->adapter, &xc4000_cfg)) + goto attach_failed; + tune_now = 0; + break; + } + default: + if (!dvb_attach(simple_tuner_attach, &t->fe, + t->i2c->adapter, t->i2c->addr, t->type)) + goto attach_failed; + + break; + } + + if ((NULL == analog_ops->set_params) && + (fe_tuner_ops->set_analog_params)) { + + t->name = fe_tuner_ops->info.name; + + t->fe.analog_demod_priv = t; + memcpy(analog_ops, &tuner_analog_ops, + sizeof(struct analog_demod_ops)); + + if (fe_tuner_ops->get_rf_strength) + analog_ops->has_signal = fe_tuner_ops->get_rf_strength; + if (fe_tuner_ops->get_afc) + analog_ops->get_afc = fe_tuner_ops->get_afc; + + } else { + t->name = analog_ops->info.name; + } + +#ifdef CONFIG_MEDIA_CONTROLLER + t->sd.entity.name = t->name; +#endif + + dprintk("type set to %s\n", t->name); + + t->mode_mask = new_mode_mask; + + /* Some tuners require more initialization setup before use, + such as firmware download or device calibration. + trying to set a frequency here will just fail + FIXME: better to move set_freq to the tuner code. This is needed + on analog tuners for PLL to properly work + */ + if (tune_now) { + if (V4L2_TUNER_RADIO == t->mode) + set_radio_freq(c, t->radio_freq); + else + set_tv_freq(c, t->tv_freq); + } + + dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", + c->adapter->name, c->dev.driver->name, c->addr << 1, type, + t->mode_mask); + return; + +attach_failed: + dprintk("Tuner attach for type = %d failed.\n", t->type); + t->type = TUNER_ABSENT; + + return; +} + +/** + * tuner_s_type_addr - Sets the tuner type for a device + * + * @sd: subdev descriptor + * @tun_setup: type to be associated to a given tuner i2c address + * + * This function applies the tuner config to tuner specified + * by tun_setup structure. + * If tuner I2C address is UNSET, then it will only set the device + * if the tuner supports the mode specified in the call. 
+ * If the address is specified, the change will be applied only if + * tuner I2C address matches. + * The call can change the tuner number and the tuner mode. + */ +static int tuner_s_type_addr(struct v4l2_subdev *sd, + struct tuner_setup *tun_setup) +{ + struct tuner *t = to_tuner(sd); + struct i2c_client *c = v4l2_get_subdevdata(sd); + + dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n", + tun_setup->type, + tun_setup->addr, + tun_setup->mode_mask, + tun_setup->config); + + if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && + (t->mode_mask & tun_setup->mode_mask))) || + (tun_setup->addr == c->addr)) { + set_type(c, tun_setup->type, tun_setup->mode_mask, + tun_setup->config, tun_setup->tuner_callback); + } else + dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n", + t->type, t->mode_mask, + tun_setup->addr, tun_setup->mode_mask); + + return 0; +} + +/** + * tuner_s_config - Sets tuner configuration + * + * @sd: subdev descriptor + * @cfg: tuner configuration + * + * Calls tuner set_config() private function to set some tuner-internal + * parameters + */ +static int tuner_s_config(struct v4l2_subdev *sd, + const struct v4l2_priv_tun_config *cfg) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (t->type != cfg->tuner) + return 0; + + if (analog_ops->set_config) { + analog_ops->set_config(&t->fe, cfg->priv); + return 0; + } + + dprintk("Tuner frontend module has no way to set config\n"); + return 0; +} + +/** + * tuner_lookup - Seek for tuner adapters + * + * @adap: i2c_adapter struct + * @radio: pointer to be filled if the adapter is radio + * @tv: pointer to be filled if the adapter is TV + * + * Search for existing radio and/or TV tuners on the given I2C adapter, + * discarding demod-only adapters (tda9887). 
+ * + * Note that when this function is called from tuner_probe you can be + * certain no other devices will be added/deleted at the same time, I2C + * core protects against that. + */ +static void tuner_lookup(struct i2c_adapter *adap, + struct tuner **radio, struct tuner **tv) +{ + struct tuner *pos; + + *radio = NULL; + *tv = NULL; + + list_for_each_entry(pos, &tuner_list, list) { + int mode_mask; + + if (pos->i2c->adapter != adap || + strcmp(pos->i2c->dev.driver->name, "tuner")) + continue; + + mode_mask = pos->mode_mask; + if (*radio == NULL && mode_mask == T_RADIO) + *radio = pos; + /* Note: currently TDA9887 is the only demod-only + device. If other devices appear then we need to + make this test more general. */ + else if (*tv == NULL && pos->type != TUNER_TDA9887 && + (pos->mode_mask & T_ANALOG_TV)) + *tv = pos; + } +} + +/** + *tuner_probe - Probes the existing tuners on an I2C bus + * + * @client: i2c_client descriptor + * + * This routine probes for tuners at the expected I2C addresses. On most + * cases, if a device answers to a given I2C address, it assumes that the + * device is a tuner. On a few cases, however, an additional logic is needed + * to double check if the device is really a tuner, or to identify the tuner + * type, like on tea5767/5761 devices. + * + * During client attach, set_type is called by adapter's attach_inform callback. + * set_type must then be completed by tuner_probe. 
+ */ +static int tuner_probe(struct i2c_client *client) +{ + struct tuner *t; + struct tuner *radio; + struct tuner *tv; +#ifdef CONFIG_MEDIA_CONTROLLER + int ret; +#endif + + t = kzalloc(sizeof(struct tuner), GFP_KERNEL); + if (NULL == t) + return -ENOMEM; + v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops); + t->i2c = client; + t->name = "(tuner unset)"; + t->type = UNSET; + t->audmode = V4L2_TUNER_MODE_STEREO; + t->standby = true; + t->radio_freq = 87.5 * 16000; /* Initial freq range */ + t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */ + + if (show_i2c) { + unsigned char buffer[16]; + int rc; + + memset(buffer, 0, sizeof(buffer)); + rc = i2c_master_recv(client, buffer, sizeof(buffer)); + if (rc >= 0) + pr_info("I2C RECV = %*ph\n", rc, buffer); + } + + /* autodetection code based on the i2c addr */ + if (!no_autodetect) { + switch (client->addr) { + case 0x10: + if (tuner_symbol_probe(tea5761_autodetection, + t->i2c->adapter, + t->i2c->addr) >= 0) { + t->type = TUNER_TEA5761; + t->mode_mask = T_RADIO; + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv) + tv->mode_mask &= ~T_RADIO; + + goto register_client; + } + kfree(t); + return -ENODEV; + case 0x42: + case 0x43: + case 0x4a: + case 0x4b: + /* If chip is not tda8290, don't register. + since it can be tda9887*/ + if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter, + t->i2c->addr) >= 0) { + dprintk("tda829x detected\n"); + } else { + /* Default is being tda9887 */ + t->type = TUNER_TDA9887; + t->mode_mask = T_RADIO | T_ANALOG_TV; + goto register_client; + } + break; + case 0x60: + if (tuner_symbol_probe(tea5767_autodetection, + t->i2c->adapter, t->i2c->addr) + >= 0) { + t->type = TUNER_TEA5767; + t->mode_mask = T_RADIO; + /* Sets freq to FM range */ + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv) + tv->mode_mask &= ~T_RADIO; + + goto register_client; + } + break; + } + } + + /* Initializes only the first TV tuner on this adapter. Why only the + first? 
Because there are some devices (notably the ones with TI + tuners) that have more than one i2c address for the *same* device. + Experience shows that, except for just one case, the first + address is the right one. The exception is a Russian tuner + (ACORP_Y878F). So, the desired behavior is just to enable the + first found TV tuner. */ + tuner_lookup(t->i2c->adapter, &radio, &tv); + if (tv == NULL) { + t->mode_mask = T_ANALOG_TV; + if (radio == NULL) + t->mode_mask |= T_RADIO; + dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask); + } + + /* Should be just before return */ +register_client: +#if defined(CONFIG_MEDIA_CONTROLLER) + t->sd.entity.name = t->name; + /* + * Handle the special case where the tuner has actually + * two stages: the PLL to tune into a frequency and the + * IF-PLL demodulator (tda988x). + */ + if (t->type == TUNER_TDA9887) { + t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG; + ret = media_entity_pads_init(&t->sd.entity, + IF_VID_DEC_PAD_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER; + } else { + t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG; + t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO; + ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_TUNER; + } + + if (ret < 0) { + pr_err("failed to initialize media entity!\n"); + kfree(t); + return ret; + } +#endif + /* Sets a default mode */ + if (t->mode_mask & T_ANALOG_TV) + t->mode = V4L2_TUNER_ANALOG_TV; + else + t->mode = V4L2_TUNER_RADIO; + set_type(client, t->type, 
t->mode_mask, t->config, t->fe.callback); + list_add_tail(&t->list, &tuner_list); + + pr_info("Tuner %d found with type(s)%s%s.\n", + t->type, + t->mode_mask & T_RADIO ? " Radio" : "", + t->mode_mask & T_ANALOG_TV ? " TV" : ""); + return 0; +} + +/** + * tuner_remove - detaches a tuner + * + * @client: i2c_client descriptor + */ + +static void tuner_remove(struct i2c_client *client) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(client)); + + v4l2_device_unregister_subdev(&t->sd); + tuner_detach(&t->fe); + t->fe.analog_demod_priv = NULL; + + list_del(&t->list); + kfree(t); +} + +/* + * Functions to switch between Radio and TV + * + * A few cards have a separate I2C tuner for radio. Those routines + * take care of switching between TV/Radio mode, filtering only the + * commands that apply to the Radio or TV tuner. + */ + +/** + * check_mode - Verify if tuner supports the requested mode + * @t: a pointer to the module's internal struct_tuner + * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type. + * + * This function checks if the tuner is capable of tuning analog TV, + * digital TV or radio, depending on what the caller wants. If the + * tuner can't support that mode, it returns -EINVAL. Otherwise, it + * returns 0. + * This function is needed for boards that have a separate tuner for + * radio (like devices with tea5767). + * + * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to + * select a TV frequency. So, t_mode = T_ANALOG_TV could actually + * be used to represent a Digital TV too. + */ +static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode) +{ + int t_mode; + if (mode == V4L2_TUNER_RADIO) + t_mode = T_RADIO; + else + t_mode = T_ANALOG_TV; + + if ((t_mode & t->mode_mask) == 0) + return -EINVAL; + + return 0; +} + +/** + * set_mode - Switch tuner to other mode. 
+ * @t: a pointer to the module's internal struct_tuner + * @mode: enum v4l2_type (radio or TV) + * + * If tuner doesn't support the needed mode (radio or TV), prints a + * debug message and returns -EINVAL, changing its state to standby. + * Otherwise, changes the mode and returns 0. + */ +static int set_mode(struct tuner *t, enum v4l2_tuner_type mode) +{ + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (mode != t->mode) { + if (check_mode(t, mode) == -EINVAL) { + dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n", + mode); + t->standby = true; + if (analog_ops->standby) + analog_ops->standby(&t->fe); + return -EINVAL; + } + t->mode = mode; + dprintk("Changing to mode %d\n", mode); + } + return 0; +} + +/** + * set_freq - Set the tuner to the desired frequency. + * @t: a pointer to the module's internal struct_tuner + * @freq: frequency to set (0 means to use the current frequency) + */ +static void set_freq(struct tuner *t, unsigned int freq) +{ + struct i2c_client *client = v4l2_get_subdevdata(&t->sd); + + if (t->mode == V4L2_TUNER_RADIO) { + if (!freq) + freq = t->radio_freq; + set_radio_freq(client, freq); + } else { + if (!freq) + freq = t->tv_freq; + set_tv_freq(client, freq); + } +} + +/* + * Functions that are specific for TV mode + */ + +/** + * set_tv_freq - Set tuner frequency, freq in Units of 62.5 kHz = 1/16MHz + * + * @c: i2c_client descriptor + * @freq: frequency + */ +static void set_tv_freq(struct i2c_client *c, unsigned int freq) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + struct analog_parameters params = { + .mode = t->mode, + .audmode = t->audmode, + .std = t->std + }; + + if (t->type == UNSET) { + pr_warn("tuner type not set\n"); + return; + } + if (NULL == analog_ops->set_params) { + pr_warn("Tuner has no way to set tv freq\n"); + return; + } + if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) { + dprintk("TV freq 
(%d.%02d) out of range (%d-%d)\n", + freq / 16, freq % 16 * 100 / 16, tv_range[0], + tv_range[1]); + /* V4L2 spec: if the freq is not possible then the closest + possible value should be selected */ + if (freq < tv_range[0] * 16) + freq = tv_range[0] * 16; + else + freq = tv_range[1] * 16; + } + params.frequency = freq; + dprintk("tv freq set to %d.%02d\n", + freq / 16, freq % 16 * 100 / 16); + t->tv_freq = freq; + t->standby = false; + + analog_ops->set_params(&t->fe, &params); +} + +/** + * tuner_fixup_std - force a given video standard variant + * + * @t: tuner internal struct + * @std: TV standard + * + * A few devices or drivers have problem to detect some standard variations. + * On other operational systems, the drivers generally have a per-country + * code, and some logic to apply per-country hacks. V4L2 API doesn't provide + * such hacks. Instead, it relies on a proper video standard selection from + * the userspace application. However, as some apps are buggy, not allowing + * to distinguish all video standard variations, a modprobe parameter can + * be used to force a video standard match.
+ */ +static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std) +{ + if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) { + switch (pal[0]) { + case '6': + return V4L2_STD_PAL_60; + case 'b': + case 'B': + case 'g': + case 'G': + return V4L2_STD_PAL_BG; + case 'i': + case 'I': + return V4L2_STD_PAL_I; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_PAL_DK; + case 'M': + case 'm': + return V4L2_STD_PAL_M; + case 'N': + case 'n': + if (pal[1] == 'c' || pal[1] == 'C') + return V4L2_STD_PAL_Nc; + return V4L2_STD_PAL_N; + default: + pr_warn("pal= argument not recognised\n"); + break; + } + } + if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) { + switch (secam[0]) { + case 'b': + case 'B': + case 'g': + case 'G': + case 'h': + case 'H': + return V4L2_STD_SECAM_B | + V4L2_STD_SECAM_G | + V4L2_STD_SECAM_H; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_SECAM_DK; + case 'l': + case 'L': + if ((secam[1] == 'C') || (secam[1] == 'c')) + return V4L2_STD_SECAM_LC; + return V4L2_STD_SECAM_L; + default: + pr_warn("secam= argument not recognised\n"); + break; + } + } + + if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) { + switch (ntsc[0]) { + case 'm': + case 'M': + return V4L2_STD_NTSC_M; + case 'j': + case 'J': + return V4L2_STD_NTSC_M_JP; + case 'k': + case 'K': + return V4L2_STD_NTSC_M_KR; + default: + pr_info("ntsc= argument not recognised\n"); + break; + } + } + return std; +} + +/* + * Functions that are specific for Radio mode + */ + +/** + * set_radio_freq - Set tuner frequency, freq in Units of 62.5 Hz = 1/16kHz + * + * @c: i2c_client descriptor + * @freq: frequency + */ +static void set_radio_freq(struct i2c_client *c, unsigned int freq) +{ + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + struct analog_parameters params = { + .mode = t->mode, + .audmode = t->audmode, + .std = t->std + }; + + if (t->type == UNSET) { + 
pr_warn("tuner type not set\n"); + return; + } + if (NULL == analog_ops->set_params) { + pr_warn("tuner has no way to set radio frequency\n"); + return; + } + if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) { + dprintk("radio freq (%d.%02d) out of range (%d-%d)\n", + freq / 16000, freq % 16000 * 100 / 16000, + radio_range[0], radio_range[1]); + /* V4L2 spec: if the freq is not possible then the closest + possible value should be selected */ + if (freq < radio_range[0] * 16000) + freq = radio_range[0] * 16000; + else + freq = radio_range[1] * 16000; + } + params.frequency = freq; + dprintk("radio freq set to %d.%02d\n", + freq / 16000, freq % 16000 * 100 / 16000); + t->radio_freq = freq; + t->standby = false; + + analog_ops->set_params(&t->fe, &params); + /* + * The tuner driver might decide to change the audmode if it only + * supports stereo, so update t->audmode. + */ + t->audmode = params.audmode; +} + +/* + * Debug function for reporting tuner status to userspace + */ + +/** + * tuner_status - Dumps the current tuner status at dmesg + * @fe: pointer to struct dvb_frontend + * + * This callback is used only for driver debug purposes, answering to + * VIDIOC_LOG_STATUS. No changes should happen on this call.
+ */ +static void tuner_status(struct dvb_frontend *fe) +{ + struct tuner *t = fe->analog_demod_priv; + unsigned long freq, freq_fraction; + struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; + struct analog_demod_ops *analog_ops = &fe->ops.analog_ops; + const char *p; + + switch (t->mode) { + case V4L2_TUNER_RADIO: + p = "radio"; + break; + case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */ + p = "digital TV"; + break; + case V4L2_TUNER_ANALOG_TV: + default: + p = "analog TV"; + break; + } + if (t->mode == V4L2_TUNER_RADIO) { + freq = t->radio_freq / 16000; + freq_fraction = (t->radio_freq % 16000) * 100 / 16000; + } else { + freq = t->tv_freq / 16; + freq_fraction = (t->tv_freq % 16) * 100 / 16; + } + pr_info("Tuner mode: %s%s\n", p, + t->standby ? " on standby mode" : ""); + pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction); + pr_info("Standard: 0x%08lx\n", (unsigned long)t->std); + if (t->mode != V4L2_TUNER_RADIO) + return; + if (fe_tuner_ops->get_status) { + u32 tuner_status = 0; + + fe_tuner_ops->get_status(&t->fe, &tuner_status); + if (tuner_status & TUNER_STATUS_LOCKED) + pr_info("Tuner is locked.\n"); + if (tuner_status & TUNER_STATUS_STEREO) + pr_info("Stereo: yes\n"); + } + if (analog_ops->has_signal) { + u16 signal; + + if (!analog_ops->has_signal(fe, &signal)) + pr_info("Signal strength: %hu\n", signal); + } +} + +/* + * Function to splicitly change mode to radio. 
Probably not needed anymore + */ + +static int tuner_s_radio(struct v4l2_subdev *sd) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, V4L2_TUNER_RADIO) == 0) + set_freq(t, 0); + return 0; +} + +/* + * Tuner callbacks to handle userspace ioctl's + */ + +/** + * tuner_standby - places the tuner in standby mode + * @sd: pointer to struct v4l2_subdev + */ +static int tuner_standby(struct v4l2_subdev *sd) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + dprintk("Putting tuner to sleep\n"); + t->standby = true; + if (analog_ops->standby) + analog_ops->standby(&t->fe); + return 0; +} + +static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, V4L2_TUNER_ANALOG_TV)) + return 0; + + t->std = tuner_fixup_std(t, std); + if (t->std != std) + dprintk("Fixup standard %llx to %llx\n", std, t->std); + set_freq(t, 0); + return 0; +} + +static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, f->type) == 0) + set_freq(t, f->frequency); + return 0; +} + +/** + * tuner_g_frequency - Get the tuned frequency for the tuner + * @sd: pointer to struct v4l2_subdev + * @f: pointer to struct v4l2_frequency + * + * At return, the structure f will be filled with tuner frequency + * if the tuner matches the f->type. + * Note: f->type should be initialized before calling it. + * This is done by either video_ioctl2 or by the bridge driver. + */ +static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f) +{ + struct tuner *t = to_tuner(sd); + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + + if (check_mode(t, f->type) == -EINVAL) + return 0; + if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) { + u32 abs_freq; + + fe_tuner_ops->get_frequency(&t->fe, &abs_freq); + f->frequency = (V4L2_TUNER_RADIO == t->mode) ? 
+ DIV_ROUND_CLOSEST(abs_freq * 2, 125) : + DIV_ROUND_CLOSEST(abs_freq, 62500); + } else { + f->frequency = (V4L2_TUNER_RADIO == f->type) ? + t->radio_freq : t->tv_freq; + } + return 0; +} + +/** + * tuner_g_tuner - Fill in tuner information + * @sd: pointer to struct v4l2_subdev + * @vt: pointer to struct v4l2_tuner + * + * At return, the structure vt will be filled with tuner information + * if the tuner matches vt->type. + * Note: vt->type should be initialized before calling it. + * This is done by either video_ioctl2 or by the bridge driver. + */ +static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + + if (check_mode(t, vt->type) == -EINVAL) + return 0; + if (vt->type == t->mode && analog_ops->get_afc) + analog_ops->get_afc(&t->fe, &vt->afc); + if (vt->type == t->mode && analog_ops->has_signal) { + u16 signal = (u16)vt->signal; + + if (!analog_ops->has_signal(&t->fe, &signal)) + vt->signal = signal; + } + if (vt->type != V4L2_TUNER_RADIO) { + vt->capability |= V4L2_TUNER_CAP_NORM; + vt->rangelow = tv_range[0] * 16; + vt->rangehigh = tv_range[1] * 16; + return 0; + } + + /* radio mode */ + if (vt->type == t->mode) { + vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; + if (fe_tuner_ops->get_status) { + u32 tuner_status = 0; + + fe_tuner_ops->get_status(&t->fe, &tuner_status); + vt->rxsubchans = + (tuner_status & TUNER_STATUS_STEREO) ? + V4L2_TUNER_SUB_STEREO : + V4L2_TUNER_SUB_MONO; + } + vt->audmode = t->audmode; + } + vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; + vt->rangelow = radio_range[0] * 16000; + vt->rangehigh = radio_range[1] * 16000; + + return 0; +} + +/** + * tuner_s_tuner - Set the tuner's audio mode + * @sd: pointer to struct v4l2_subdev + * @vt: pointer to struct v4l2_tuner + * + * Sets the audio mode if the tuner matches vt->type. 
+ * Note: vt->type should be initialized before calling it. + * This is done by either video_ioctl2 or by the bridge driver. + */ +static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) +{ + struct tuner *t = to_tuner(sd); + + if (set_mode(t, vt->type)) + return 0; + + if (t->mode == V4L2_TUNER_RADIO) { + t->audmode = vt->audmode; + /* + * For radio audmode can only be mono or stereo. Map any + * other values to stereo. The actual tuner driver that is + * called in set_radio_freq can decide to limit the audmode to + * mono if only mono is supported. + */ + if (t->audmode != V4L2_TUNER_MODE_MONO && + t->audmode != V4L2_TUNER_MODE_STEREO) + t->audmode = V4L2_TUNER_MODE_STEREO; + } + set_freq(t, 0); + + return 0; +} + +static int tuner_log_status(struct v4l2_subdev *sd) +{ + struct tuner *t = to_tuner(sd); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + if (analog_ops->tuner_status) + analog_ops->tuner_status(&t->fe); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tuner_suspend(struct device *dev) +{ + struct i2c_client *c = to_i2c_client(dev); + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; + + dprintk("suspend\n"); + + if (t->fe.ops.tuner_ops.suspend) + t->fe.ops.tuner_ops.suspend(&t->fe); + else if (!t->standby && analog_ops->standby) + analog_ops->standby(&t->fe); + + return 0; +} + +static int tuner_resume(struct device *dev) +{ + struct i2c_client *c = to_i2c_client(dev); + struct tuner *t = to_tuner(i2c_get_clientdata(c)); + + dprintk("resume\n"); + + if (t->fe.ops.tuner_ops.resume) + t->fe.ops.tuner_ops.resume(&t->fe); + else if (!t->standby) + if (set_mode(t, t->mode) == 0) + set_freq(t, 0); + + return 0; +} +#endif + +static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + + /* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have + to handle it here. 
+ There must be a better way of doing this... */ + switch (cmd) { + case TUNER_SET_CONFIG: + return tuner_s_config(sd, arg); + } + return -ENOIOCTLCMD; +} + +/* + * Callback structs + */ + +static const struct v4l2_subdev_core_ops tuner_core_ops = { + .log_status = tuner_log_status, +}; + +static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = { + .standby = tuner_standby, + .s_radio = tuner_s_radio, + .g_tuner = tuner_g_tuner, + .s_tuner = tuner_s_tuner, + .s_frequency = tuner_s_frequency, + .g_frequency = tuner_g_frequency, + .s_type_addr = tuner_s_type_addr, + .s_config = tuner_s_config, +}; + +static const struct v4l2_subdev_video_ops tuner_video_ops = { + .s_std = tuner_s_std, +}; + +static const struct v4l2_subdev_ops tuner_ops = { + .core = &tuner_core_ops, + .tuner = &tuner_tuner_ops, + .video = &tuner_video_ops, +}; + +/* + * I2C structs and module init functions + */ + +static const struct dev_pm_ops tuner_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume) +}; + +static const struct i2c_device_id tuner_id[] = { + { "tuner", }, /* autodetect */ + { } +}; +MODULE_DEVICE_TABLE(i2c, tuner_id); + +static struct i2c_driver tuner_driver = { + .driver = { + .name = "tuner", + .pm = &tuner_pm_ops, + }, + .probe = tuner_probe, + .remove = tuner_remove, + .command = tuner_command, + .id_table = tuner_id, +}; + +module_i2c_driver(tuner_driver); + +MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners"); +MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer"); +MODULE_LICENSE("GPL"); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-async.c b/6.18.0/drivers/media/v4l2-core/v4l2-async.c new file mode 100644 index 0000000..ee884a8 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-async.c @@ -0,0 +1,976 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 asynchronous subdevice registration API + * + * Copyright (C) 2012-2013, Guennadi Liakhovetski + */ + +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "v4l2-subdev-priv.h" + +static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc) +{ + if (!n->ops || !n->ops->bound) + return 0; + + return n->ops->bound(n, subdev, asc); +} + +static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc) +{ + if (!n->ops || !n->ops->unbind) + return; + + n->ops->unbind(n, subdev, asc); +} + +static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n) +{ + if (!n->ops || !n->ops->complete) + return 0; + + return n->ops->complete(n); +} + +static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n, + struct v4l2_async_connection *asc) +{ + if (!n->ops || !n->ops->destroy) + return; + + n->ops->destroy(asc); +} + +static bool match_i2c(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_match_desc *match) +{ +#if IS_ENABLED(CONFIG_I2C) + struct i2c_client *client = i2c_verify_client(sd->dev); + + return client && + match->i2c.adapter_id == client->adapter->nr && + match->i2c.address == client->addr; +#else + return false; +#endif +} + +static struct device *notifier_dev(struct v4l2_async_notifier *notifier) +{ + if (notifier->sd) + return notifier->sd->dev; + + if (notifier->v4l2_dev) + return notifier->v4l2_dev->dev; + + return NULL; +} + +static bool +match_fwnode_one(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode, + struct v4l2_async_match_desc *match) +{ + struct fwnode_handle *asd_dev_fwnode; + bool ret; + + dev_dbg(notifier_dev(notifier), + "v4l2-async: fwnode match: need %pfw, trying %pfw\n", + sd_fwnode, match->fwnode); + + if (sd_fwnode == match->fwnode) { + dev_dbg(notifier_dev(notifier), + "v4l2-async: direct match found\n"); + return true; + } + + if 
(!fwnode_graph_is_endpoint(match->fwnode)) { + dev_dbg(notifier_dev(notifier), + "v4l2-async: direct match not found\n"); + return false; + } + + asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode); + + ret = sd_fwnode == asd_dev_fwnode; + + fwnode_handle_put(asd_dev_fwnode); + + dev_dbg(notifier_dev(notifier), + "v4l2-async: device--endpoint match %sfound\n", + ret ? "" : "not "); + + return ret; +} + +static bool match_fwnode(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_match_desc *match) +{ + dev_dbg(notifier_dev(notifier), + "v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n", + dev_fwnode(notifier_dev(notifier)), sd->fwnode); + + if (!list_empty(&sd->async_subdev_endpoint_list)) { + struct v4l2_async_subdev_endpoint *ase; + + dev_dbg(sd->dev, + "v4l2-async: endpoint fwnode list available, looking for %pfw\n", + match->fwnode); + + list_for_each_entry(ase, &sd->async_subdev_endpoint_list, + async_subdev_endpoint_entry) { + bool matched = ase->endpoint == match->fwnode; + + dev_dbg(sd->dev, + "v4l2-async: endpoint-endpoint match %sfound with %pfw\n", + matched ? "" : "not ", ase->endpoint); + + if (matched) + return true; + } + + dev_dbg(sd->dev, "async: no endpoint matched\n"); + + return false; + } + + if (match_fwnode_one(notifier, sd, sd->fwnode, match)) + return true; + + /* Also check the secondary fwnode. 
*/ + if (IS_ERR_OR_NULL(sd->fwnode->secondary)) + return false; + + dev_dbg(notifier_dev(notifier), + "v4l2-async: trying secondary fwnode match\n"); + + return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match); +} + +static LIST_HEAD(subdev_list); +static LIST_HEAD(notifier_list); +static DEFINE_MUTEX(list_lock); + +static struct v4l2_async_connection * +v4l2_async_find_match(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd) +{ + bool (*match)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_match_desc *match); + struct v4l2_async_connection *asc; + + list_for_each_entry(asc, &notifier->waiting_list, asc_entry) { + /* bus_type has been verified valid before */ + switch (asc->match.type) { + case V4L2_ASYNC_MATCH_TYPE_I2C: + match = match_i2c; + break; + case V4L2_ASYNC_MATCH_TYPE_FWNODE: + match = match_fwnode; + break; + default: + /* Cannot happen, unless someone breaks us */ + WARN_ON(true); + return NULL; + } + + /* match cannot be NULL here */ + if (match(notifier, sd, &asc->match)) + return asc; + } + + return NULL; +} + +/* Compare two async match descriptors for equivalence */ +static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1, + struct v4l2_async_match_desc *match2) +{ + if (match1->type != match2->type) + return false; + + switch (match1->type) { + case V4L2_ASYNC_MATCH_TYPE_I2C: + return match1->i2c.adapter_id == match2->i2c.adapter_id && + match1->i2c.address == match2->i2c.address; + case V4L2_ASYNC_MATCH_TYPE_FWNODE: + return match1->fwnode == match2->fwnode; + default: + break; + } + + return false; +} + +/* Find the sub-device notifier registered by a sub-device driver. */ +static struct v4l2_async_notifier * +v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd) +{ + struct v4l2_async_notifier *n; + + list_for_each_entry(n, &notifier_list, notifier_entry) + if (n->sd == sd) + return n; + + return NULL; +} + +/* Get v4l2_device related to the notifier if one can be found.
*/ +static struct v4l2_device * +v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier) +{ + while (notifier->parent) + notifier = notifier->parent; + + return notifier->v4l2_dev; +} + +/* + * Return true if all child sub-device notifiers are complete, false otherwise. + */ +static bool +v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_connection *asc; + + if (!list_empty(&notifier->waiting_list)) + return false; + + list_for_each_entry(asc, &notifier->done_list, asc_entry) { + struct v4l2_async_notifier *subdev_notifier = + v4l2_async_find_subdev_notifier(asc->sd); + + if (subdev_notifier && + !v4l2_async_nf_can_complete(subdev_notifier)) + return false; + } + + return true; +} + +/* + * Complete the master notifier if possible. This is done when all async + * sub-devices have been bound; v4l2_device is also available then. + */ +static int +v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_notifier *__notifier = notifier; + + /* Quick check whether there are still more sub-devices here. */ + if (!list_empty(&notifier->waiting_list)) + return 0; + + if (notifier->sd) + dev_dbg(notifier_dev(notifier), + "v4l2-async: trying to complete\n"); + + /* Check the entire notifier tree; find the root notifier first. */ + while (notifier->parent) + notifier = notifier->parent; + + /* This is root if it has v4l2_dev. */ + if (!notifier->v4l2_dev) { + dev_dbg(notifier_dev(__notifier), + "v4l2-async: V4L2 device not available\n"); + return 0; + } + + /* Is everything ready?
*/ + if (!v4l2_async_nf_can_complete(notifier)) + return 0; + + dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n"); + + return v4l2_async_nf_call_complete(notifier); +} + +static int +v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier); + +static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n, + struct v4l2_subdev *sd) +{ +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) + struct media_link *link; + + if (sd->entity.function != MEDIA_ENT_F_LENS && + sd->entity.function != MEDIA_ENT_F_FLASH) + return 0; + + if (!n->sd) { + dev_warn(notifier_dev(n), + "not a sub-device notifier, not creating an ancillary link for %s!\n", + dev_name(sd->dev)); + return 0; + } + + link = media_create_ancillary_link(&n->sd->entity, &sd->entity); + + return IS_ERR(link) ? PTR_ERR(link) : 0; +#else + return 0; +#endif +} + +static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct v4l2_async_connection *asc) +{ + struct v4l2_async_notifier *subdev_notifier; + bool registered = false; + int ret; + + if (list_empty(&sd->asc_list)) { + ret = __v4l2_device_register_subdev(v4l2_dev, sd, sd->owner); + if (ret < 0) + return ret; + registered = true; + } + + ret = v4l2_async_nf_call_bound(notifier, sd, asc); + if (ret < 0) { + if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) + dev_dbg(notifier_dev(notifier), + "failed binding %pfw (%d)\n", + asc->match.fwnode, ret); + goto err_unregister_subdev; + } + + if (registered) { + /* + * Depending of the function of the entities involved, we may + * want to create links between them (for example between a + * sensor and its lens or between a sensor's source pad and the + * connected device's sink pad). 
+ */ + ret = v4l2_async_create_ancillary_links(notifier, sd); + if (ret) { + if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) + dev_dbg(notifier_dev(notifier), + "failed creating links for %pfw (%d)\n", + asc->match.fwnode, ret); + goto err_call_unbind; + } + } + + list_add(&asc->asc_subdev_entry, &sd->asc_list); + asc->sd = sd; + + /* Move from the waiting list to notifier's done */ + list_move(&asc->asc_entry, &notifier->done_list); + + dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n", + dev_name(sd->dev), ret); + + /* + * See if the sub-device has a notifier. If not, return here. + */ + subdev_notifier = v4l2_async_find_subdev_notifier(sd); + if (!subdev_notifier || subdev_notifier->parent) + return 0; + + /* + * Proceed with checking for the sub-device notifier's async + * sub-devices, and return the result. The error will be handled by the + * caller. + */ + subdev_notifier->parent = notifier; + + return v4l2_async_nf_try_all_subdevs(subdev_notifier); + +err_call_unbind: + v4l2_async_nf_call_unbind(notifier, sd, asc); + list_del(&asc->asc_subdev_entry); + +err_unregister_subdev: + if (registered) + v4l2_device_unregister_subdev(sd); + + return ret; +} + +/* Test all async sub-devices in a notifier for a match.
*/ +static int +v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier) +{ + struct v4l2_device *v4l2_dev = + v4l2_async_nf_find_v4l2_dev(notifier); + struct v4l2_subdev *sd; + + if (!v4l2_dev) + return 0; + + dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n"); + +again: + list_for_each_entry(sd, &subdev_list, async_list) { + struct v4l2_async_connection *asc; + int ret; + + asc = v4l2_async_find_match(notifier, sd); + if (!asc) + continue; + + dev_dbg(notifier_dev(notifier), + "v4l2-async: match found, subdev %s\n", sd->name); + + ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc); + if (ret < 0) + return ret; + + /* + * v4l2_async_match_notify() may lead to registering a + * new notifier and thus changing the async subdevs + * list. In order to proceed safely from here, restart + * parsing the list from the beginning. + */ + goto again; + } + + return 0; +} + +static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier, + struct v4l2_async_connection *asc) +{ + list_move_tail(&asc->asc_entry, &notifier->waiting_list); + if (list_is_singular(&asc->asc_subdev_entry)) { + v4l2_async_nf_call_unbind(notifier, asc->sd, asc); + v4l2_device_unregister_subdev(asc->sd); + asc->sd = NULL; + } + list_del(&asc->asc_subdev_entry); +} + +/* Unbind all sub-devices in the notifier tree. */ +static void +v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_connection *asc, *asc_tmp; + + list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list, + asc_entry) { + struct v4l2_async_notifier *subdev_notifier = + v4l2_async_find_subdev_notifier(asc->sd); + + if (subdev_notifier) + v4l2_async_nf_unbind_all_subdevs(subdev_notifier); + + v4l2_async_unbind_subdev_one(notifier, asc); + } + + notifier->parent = NULL; +} + +/* See if an async sub-device can be found in a notifier's lists.
*/ +static bool +v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier, + struct v4l2_async_match_desc *match) +{ + struct v4l2_async_connection *asc; + + list_for_each_entry(asc, &notifier->waiting_list, asc_entry) + if (v4l2_async_match_equal(&asc->match, match)) + return true; + + list_for_each_entry(asc, &notifier->done_list, asc_entry) + if (v4l2_async_match_equal(&asc->match, match)) + return true; + + return false; +} + +/* + * Find out whether an async sub-device was set up already or whether it exists + * in a given notifier. + */ +static bool +v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier, + struct v4l2_async_match_desc *match) +{ + struct list_head *heads[] = { + &notifier->waiting_list, + &notifier->done_list, + }; + unsigned int i; + + lockdep_assert_held(&list_lock); + + /* Check that an asd is not being added more than once. */ + for (i = 0; i < ARRAY_SIZE(heads); i++) { + struct v4l2_async_connection *asc; + + list_for_each_entry(asc, heads[i], asc_entry) { + if (&asc->match == match) + continue; + if (v4l2_async_match_equal(&asc->match, match)) + return true; + } + } + + /* Check that an asc does not exist in other notifiers.
*/ + list_for_each_entry(notifier, &notifier_list, notifier_entry) + if (v4l2_async_nf_has_async_match_entry(notifier, match)) + return true; + + return false; +} + +static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier, + struct v4l2_async_match_desc *match) +{ + struct device *dev = notifier_dev(notifier); + + switch (match->type) { + case V4L2_ASYNC_MATCH_TYPE_I2C: + case V4L2_ASYNC_MATCH_TYPE_FWNODE: + if (v4l2_async_nf_has_async_match(notifier, match)) { + dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n"); + return -EEXIST; + } + break; + default: + dev_err(dev, "v4l2-async: Invalid match type %u on %p\n", + match->type, match); + return -EINVAL; + } + + return 0; +} + +void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev) +{ + INIT_LIST_HEAD(&notifier->waiting_list); + INIT_LIST_HEAD(&notifier->done_list); + INIT_LIST_HEAD(&notifier->notifier_entry); + notifier->v4l2_dev = v4l2_dev; +} +EXPORT_SYMBOL(v4l2_async_nf_init); + +void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd) +{ + INIT_LIST_HEAD(&notifier->waiting_list); + INIT_LIST_HEAD(&notifier->done_list); + INIT_LIST_HEAD(&notifier->notifier_entry); + notifier->sd = sd; +} +EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init); + +static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_connection *asc; + int ret; + + mutex_lock(&list_lock); + + list_for_each_entry(asc, &notifier->waiting_list, asc_entry) { + ret = v4l2_async_nf_match_valid(notifier, &asc->match); + if (ret) + goto err_unlock; + } + + ret = v4l2_async_nf_try_all_subdevs(notifier); + if (ret < 0) + goto err_unbind; + + ret = v4l2_async_nf_try_complete(notifier); + if (ret < 0) + goto err_unbind; + + /* Keep also completed notifiers on the list */ + list_add(&notifier->notifier_entry, &notifier_list); + + mutex_unlock(&list_lock); + + return 0; + +err_unbind: + /* + * On failure, unbind all sub-devices registered
through this notifier. + */ + v4l2_async_nf_unbind_all_subdevs(notifier); + +err_unlock: + mutex_unlock(&list_lock); + + return ret; +} + +int v4l2_async_nf_register(struct v4l2_async_notifier *notifier) +{ + if (WARN_ON(!notifier->v4l2_dev == !notifier->sd)) + return -EINVAL; + + return __v4l2_async_nf_register(notifier); +} +EXPORT_SYMBOL(v4l2_async_nf_register); + +static void +__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) +{ + if (!notifier || (!notifier->v4l2_dev && !notifier->sd)) + return; + + v4l2_async_nf_unbind_all_subdevs(notifier); + + list_del_init(&notifier->notifier_entry); +} + +void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) +{ + mutex_lock(&list_lock); + + __v4l2_async_nf_unregister(notifier); + + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(v4l2_async_nf_unregister); + +static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier) +{ + struct v4l2_async_connection *asc, *tmp; + + if (!notifier || !notifier->waiting_list.next) + return; + + WARN_ON(!list_empty(&notifier->done_list)); + + list_for_each_entry_safe(asc, tmp, &notifier->waiting_list, asc_entry) { + list_del(&asc->asc_entry); + v4l2_async_nf_call_destroy(notifier, asc); + + if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) + fwnode_handle_put(asc->match.fwnode); + + kfree(asc); + } + + notifier->sd = NULL; + notifier->v4l2_dev = NULL; +} + +void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier) +{ + mutex_lock(&list_lock); + + __v4l2_async_nf_cleanup(notifier); + + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup); + +static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier, + struct v4l2_async_connection *asc) +{ + mutex_lock(&list_lock); + + list_add_tail(&asc->asc_entry, &notifier->waiting_list); + + mutex_unlock(&list_lock); +} + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier, + struct fwnode_handle *fwnode, + unsigned int asc_struct_size)
+{ + struct v4l2_async_connection *asc; + + asc = kzalloc(asc_struct_size, GFP_KERNEL); + if (!asc) + return ERR_PTR(-ENOMEM); + + asc->notifier = notifier; + asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE; + asc->match.fwnode = fwnode_handle_get(fwnode); + + __v4l2_async_nf_add_connection(notifier, asc); + + return asc; +} +EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode); + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + unsigned int asc_struct_size) +{ + struct v4l2_async_connection *asc; + struct fwnode_handle *remote; + + remote = fwnode_graph_get_remote_endpoint(endpoint); + if (!remote) + return ERR_PTR(-ENOTCONN); + + asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size); + /* + * Calling __v4l2_async_nf_add_fwnode grabs a refcount, + * so drop the one we got in fwnode_graph_get_remote_port_parent. + */ + fwnode_handle_put(remote); + return asc; +} +EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote); + +struct v4l2_async_connection * +__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id, + unsigned short address, unsigned int asc_struct_size) +{ + struct v4l2_async_connection *asc; + + asc = kzalloc(asc_struct_size, GFP_KERNEL); + if (!asc) + return ERR_PTR(-ENOMEM); + + asc->notifier = notifier; + asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C; + asc->match.i2c.adapter_id = adapter_id; + asc->match.i2c.address = address; + + __v4l2_async_nf_add_connection(notifier, asc); + + return asc; +} +EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c); + +int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd, + struct fwnode_handle *fwnode) +{ + struct v4l2_async_subdev_endpoint *ase; + + ase = kmalloc(sizeof(*ase), GFP_KERNEL); + if (!ase) + return -ENOMEM; + + ase->endpoint = fwnode; + list_add(&ase->async_subdev_endpoint_entry, + &sd->async_subdev_endpoint_list); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add); + +struct 
v4l2_async_connection * +v4l2_async_connection_unique(struct v4l2_subdev *sd) +{ + if (!list_is_singular(&sd->asc_list)) + return NULL; + + return list_first_entry(&sd->asc_list, + struct v4l2_async_connection, asc_subdev_entry); +} +EXPORT_SYMBOL_GPL(v4l2_async_connection_unique); + +int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module) +{ + struct v4l2_async_notifier *subdev_notifier; + struct v4l2_async_notifier *notifier; + struct v4l2_async_connection *asc; + int ret; + + INIT_LIST_HEAD(&sd->asc_list); + + /* + * No reference taken. The reference is held by the device (struct + * v4l2_subdev.dev), and async sub-device does not exist independently + * of the device at any point of time. + * + * The async sub-device shall always be registered for its device node, + * not the endpoint node. + */ + if (!sd->fwnode && sd->dev) { + sd->fwnode = dev_fwnode(sd->dev); + } else if (fwnode_graph_is_endpoint(sd->fwnode)) { + dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n"); + return -EINVAL; + } + + sd->owner = module; + + mutex_lock(&list_lock); + + list_for_each_entry(notifier, ¬ifier_list, notifier_entry) { + struct v4l2_device *v4l2_dev = + v4l2_async_nf_find_v4l2_dev(notifier); + + if (!v4l2_dev) + continue; + + while ((asc = v4l2_async_find_match(notifier, sd))) { + ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, + asc); + if (ret) + goto err_unbind; + + ret = v4l2_async_nf_try_complete(notifier); + if (ret) + goto err_unbind; + } + } + + /* None matched, wait for hot-plugging */ + list_add(&sd->async_list, &subdev_list); + + mutex_unlock(&list_lock); + + return 0; + +err_unbind: + /* + * Complete failed. Unbind the sub-devices bound through registering + * this async sub-device. 
+ */ + subdev_notifier = v4l2_async_find_subdev_notifier(sd); + if (subdev_notifier) + v4l2_async_nf_unbind_all_subdevs(subdev_notifier); + + if (asc) + v4l2_async_unbind_subdev_one(notifier, asc); + + mutex_unlock(&list_lock); + + sd->owner = NULL; + + return ret; +} +EXPORT_SYMBOL(__v4l2_async_register_subdev); + +void v4l2_async_unregister_subdev(struct v4l2_subdev *sd) +{ + struct v4l2_async_connection *asc, *asc_tmp; + + if (!sd->async_list.next) + return; + + v4l2_subdev_put_privacy_led(sd); + + mutex_lock(&list_lock); + + __v4l2_async_nf_unregister(sd->subdev_notifier); + __v4l2_async_nf_cleanup(sd->subdev_notifier); + kfree(sd->subdev_notifier); + sd->subdev_notifier = NULL; + + if (sd->asc_list.next) { + list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list, + asc_subdev_entry) { + v4l2_async_unbind_subdev_one(asc->notifier, asc); + } + } + + list_del(&sd->async_list); + sd->async_list.next = NULL; + + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(v4l2_async_unregister_subdev); + +static void print_waiting_match(struct seq_file *s, + struct v4l2_async_match_desc *match) +{ + switch (match->type) { + case V4L2_ASYNC_MATCH_TYPE_I2C: + seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id, + match->i2c.address); + break; + case V4L2_ASYNC_MATCH_TYPE_FWNODE: { + struct fwnode_handle *devnode, *fwnode = match->fwnode; + + devnode = fwnode_graph_is_endpoint(fwnode) ? + fwnode_graph_get_port_parent(fwnode) : + fwnode_handle_get(fwnode); + + seq_printf(s, " [fwnode] dev=%s, node=%pfw\n", + devnode->dev ? 
dev_name(devnode->dev) : "nil", + fwnode); + + fwnode_handle_put(devnode); + break; + } + } +} + +static const char * +v4l2_async_nf_name(struct v4l2_async_notifier *notifier) +{ + if (notifier->v4l2_dev) + return notifier->v4l2_dev->name; + else if (notifier->sd) + return notifier->sd->name; + else + return "nil"; +} + +static int pending_subdevs_show(struct seq_file *s, void *data) +{ + struct v4l2_async_notifier *notif; + struct v4l2_async_connection *asc; + + mutex_lock(&list_lock); + + list_for_each_entry(notif, ¬ifier_list, notifier_entry) { + seq_printf(s, "%s:\n", v4l2_async_nf_name(notif)); + list_for_each_entry(asc, ¬if->waiting_list, asc_entry) + print_waiting_match(s, &asc->match); + } + + mutex_unlock(&list_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(pending_subdevs); + +static struct dentry *v4l2_async_debugfs_dir; + +static int __init v4l2_async_init(void) +{ + v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL); + debugfs_create_file("pending_async_subdevices", 0444, + v4l2_async_debugfs_dir, NULL, + &pending_subdevs_fops); + + return 0; +} + +static void __exit v4l2_async_exit(void) +{ + debugfs_remove_recursive(v4l2_async_debugfs_dir); +} + +subsys_initcall(v4l2_async_init); +module_exit(v4l2_async_exit); + +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Ezequiel Garcia "); +MODULE_DESCRIPTION("V4L2 asynchronous subdevice registration API"); +MODULE_LICENSE("GPL"); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-cci.c b/6.18.0/drivers/media/v4l2-core/v4l2-cci.c new file mode 100644 index 0000000..e9ecf47 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-cci.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MIPI Camera Control Interface (CCI) register access helpers. 
+ * + * Copyright (C) 2023 Hans de Goede + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +int cci_read(struct regmap *map, u32 reg, u64 *val, int *err) +{ + bool little_endian; + unsigned int len; + u8 buf[8]; + int ret; + + /* + * TODO: Fix smatch. Assign *val to 0 here in order to avoid + * failing a smatch check on caller when the caller proceeds to + * read *val without initialising it on caller's side. *val is set + * to a valid value whenever this function returns 0 but smatch + * can't figure that out currently. + */ + *val = 0; + + if (err && *err) + return *err; + + little_endian = reg & CCI_REG_LE; + len = CCI_REG_WIDTH_BYTES(reg); + reg = CCI_REG_ADDR(reg); + + ret = regmap_bulk_read(map, reg, buf, len); + if (ret) { + dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n", + reg, ret); + goto out; + } + + switch (len) { + case 1: + *val = buf[0]; + break; + case 2: + if (little_endian) + *val = get_unaligned_le16(buf); + else + *val = get_unaligned_be16(buf); + break; + case 3: + if (little_endian) + *val = get_unaligned_le24(buf); + else + *val = get_unaligned_be24(buf); + break; + case 4: + if (little_endian) + *val = get_unaligned_le32(buf); + else + *val = get_unaligned_be32(buf); + break; + case 8: + if (little_endian) + *val = get_unaligned_le64(buf); + else + *val = get_unaligned_be64(buf); + break; + default: + dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n", + len, reg); + ret = -EINVAL; + break; + } + +out: + if (ret && err) + *err = ret; + + return ret; +} +EXPORT_SYMBOL_GPL(cci_read); + +int cci_write(struct regmap *map, u32 reg, u64 val, int *err) +{ + bool little_endian; + unsigned int len; + u8 buf[8]; + int ret; + + if (err && *err) + return *err; + + little_endian = reg & CCI_REG_LE; + len = CCI_REG_WIDTH_BYTES(reg); + reg = CCI_REG_ADDR(reg); + + switch (len) { + case 1: + buf[0] = val; + break; + case 2: + if (little_endian) + put_unaligned_le16(val, 
buf); + else + put_unaligned_be16(val, buf); + break; + case 3: + if (little_endian) + put_unaligned_le24(val, buf); + else + put_unaligned_be24(val, buf); + break; + case 4: + if (little_endian) + put_unaligned_le32(val, buf); + else + put_unaligned_be32(val, buf); + break; + case 8: + if (little_endian) + put_unaligned_le64(val, buf); + else + put_unaligned_be64(val, buf); + break; + default: + dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n", + len, reg); + ret = -EINVAL; + goto out; + } + + ret = regmap_bulk_write(map, reg, buf, len); + if (ret) + dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n", + reg, ret); + +out: + if (ret && err) + *err = ret; + + return ret; +} +EXPORT_SYMBOL_GPL(cci_write); + +int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err) +{ + u64 readval; + int ret; + + ret = cci_read(map, reg, &readval, err); + if (ret) + return ret; + + val = (readval & ~mask) | (val & mask); + + return cci_write(map, reg, val, err); +} +EXPORT_SYMBOL_GPL(cci_update_bits); + +int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs, + unsigned int num_regs, int *err) +{ + unsigned int i; + int ret; + + for (i = 0; i < num_regs; i++) { + ret = cci_write(map, regs[i].reg, regs[i].val, err); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cci_multi_reg_write); + +#if IS_ENABLED(CONFIG_V4L2_CCI_I2C) +struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client, + int reg_addr_bits) +{ + struct regmap_config config = { + .reg_bits = reg_addr_bits, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .disable_locking = true, + }; + + return devm_regmap_init_i2c(client, &config); +} +EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c); +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support"); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-common.c 
b/6.18.0/drivers/media/v4l2-core/v4l2-common.c new file mode 100644 index 0000000..b367d47 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-common.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video for Linux Two + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * This file replaces the videodev.c file that comes with the + * regular kernel distribution. + * + * Author: Bill Dirks + * based on code by Alan Cox, + */ + +/* + * Video capture interface for Linux + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * Author: Alan Cox, + * + * Fixes: + */ + +/* + * Video4linux 1/2 integration by Justin Schoeman + * + * 2.4 PROCFS support ported from 2.4 kernels by + * Iñaki García Etxebarria + * Makefile fix by "W. Michael Petullo" + * 2.4 devfs support ported from 2.4 kernels by + * Dan Merillat + * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + */ + +/* + * Video Standard Operations (contributed by Michael Schimek) + */ + +/* Helper functions for control handling */ + +/* Fill in a struct v4l2_queryctrl */ +int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def) +{ + const char *name; + s64 min = _min; + s64 max = _max; + u64 step = _step; + s64 def = _def; + + v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type, + &min, &max, &step, &def, &qctrl->flags); + + if (name == NULL) + return -EINVAL; + + qctrl->minimum = min; + qctrl->maximum = max; + qctrl->step = step; + qctrl->default_value = def; + qctrl->reserved[0] = qctrl->reserved[1] = 0; + strscpy(qctrl->name, name, 
sizeof(qctrl->name)); + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_query_fill); + +/* Clamp x to be between min and max, aligned to a multiple of 2^align. min + * and max don't have to be aligned, but there must be at least one valid + * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples + * of 16 between 17 and 31. */ +static unsigned int clamp_align(unsigned int x, unsigned int min, + unsigned int max, unsigned int align) +{ + /* Bits that must be zero to be aligned */ + unsigned int mask = ~((1 << align) - 1); + + /* Clamp to aligned min and max */ + x = clamp(x, (min + ~mask) & mask, max & mask); + + /* Round to nearest aligned value */ + if (align) + x = (x + (1 << (align - 1))) & mask; + + return x; +} + +static unsigned int clamp_roundup(unsigned int x, unsigned int min, + unsigned int max, unsigned int alignment) +{ + x = clamp(x, min, max); + if (alignment) + x = round_up(x, alignment); + + return x; +} + +void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, + unsigned int walign, + u32 *h, unsigned int hmin, unsigned int hmax, + unsigned int halign, unsigned int salign) +{ + *w = clamp_align(*w, wmin, wmax, walign); + *h = clamp_align(*h, hmin, hmax, halign); + + /* Usually we don't need to align the size and are done now. */ + if (!salign) + return; + + /* How much alignment do we have? */ + walign = __ffs(*w); + halign = __ffs(*h); + /* Enough to satisfy the image alignment? 
*/ + if (walign + halign < salign) { + /* Max walign where there is still a valid width */ + unsigned int wmaxa = __fls(wmax ^ (wmin - 1)); + /* Max halign where there is still a valid height */ + unsigned int hmaxa = __fls(hmax ^ (hmin - 1)); + + /* up the smaller alignment until we have enough */ + do { + if (halign >= hmaxa || + (walign <= halign && walign < wmaxa)) { + *w = clamp_align(*w, wmin, wmax, walign + 1); + walign = __ffs(*w); + } else { + *h = clamp_align(*h, hmin, hmax, halign + 1); + halign = __ffs(*h); + } + } while (halign + walign < salign); + } +} +EXPORT_SYMBOL_GPL(v4l_bound_align_image); + +const void * +__v4l2_find_nearest_size_conditional(const void *array, size_t array_size, + size_t entry_size, size_t width_offset, + size_t height_offset, s32 width, + s32 height, + bool (*func)(const void *array, + size_t index, + const void *context), + const void *context) +{ + u32 error, min_error = U32_MAX; + const void *best = NULL; + size_t i; + + if (!array) + return NULL; + + for (i = 0; i < array_size; i++, array += entry_size) { + const u32 *entry_width = array + width_offset; + const u32 *entry_height = array + height_offset; + + if (func && !func(array, i, context)) + continue; + + error = abs(*entry_width - width) + abs(*entry_height - height); + if (error > min_error) + continue; + + min_error = error; + best = array; + if (!error) + break; + } + + return best; +} +EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size_conditional); + +int v4l2_g_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a) +{ + struct v4l2_subdev_frame_interval ival = { 0 }; + int ret; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + + if (vdev->device_caps & V4L2_CAP_READWRITE) + a->parm.capture.readbuffers = 2; + if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + ret = v4l2_subdev_call_state_active(sd, pad, 
get_frame_interval, &ival); + if (!ret) + a->parm.capture.timeperframe = ival.interval; + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_g_parm_cap); + +int v4l2_s_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a) +{ + struct v4l2_subdev_frame_interval ival = { + .interval = a->parm.capture.timeperframe + }; + int ret; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + + memset(&a->parm, 0, sizeof(a->parm)); + if (vdev->device_caps & V4L2_CAP_READWRITE) + a->parm.capture.readbuffers = 2; + else + a->parm.capture.readbuffers = 0; + + if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival); + if (!ret) + a->parm.capture.timeperframe = ival.interval; + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_s_parm_cap); + +const struct v4l2_format_info *v4l2_format_info(u32 format) +{ + static const struct v4l2_format_info formats[] = { + /* RGB formats */ + { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc 
= V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB565X, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 
1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* YUV packed formats */ + { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 
2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + + /* YUV planar formats */ + { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV15, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = 
V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV20, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, + { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, + { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, 
.mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* Tiled YUV formats */ + { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }}, + { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + /* YUV planar formats, non contiguous variant */ + { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 
}, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, + + /* Tiled YUV formats, non contiguous variant */ + { .format = V4L2_PIX_FMT_NV12MT, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 64, 32, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, + { .format = V4L2_PIX_FMT_NV12MT_16X16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2, + .block_w = { 16, 8, 0, 0 }, .block_h = { 16, 8, 0, 0 }}, + + /* Bayer RGB formats */ + { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 
}, + { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { 
.format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, 
.vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, 
+ { .format = V4L2_PIX_FMT_SRGGB14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SBGGR16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGBRG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SGRBG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_SRGGB16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + + /* Renesas Camera Data Receiver Unit formats, bayer order agnostic */ + { .format = V4L2_PIX_FMT_RAW_CRU10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 6, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RAW_CRU12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 5, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RAW_CRU14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_RAW_CRU20, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 3, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(formats); ++i) + if (formats[i].format == format) + return &formats[i]; + return NULL; +} +EXPORT_SYMBOL(v4l2_format_info); + +static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane) +{ + if 
(!info->block_w[plane]) + return 1; + return info->block_w[plane]; +} + +static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane) +{ + if (!info->block_h[plane]) + return 1; + return info->block_h[plane]; +} + +static inline unsigned int v4l2_format_plane_stride(const struct v4l2_format_info *info, int plane, + unsigned int width) +{ + unsigned int hdiv = plane ? info->hdiv : 1; + unsigned int aligned_width = + ALIGN(width, v4l2_format_block_width(info, plane)); + + return DIV_ROUND_UP(aligned_width, hdiv) * + info->bpp[plane] / info->bpp_div[plane]; +} + +static inline unsigned int v4l2_format_plane_height(const struct v4l2_format_info *info, int plane, + unsigned int height) +{ + unsigned int vdiv = plane ? info->vdiv : 1; + unsigned int aligned_height = + ALIGN(height, v4l2_format_block_height(info, plane)); + + return DIV_ROUND_UP(aligned_height, vdiv); +} + +static inline unsigned int v4l2_format_plane_size(const struct v4l2_format_info *info, int plane, + unsigned int width, unsigned int height) +{ + return v4l2_format_plane_stride(info, plane, width) * + v4l2_format_plane_height(info, plane, height); +} + +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize) +{ + if (!frmsize) + return; + + /* + * Clamp width/height to meet min/max constraints and round it up to + * macroblock alignment. 
+ */ + *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width, + frmsize->step_width); + *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height, + frmsize->step_height); +} +EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints); + +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, + u32 pixelformat, u32 width, u32 height) +{ + const struct v4l2_format_info *info; + struct v4l2_plane_pix_format *plane; + int i; + + info = v4l2_format_info(pixelformat); + if (!info) + return -EINVAL; + + pixfmt->width = width; + pixfmt->height = height; + pixfmt->pixelformat = pixelformat; + pixfmt->num_planes = info->mem_planes; + + if (info->mem_planes == 1) { + plane = &pixfmt->plane_fmt[0]; + plane->bytesperline = v4l2_format_plane_stride(info, 0, width); + plane->sizeimage = 0; + + for (i = 0; i < info->comp_planes; i++) + plane->sizeimage += + v4l2_format_plane_size(info, i, width, height); + } else { + for (i = 0; i < info->comp_planes; i++) { + plane = &pixfmt->plane_fmt[i]; + plane->bytesperline = + v4l2_format_plane_stride(info, i, width); + plane->sizeimage = plane->bytesperline * + v4l2_format_plane_height(info, i, height); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp); + +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height) +{ + const struct v4l2_format_info *info; + int i; + + info = v4l2_format_info(pixelformat); + if (!info) + return -EINVAL; + + /* Single planar API cannot be used for multi plane formats. 
*/ + if (info->mem_planes > 1) + return -EINVAL; + + pixfmt->width = width; + pixfmt->height = height; + pixfmt->pixelformat = pixelformat; + pixfmt->bytesperline = v4l2_format_plane_stride(info, 0, width); + pixfmt->sizeimage = 0; + + for (i = 0; i < info->comp_planes; i++) + pixfmt->sizeimage += + v4l2_format_plane_size(info, i, width, height); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt); + +#ifdef CONFIG_MEDIA_CONTROLLER +static s64 v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler, + unsigned int mul, unsigned int div) +{ + struct v4l2_ctrl *ctrl; + s64 freq; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ); + if (ctrl) { + struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ }; + int ret; + + qm.index = v4l2_ctrl_g_ctrl(ctrl); + + ret = v4l2_querymenu(handler, &qm); + if (ret) + return -ENOENT; + + freq = qm.value; + } else { + if (!mul || !div) + return -ENOENT; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE); + if (!ctrl) + return -ENOENT; + + freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div); + + pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", + __func__); + pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", + __func__); + } + + return freq > 0 ? freq : -EINVAL; +} + +s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul, + unsigned int div) +{ + struct v4l2_mbus_config mbus_config = {}; + struct v4l2_subdev *sd; + int ret; + + sd = media_entity_to_v4l2_subdev(pad->entity); + ret = v4l2_subdev_call(sd, pad, get_mbus_config, pad->index, + &mbus_config); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + + if (mbus_config.link_freq) + return mbus_config.link_freq; + + /* + * Fall back to using the link frequency control if the media bus config + * doesn't provide a link frequency. 
+ */ + return v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div); +} +EXPORT_SYMBOL_GPL(v4l2_get_link_freq); +#endif + +/* + * Simplify a fraction using a simple continued fraction decomposition. The + * idea here is to convert fractions such as 333333/10000000 to 1/30 using + * 32 bit arithmetic only. The algorithm is not perfect and relies upon two + * arbitrary parameters to remove non-significative terms from the simple + * continued fraction decomposition. Using 8 and 333 for n_terms and threshold + * respectively seems to give nice results. + */ +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold) +{ + u32 *an; + u32 x, y, r; + unsigned int i, n; + + an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL); + if (an == NULL) + return; + + /* + * Convert the fraction to a simple continued fraction. See + * https://en.wikipedia.org/wiki/Continued_fraction + * Stop if the current term is bigger than or equal to the given + * threshold. + */ + x = *numerator; + y = *denominator; + + for (n = 0; n < n_terms && y != 0; ++n) { + an[n] = x / y; + if (an[n] >= threshold) { + if (n < 2) + n++; + break; + } + + r = x - an[n] * y; + x = y; + y = r; + } + + /* Expand the simple continued fraction back to an integer fraction. */ + x = 0; + y = 1; + + for (i = n; i > 0; --i) { + r = y; + y = an[i-1] * y + x; + x = r; + } + + *numerator = y; + *denominator = x; + kfree(an); +} +EXPORT_SYMBOL_GPL(v4l2_simplify_fraction); + +/* + * Convert a fraction to a frame interval in 100ns multiples. The idea here is + * to compute numerator / denominator * 10000000 using 32 bit fixed point + * arithmetic only. + */ +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator) +{ + u32 multiplier; + + /* Saturate the result if the operation would overflow. 
*/ + if (denominator == 0 || + numerator/denominator >= ((u32)-1)/10000000) + return (u32)-1; + + /* + * Divide both the denominator and the multiplier by two until + * numerator * multiplier doesn't overflow. If anyone knows a better + * algorithm please let me know. + */ + multiplier = 10000000; + while (numerator > ((u32)-1)/multiplier) { + multiplier /= 2; + denominator /= 2; + } + + return denominator ? numerator * multiplier / denominator : 0; +} +EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval); + +int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, + unsigned int num_of_fw_link_freqs, + const s64 *driver_link_freqs, + unsigned int num_of_driver_link_freqs, + unsigned long *bitmap) +{ + unsigned int i; + + *bitmap = 0; + + if (!num_of_fw_link_freqs) { + dev_err(dev, "no link frequencies in firmware\n"); + return -ENODATA; + } + + for (i = 0; i < num_of_fw_link_freqs; i++) { + unsigned int j; + + for (j = 0; j < num_of_driver_link_freqs; j++) { + if (fw_link_freqs[i] != driver_link_freqs[j]) + continue; + + dev_dbg(dev, "enabling link frequency %lld Hz\n", + driver_link_freqs[j]); + *bitmap |= BIT(j); + break; + } + } + + if (!*bitmap) { + dev_err(dev, "no matching link frequencies found\n"); + + dev_dbg(dev, "specified in firmware:\n"); + for (i = 0; i < num_of_fw_link_freqs; i++) + dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]); + + dev_dbg(dev, "driver supported:\n"); + for (i = 0; i < num_of_driver_link_freqs; i++) + dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]); + + return -ENOENT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap); + +struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id, + bool legacy, bool fixed_rate, + unsigned long clk_rate) +{ + bool of_node = is_of_node(dev_fwnode(dev)); + const char *clk_id __free(kfree) = NULL; + struct clk_hw *clk_hw; + struct clk *clk; + u32 rate = clk_rate; + int ret = 0; + + clk = devm_clk_get_optional(dev, id); + if (IS_ERR(clk)) + return clk; + + /* 
+ * If the caller didn't request a fixed rate, retrieve it from the + * clock-frequency property. -EINVAL indicates the property is absent, + * and is not a failure. Other errors, or success with a clock-frequency + * value of 0, are hard failures. + */ + if (!fixed_rate || !clk_rate) { + ret = device_property_read_u32(dev, "clock-frequency", &rate); + if ((ret && ret != -EINVAL) || (!ret && !rate)) + return ERR_PTR(-EINVAL); + } + + if (clk) { + /* + * On non-OF platforms, or when legacy behaviour is requested, + * set the clock rate if a rate has been specified by the caller + * or by the clock-frequency property. + */ + if (rate && (!of_node || legacy)) { + ret = clk_set_rate(clk, rate); + if (ret) { + dev_err(dev, "Failed to set clock rate: %u\n", + rate); + return ERR_PTR(ret); + } + } + return clk; + } + + /* + * Register a dummy fixed clock on non-OF platforms or when legacy + * behaviour is requested. This required the common clock framework. + */ + if (!IS_ENABLED(CONFIG_COMMON_CLK) || (of_node && !legacy)) + return ERR_PTR(-ENOENT); + + /* We need a rate to create a clock. */ + if (ret) + return ERR_PTR(ret == -EINVAL ? -EPROBE_DEFER : ret); + + if (!id) { + clk_id = kasprintf(GFP_KERNEL, "clk-%s", dev_name(dev)); + if (!clk_id) + return ERR_PTR(-ENOMEM); + id = clk_id; + } + + clk_hw = devm_clk_hw_register_fixed_rate(dev, id, NULL, 0, rate); + if (IS_ERR(clk_hw)) + return ERR_CAST(clk_hw); + + return clk_hw->clk; +} +EXPORT_SYMBOL_GPL(__devm_v4l2_sensor_clk_get); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/6.18.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c new file mode 100644 index 0000000..2c88e11 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -0,0 +1,1207 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ioctl32.c: Conversion between 32bit and 64bit native ioctls. 
+ * Separated from fs stuff by Arnd Bergmann + * + * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com) + * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs + * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz) + * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be) + * Copyright (C) 2008 Hans Verkuil + * + * These routines maintain argument size conversion between 32bit and 64bit + * ioctls. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Per-ioctl data copy handlers. + * + * Those come in pairs, with a get_v4l2_foo() and a put_v4l2_foo() routine, + * where "v4l2_foo" is the name of the V4L2 struct. + * + * They basically get two __user pointers, one with a 32-bits struct that + * came from the userspace call and a 64-bits struct, also allocated as + * userspace, but filled internally by do_video_ioctl(). + * + * For ioctls that have pointers inside it, the functions will also + * receive an ancillary buffer with extra space, used to pass extra + * data to the routine. 
+ */ + +struct v4l2_window32 { + struct v4l2_rect w; + __u32 field; /* enum v4l2_field */ + __u32 chromakey; + compat_caddr_t clips; /* always NULL */ + __u32 clipcount; /* always 0 */ + compat_caddr_t bitmap; /* always NULL */ + __u8 global_alpha; +}; + +static int get_v4l2_window32(struct v4l2_window *p64, + struct v4l2_window32 __user *p32) +{ + struct v4l2_window32 w32; + + if (copy_from_user(&w32, p32, sizeof(w32))) + return -EFAULT; + + *p64 = (struct v4l2_window) { + .w = w32.w, + .field = w32.field, + .chromakey = w32.chromakey, + .clips = NULL, + .clipcount = 0, + .bitmap = NULL, + .global_alpha = w32.global_alpha, + }; + + return 0; +} + +static int put_v4l2_window32(struct v4l2_window *p64, + struct v4l2_window32 __user *p32) +{ + struct v4l2_window32 w32; + + memset(&w32, 0, sizeof(w32)); + w32 = (struct v4l2_window32) { + .w = p64->w, + .field = p64->field, + .chromakey = p64->chromakey, + .clips = 0, + .clipcount = 0, + .bitmap = 0, + .global_alpha = p64->global_alpha, + }; + + if (copy_to_user(p32, &w32, sizeof(w32))) + return -EFAULT; + + return 0; +} + +struct v4l2_format32 { + __u32 type; /* enum v4l2_buf_type */ + union { + struct v4l2_pix_format pix; + struct v4l2_pix_format_mplane pix_mp; + struct v4l2_window32 win; + struct v4l2_vbi_format vbi; + struct v4l2_sliced_vbi_format sliced; + struct v4l2_sdr_format sdr; + struct v4l2_meta_format meta; + __u8 raw_data[200]; /* user-defined */ + } fmt; +}; + +/** + * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument + * @index: on return, index of the first created buffer + * @count: entry: number of requested buffers, + * return: number of created buffers + * @memory: buffer memory type + * @format: frame format, for which buffers are requested + * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and + * configured for MMAP streaming I/O). 
+ * @max_num_buffers: if V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set + * this field indicate the maximum possible number of buffers + * for this queue. + * @reserved: future extensions + */ +struct v4l2_create_buffers32 { + __u32 index; + __u32 count; + __u32 memory; /* enum v4l2_memory */ + struct v4l2_format32 format; + __u32 capabilities; + __u32 flags; + __u32 max_num_buffers; + __u32 reserved[5]; +}; + +static int get_v4l2_format32(struct v4l2_format *p64, + struct v4l2_format32 __user *p32) +{ + if (get_user(p64->type, &p32->type)) + return -EFAULT; + + switch (p64->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return copy_from_user(&p64->fmt.pix, &p32->fmt.pix, + sizeof(p64->fmt.pix)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return copy_from_user(&p64->fmt.pix_mp, &p32->fmt.pix_mp, + sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return get_v4l2_window32(&p64->fmt.win, &p32->fmt.win); + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + return copy_from_user(&p64->fmt.vbi, &p32->fmt.vbi, + sizeof(p64->fmt.vbi)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return copy_from_user(&p64->fmt.sliced, &p32->fmt.sliced, + sizeof(p64->fmt.sliced)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + return copy_from_user(&p64->fmt.sdr, &p32->fmt.sdr, + sizeof(p64->fmt.sdr)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return copy_from_user(&p64->fmt.meta, &p32->fmt.meta, + sizeof(p64->fmt.meta)) ? 
-EFAULT : 0; + default: + return -EINVAL; + } +} + +static int get_v4l2_create32(struct v4l2_create_buffers *p64, + struct v4l2_create_buffers32 __user *p32) +{ + if (copy_from_user(p64, p32, + offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; + if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags))) + return -EFAULT; + if (copy_from_user(&p64->max_num_buffers, &p32->max_num_buffers, + sizeof(p32->max_num_buffers))) + return -EFAULT; + return get_v4l2_format32(&p64->format, &p32->format); +} + +static int put_v4l2_format32(struct v4l2_format *p64, + struct v4l2_format32 __user *p32) +{ + switch (p64->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return copy_to_user(&p32->fmt.pix, &p64->fmt.pix, + sizeof(p64->fmt.pix)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return copy_to_user(&p32->fmt.pix_mp, &p64->fmt.pix_mp, + sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return put_v4l2_window32(&p64->fmt.win, &p32->fmt.win); + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + return copy_to_user(&p32->fmt.vbi, &p64->fmt.vbi, + sizeof(p64->fmt.vbi)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return copy_to_user(&p32->fmt.sliced, &p64->fmt.sliced, + sizeof(p64->fmt.sliced)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + return copy_to_user(&p32->fmt.sdr, &p64->fmt.sdr, + sizeof(p64->fmt.sdr)) ? -EFAULT : 0; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return copy_to_user(&p32->fmt.meta, &p64->fmt.meta, + sizeof(p64->fmt.meta)) ? 
-EFAULT : 0; + default: + return -EINVAL; + } +} + +static int put_v4l2_create32(struct v4l2_create_buffers *p64, + struct v4l2_create_buffers32 __user *p32) +{ + if (copy_to_user(p32, p64, + offsetof(struct v4l2_create_buffers32, format)) || + put_user(p64->capabilities, &p32->capabilities) || + put_user(p64->flags, &p32->flags) || + put_user(p64->max_num_buffers, &p32->max_num_buffers) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) + return -EFAULT; + return put_v4l2_format32(&p64->format, &p32->format); +} + +struct v4l2_standard32 { + __u32 index; + compat_u64 id; + __u8 name[24]; + struct v4l2_fract frameperiod; /* Frames, not fields */ + __u32 framelines; + __u32 reserved[4]; +}; + +static int get_v4l2_standard32(struct v4l2_standard *p64, + struct v4l2_standard32 __user *p32) +{ + /* other fields are not set by the user, nor used by the driver */ + return get_user(p64->index, &p32->index); +} + +static int put_v4l2_standard32(struct v4l2_standard *p64, + struct v4l2_standard32 __user *p32) +{ + if (put_user(p64->index, &p32->index) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->name, p64->name, sizeof(p32->name)) || + copy_to_user(&p32->frameperiod, &p64->frameperiod, + sizeof(p32->frameperiod)) || + put_user(p64->framelines, &p32->framelines) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} + +struct v4l2_plane32 { + __u32 bytesused; + __u32 length; + union { + __u32 mem_offset; + compat_long_t userptr; + __s32 fd; + } m; + __u32 data_offset; + __u32 reserved[11]; +}; + +/* + * This is correct for all architectures including i386, but not x32, + * which has different alignment requirements for timestamp + */ +struct v4l2_buffer32 { + __u32 index; + __u32 type; /* enum v4l2_buf_type */ + __u32 bytesused; + __u32 flags; + __u32 field; /* enum v4l2_field */ + struct { + compat_s64 tv_sec; + compat_s64 tv_usec; + } timestamp; + struct v4l2_timecode timecode; + __u32 
sequence; + + /* memory location */ + __u32 memory; /* enum v4l2_memory */ + union { + __u32 offset; + compat_long_t userptr; + compat_caddr_t planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + __s32 request_fd; +}; + +#ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_buffer32_time32 { + __u32 index; + __u32 type; /* enum v4l2_buf_type */ + __u32 bytesused; + __u32 flags; + __u32 field; /* enum v4l2_field */ + struct old_timeval32 timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; /* enum v4l2_memory */ + union { + __u32 offset; + compat_long_t userptr; + compat_caddr_t planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + __s32 request_fd; +}; +#endif + +static int get_v4l2_plane32(struct v4l2_plane *p64, + struct v4l2_plane32 __user *p32, + enum v4l2_memory memory) +{ + struct v4l2_plane32 plane32; + typeof(p64->m) m = {}; + + if (copy_from_user(&plane32, p32, sizeof(plane32))) + return -EFAULT; + + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + m.mem_offset = plane32.m.mem_offset; + break; + case V4L2_MEMORY_USERPTR: + m.userptr = (unsigned long)compat_ptr(plane32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + m.fd = plane32.m.fd; + break; + } + + memset(p64, 0, sizeof(*p64)); + *p64 = (struct v4l2_plane) { + .bytesused = plane32.bytesused, + .length = plane32.length, + .m = m, + .data_offset = plane32.data_offset, + }; + + return 0; +} + +static int put_v4l2_plane32(struct v4l2_plane *p64, + struct v4l2_plane32 __user *p32, + enum v4l2_memory memory) +{ + struct v4l2_plane32 plane32; + + memset(&plane32, 0, sizeof(plane32)); + plane32 = (struct v4l2_plane32) { + .bytesused = p64->bytesused, + .length = p64->length, + .data_offset = p64->data_offset, + }; + + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + plane32.m.mem_offset = p64->m.mem_offset; + break; + case V4L2_MEMORY_USERPTR: + plane32.m.userptr = (uintptr_t)(p64->m.userptr); + 
break; + case V4L2_MEMORY_DMABUF: + plane32.m.fd = p64->m.fd; + break; + } + + if (copy_to_user(p32, &plane32, sizeof(plane32))) + return -EFAULT; + + return 0; +} + +static int get_v4l2_buffer32(struct v4l2_buffer *vb, + struct v4l2_buffer32 __user *arg) +{ + struct v4l2_buffer32 vb32; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + memset(vb, 0, sizeof(*vb)); + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.offset = vb32.m.offset, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb->m.offset = vb32.m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb->m.fd = vb32.m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb->m.planes = (void __force *) + compat_ptr(vb32.m.planes); + + return 0; +} + +#ifdef CONFIG_COMPAT_32BIT_TIME +static int get_v4l2_buffer32_time32(struct v4l2_buffer *vb, + struct v4l2_buffer32_time32 __user *arg) +{ + struct v4l2_buffer32_time32 vb32; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.offset = vb32.m.offset, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb->m.offset = vb32.m.offset; + break; + 
case V4L2_MEMORY_USERPTR: + vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb->m.fd = vb32.m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb->m.planes = (void __force *) + compat_ptr(vb32.m.planes); + + return 0; +} +#endif + +static int put_v4l2_buffer32(struct v4l2_buffer *vb, + struct v4l2_buffer32 __user *arg) +{ + struct v4l2_buffer32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + vb32 = (struct v4l2_buffer32) { + .index = vb->index, + .type = vb->type, + .bytesused = vb->bytesused, + .flags = vb->flags, + .field = vb->field, + .timestamp.tv_sec = vb->timestamp.tv_sec, + .timestamp.tv_usec = vb->timestamp.tv_usec, + .timecode = vb->timecode, + .sequence = vb->sequence, + .memory = vb->memory, + .m.offset = vb->m.offset, + .length = vb->length, + .request_fd = vb->request_fd, + }; + + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + vb32.m.offset = vb->m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb32.m.userptr = (uintptr_t)(vb->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb32.m.fd = vb->m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb32.m.planes = (uintptr_t)vb->m.planes; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_COMPAT_32BIT_TIME +static int put_v4l2_buffer32_time32(struct v4l2_buffer *vb, + struct v4l2_buffer32_time32 __user *arg) +{ + struct v4l2_buffer32_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + vb32 = (struct v4l2_buffer32_time32) { + .index = vb->index, + .type = vb->type, + .bytesused = vb->bytesused, + .flags = vb->flags, + .field = vb->field, + .timestamp.tv_sec = vb->timestamp.tv_sec, + .timestamp.tv_usec = vb->timestamp.tv_usec, + .timecode = vb->timecode, + .sequence = vb->sequence, + .memory = vb->memory, + .m.offset = vb->m.offset, + .length = vb->length, + .request_fd = vb->request_fd, + }; + switch (vb->memory) { + case V4L2_MEMORY_MMAP: + case 
V4L2_MEMORY_OVERLAY: + vb32.m.offset = vb->m.offset; + break; + case V4L2_MEMORY_USERPTR: + vb32.m.userptr = (uintptr_t)(vb->m.userptr); + break; + case V4L2_MEMORY_DMABUF: + vb32.m.fd = vb->m.fd; + break; + } + + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type)) + vb32.m.planes = (uintptr_t)vb->m.planes; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + + return 0; +} +#endif + +struct v4l2_framebuffer32 { + __u32 capability; + __u32 flags; + compat_caddr_t base; + struct { + __u32 width; + __u32 height; + __u32 pixelformat; + __u32 field; + __u32 bytesperline; + __u32 sizeimage; + __u32 colorspace; + __u32 priv; + } fmt; +}; + +static int get_v4l2_framebuffer32(struct v4l2_framebuffer *p64, + struct v4l2_framebuffer32 __user *p32) +{ + if (get_user(p64->capability, &p32->capability) || + get_user(p64->flags, &p32->flags) || + copy_from_user(&p64->fmt, &p32->fmt, sizeof(p64->fmt))) + return -EFAULT; + p64->base = NULL; + + return 0; +} + +static int put_v4l2_framebuffer32(struct v4l2_framebuffer *p64, + struct v4l2_framebuffer32 __user *p32) +{ + if (put_user((uintptr_t)p64->base, &p32->base) || + put_user(p64->capability, &p32->capability) || + put_user(p64->flags, &p32->flags) || + copy_to_user(&p32->fmt, &p64->fmt, sizeof(p64->fmt))) + return -EFAULT; + + return 0; +} + +struct v4l2_input32 { + __u32 index; /* Which input */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of input */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 tuner; /* Associated tuner */ + compat_u64 std; + __u32 status; + __u32 capabilities; + __u32 reserved[3]; +}; + +/* + * The 64-bit v4l2_input struct has extra padding at the end of the struct. + * Otherwise it is identical to the 32-bit version. 
+ */ +static inline int get_v4l2_input32(struct v4l2_input *p64, + struct v4l2_input32 __user *p32) +{ + if (copy_from_user(p64, p32, sizeof(*p32))) + return -EFAULT; + return 0; +} + +static inline int put_v4l2_input32(struct v4l2_input *p64, + struct v4l2_input32 __user *p32) +{ + if (copy_to_user(p32, p64, sizeof(*p32))) + return -EFAULT; + return 0; +} + +struct v4l2_ext_controls32 { + __u32 which; + __u32 count; + __u32 error_idx; + __s32 request_fd; + __u32 reserved[1]; + compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */ +}; + +struct v4l2_ext_control32 { + __u32 id; + __u32 size; + __u32 reserved2[1]; + union { + __s32 value; + __s64 value64; + compat_caddr_t string; /* actually char * */ + }; +} __attribute__ ((packed)); + +/* Return true if this control is a pointer type. */ +static inline bool ctrl_is_pointer(struct file *file, u32 id) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_fh *fh = file_to_v4l2_fh(file); + struct v4l2_ctrl_handler *hdl = NULL; + struct v4l2_query_ext_ctrl qec = { id }; + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + + if (fh->ctrl_handler) + hdl = fh->ctrl_handler; + else if (vdev->ctrl_handler) + hdl = vdev->ctrl_handler; + + if (hdl) { + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); + + return ctrl && ctrl->is_ptr; + } + + if (!ops || !ops->vidioc_query_ext_ctrl) + return false; + + return !ops->vidioc_query_ext_ctrl(file, NULL, &qec) && + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); +} + +static int get_v4l2_ext_controls32(struct v4l2_ext_controls *p64, + struct v4l2_ext_controls32 __user *p32) +{ + struct v4l2_ext_controls32 ec32; + + if (copy_from_user(&ec32, p32, sizeof(ec32))) + return -EFAULT; + + *p64 = (struct v4l2_ext_controls) { + .which = ec32.which, + .count = ec32.count, + .error_idx = ec32.error_idx, + .request_fd = ec32.request_fd, + .reserved[0] = ec32.reserved[0], + .controls = (void __force *)compat_ptr(ec32.controls), + }; + + return 0; +} + +static int 
put_v4l2_ext_controls32(struct v4l2_ext_controls *p64, + struct v4l2_ext_controls32 __user *p32) +{ + struct v4l2_ext_controls32 ec32; + + memset(&ec32, 0, sizeof(ec32)); + ec32 = (struct v4l2_ext_controls32) { + .which = p64->which, + .count = p64->count, + .error_idx = p64->error_idx, + .request_fd = p64->request_fd, + .reserved[0] = p64->reserved[0], + .controls = (uintptr_t)p64->controls, + }; + + if (copy_to_user(p32, &ec32, sizeof(ec32))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_X86_64 +/* + * x86 is the only compat architecture with different struct alignment + * between 32-bit and 64-bit tasks. + */ +struct v4l2_event32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct { + compat_s64 tv_sec; + compat_s64 tv_nsec; + } timestamp; + __u32 id; + __u32 reserved[8]; +}; + +static int put_v4l2_event32(struct v4l2_event *p64, + struct v4l2_event32 __user *p32) +{ + if (put_user(p64->type, &p32->type) || + copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) || + put_user(p64->pending, &p32->pending) || + put_user(p64->sequence, &p32->sequence) || + put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) || + put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} + +#endif + +#ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_event32_time32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + +static int put_v4l2_event32_time32(struct v4l2_event *p64, + struct v4l2_event32_time32 __user *p32) +{ + if (put_user(p64->type, &p32->type) || + copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) || + put_user(p64->pending, &p32->pending) || + put_user(p64->sequence, &p32->sequence) || + put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) || + 
put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) || + put_user(p64->id, &p32->id) || + copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved))) + return -EFAULT; + return 0; +} +#endif + +struct v4l2_edid32 { + __u32 pad; + __u32 start_block; + __u32 blocks; + __u32 reserved[5]; + compat_caddr_t edid; +}; + +static int get_v4l2_edid32(struct v4l2_edid *p64, + struct v4l2_edid32 __user *p32) +{ + compat_uptr_t edid; + + if (copy_from_user(p64, p32, offsetof(struct v4l2_edid32, edid)) || + get_user(edid, &p32->edid)) + return -EFAULT; + + p64->edid = (void __force *)compat_ptr(edid); + return 0; +} + +static int put_v4l2_edid32(struct v4l2_edid *p64, + struct v4l2_edid32 __user *p32) +{ + if (copy_to_user(p32, p64, offsetof(struct v4l2_edid32, edid))) + return -EFAULT; + return 0; +} + +/* + * List of ioctls that require 32-bits/64-bits conversion + * + * The V4L2 ioctls that aren't listed there don't have pointer arguments + * and the struct size is identical for both 32 and 64 bits versions, so + * they don't need translations. 
+ */ + +#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32) +#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32) +#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32) +#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32) +#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32) +#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32) +#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32) +#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32) +#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32) +#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32) +#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32) +#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32) +#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) +#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) +#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) +#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32) +#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32) +#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32) + +#ifdef CONFIG_COMPAT_32BIT_TIME +#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32) +#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32) +#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32) +#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32) +#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32) +#endif + +unsigned int v4l2_compat_translate_cmd(unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + return VIDIOC_G_FMT; + case VIDIOC_S_FMT32: + return VIDIOC_S_FMT; + case VIDIOC_TRY_FMT32: + return VIDIOC_TRY_FMT; + case VIDIOC_G_FBUF32: + return VIDIOC_G_FBUF; + case VIDIOC_S_FBUF32: + return VIDIOC_S_FBUF; +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + 
return VIDIOC_QUERYBUF; + case VIDIOC_QBUF32_TIME32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF32_TIME32: + return VIDIOC_DQBUF; + case VIDIOC_PREPARE_BUF32_TIME32: + return VIDIOC_PREPARE_BUF; +#endif + case VIDIOC_QUERYBUF32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF32: + return VIDIOC_DQBUF; + case VIDIOC_CREATE_BUFS32: + return VIDIOC_CREATE_BUFS; + case VIDIOC_G_EXT_CTRLS32: + return VIDIOC_G_EXT_CTRLS; + case VIDIOC_S_EXT_CTRLS32: + return VIDIOC_S_EXT_CTRLS; + case VIDIOC_TRY_EXT_CTRLS32: + return VIDIOC_TRY_EXT_CTRLS; + case VIDIOC_PREPARE_BUF32: + return VIDIOC_PREPARE_BUF; + case VIDIOC_ENUMSTD32: + return VIDIOC_ENUMSTD; + case VIDIOC_ENUMINPUT32: + return VIDIOC_ENUMINPUT; + case VIDIOC_G_EDID32: + return VIDIOC_G_EDID; + case VIDIOC_S_EDID32: + return VIDIOC_S_EDID; +#ifdef CONFIG_X86_64 + case VIDIOC_DQEVENT32: + return VIDIOC_DQEVENT; +#endif +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_DQEVENT32_TIME32: + return VIDIOC_DQEVENT; +#endif + } + return cmd; +} + +int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: + return get_v4l2_format32(parg, arg); + + case VIDIOC_S_FBUF32: + return get_v4l2_framebuffer32(parg, arg); +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: + return get_v4l2_buffer32_time32(parg, arg); +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: + return get_v4l2_buffer32(parg, arg); + + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: + return get_v4l2_ext_controls32(parg, arg); + + case VIDIOC_CREATE_BUFS32: + return get_v4l2_create32(parg, arg); + + case VIDIOC_ENUMSTD32: + return get_v4l2_standard32(parg, arg); + + case VIDIOC_ENUMINPUT32: + return 
get_v4l2_input32(parg, arg); + + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: + return get_v4l2_edid32(parg, arg); + } + return 0; +} + +int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd) +{ + switch (cmd) { + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: + return put_v4l2_format32(parg, arg); + + case VIDIOC_G_FBUF32: + return put_v4l2_framebuffer32(parg, arg); +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: + return put_v4l2_buffer32_time32(parg, arg); +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: + return put_v4l2_buffer32(parg, arg); + + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: + return put_v4l2_ext_controls32(parg, arg); + + case VIDIOC_CREATE_BUFS32: + return put_v4l2_create32(parg, arg); + + case VIDIOC_ENUMSTD32: + return put_v4l2_standard32(parg, arg); + + case VIDIOC_ENUMINPUT32: + return put_v4l2_input32(parg, arg); + + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: + return put_v4l2_edid32(parg, arg); +#ifdef CONFIG_X86_64 + case VIDIOC_DQEVENT32: + return put_v4l2_event32(parg, arg); +#endif +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_DQEVENT32_TIME32: + return put_v4l2_event32_time32(parg, arg); +#endif + } + return 0; +} + +int v4l2_compat_get_array_args(struct file *file, void *mbuf, + void __user *user_ptr, size_t array_size, + unsigned int cmd, void *arg) +{ + int err = 0; + + memset(mbuf, 0, array_size); + + switch (cmd) { +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: { + struct v4l2_buffer *b64 = arg; + struct v4l2_plane *p64 = mbuf; + struct v4l2_plane32 __user 
*p32 = user_ptr; + + if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) { + u32 num_planes = b64->length; + + if (num_planes == 0) + return 0; + + while (num_planes--) { + err = get_v4l2_plane32(p64, p32, b64->memory); + if (err) + return err; + ++p64; + ++p32; + } + } + break; + } + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: { + struct v4l2_ext_controls *ecs64 = arg; + struct v4l2_ext_control *ec64 = mbuf; + struct v4l2_ext_control32 __user *ec32 = user_ptr; + int n; + + for (n = 0; n < ecs64->count; n++) { + if (copy_from_user(ec64, ec32, sizeof(*ec32))) + return -EFAULT; + + if (ctrl_is_pointer(file, ec64->id)) { + compat_uptr_t p; + + if (get_user(p, &ec32->string)) + return -EFAULT; + ec64->string = compat_ptr(p); + } + ec32++; + ec64++; + } + break; + } + default: + if (copy_from_user(mbuf, user_ptr, array_size)) + err = -EFAULT; + break; + } + + return err; +} + +int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr, + void *mbuf, size_t array_size, + unsigned int cmd, void *arg) +{ + int err = 0; + + switch (cmd) { +#ifdef CONFIG_COMPAT_32BIT_TIME + case VIDIOC_QUERYBUF32_TIME32: + case VIDIOC_QBUF32_TIME32: + case VIDIOC_DQBUF32_TIME32: + case VIDIOC_PREPARE_BUF32_TIME32: +#endif + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: + case VIDIOC_PREPARE_BUF32: { + struct v4l2_buffer *b64 = arg; + struct v4l2_plane *p64 = mbuf; + struct v4l2_plane32 __user *p32 = user_ptr; + + if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) { + u32 num_planes = b64->length; + + if (num_planes == 0) + return 0; + + while (num_planes--) { + err = put_v4l2_plane32(p64, p32, b64->memory); + if (err) + return err; + ++p64; + ++p32; + } + } + break; + } + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: { + struct v4l2_ext_controls *ecs64 = arg; + struct v4l2_ext_control *ec64 = mbuf; + struct v4l2_ext_control32 __user *ec32 = user_ptr; + int n; + + for (n = 0; n < ecs64->count; 
n++) { + unsigned int size = sizeof(*ec32); + /* + * Do not modify the pointer when copying a pointer + * control. The contents of the pointer was changed, + * not the pointer itself. + * The structures are otherwise compatible. + */ + if (ctrl_is_pointer(file, ec64->id)) + size -= sizeof(ec32->value64); + + if (copy_to_user(ec32, ec64, size)) + return -EFAULT; + + ec32++; + ec64++; + } + break; + } + default: + if (copy_to_user(user_ptr, mbuf, array_size)) + err = -EFAULT; + break; + } + + return err; +} + +/** + * v4l2_compat_ioctl32() - Handles a compat32 ioctl call + * + * @file: pointer to &struct file with the file handler + * @cmd: ioctl to be called + * @arg: arguments passed from/to the ioctl handler + * + * This function is meant to be used as .compat_ioctl fops at v4l2-dev.c + * in order to deal with 32-bit calls on a 64-bits Kernel. + * + * This function calls do_video_ioctl() for non-private V4L2 ioctls. + * If the function is a private one it calls vdev->fops->compat_ioctl32 + * instead. 
+ */ +long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct video_device *vdev = video_devdata(file); + long ret = -ENOIOCTLCMD; + + if (!file->f_op->unlocked_ioctl) + return ret; + + if (!video_is_registered(vdev)) + return -ENODEV; + + if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + ret = file->f_op->unlocked_ioctl(file, cmd, + (unsigned long)compat_ptr(arg)); + else if (vdev->fops->compat_ioctl32) + ret = vdev->fops->compat_ioctl32(file, cmd, arg); + + if (ret == -ENOIOCTLCMD) + pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n", + _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd); + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-api.c b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-api.c new file mode 100644 index 0000000..0078a04 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -0,0 +1,1361 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework uAPI implementation: + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#define pr_fmt(fmt) "v4l2-ctrls: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +/* Internal temporary helper struct, one for each v4l2_ext_control */ +struct v4l2_ctrl_helper { + /* Pointer to the control reference of the master control */ + struct v4l2_ctrl_ref *mref; + /* The control ref corresponding to the v4l2_ext_control ID field. */ + struct v4l2_ctrl_ref *ref; + /* + * v4l2_ext_control index of the next control belonging to the + * same cluster, or 0 if there isn't any. + */ + u32 next; +}; + +/* + * Helper functions to copy control payload data from kernel space to + * user space and vice versa. 
+ */ + +/* Helper function: copy the given control value back to the caller */ +static int ptr_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr) +{ + u32 len; + + if (ctrl->is_ptr && !ctrl->is_string) + return copy_to_user(c->ptr, ptr.p_const, c->size) ? + -EFAULT : 0; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_STRING: + len = strlen(ptr.p_char); + if (c->size < len + 1) { + c->size = ctrl->elem_size; + return -ENOSPC; + } + return copy_to_user(c->string, ptr.p_char, len + 1) ? + -EFAULT : 0; + case V4L2_CTRL_TYPE_INTEGER64: + c->value64 = *ptr.p_s64; + break; + default: + c->value = *ptr.p_s32; + break; + } + return 0; +} + +/* Helper function: copy the current control value back to the caller */ +static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + return ptr_to_user(c, ctrl, ctrl->p_cur); +} + +/* Helper function: copy the new control value back to the caller */ +static int new_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl *ctrl) +{ + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the request value back to the caller */ +static int req_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl_ref *ref) +{ + return ptr_to_user(c, ref->ctrl, ref->p_req); +} + +/* Helper function: copy the initial control value back to the caller */ +static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->init(ctrl, 0, ctrl->p_new); + + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the minimum control value back to the caller */ +static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new); + + return ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the maximum control value back to the caller */ +static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new); + + return 
ptr_to_user(c, ctrl, ctrl->p_new); +} + +/* Helper function: copy the caller-provider value as the new control value */ +static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) +{ + int ret; + u32 size; + + ctrl->is_new = 0; + if (ctrl->is_dyn_array && + c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) { + void *old = ctrl->p_array; + void *tmp = kvzalloc(2 * c->size, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); + memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); + ctrl->p_new.p = tmp; + ctrl->p_cur.p = tmp + c->size; + ctrl->p_array = tmp; + ctrl->p_array_alloc_elems = c->size / ctrl->elem_size; + kvfree(old); + } + + if (ctrl->is_ptr && !ctrl->is_string) { + unsigned int elems = c->size / ctrl->elem_size; + + if (copy_from_user(ctrl->p_new.p, c->ptr, c->size)) + return -EFAULT; + ctrl->is_new = 1; + if (ctrl->is_dyn_array) + ctrl->new_elems = elems; + else if (ctrl->is_array) + ctrl->type_ops->init(ctrl, elems, ctrl->p_new); + return 0; + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER64: + *ctrl->p_new.p_s64 = c->value64; + break; + case V4L2_CTRL_TYPE_STRING: + size = c->size; + if (size == 0) + return -ERANGE; + if (size > ctrl->maximum + 1) + size = ctrl->maximum + 1; + ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0; + if (!ret) { + char last = ctrl->p_new.p_char[size - 1]; + + ctrl->p_new.p_char[size - 1] = 0; + /* + * If the string was longer than ctrl->maximum, + * then return an error. + */ + if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last) + return -ERANGE; + ctrl->is_new = 1; + } + return ret; + default: + *ctrl->p_new.p_s32 = c->value; + break; + } + ctrl->is_new = 1; + return 0; +} + +/* + * VIDIOC_G/TRY/S_EXT_CTRLS implementation + */ + +/* + * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: + * + * It is not a fully atomic operation, just best-effort only. 
After all, if + * multiple controls have to be set through multiple i2c writes (for example) + * then some initial writes may succeed while others fail. Thus leaving the + * system in an inconsistent state. The question is how much effort you are + * willing to spend on trying to make something atomic that really isn't. + * + * From the point of view of an application the main requirement is that + * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an + * error should be returned without actually affecting any controls. + * + * If all the values are correct, then it is acceptable to just give up + * in case of low-level errors. + * + * It is important though that the application can tell when only a partial + * configuration was done. The way we do that is through the error_idx field + * of struct v4l2_ext_controls: if that is equal to the count field then no + * controls were affected. Otherwise all controls before that index were + * successful in performing their 'get' or 'set' operation, the control at + * the given index failed, and you don't know what happened with the controls + * after the failed one. Since if they were part of a control cluster they + * could have been successfully processed (if a cluster member was encountered + * at index < error_idx), they could have failed (if a cluster member was at + * error_idx), or they may not have been processed yet (if the first cluster + * member appeared after error_idx). + * + * It is all fairly theoretical, though. In practice all you can do is to + * bail out. If error_idx == count, then it is an application bug. If + * error_idx < count then it is only an application bug if the error code was + * EBUSY. That usually means that something started streaming just when you + * tried to set the controls. In all other cases it is a driver/hardware + * problem and all you can do is to retry or bail out. 
+ * + * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that + * never modifies controls the error_idx is just set to whatever control + * has an invalid value. + */ + +/* + * Prepare for the extended g/s/try functions. + * Find the controls in the control array and do some basic checks. + */ +static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct v4l2_ctrl_helper *helpers, + struct video_device *vdev, + bool get) +{ + struct v4l2_ctrl_helper *h; + bool have_clusters = false; + u32 i; + + for (i = 0, h = helpers; i < cs->count; i++, h++) { + struct v4l2_ext_control *c = &cs->controls[i]; + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl *ctrl; + u32 id = c->id & V4L2_CTRL_ID_MASK; + + cs->error_idx = i; + + if (cs->which && + (cs->which < V4L2_CTRL_WHICH_DEF_VAL || + cs->which > V4L2_CTRL_WHICH_MAX_VAL) && + V4L2_CTRL_ID2WHICH(id) != cs->which) { + dprintk(vdev, + "invalid which 0x%x or control id 0x%x\n", + cs->which, id); + return -EINVAL; + } + + /* + * Old-style private controls are not allowed for + * extended controls. 
+ */ + if (id >= V4L2_CID_PRIVATE_BASE) { + dprintk(vdev, + "old-style private controls not allowed\n"); + return -EINVAL; + } + ref = find_ref_lock(hdl, id); + if (!ref) { + dprintk(vdev, "cannot find control id 0x%x\n", id); + return -EINVAL; + } + h->ref = ref; + ctrl = ref->ctrl; + if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) { + dprintk(vdev, "control id 0x%x is disabled\n", id); + return -EINVAL; + } + + if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) && + (cs->which == V4L2_CTRL_WHICH_MIN_VAL || + cs->which == V4L2_CTRL_WHICH_MAX_VAL)) { + dprintk(vdev, + "invalid which 0x%x or control id 0x%x\n", + cs->which, id); + return -EINVAL; + } + + if (ctrl->cluster[0]->ncontrols > 1) + have_clusters = true; + if (ctrl->cluster[0] != ctrl) + ref = find_ref_lock(hdl, ctrl->cluster[0]->id); + if (ctrl->is_dyn_array) { + unsigned int max_size = ctrl->dims[0] * ctrl->elem_size; + unsigned int tot_size = ctrl->elem_size; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + tot_size *= ref->p_req_elems; + else + tot_size *= ctrl->elems; + + c->size = ctrl->elem_size * (c->size / ctrl->elem_size); + if (get) { + if (c->size < tot_size) { + c->size = tot_size; + return -ENOSPC; + } + c->size = tot_size; + } else { + if (c->size > max_size) { + c->size = max_size; + return -ENOSPC; + } + if (!c->size) + return -EFAULT; + } + } else if (ctrl->is_ptr && !ctrl->is_string) { + unsigned int tot_size = ctrl->elems * ctrl->elem_size; + + if (c->size < tot_size) { + /* + * In the get case the application first + * queries to obtain the size of the control. 
+ */ + if (get) { + c->size = tot_size; + return -ENOSPC; + } + dprintk(vdev, + "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n", + id, c->size, tot_size); + return -EFAULT; + } + c->size = tot_size; + } + /* Store the ref to the master control of the cluster */ + h->mref = ref; + /* + * Initially set next to 0, meaning that there is no other + * control in this helper array belonging to the same + * cluster. + */ + h->next = 0; + } + + /* + * We are done if there were no controls that belong to a multi- + * control cluster. + */ + if (!have_clusters) + return 0; + + /* + * The code below figures out in O(n) time which controls in the list + * belong to the same cluster. + */ + + /* This has to be done with the handler lock taken. */ + mutex_lock(hdl->lock); + + /* First zero the helper field in the master control references */ + for (i = 0; i < cs->count; i++) + helpers[i].mref->helper = NULL; + for (i = 0, h = helpers; i < cs->count; i++, h++) { + struct v4l2_ctrl_ref *mref = h->mref; + + /* + * If the mref->helper is set, then it points to an earlier + * helper that belongs to the same cluster. + */ + if (mref->helper) { + /* + * Set the next field of mref->helper to the current + * index: this means that the earlier helper now + * points to the next helper in the same cluster. + */ + mref->helper->next = i; + /* + * mref should be set only for the first helper in the + * cluster, clear the others. + */ + h->mref = NULL; + } + /* Point the mref helper to the current helper struct. */ + mref->helper = h; + } + mutex_unlock(hdl->lock); + return 0; +} + +/* + * Handles the corner case where cs->count == 0. It checks whether the + * specified control class exists. If that class ID is 0, then it checks + * whether there are any controls at all. 
+ */ +static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) +{ + if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL && + which <= V4L2_CTRL_WHICH_MAX_VAL)) + return 0; + return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; +} + +/* + * Get extended controls. Allocates the helpers array if needed. + * + * Note that v4l2_g_ext_ctrls_common() with 'which' set to + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was + * completed, and in that case p_req_valid is true for all controls. + */ +int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev) +{ + struct v4l2_ctrl_helper helper[4]; + struct v4l2_ctrl_helper *helpers = helper; + int ret; + int i, j; + bool is_default, is_request, is_min, is_max; + + is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL); + is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL); + is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL); + is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL); + + cs->error_idx = cs->count; + cs->which = V4L2_CTRL_ID2WHICH(cs->which); + + if (!hdl) + return -EINVAL; + + if (cs->count == 0) + return class_check(hdl, cs->which); + + if (cs->count > ARRAY_SIZE(helper)) { + helpers = kvmalloc_array(cs->count, sizeof(helper[0]), + GFP_KERNEL); + if (!helpers) + return -ENOMEM; + } + + ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true); + cs->error_idx = cs->count; + + for (i = 0; !ret && i < cs->count; i++) + if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + ret = -EACCES; + + for (i = 0; !ret && i < cs->count; i++) { + struct v4l2_ctrl *master; + bool is_volatile = false; + u32 idx = i; + + if (!helpers[i].mref) + continue; + + master = helpers[i].mref->ctrl; + cs->error_idx = i; + + v4l2_ctrl_lock(master); + + /* + * g_volatile_ctrl will update the new control values. + * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL, + * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and + * V4L2_CTRL_WHICH_REQUEST_VAL. 
In the case of requests + * it is v4l2_ctrl_request_complete() that copies the + * volatile controls at the time of request completion + * to the request, so you don't want to do that again. + */ + if (!is_default && !is_request && !is_min && !is_max && + ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || + (master->has_volatiles && !is_cur_manual(master)))) { + for (j = 0; j < master->ncontrols; j++) + cur_to_new(master->cluster[j]); + ret = call_op(master, g_volatile_ctrl); + is_volatile = true; + } + + if (ret) { + v4l2_ctrl_unlock(master); + break; + } + + /* + * Copy the default value (if is_default is true), the + * request value (if is_request is true and p_req is valid), + * the new volatile value (if is_volatile is true) or the + * current value. + */ + do { + struct v4l2_ctrl_ref *ref = helpers[idx].ref; + + if (is_default) + ret = def_to_user(cs->controls + idx, ref->ctrl); + else if (is_request && ref->p_req_array_enomem) + ret = -ENOMEM; + else if (is_request && ref->p_req_valid) + ret = req_to_user(cs->controls + idx, ref); + else if (is_min) + ret = min_to_user(cs->controls + idx, ref->ctrl); + else if (is_max) + ret = max_to_user(cs->controls + idx, ref->ctrl); + else if (is_volatile) + ret = new_to_user(cs->controls + idx, ref->ctrl); + else + ret = cur_to_user(cs->controls + idx, ref->ctrl); + idx = helpers[idx].next; + } while (!ret && idx); + + v4l2_ctrl_unlock(master); + } + + if (cs->count > ARRAY_SIZE(helper)) + kvfree(helpers); + return ret; +} + +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs) +{ + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs); + + return v4l2_g_ext_ctrls_common(hdl, cs, vdev); +} +EXPORT_SYMBOL(v4l2_g_ext_ctrls); + +/* Validate a new control */ +static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) +{ + return ctrl->type_ops->validate(ctrl, p_new); +} + +/* 
Validate controls. */ +static int validate_ctrls(struct v4l2_ext_controls *cs, + struct v4l2_ctrl_helper *helpers, + struct video_device *vdev, + bool set) +{ + unsigned int i; + int ret = 0; + + cs->error_idx = cs->count; + for (i = 0; i < cs->count; i++) { + struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; + union v4l2_ctrl_ptr p_new; + + cs->error_idx = i; + + if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) { + dprintk(vdev, + "control id 0x%x is read-only\n", + ctrl->id); + return -EACCES; + } + /* + * This test is also done in try_set_control_cluster() which + * is called in atomic context, so that has the final say, + * but it makes sense to do an up-front check as well. Once + * an error occurs in try_set_control_cluster() some other + * controls may have been set already and we want to do a + * best-effort to avoid that. + */ + if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) { + dprintk(vdev, + "control id 0x%x is grabbed, cannot set\n", + ctrl->id); + return -EBUSY; + } + /* + * Skip validation for now if the payload needs to be copied + * from userspace into kernelspace. We'll validate those later. 
+ */ + if (ctrl->is_ptr) + continue; + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + p_new.p_s64 = &cs->controls[i].value64; + else + p_new.p_s32 = &cs->controls[i].value; + ret = validate_new(ctrl, p_new); + if (ret) + return ret; + } + return 0; +} + +/* Try or try-and-set controls */ +int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev, bool set) +{ + struct v4l2_ctrl_helper helper[4]; + struct v4l2_ctrl_helper *helpers = helper; + unsigned int i, j; + int ret; + + cs->error_idx = cs->count; + + /* Default/minimum/maximum values cannot be changed */ + if (cs->which == V4L2_CTRL_WHICH_DEF_VAL || + cs->which == V4L2_CTRL_WHICH_MIN_VAL || + cs->which == V4L2_CTRL_WHICH_MAX_VAL) { + dprintk(vdev, "%s: cannot change default/min/max value\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + cs->which = V4L2_CTRL_ID2WHICH(cs->which); + + if (!hdl) { + dprintk(vdev, "%s: invalid null control handler\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + if (cs->count == 0) + return class_check(hdl, cs->which); + + if (cs->count > ARRAY_SIZE(helper)) { + helpers = kvmalloc_array(cs->count, sizeof(helper[0]), + GFP_KERNEL); + if (!helpers) + return -ENOMEM; + } + ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false); + if (!ret) + ret = validate_ctrls(cs, helpers, vdev, set); + if (ret && set) + cs->error_idx = cs->count; + for (i = 0; !ret && i < cs->count; i++) { + struct v4l2_ctrl *master; + u32 idx = i; + + if (!helpers[i].mref) + continue; + + cs->error_idx = i; + master = helpers[i].mref->ctrl; + v4l2_ctrl_lock(master); + + /* Reset the 'is_new' flags of the cluster */ + for (j = 0; j < master->ncontrols; j++) + if (master->cluster[j]) + master->cluster[j]->is_new = 0; + + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. 
+ * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + /* Pick an initial non-manual value */ + s32 new_auto_val = master->manual_mode_value + 1; + u32 tmp_idx = idx; + + do { + /* + * Check if the auto control is part of the + * list, and remember the new value. + */ + if (helpers[tmp_idx].ref->ctrl == master) + new_auto_val = cs->controls[tmp_idx].value; + tmp_idx = helpers[tmp_idx].next; + } while (tmp_idx); + /* + * If the new value == the manual value, then copy + * the current volatile values. + */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + /* + * Copy the new caller-supplied control values. + * user_to_new() sets 'is_new' to 1. + */ + do { + struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; + + ret = user_to_new(cs->controls + idx, ctrl); + if (!ret && ctrl->is_ptr) { + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + dprintk(vdev, + "failed to validate control %s (%d)\n", + v4l2_ctrl_get_name(ctrl->id), ret); + } + idx = helpers[idx].next; + } while (!ret && idx); + + if (!ret) + ret = try_or_set_cluster(fh, master, + !hdl->req_obj.req && set, 0); + if (!ret && hdl->req_obj.req && set) { + for (j = 0; j < master->ncontrols; j++) { + struct v4l2_ctrl_ref *ref = + find_ref(hdl, master->cluster[j]->id); + + new_to_req(ref); + } + } + + /* Copy the new values back to userspace. 
*/ + if (!ret) { + idx = i; + do { + ret = new_to_user(cs->controls + idx, + helpers[idx].ref->ctrl); + idx = helpers[idx].next; + } while (!ret && idx); + } + v4l2_ctrl_unlock(master); + } + + if (cs->count > ARRAY_SIZE(helper)) + kvfree(helpers); + return ret; +} + +static int try_set_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + int ret; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) + return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set); + + ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); + if (ret) + dprintk(vdev, + "%s: try_set_ext_ctrls_common failed (%d)\n", + video_device_node_name(vdev), ret); + + return ret; +} + +int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false); +} +EXPORT_SYMBOL(v4l2_try_ext_ctrls); + +int v4l2_s_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true); +} +EXPORT_SYMBOL(v4l2_s_ext_ctrls); + +/* + * VIDIOC_G/S_CTRL implementation + */ + +/* Helper function to get a single control */ +static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) +{ + struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret = 0; + int i; + + /* Compound controls are not supported. The new_to_user() and + * cur_to_user() calls below would need to be modified not to access + * userspace memory when called from get_ctrl(). 
+ */ + if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64) + return -EINVAL; + + if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + return -EACCES; + + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + ret = call_op(master, g_volatile_ctrl); + if (!ret) + ret = new_to_user(c, ctrl); + } else { + ret = cur_to_user(c, ctrl); + } + v4l2_ctrl_unlock(master); + return ret; +} + +int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); + struct v4l2_ext_control c; + int ret; + + if (!ctrl || !ctrl->is_int) + return -EINVAL; + ret = get_ctrl(ctrl, &c); + + if (!ret) + control->value = c.value; + + return ret; +} +EXPORT_SYMBOL(v4l2_g_ctrl); + +/* Helper function for VIDIOC_S_CTRL compatibility */ +static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) +{ + struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret; + int i; + + /* Reset the 'is_new' flags of the cluster */ + for (i = 0; i < master->ncontrols; i++) + if (master->cluster[i]) + master->cluster[i]->is_new = 0; + + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + return ret; + + /* + * For autoclusters with volatiles that are switched from auto to + * manual mode we have to update the current volatile values since + * those will become the initial manual values after such a switch. 
+ */ + if (master->is_auto && master->has_volatiles && ctrl == master && + !is_cur_manual(master) && ctrl->val == master->manual_mode_value) + update_from_auto_cluster(master); + + ctrl->is_new = 1; + return try_or_set_cluster(fh, master, true, ch_flags); +} + +/* Helper function for VIDIOC_S_CTRL compatibility */ +static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, + struct v4l2_ext_control *c) +{ + int ret; + + v4l2_ctrl_lock(ctrl); + ret = user_to_new(c, ctrl); + if (!ret) + ret = set_ctrl(fh, ctrl, 0); + if (!ret) + ret = cur_to_user(c, ctrl); + v4l2_ctrl_unlock(ctrl); + return ret; +} + +int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct v4l2_control *control) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); + struct v4l2_ext_control c = { control->id }; + int ret; + + if (!ctrl || !ctrl->is_int) + return -EINVAL; + + if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) + return -EACCES; + + c.value = control->value; + ret = set_ctrl_lock(fh, ctrl, &c); + control->value = c.value; + return ret; +} +EXPORT_SYMBOL(v4l2_s_ctrl); + +/* + * Helper functions for drivers to get/set controls. + */ + +s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl) +{ + struct v4l2_ext_control c; + + /* It's a driver bug if this happens. */ + if (WARN_ON(!ctrl->is_int)) + return 0; + c.value = 0; + get_ctrl(ctrl, &c); + return c.value; +} +EXPORT_SYMBOL(v4l2_ctrl_g_ctrl); + +s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl) +{ + struct v4l2_ext_control c; + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) + return 0; + c.value64 = 0; + get_ctrl(ctrl, &c); + return c.value64; +} +EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64); + +int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. 
*/ + if (WARN_ON(!ctrl->is_int)) + return -EINVAL; + ctrl->val = val; + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); + +int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) + return -EINVAL; + *ctrl->p_new.p_s64 = val; + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); + +int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING)) + return -EINVAL; + strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); + +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p) +{ + lockdep_assert_held(ctrl->handler->lock); + + /* It's a driver bug if this happens. */ + if (WARN_ON(ctrl->type != type)) + return -EINVAL; + /* Setting dynamic arrays is not (yet?) supported. */ + if (WARN_ON(ctrl->is_dyn_array)) + return -EINVAL; + memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size); + return set_ctrl(NULL, ctrl, 0); +} +EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound); + +/* + * Modify the range of a control. 
+ */ +int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def) +{ + bool value_changed; + bool range_changed = false; + int ret; + + lockdep_assert_held(ctrl->handler->lock); + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER64: + case V4L2_CTRL_TYPE_BOOLEAN: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_BITMASK: + case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: + if (ctrl->is_array) + return -EINVAL; + ret = check_range(ctrl->type, min, max, step, def); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + if (ctrl->minimum != min || ctrl->maximum != max || + ctrl->step != step || ctrl->default_value != def) { + range_changed = true; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + } + cur_to_new(ctrl); + if (validate_new(ctrl, ctrl->p_new)) { + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + *ctrl->p_new.p_s64 = def; + else + *ctrl->p_new.p_s32 = def; + } + + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; + else + value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; + if (value_changed) + ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); + else if (range_changed) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); + return ret; +} +EXPORT_SYMBOL(__v4l2_ctrl_modify_range); + +int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]) +{ + unsigned int elems = 1; + unsigned int i; + void *p_array; + + lockdep_assert_held(ctrl->handler->lock); + + if (!ctrl->is_array || ctrl->is_dyn_array) + return -EINVAL; + + for (i = 0; i < ctrl->nr_of_dims; i++) + elems *= dims[i]; + if (elems == 0) + return -EINVAL; + p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL); + if (!p_array) + return -ENOMEM; + kvfree(ctrl->p_array); + ctrl->p_array_alloc_elems = elems; + 
ctrl->elems = elems; + ctrl->new_elems = elems; + ctrl->p_array = p_array; + ctrl->p_new.p = p_array; + ctrl->p_cur.p = p_array + elems * ctrl->elem_size; + for (i = 0; i < ctrl->nr_of_dims; i++) + ctrl->dims[i] = dims[i]; + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); + cur_to_new(ctrl); + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | + V4L2_EVENT_CTRL_CH_DIMENSIONS); + return 0; +} +EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions); + +/* Implement VIDIOC_QUERY_EXT_CTRL */ +int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc) +{ + const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND; + u32 id = qc->id & V4L2_CTRL_ID_MASK; + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl *ctrl; + + if (!hdl) + return -EINVAL; + + mutex_lock(hdl->lock); + + /* Try to find it */ + ref = find_ref(hdl, id); + + if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) { + bool is_compound; + /* Match any control that is not hidden */ + unsigned int mask = 1; + bool match = false; + + if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) { + /* Match any hidden control */ + match = true; + } else if ((qc->id & next_flags) == next_flags) { + /* Match any control, compound or not */ + mask = 0; + } + + /* Find the next control with ID > qc->id */ + + /* Did we reach the end of the control list? */ + if (id >= node2id(hdl->ctrl_refs.prev)) { + ref = NULL; /* Yes, so there is no next control */ + } else if (ref) { + struct v4l2_ctrl_ref *pos = ref; + + /* + * We found a control with the given ID, so just get + * the next valid one in the list. 
+ */ + ref = NULL; + list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) { + is_compound = pos->ctrl->is_array || + pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; + if (id < pos->ctrl->id && + (is_compound & mask) == match) { + ref = pos; + break; + } + } + } else { + struct v4l2_ctrl_ref *pos; + + /* + * No control with the given ID exists, so start + * searching for the next largest ID. We know there + * is one, otherwise the first 'if' above would have + * been true. + */ + list_for_each_entry(pos, &hdl->ctrl_refs, node) { + is_compound = pos->ctrl->is_array || + pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; + if (id < pos->ctrl->id && + (is_compound & mask) == match) { + ref = pos; + break; + } + } + } + } + mutex_unlock(hdl->lock); + + if (!ref) + return -EINVAL; + + ctrl = ref->ctrl; + memset(qc, 0, sizeof(*qc)); + if (id >= V4L2_CID_PRIVATE_BASE) + qc->id = id; + else + qc->id = ctrl->id; + strscpy(qc->name, ctrl->name, sizeof(qc->name)); + qc->flags = user_flags(ctrl); + qc->type = ctrl->type; + qc->elem_size = ctrl->elem_size; + qc->elems = ctrl->elems; + qc->nr_of_dims = ctrl->nr_of_dims; + memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0])); + qc->minimum = ctrl->minimum; + qc->maximum = ctrl->maximum; + qc->default_value = ctrl->default_value; + if (ctrl->type == V4L2_CTRL_TYPE_MENU || + ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) + qc->step = 1; + else + qc->step = ctrl->step; + return 0; +} +EXPORT_SYMBOL(v4l2_query_ext_ctrl); + +void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to, + const struct v4l2_query_ext_ctrl *from) +{ + to->id = from->id; + to->type = from->type; + to->flags = from->flags; + strscpy(to->name, from->name, sizeof(to->name)); + + switch (from->type) { + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_BOOLEAN: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_STRING: + case V4L2_CTRL_TYPE_BITMASK: + to->minimum = from->minimum; + to->maximum = from->maximum; + 
to->step = from->step; + to->default_value = from->default_value; + break; + default: + to->minimum = 0; + to->maximum = 0; + to->step = 0; + to->default_value = 0; + break; + } +} +EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl); + +/* Implement VIDIOC_QUERYCTRL */ +int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc) +{ + struct v4l2_query_ext_ctrl qec = { qc->id }; + int rc; + + rc = v4l2_query_ext_ctrl(hdl, &qec); + if (rc) + return rc; + + v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec); + + return 0; +} +EXPORT_SYMBOL(v4l2_queryctrl); + +/* Implement VIDIOC_QUERYMENU */ +int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) +{ + struct v4l2_ctrl *ctrl; + u32 i = qm->index; + + ctrl = v4l2_ctrl_find(hdl, qm->id); + if (!ctrl) + return -EINVAL; + + qm->reserved = 0; + /* Sanity checks */ + switch (ctrl->type) { + case V4L2_CTRL_TYPE_MENU: + if (!ctrl->qmenu) + return -EINVAL; + break; + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (!ctrl->qmenu_int) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if (i < ctrl->minimum || i > ctrl->maximum) + return -EINVAL; + + /* Use mask to see if this menu item should be skipped */ + if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i))) + return -EINVAL; + /* Empty menu items should also be skipped */ + if (ctrl->type == V4L2_CTRL_TYPE_MENU) { + if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0') + return -EINVAL; + strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name)); + } else { + qm->value = ctrl->qmenu_int[i]; + } + return 0; +} +EXPORT_SYMBOL(v4l2_querymenu); + +/* + * VIDIOC_LOG_STATUS helpers + */ + +int v4l2_ctrl_log_status(struct file *file, void *priv) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->v4l2_dev) { + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + v4l2_ctrl_handler_log_status(vfh->ctrl_handler, + vfd->v4l2_dev->name); + } + + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_log_status); + +int 
v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd) +{ + v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name); + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status); + +/* + * VIDIOC_(UN)SUBSCRIBE_EVENT implementation + */ + +static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, + unsigned int elems) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); + + if (!ctrl) + return -EINVAL; + + v4l2_ctrl_lock(ctrl); + list_add_tail(&sev->node, &ctrl->ev_subs); + if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS && + (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) + send_initial_event(sev->fh, ctrl); + v4l2_ctrl_unlock(ctrl); + return 0; +} + +static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev) +{ + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); + + if (!ctrl) + return; + + v4l2_ctrl_lock(ctrl); + list_del(&sev->node); + v4l2_ctrl_unlock(ctrl); +} + +void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new) +{ + u32 old_changes = old->u.ctrl.changes; + + old->u.ctrl = new->u.ctrl; + old->u.ctrl.changes |= old_changes; +} +EXPORT_SYMBOL(v4l2_ctrl_replace); + +void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new) +{ + new->u.ctrl.changes |= old->u.ctrl.changes; +} +EXPORT_SYMBOL(v4l2_ctrl_merge); + +const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = { + .add = v4l2_ctrl_add_event, + .del = v4l2_ctrl_del_event, + .replace = v4l2_ctrl_replace, + .merge = v4l2_ctrl_merge, +}; +EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops); + +int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_CTRL) + return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); + return -EINVAL; +} +EXPORT_SYMBOL(v4l2_ctrl_subscribe_event); + +int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + if (!sd->ctrl_handler) + return -EINVAL; + 
return v4l2_ctrl_subscribe_event(fh, sub); +} +EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event); + +/* + * poll helper + */ +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + return EPOLLPRI; + return 0; +} +EXPORT_SYMBOL(v4l2_ctrl_poll); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-core.c b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-core.c new file mode 100644 index 0000000..85d07ef --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -0,0 +1,2721 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework core implementation. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#include +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +static const union v4l2_ctrl_ptr ptr_null; + +static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, + u32 changes) +{ + memset(ev, 0, sizeof(*ev)); + ev->type = V4L2_EVENT_CTRL; + ev->id = ctrl->id; + ev->u.ctrl.changes = changes; + ev->u.ctrl.type = ctrl->type; + ev->u.ctrl.flags = user_flags(ctrl); + if (ctrl->is_ptr) + ev->u.ctrl.value64 = 0; + else + ev->u.ctrl.value64 = *ctrl->p_cur.p_s64; + ev->u.ctrl.minimum = ctrl->minimum; + ev->u.ctrl.maximum = ctrl->maximum; + if (ctrl->type == V4L2_CTRL_TYPE_MENU + || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) + ev->u.ctrl.step = 1; + else + ev->u.ctrl.step = ctrl->step; + ev->u.ctrl.default_value = ctrl->default_value; +} + +void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl) +{ + struct v4l2_event ev; + u32 changes = V4L2_EVENT_CTRL_CH_FLAGS; + + if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)) + changes |= V4L2_EVENT_CTRL_CH_VALUE; + fill_event(&ev, ctrl, changes); + v4l2_event_queue_fh(fh, &ev); +} + +void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) +{ + struct v4l2_event ev; + struct v4l2_subscribed_event 
*sev; + + if (list_empty(&ctrl->ev_subs)) + return; + fill_event(&ev, ctrl, changes); + + list_for_each_entry(sev, &ctrl->ev_subs, node) + if (sev->fh != fh || + (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)) + v4l2_event_queue_fh(sev->fh, &ev); +} + +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2) +{ + unsigned int i; + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_BUTTON: + return false; + case V4L2_CTRL_TYPE_STRING: + for (i = 0; i < ctrl->elems; i++) { + unsigned int idx = i * ctrl->elem_size; + + /* strings are always 0-terminated */ + if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx)) + return false; + } + return true; + default: + return !memcmp(ptr1.p_const, ptr2.p_const, + ctrl->elems * ctrl->elem_size); + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_equal); + +/* Default intra MPEG-2 quantisation coefficients, from the specification. */ +static const u8 mpeg2_intra_quant_matrix[64] = { + 8, 16, 16, 19, 16, 19, 22, 22, + 22, 22, 22, 22, 26, 24, 26, 27, + 27, 27, 26, 26, 26, 26, 27, 27, + 27, 29, 29, 29, 34, 34, 34, 29, + 29, 29, 27, 27, 29, 29, 32, 32, + 34, 34, 37, 38, 37, 35, 35, 34, + 35, 38, 38, 40, 40, 40, 48, 48, + 46, 46, 56, 56, 58, 69, 69, 83 +}; + +static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_def.p_const) + memcpy(p, ctrl->p_def.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + p_mpeg2_sequence = 
p; + + /* 4:2:0 */ + p_mpeg2_sequence->chroma_format = 1; + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + p_mpeg2_picture = p; + + /* interlaced top field */ + p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD; + p_mpeg2_picture->picture_coding_type = + V4L2_MPEG2_PIC_CODING_TYPE_I; + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + p_mpeg2_quant = p; + + memcpy(p_mpeg2_quant->intra_quantiser_matrix, + mpeg2_intra_quant_matrix, + ARRAY_SIZE(mpeg2_intra_quant_matrix)); + /* + * The default non-intra MPEG-2 quantisation + * coefficients are all 16, as per the specification. + */ + memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16, + sizeof(p_mpeg2_quant->non_intra_quantiser_matrix)); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + p_vp8_frame = p; + p_vp8_frame->num_dct_parts = 1; + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + p_vp9_frame = p; + p_vp9_frame->profile = 0; + p_vp9_frame->bit_depth = 8; + p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING; + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + p_av1_sequence = p; + p_av1_sequence->bit_depth = 8; + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + p_fwht_params = p; + p_fwht_params->version = V4L2_FWHT_VERSION; + p_fwht_params->width = 1280; + p_fwht_params->height = 720; + p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV | + (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + p_h264_scaling_matrix = p; + /* + * The default (flat) H.264 scaling matrix when none are + * specified in the bitstream, this is according to formulas + * (7-8) and (7-9) of the specification. 
+ */ + memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix)); + break; + } +} + +static void std_min_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_min.p_const) + memcpy(p, ctrl->p_min.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); +} + +static void std_max_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + void *p = ptr.p + idx * ctrl->elem_size; + + if (ctrl->p_max.p_const) + memcpy(p, ctrl->p_max.p_const, ctrl->elem_size); + else + memset(p, 0, ctrl->elem_size); +} + +static void __v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + u32 which, union v4l2_ctrl_ptr ptr) +{ + unsigned int i; + u32 tot_elems = ctrl->elems; + u32 elems = tot_elems - from_idx; + s64 value; + + switch (which) { + case V4L2_CTRL_WHICH_DEF_VAL: + value = ctrl->default_value; + break; + case V4L2_CTRL_WHICH_MAX_VAL: + value = ctrl->maximum; + break; + case V4L2_CTRL_WHICH_MIN_VAL: + value = ctrl->minimum; + break; + default: + return; + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_STRING: + if (which == V4L2_CTRL_WHICH_DEF_VAL) + value = ctrl->minimum; + + for (i = from_idx; i < tot_elems; i++) { + unsigned int offset = i * ctrl->elem_size; + + memset(ptr.p_char + offset, ' ', value); + ptr.p_char[offset + value] = '\0'; + } + break; + case V4L2_CTRL_TYPE_INTEGER64: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_s64[i] = value; + } else { + memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64)); + } + break; + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER_MENU: + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_BITMASK: + case V4L2_CTRL_TYPE_BOOLEAN: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_s32[i] = value; + } else { + memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32)); + } + break; + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + memset(ptr.p_s32 + 
from_idx, 0, elems * sizeof(s32)); + break; + case V4L2_CTRL_TYPE_U8: + memset(ptr.p_u8 + from_idx, value, elems); + break; + case V4L2_CTRL_TYPE_U16: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_u16[i] = value; + } else { + memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16)); + } + break; + case V4L2_CTRL_TYPE_U32: + if (value) { + for (i = from_idx; i < tot_elems; i++) + ptr.p_u32[i] = value; + } else { + memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32)); + } + break; + default: + for (i = from_idx; i < tot_elems; i++) { + switch (which) { + case V4L2_CTRL_WHICH_DEF_VAL: + std_init_compound(ctrl, i, ptr); + break; + case V4L2_CTRL_WHICH_MAX_VAL: + std_max_compound(ctrl, i, ptr); + break; + case V4L2_CTRL_WHICH_MIN_VAL: + std_min_compound(ctrl, i, ptr); + break; + } + } + break; + } +} + +void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr) +{ + __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_DEF_VAL, ptr); +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_init); + +static void v4l2_ctrl_type_op_minimum(const struct v4l2_ctrl *ctrl, + u32 from_idx, union v4l2_ctrl_ptr ptr) +{ + __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MIN_VAL, ptr); +} + +static void v4l2_ctrl_type_op_maximum(const struct v4l2_ctrl *ctrl, + u32 from_idx, union v4l2_ctrl_ptr ptr) +{ + __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MAX_VAL, ptr); +} + +void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl) +{ + union v4l2_ctrl_ptr ptr = ctrl->p_cur; + + if (ctrl->is_array) { + unsigned i; + + for (i = 0; i < ctrl->nr_of_dims; i++) + pr_cont("[%u]", ctrl->dims[i]); + pr_cont(" "); + } + + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + pr_cont("%d", *ptr.p_s32); + break; + case V4L2_CTRL_TYPE_BOOLEAN: + pr_cont("%s", *ptr.p_s32 ? 
"true" : "false"); + break; + case V4L2_CTRL_TYPE_MENU: + pr_cont("%s", ctrl->qmenu[*ptr.p_s32]); + break; + case V4L2_CTRL_TYPE_INTEGER_MENU: + pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]); + break; + case V4L2_CTRL_TYPE_BITMASK: + pr_cont("0x%08x", *ptr.p_s32); + break; + case V4L2_CTRL_TYPE_INTEGER64: + pr_cont("%lld", *ptr.p_s64); + break; + case V4L2_CTRL_TYPE_STRING: + pr_cont("%s", ptr.p_char); + break; + case V4L2_CTRL_TYPE_U8: + pr_cont("%u", (unsigned)*ptr.p_u8); + break; + case V4L2_CTRL_TYPE_U16: + pr_cont("%u", (unsigned)*ptr.p_u16); + break; + case V4L2_CTRL_TYPE_U32: + pr_cont("%u", (unsigned)*ptr.p_u32); + break; + case V4L2_CTRL_TYPE_AREA: + pr_cont("%ux%u", ptr.p_area->width, ptr.p_area->height); + break; + case V4L2_CTRL_TYPE_H264_SPS: + pr_cont("H264_SPS"); + break; + case V4L2_CTRL_TYPE_H264_PPS: + pr_cont("H264_PPS"); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + pr_cont("H264_SCALING_MATRIX"); + break; + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + pr_cont("H264_SLICE_PARAMS"); + break; + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + pr_cont("H264_DECODE_PARAMS"); + break; + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + pr_cont("H264_PRED_WEIGHTS"); + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + pr_cont("FWHT_PARAMS"); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + pr_cont("VP8_FRAME"); + break; + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + pr_cont("HDR10_CLL_INFO"); + break; + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + pr_cont("HDR10_MASTERING_DISPLAY"); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + pr_cont("MPEG2_QUANTISATION"); + break; + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + pr_cont("MPEG2_SEQUENCE"); + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + pr_cont("MPEG2_PICTURE"); + break; + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + pr_cont("VP9_COMPRESSED_HDR"); + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + pr_cont("VP9_FRAME"); + break; + case V4L2_CTRL_TYPE_HEVC_SPS: + pr_cont("HEVC_SPS"); + break; + case V4L2_CTRL_TYPE_HEVC_PPS: + 
pr_cont("HEVC_PPS"); + break; + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + pr_cont("HEVC_SLICE_PARAMS"); + break; + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + pr_cont("HEVC_SCALING_MATRIX"); + break; + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + pr_cont("HEVC_DECODE_PARAMS"); + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + pr_cont("AV1_SEQUENCE"); + break; + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + pr_cont("AV1_TILE_GROUP_ENTRY"); + break; + case V4L2_CTRL_TYPE_AV1_FRAME: + pr_cont("AV1_FRAME"); + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + pr_cont("AV1_FILM_GRAIN"); + break; + case V4L2_CTRL_TYPE_RECT: + pr_cont("(%d,%d)/%ux%u", + ptr.p_rect->left, ptr.p_rect->top, + ptr.p_rect->width, ptr.p_rect->height); + break; + default: + pr_cont("unknown type %d", ctrl->type); + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_log); + +/* + * Round towards the closest legal value. Be careful when we are + * close to the maximum range of the control type to prevent + * wrap-arounds. + */ +#define ROUND_TO_RANGE(val, offset_type, ctrl) \ +({ \ + offset_type offset; \ + if ((ctrl)->maximum >= 0 && \ + val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \ + val = (ctrl)->maximum; \ + else \ + val += (s32)((ctrl)->step / 2); \ + val = clamp_t(typeof(val), val, \ + (ctrl)->minimum, (ctrl)->maximum); \ + offset = (val) - (ctrl)->minimum; \ + offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \ + val = (ctrl)->minimum + offset; \ + 0; \ +}) + +/* Validate a new control */ + +#define zero_padding(s) \ + memset(&(s).padding, 0, sizeof((s).padding)) +#define zero_reserved(s) \ + memset(&(s).reserved, 0, sizeof((s).reserved)) + +static int +validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf) +{ + unsigned int i; + + if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED | + V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE)) + return -EINVAL; + + /* That all values are in the accepted range. 
*/ + if (lf->level > GENMASK(5, 0)) + return -EINVAL; + + if (lf->sharpness > GENMASK(2, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) + if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) + if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63) + return -EINVAL; + + zero_reserved(*lf); + return 0; +} + +static int +validate_vp9_quant_params(struct v4l2_vp9_quantization *quant) +{ + if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 || + quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 || + quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15) + return -EINVAL; + + zero_reserved(*quant); + return 0; +} + +static int +validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg) +{ + unsigned int i, j; + + if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED | + V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP | + V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE | + V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA | + V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) { + if (seg->feature_enabled[i] & + ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK) + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) { + static const int range[] = { 255, 63, 3, 0 }; + + for (j = 0; j < ARRAY_SIZE(seg->feature_data[j]); j++) { + if (seg->feature_data[i][j] < -range[j] || + seg->feature_data[i][j] > range[j]) + return -EINVAL; + } + } + + zero_reserved(*seg); + return 0; +} + +static int +validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr) +{ + if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT) + return -EINVAL; + + return 0; +} + +static int +validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame) +{ + int ret; + + /* Make sure we're not passed invalid flags. 
*/ + if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME | + V4L2_VP9_FRAME_FLAG_SHOW_FRAME | + V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT | + V4L2_VP9_FRAME_FLAG_INTRA_ONLY | + V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV | + V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX | + V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE | + V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING | + V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING)) + return -EINVAL; + + if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT && + frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX) + return -EINVAL; + + if (frame->profile > V4L2_VP9_PROFILE_MAX) + return -EINVAL; + + if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL) + return -EINVAL; + + if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX) + return -EINVAL; + + /* + * Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10 + * and 12 bit depths. + */ + if ((frame->profile < 2 && frame->bit_depth != 8) || + (frame->profile >= 2 && + (frame->bit_depth != 10 && frame->bit_depth != 12))) + return -EINVAL; + + /* Profile 0 and 2 only accept YUV 4:2:0. */ + if ((frame->profile == 0 || frame->profile == 2) && + (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) || + !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) + return -EINVAL; + + /* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */ + if ((frame->profile == 1 || frame->profile == 3) && + ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) && + (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) + return -EINVAL; + + if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE) + return -EINVAL; + + /* + * According to the spec, tile_cols_log2 shall be less than or equal + * to 6. 
+ */ + if (frame->tile_cols_log2 > 6) + return -EINVAL; + + if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT) + return -EINVAL; + + ret = validate_vp9_lf_params(&frame->lf); + if (ret) + return ret; + + ret = validate_vp9_quant_params(&frame->quant); + if (ret) + return ret; + + ret = validate_vp9_seg_params(&frame->seg); + if (ret) + return ret; + + zero_reserved(*frame); + return 0; +} + +static int validate_av1_quantization(struct v4l2_av1_quantization *q) +{ + if (q->flags > GENMASK(2, 0)) + return -EINVAL; + + if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 || + q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 || + q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 || + q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 || + q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 || + q->delta_q_res > GENMASK(1, 0)) + return -EINVAL; + + if (q->qm_y > GENMASK(3, 0) || + q->qm_u > GENMASK(3, 0) || + q->qm_v > GENMASK(3, 0)) + return -EINVAL; + + return 0; +} + +static int validate_av1_segmentation(struct v4l2_av1_segmentation *s) +{ + u32 i; + u32 j; + + if (s->flags > GENMASK(4, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) { + static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; + static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0}; + + for (j = 0; j < ARRAY_SIZE(s->feature_data[j]); j++) { + s32 limit = segmentation_feature_max[j]; + + if (segmentation_feature_signed[j]) { + if (s->feature_data[i][j] < -limit || + s->feature_data[i][j] > limit) + return -EINVAL; + } else { + if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit) + return -EINVAL; + } + } + } + + return 0; +} + +static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf) +{ + u32 i; + + if (lf->flags > GENMASK(3, 0)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(lf->level); i++) { + if (lf->level[i] > GENMASK(5, 0)) + return -EINVAL; + } + + if (lf->sharpness > GENMASK(2, 0)) + return -EINVAL; + + for (i 
= 0; i < ARRAY_SIZE(lf->ref_deltas); i++) { + if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63) + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) { + if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63) + return -EINVAL; + } + + return 0; +} + +static int validate_av1_cdef(struct v4l2_av1_cdef *cdef) +{ + u32 i; + + if (cdef->damping_minus_3 > GENMASK(1, 0) || + cdef->bits > GENMASK(1, 0)) + return -EINVAL; + + for (i = 0; i < 1 << cdef->bits; i++) { + if (cdef->y_pri_strength[i] > GENMASK(3, 0) || + cdef->y_sec_strength[i] > 4 || + cdef->uv_pri_strength[i] > GENMASK(3, 0) || + cdef->uv_sec_strength[i] > 4) + return -EINVAL; + } + + return 0; +} + +static int validate_av1_loop_restauration(struct v4l2_av1_loop_restoration *lr) +{ + if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1) + return -EINVAL; + + return 0; +} + +static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg) +{ + u32 i; + + if (fg->flags > GENMASK(4, 0)) + return -EINVAL; + + if (fg->film_grain_params_ref_idx > GENMASK(2, 0) || + fg->num_y_points > 14 || + fg->num_cb_points > 10 || + fg->num_cr_points > GENMASK(3, 0) || + fg->grain_scaling_minus_8 > GENMASK(1, 0) || + fg->ar_coeff_lag > GENMASK(1, 0) || + fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) || + fg->grain_scale_shift > GENMASK(1, 0)) + return -EINVAL; + + if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) + return 0; + + for (i = 1; i < fg->num_y_points; i++) + if (fg->point_y_value[i] <= fg->point_y_value[i - 1]) + return -EINVAL; + + for (i = 1; i < fg->num_cb_points; i++) + if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1]) + return -EINVAL; + + for (i = 1; i < fg->num_cr_points; i++) + if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1]) + return -EINVAL; + + return 0; +} + +static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f) +{ + int ret = 0; + + ret = validate_av1_quantization(&f->quantization); + if (ret) + return ret; + ret = 
validate_av1_segmentation(&f->segmentation); + if (ret) + return ret; + ret = validate_av1_loop_filter(&f->loop_filter); + if (ret) + return ret; + ret = validate_av1_cdef(&f->cdef); + if (ret) + return ret; + ret = validate_av1_loop_restauration(&f->loop_restoration); + if (ret) + return ret; + + if (f->flags & + ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME | + V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME | + V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE | + V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE | + V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS | + V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV | + V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC | + V4L2_AV1_FRAME_FLAG_USE_SUPERRES | + V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV | + V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE | + V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS | + V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF | + V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION | + V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT | + V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET | + V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED | + V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT | + V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE | + V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT | + V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING)) + return -EINVAL; + + if (f->superres_denom > GENMASK(2, 0) + 9) + return -EINVAL; + + return 0; +} + +static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s) +{ + if (s->flags & + ~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE | + V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES | + V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF | + 
V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION | + V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME | + V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE | + V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X | + V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y | + V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT | + V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q)) + return -EINVAL; + + if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME) + return -EINVAL; + + /* reserved */ + if (s->seq_profile > 2) + return -EINVAL; + + /* TODO: PROFILES */ + return 0; +} + +/* + * Compound controls validation requires setting unused fields/flags to zero + * in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's + * memcmp. + */ +static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_dec_params; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; + struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params; + struct v4l2_area *area; + struct v4l2_rect *rect; + void *p = ptr.p + idx * ctrl->elem_size; + unsigned int i; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + p_mpeg2_sequence = p; + + switch (p_mpeg2_sequence->chroma_format) { + case 1: /* 4:2:0 */ + case 2: /* 4:2:2 */ + case 3: /* 4:4:4 */ + break; + default: + return -EINVAL; + } + break; + + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + p_mpeg2_picture = p; + + switch (p_mpeg2_picture->intra_dc_precision) { + case 0: /* 8 bits */ + case 1: /* 9 bits */ + 
case 2: /* 10 bits */ + case 3: /* 11 bits */ + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_picture->picture_structure) { + case V4L2_MPEG2_PIC_TOP_FIELD: + case V4L2_MPEG2_PIC_BOTTOM_FIELD: + case V4L2_MPEG2_PIC_FRAME: + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_picture->picture_coding_type) { + case V4L2_MPEG2_PIC_CODING_TYPE_I: + case V4L2_MPEG2_PIC_CODING_TYPE_P: + case V4L2_MPEG2_PIC_CODING_TYPE_B: + break; + default: + return -EINVAL; + } + zero_reserved(*p_mpeg2_picture); + break; + + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + break; + + case V4L2_CTRL_TYPE_FWHT_PARAMS: + p_fwht_params = p; + if (p_fwht_params->version < V4L2_FWHT_VERSION) + return -EINVAL; + if (!p_fwht_params->width || !p_fwht_params->height) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SPS: + p_h264_sps = p; + + /* Some syntax elements are only conditionally valid */ + if (p_h264_sps->pic_order_cnt_type != 0) { + p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0; + } else if (p_h264_sps->pic_order_cnt_type != 1) { + p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0; + p_h264_sps->offset_for_non_ref_pic = 0; + p_h264_sps->offset_for_top_to_bottom_field = 0; + memset(&p_h264_sps->offset_for_ref_frame, 0, + sizeof(p_h264_sps->offset_for_ref_frame)); + } + + if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) { + p_h264_sps->chroma_format_idc = 1; + p_h264_sps->bit_depth_luma_minus8 = 0; + p_h264_sps->bit_depth_chroma_minus8 = 0; + + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS; + } + + if (p_h264_sps->chroma_format_idc < 3) + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE; + + if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) + p_h264_sps->flags &= + ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD; + + /* + * Chroma 4:2:2 format require at least High 4:2:2 profile. 
+ * + * The H264 specification and well-known parser implementations + * use profile-idc values directly, as that is clearer and + * less ambiguous. We do the same here. + */ + if (p_h264_sps->profile_idc < 122 && + p_h264_sps->chroma_format_idc > 1) + return -EINVAL; + /* Chroma 4:4:4 format require at least High 4:2:2 profile */ + if (p_h264_sps->profile_idc < 244 && + p_h264_sps->chroma_format_idc > 2) + return -EINVAL; + if (p_h264_sps->chroma_format_idc > 3) + return -EINVAL; + + if (p_h264_sps->bit_depth_luma_minus8 > 6) + return -EINVAL; + if (p_h264_sps->bit_depth_chroma_minus8 > 6) + return -EINVAL; + if (p_h264_sps->log2_max_frame_num_minus4 > 12) + return -EINVAL; + if (p_h264_sps->pic_order_cnt_type > 2) + return -EINVAL; + if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12) + return -EINVAL; + if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_PPS: + p_h264_pps = p; + + if (p_h264_pps->num_slice_groups_minus1 > 7) + return -EINVAL; + if (p_h264_pps->num_ref_idx_l0_default_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_pps->num_ref_idx_l1_default_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_pps->weighted_bipred_idc > 2) + return -EINVAL; + /* + * pic_init_qp_minus26 shall be in the range of + * -(26 + QpBdOffset_y) to +25, inclusive, + * where QpBdOffset_y is 6 * bit_depth_luma_minus8 + */ + if (p_h264_pps->pic_init_qp_minus26 < -62 || + p_h264_pps->pic_init_qp_minus26 > 25) + return -EINVAL; + if (p_h264_pps->pic_init_qs_minus26 < -26 || + p_h264_pps->pic_init_qs_minus26 > 25) + return -EINVAL; + if (p_h264_pps->chroma_qp_index_offset < -12 || + p_h264_pps->chroma_qp_index_offset > 12) + return -EINVAL; + if (p_h264_pps->second_chroma_qp_index_offset < -12 || + p_h264_pps->second_chroma_qp_index_offset > 12) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + break; + + case 
V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + p_h264_pred_weights = p; + + if (p_h264_pred_weights->luma_log2_weight_denom > 7) + return -EINVAL; + if (p_h264_pred_weights->chroma_log2_weight_denom > 7) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + p_h264_slice_params = p; + + if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) + p_h264_slice_params->flags &= + ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED; + + if (p_h264_slice_params->colour_plane_id > 2) + return -EINVAL; + if (p_h264_slice_params->cabac_init_idc > 2) + return -EINVAL; + if (p_h264_slice_params->disable_deblocking_filter_idc > 2) + return -EINVAL; + if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 || + p_h264_slice_params->slice_alpha_c0_offset_div2 > 6) + return -EINVAL; + if (p_h264_slice_params->slice_beta_offset_div2 < -6 || + p_h264_slice_params->slice_beta_offset_div2 > 6) + return -EINVAL; + + if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I || + p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI) + p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0; + if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) + p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0; + + if (p_h264_slice_params->num_ref_idx_l0_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + if (p_h264_slice_params->num_ref_idx_l1_active_minus1 > + (V4L2_H264_REF_LIST_LEN - 1)) + return -EINVAL; + zero_reserved(*p_h264_slice_params); + break; + + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + p_h264_dec_params = p; + + if (p_h264_dec_params->nal_ref_idc > 3) + return -EINVAL; + for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { + struct v4l2_h264_dpb_entry *dpb_entry = + &p_h264_dec_params->dpb[i]; + + zero_reserved(*dpb_entry); + } + zero_reserved(*p_h264_dec_params); + break; + + case V4L2_CTRL_TYPE_VP8_FRAME: + p_vp8_frame = p; + + switch (p_vp8_frame->num_dct_parts) { + case 1: + case 2: + case 4: + case 8: + break; + default: + return -EINVAL; + 
} + zero_padding(p_vp8_frame->segment); + zero_padding(p_vp8_frame->lf); + zero_padding(p_vp8_frame->quant); + zero_padding(p_vp8_frame->entropy); + zero_padding(p_vp8_frame->coder_state); + break; + + case V4L2_CTRL_TYPE_HEVC_SPS: + p_hevc_sps = p; + + if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) { + p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0; + p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0; + p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0; + p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0; + } + + if (!(p_hevc_sps->flags & + V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT)) + p_hevc_sps->num_long_term_ref_pics_sps = 0; + break; + + case V4L2_CTRL_TYPE_HEVC_PPS: + p_hevc_pps = p; + + if (!(p_hevc_pps->flags & + V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED)) + p_hevc_pps->diff_cu_qp_delta_depth = 0; + + if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) { + p_hevc_pps->num_tile_columns_minus1 = 0; + p_hevc_pps->num_tile_rows_minus1 = 0; + memset(&p_hevc_pps->column_width_minus1, 0, + sizeof(p_hevc_pps->column_width_minus1)); + memset(&p_hevc_pps->row_height_minus1, 0, + sizeof(p_hevc_pps->row_height_minus1)); + + p_hevc_pps->flags &= + ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; + } + + if (p_hevc_pps->flags & + V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) { + p_hevc_pps->pps_beta_offset_div2 = 0; + p_hevc_pps->pps_tc_offset_div2 = 0; + } + break; + + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + p_hevc_decode_params = p; + + if (p_hevc_decode_params->num_active_dpb_entries > + V4L2_HEVC_DPB_ENTRIES_NUM_MAX) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + break; + + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + break; + + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + p_hdr10_mastering = p; + + for (i = 0; i < 3; ++i) { + if (p_hdr10_mastering->display_primaries_x[i] < + V4L2_HDR10_MASTERING_PRIMARIES_X_LOW || + p_hdr10_mastering->display_primaries_x[i] > + 
V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH || + p_hdr10_mastering->display_primaries_y[i] < + V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW || + p_hdr10_mastering->display_primaries_y[i] > + V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH) + return -EINVAL; + } + + if (p_hdr10_mastering->white_point_x < + V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW || + p_hdr10_mastering->white_point_x > + V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH || + p_hdr10_mastering->white_point_y < + V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW || + p_hdr10_mastering->white_point_y > + V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH) + return -EINVAL; + + if (p_hdr10_mastering->max_display_mastering_luminance < + V4L2_HDR10_MASTERING_MAX_LUMA_LOW || + p_hdr10_mastering->max_display_mastering_luminance > + V4L2_HDR10_MASTERING_MAX_LUMA_HIGH || + p_hdr10_mastering->min_display_mastering_luminance < + V4L2_HDR10_MASTERING_MIN_LUMA_LOW || + p_hdr10_mastering->min_display_mastering_luminance > + V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) + return -EINVAL; + + /* The following restriction comes from ITU-T Rec. 
H.265 spec */ + if (p_hdr10_mastering->max_display_mastering_luminance == + V4L2_HDR10_MASTERING_MAX_LUMA_LOW && + p_hdr10_mastering->min_display_mastering_luminance == + V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) + return -EINVAL; + + break; + + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + break; + + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + return validate_vp9_compressed_hdr(p); + + case V4L2_CTRL_TYPE_VP9_FRAME: + return validate_vp9_frame(p); + case V4L2_CTRL_TYPE_AV1_FRAME: + return validate_av1_frame(p); + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + return validate_av1_sequence(p); + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + return validate_av1_film_grain(p); + + case V4L2_CTRL_TYPE_AREA: + area = p; + if (!area->width || !area->height) + return -EINVAL; + break; + + case V4L2_CTRL_TYPE_RECT: + rect = p; + if (!rect->width || !rect->height) + return -EINVAL; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr) +{ + size_t len; + u64 offset; + s64 val; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); + case V4L2_CTRL_TYPE_INTEGER64: + /* + * We can't use the ROUND_TO_RANGE define here due to + * the u64 divide that needs special care. 
+ */ + val = ptr.p_s64[idx]; + if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2)) + val = ctrl->maximum; + else + val += (s64)(ctrl->step / 2); + val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum); + offset = val - ctrl->minimum; + do_div(offset, ctrl->step); + ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step; + return 0; + case V4L2_CTRL_TYPE_U8: + return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl); + case V4L2_CTRL_TYPE_U16: + return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl); + case V4L2_CTRL_TYPE_U32: + return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl); + + case V4L2_CTRL_TYPE_BOOLEAN: + ptr.p_s32[idx] = !!ptr.p_s32[idx]; + return 0; + + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum) + return -ERANGE; + if (ptr.p_s32[idx] < BITS_PER_LONG_LONG && + (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx]))) + return -EINVAL; + if (ctrl->type == V4L2_CTRL_TYPE_MENU && + ctrl->qmenu[ptr.p_s32[idx]][0] == '\0') + return -EINVAL; + return 0; + + case V4L2_CTRL_TYPE_BITMASK: + ptr.p_s32[idx] &= ctrl->maximum; + return 0; + + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + ptr.p_s32[idx] = 0; + return 0; + + case V4L2_CTRL_TYPE_STRING: + idx *= ctrl->elem_size; + len = strlen(ptr.p_char + idx); + if (len < ctrl->minimum) + return -ERANGE; + if ((len - (u32)ctrl->minimum) % (u32)ctrl->step) + return -ERANGE; + return 0; + + default: + return std_validate_compound(ctrl, idx, ptr); + } +} + +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr) +{ + unsigned int i; + int ret = 0; + + switch ((u32)ctrl->type) { + case V4L2_CTRL_TYPE_U8: + if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + case V4L2_CTRL_TYPE_U16: + if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1) + return 0; + break; + case V4L2_CTRL_TYPE_U32: + if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 
&& ctrl->step == 1) + return 0; + break; + + case V4L2_CTRL_TYPE_BUTTON: + case V4L2_CTRL_TYPE_CTRL_CLASS: + memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32)); + return 0; + } + + for (i = 0; !ret && i < ctrl->new_elems; i++) + ret = std_validate_elem(ctrl, i, ptr); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_type_op_validate); + +static const struct v4l2_ctrl_type_ops std_type_ops = { + .equal = v4l2_ctrl_type_op_equal, + .init = v4l2_ctrl_type_op_init, + .minimum = v4l2_ctrl_type_op_minimum, + .maximum = v4l2_ctrl_type_op_maximum, + .log = v4l2_ctrl_type_op_log, + .validate = v4l2_ctrl_type_op_validate, +}; + +void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) +{ + if (!ctrl) + return; + if (!notify) { + ctrl->call_notify = 0; + return; + } + if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify)) + return; + ctrl->handler->notify = notify; + ctrl->handler->notify_priv = priv; + ctrl->call_notify = 1; +} +EXPORT_SYMBOL(v4l2_ctrl_notify); + +/* Copy the one value to another. */ +static void ptr_to_ptr(struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to, + unsigned int elems) +{ + if (ctrl == NULL) + return; + memcpy(to.p, from.p_const, elems * ctrl->elem_size); +} + +/* Copy the new value to the current value. */ +void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) +{ + bool changed; + + if (ctrl == NULL) + return; + + /* has_changed is set by cluster_changed */ + changed = ctrl->has_changed; + if (changed) { + if (ctrl->is_dyn_array) + ctrl->elems = ctrl->new_elems; + ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems); + } + + if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) { + /* Note: CH_FLAGS is only set for auto clusters. 
*/ + ctrl->flags &= + ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE); + if (!is_cur_manual(ctrl->cluster[0])) { + ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + if (ctrl->cluster[0]->has_volatiles) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + } + fh = NULL; + } + if (changed || ch_flags) { + /* If a control was changed that was not one of the controls + modified by the application, then send the event to all. */ + if (!ctrl->is_new) + fh = NULL; + send_event(fh, ctrl, + (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags); + if (ctrl->call_notify && changed && ctrl->handler->notify) + ctrl->handler->notify(ctrl, ctrl->handler->notify_priv); + } +} + +/* Copy the current value to the new value */ +void cur_to_new(struct v4l2_ctrl *ctrl) +{ + if (ctrl == NULL) + return; + if (ctrl->is_dyn_array) + ctrl->new_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); +} + +static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems) +{ + void *tmp; + + if (elems == ref->p_req_array_alloc_elems) + return true; + if (ref->ctrl->is_dyn_array && + elems < ref->p_req_array_alloc_elems) + return true; + + tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL); + + if (!tmp) { + ref->p_req_array_enomem = true; + return false; + } + ref->p_req_array_enomem = false; + kvfree(ref->p_req.p); + ref->p_req.p = tmp; + ref->p_req_array_alloc_elems = elems; + return true; +} + +/* Copy the new value to the request value */ +void new_to_req(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return; + + ctrl = ref->ctrl; + if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems)) + return; + + ref->p_req_elems = ctrl->new_elems; + ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems); + ref->p_req_valid = true; +} + +/* Copy the current value to the request value */ +void cur_to_req(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return; + + ctrl = ref->ctrl; + if (ctrl->is_array && 
!req_alloc_array(ref, ctrl->elems)) + return; + + ref->p_req_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems); + ref->p_req_valid = true; +} + +/* Copy the request value to the new value */ +int req_to_new(struct v4l2_ctrl_ref *ref) +{ + struct v4l2_ctrl *ctrl; + + if (!ref) + return 0; + + ctrl = ref->ctrl; + + /* + * This control was never set in the request, so just use the current + * value. + */ + if (!ref->p_req_valid) { + if (ctrl->is_dyn_array) + ctrl->new_elems = ctrl->elems; + ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); + return 0; + } + + /* Not an array, so just copy the request value */ + if (!ctrl->is_array) { + ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); + return 0; + } + + /* Sanity check, should never happen */ + if (WARN_ON(!ref->p_req_array_alloc_elems)) + return -ENOMEM; + + if (!ctrl->is_dyn_array && + ref->p_req_elems != ctrl->p_array_alloc_elems) + return -ENOMEM; + + /* + * Check if the number of elements in the request is more than the + * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array. + * Note that p_array is allocated with twice the number of elements + * in the dynamic array since it has to store both the current and + * new value of such a control. 
+ */ + if (ref->p_req_elems > ctrl->p_array_alloc_elems) { + unsigned int sz = ref->p_req_elems * ctrl->elem_size; + void *old = ctrl->p_array; + void *tmp = kvzalloc(2 * sz, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); + memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); + ctrl->p_new.p = tmp; + ctrl->p_cur.p = tmp + sz; + ctrl->p_array = tmp; + ctrl->p_array_alloc_elems = ref->p_req_elems; + kvfree(old); + } + + ctrl->new_elems = ref->p_req_elems; + ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); + return 0; +} + +/* Control range checking */ +int check_range(enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def) +{ + switch (type) { + case V4L2_CTRL_TYPE_BOOLEAN: + if (step != 1 || max > 1 || min < 0) + return -ERANGE; + fallthrough; + case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: + case V4L2_CTRL_TYPE_INTEGER: + case V4L2_CTRL_TYPE_INTEGER64: + if (step == 0 || min > max || def < min || def > max) + return -ERANGE; + return 0; + case V4L2_CTRL_TYPE_BITMASK: + if (step || min || !max || (def & ~max)) + return -ERANGE; + return 0; + case V4L2_CTRL_TYPE_MENU: + case V4L2_CTRL_TYPE_INTEGER_MENU: + if (min > max || def < min || def > max || + min < 0 || (step && max >= BITS_PER_LONG_LONG)) + return -ERANGE; + /* Note: step == menu_skip_mask for menu controls. + So here we check if the default value is masked out. 
*/ + if (def < BITS_PER_LONG_LONG && (step & BIT_ULL(def))) + return -EINVAL; + return 0; + case V4L2_CTRL_TYPE_STRING: + if (min > max || min < 0 || step < 1 || def) + return -ERANGE; + return 0; + default: + return 0; + } +} + +/* Set the handler's error code if it wasn't set earlier already */ +static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err) +{ + if (hdl->error == 0) + hdl->error = err; + return err; +} + +/* Initialize the handler */ +int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, + unsigned nr_of_controls_hint, + struct lock_class_key *key, const char *name) +{ + mutex_init(&hdl->_lock); + hdl->lock = &hdl->_lock; + lockdep_set_class_and_name(hdl->lock, key, name); + INIT_LIST_HEAD(&hdl->ctrls); + INIT_LIST_HEAD(&hdl->ctrl_refs); + hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; + hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]), + GFP_KERNEL); + hdl->error = hdl->buckets ? 0 : -ENOMEM; + v4l2_ctrl_handler_init_request(hdl); + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_init_class); + +/* Free all controls and control refs */ +int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl_ref *ref, *next_ref; + struct v4l2_ctrl *ctrl, *next_ctrl; + struct v4l2_subscribed_event *sev, *next_sev; + + if (!hdl) + return 0; + + if (!hdl->buckets) + return hdl->error; + + v4l2_ctrl_handler_free_request(hdl); + + mutex_lock(hdl->lock); + /* Free all nodes */ + list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { + list_del(&ref->node); + if (ref->p_req_array_alloc_elems) + kvfree(ref->p_req.p); + kfree(ref); + } + /* Free all controls owned by the handler */ + list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) { + list_del(&ctrl->node); + list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node) + list_del(&sev->node); + kvfree(ctrl->p_array); + kvfree(ctrl); + } + kvfree(hdl->buckets); + hdl->buckets = NULL; + hdl->cached = NULL; + 
mutex_unlock(hdl->lock); + mutex_destroy(&hdl->_lock); + + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_free); + +/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer + be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing + with applications that do not use the NEXT_CTRL flag. + + We just find the n-th private user control. It's O(N), but that should not + be an issue in this particular case. */ +static struct v4l2_ctrl_ref *find_private_ref( + struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref; + + id -= V4L2_CID_PRIVATE_BASE; + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + /* Search for private user controls that are compatible with + VIDIOC_G/S_CTRL. */ + if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && + V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) { + if (!ref->ctrl->is_int) + continue; + if (id == 0) + return ref; + id--; + } + } + return NULL; +} + +/* Find a control with the given ID. */ +struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref; + int bucket; + + id &= V4L2_CTRL_ID_MASK; + + /* Old-style private controls need special handling */ + if (id >= V4L2_CID_PRIVATE_BASE) + return find_private_ref(hdl, id); + bucket = id % hdl->nr_of_buckets; + + /* Simple optimization: cache the last control found */ + if (hdl->cached && hdl->cached->ctrl->id == id) + return hdl->cached; + + /* Not in cache, search the hash */ + ref = hdl->buckets ? hdl->buckets[bucket] : NULL; + while (ref && ref->ctrl->id != id) + ref = ref->next; + + if (ref) + hdl->cached = ref; /* cache it! */ + return ref; +} + +/* Find a control with the given ID. Take the handler's lock first. */ +struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = NULL; + + if (hdl) { + mutex_lock(hdl->lock); + ref = find_ref(hdl, id); + mutex_unlock(hdl->lock); + } + return ref; +} + +/* Find a control with the given ID. 
*/ +struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return ref ? ref->ctrl : NULL; +} +EXPORT_SYMBOL(v4l2_ctrl_find); + +/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */ +int handler_new_ref(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req) +{ + struct v4l2_ctrl_ref *ref; + struct v4l2_ctrl_ref *new_ref; + u32 id = ctrl->id; + u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; + int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ + unsigned int size_extra_req = 0; + + if (ctrl_ref) + *ctrl_ref = NULL; + + /* + * Automatically add the control class if it is not yet present and + * the new control is not a compound control. + */ + if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES && + id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL) + if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0)) + return hdl->error; + + if (hdl->error) + return hdl->error; + + if (allocate_req && !ctrl->is_array) + size_extra_req = ctrl->elems * ctrl->elem_size; + new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL); + if (!new_ref) + return handler_set_err(hdl, -ENOMEM); + new_ref->ctrl = ctrl; + new_ref->from_other_dev = from_other_dev; + if (size_extra_req) + new_ref->p_req.p = &new_ref[1]; + + INIT_LIST_HEAD(&new_ref->node); + + mutex_lock(hdl->lock); + + /* Add immediately at the end of the list if the list is empty, or if + the last element in the list has a lower ID. + This ensures that when elements are added in ascending order the + insertion is an O(1) operation. 
*/ + if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) { + list_add_tail(&new_ref->node, &hdl->ctrl_refs); + goto insert_in_hash; + } + + /* Find insert position in sorted list */ + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + if (ref->ctrl->id < id) + continue; + /* Don't add duplicates */ + if (ref->ctrl->id == id) { + kfree(new_ref); + goto unlock; + } + list_add(&new_ref->node, ref->node.prev); + break; + } + +insert_in_hash: + /* Insert the control node in the hash */ + new_ref->next = hdl->buckets[bucket]; + hdl->buckets[bucket] = new_ref; + if (ctrl_ref) + *ctrl_ref = new_ref; + if (ctrl->handler == hdl) { + /* By default each control starts in a cluster of its own. + * new_ref->ctrl is basically a cluster array with one + * element, so that's perfect to use as the cluster pointer. + * But only do this for the handler that owns the control. + */ + ctrl->cluster = &new_ref->ctrl; + ctrl->ncontrols = 1; + } + +unlock: + mutex_unlock(hdl->lock); + return 0; +} + +/* Add a new control */ +static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + const struct v4l2_ctrl_type_ops *type_ops, + u32 id, const char *name, enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def, + const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size, + u32 flags, const char * const *qmenu, + const s64 *qmenu_int, + const union v4l2_ctrl_ptr p_def, + const union v4l2_ctrl_ptr p_min, + const union v4l2_ctrl_ptr p_max, + void *priv) +{ + struct v4l2_ctrl *ctrl; + unsigned sz_extra; + unsigned nr_of_dims = 0; + unsigned elems = 1; + bool is_array; + unsigned tot_ctrl_size; + void *data; + int err; + + if (hdl->error) + return NULL; + + while (dims && dims[nr_of_dims]) { + elems *= dims[nr_of_dims]; + nr_of_dims++; + if (nr_of_dims == V4L2_CTRL_MAX_DIMS) + break; + } + is_array = nr_of_dims > 0; + + /* Prefill elem_size for all types handled by std_type_ops */ + switch ((u32)type) { + case 
V4L2_CTRL_TYPE_INTEGER64: + elem_size = sizeof(s64); + break; + case V4L2_CTRL_TYPE_STRING: + elem_size = max + 1; + break; + case V4L2_CTRL_TYPE_U8: + elem_size = sizeof(u8); + break; + case V4L2_CTRL_TYPE_U16: + elem_size = sizeof(u16); + break; + case V4L2_CTRL_TYPE_U32: + elem_size = sizeof(u32); + break; + case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence); + break; + case V4L2_CTRL_TYPE_MPEG2_PICTURE: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation); + break; + case V4L2_CTRL_TYPE_FWHT_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_fwht_params); + break; + case V4L2_CTRL_TYPE_H264_SPS: + elem_size = sizeof(struct v4l2_ctrl_h264_sps); + break; + case V4L2_CTRL_TYPE_H264_PPS: + elem_size = sizeof(struct v4l2_ctrl_h264_pps); + break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix); + break; + case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_h264_slice_params); + break; + case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_h264_decode_params); + break; + case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: + elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights); + break; + case V4L2_CTRL_TYPE_VP8_FRAME: + elem_size = sizeof(struct v4l2_ctrl_vp8_frame); + break; + case V4L2_CTRL_TYPE_HEVC_SPS: + elem_size = sizeof(struct v4l2_ctrl_hevc_sps); + break; + case V4L2_CTRL_TYPE_HEVC_PPS: + elem_size = sizeof(struct v4l2_ctrl_hevc_pps); + break; + case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params); + break; + case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: + elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix); + break; + case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params); + break; + case V4L2_CTRL_TYPE_HDR10_CLL_INFO: + elem_size = 
sizeof(struct v4l2_ctrl_hdr10_cll_info); + break; + case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: + elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display); + break; + case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: + elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr); + break; + case V4L2_CTRL_TYPE_VP9_FRAME: + elem_size = sizeof(struct v4l2_ctrl_vp9_frame); + break; + case V4L2_CTRL_TYPE_AV1_SEQUENCE: + elem_size = sizeof(struct v4l2_ctrl_av1_sequence); + break; + case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: + elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry); + break; + case V4L2_CTRL_TYPE_AV1_FRAME: + elem_size = sizeof(struct v4l2_ctrl_av1_frame); + break; + case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: + elem_size = sizeof(struct v4l2_ctrl_av1_film_grain); + break; + case V4L2_CTRL_TYPE_AREA: + elem_size = sizeof(struct v4l2_area); + break; + case V4L2_CTRL_TYPE_RECT: + elem_size = sizeof(struct v4l2_rect); + break; + default: + if (type < V4L2_CTRL_COMPOUND_TYPES) + elem_size = sizeof(s32); + break; + } + + if (type < V4L2_CTRL_COMPOUND_TYPES && + type != V4L2_CTRL_TYPE_BUTTON && + type != V4L2_CTRL_TYPE_CTRL_CLASS && + type != V4L2_CTRL_TYPE_STRING) + flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX; + + /* Sanity checks */ + if (id == 0 || name == NULL || !elem_size || + id >= V4L2_CID_PRIVATE_BASE || + (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) || + (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) { + handler_set_err(hdl, -ERANGE); + return NULL; + } + + err = check_range(type, min, max, step, def); + if (err) { + handler_set_err(hdl, err); + return NULL; + } + if (is_array && + (type == V4L2_CTRL_TYPE_BUTTON || + type == V4L2_CTRL_TYPE_CTRL_CLASS)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) { + /* + * For now only support this for one-dimensional arrays only. + * + * This can be relaxed in the future, but this will + * require more effort. 
+ */ + if (nr_of_dims != 1) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + /* Start with just 1 element */ + elems = 1; + } + + tot_ctrl_size = elem_size * elems; + sz_extra = 0; + if (type == V4L2_CTRL_TYPE_BUTTON) + flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + else if (type == V4L2_CTRL_TYPE_CTRL_CLASS) + flags |= V4L2_CTRL_FLAG_READ_ONLY; + else if (!is_array && + (type == V4L2_CTRL_TYPE_INTEGER64 || + type == V4L2_CTRL_TYPE_STRING || + type >= V4L2_CTRL_COMPOUND_TYPES)) + sz_extra += 2 * tot_ctrl_size; + + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) + sz_extra += elem_size; + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_min.p_const) + sz_extra += elem_size; + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_max.p_const) + sz_extra += elem_size; + + ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL); + if (ctrl == NULL) { + handler_set_err(hdl, -ENOMEM); + return NULL; + } + + INIT_LIST_HEAD(&ctrl->node); + INIT_LIST_HEAD(&ctrl->ev_subs); + ctrl->handler = hdl; + ctrl->ops = ops; + ctrl->type_ops = type_ops ? 
type_ops : &std_type_ops; + ctrl->id = id; + ctrl->name = name; + ctrl->type = type; + ctrl->flags = flags; + ctrl->minimum = min; + ctrl->maximum = max; + ctrl->step = step; + ctrl->default_value = def; + ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING; + ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string; + ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64; + ctrl->is_array = is_array; + ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY); + ctrl->elems = elems; + ctrl->new_elems = elems; + ctrl->nr_of_dims = nr_of_dims; + if (nr_of_dims) + memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0])); + ctrl->elem_size = elem_size; + if (type == V4L2_CTRL_TYPE_MENU) + ctrl->qmenu = qmenu; + else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) + ctrl->qmenu_int = qmenu_int; + ctrl->priv = priv; + ctrl->cur.val = ctrl->val = def; + data = &ctrl[1]; + + if (ctrl->is_array) { + ctrl->p_array_alloc_elems = elems; + ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL); + if (!ctrl->p_array) { + kvfree(ctrl); + return NULL; + } + data = ctrl->p_array; + } + + if (!ctrl->is_int) { + ctrl->p_new.p = data; + ctrl->p_cur.p = data + tot_ctrl_size; + } else { + ctrl->p_new.p = &ctrl->val; + ctrl->p_cur.p = &ctrl->cur.val; + } + + if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) { + if (ctrl->is_array) + ctrl->p_def.p = &ctrl[1]; + else + ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size; + memcpy(ctrl->p_def.p, p_def.p_const, elem_size); + } + + if (flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) { + void *ptr = ctrl->p_def.p; + + if (p_min.p_const) { + ptr += elem_size; + ctrl->p_min.p = ptr; + memcpy(ctrl->p_min.p, p_min.p_const, elem_size); + } + + if (p_max.p_const) { + ptr += elem_size; + ctrl->p_max.p = ptr; + memcpy(ctrl->p_max.p, p_max.p_const, elem_size); + } + } + + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); + cur_to_new(ctrl); + + if (handler_new_ref(hdl, ctrl, NULL, false, false)) { + kvfree(ctrl->p_array); 
+ kvfree(ctrl); + return NULL; + } + mutex_lock(hdl->lock); + list_add_tail(&ctrl->node, &hdl->ctrls); + mutex_unlock(hdl->lock); + return ctrl; +} + +struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_config *cfg, void *priv) +{ + bool is_menu; + struct v4l2_ctrl *ctrl; + const char *name = cfg->name; + const char * const *qmenu = cfg->qmenu; + const s64 *qmenu_int = cfg->qmenu_int; + enum v4l2_ctrl_type type = cfg->type; + u32 flags = cfg->flags; + s64 min = cfg->min; + s64 max = cfg->max; + u64 step = cfg->step; + s64 def = cfg->def; + + if (name == NULL) + v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step, + &def, &flags); + + is_menu = (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU); + if (is_menu) + WARN_ON(step); + else + WARN_ON(cfg->menu_skip_mask); + if (type == V4L2_CTRL_TYPE_MENU && !qmenu) { + qmenu = v4l2_ctrl_get_menu(cfg->id); + } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + + ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name, + type, min, max, + is_menu ? 
cfg->menu_skip_mask : step, def, + cfg->dims, cfg->elem_size, + flags, qmenu, qmenu_int, cfg->p_def, cfg->p_min, + cfg->p_max, priv); + if (ctrl) + ctrl->is_private = cfg->is_private; + return ctrl; +} +EXPORT_SYMBOL(v4l2_ctrl_new_custom); + +/* Helper function for standard non-menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, s64 min, s64 max, u64 step, s64 def) +{ + const char *name; + enum v4l2_ctrl_type type; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU || + type >= V4L2_CTRL_COMPOUND_TYPES) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + min, max, step, def, NULL, 0, + flags, NULL, NULL, ptr_null, ptr_null, + ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std); + +/* Helper function for standard menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 _max, u64 mask, u8 _def) +{ + const char * const *qmenu = NULL; + const s64 *qmenu_int = NULL; + unsigned int qmenu_int_len = 0; + const char *name; + enum v4l2_ctrl_type type; + s64 min; + s64 max = _max; + s64 def = _def; + u64 step; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + + if (type == V4L2_CTRL_TYPE_MENU) + qmenu = v4l2_ctrl_get_menu(id); + else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) + qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len); + + if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, mask, def, NULL, 0, + flags, qmenu, qmenu_int, ptr_null, ptr_null, + ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_menu); + +/* Helper function for standard menu controls with driver defined menu */ 
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, u32 id, u8 _max, + u64 mask, u8 _def, const char * const *qmenu) +{ + enum v4l2_ctrl_type type; + const char *name; + u32 flags; + u64 step; + s64 min; + s64 max = _max; + s64 def = _def; + + /* v4l2_ctrl_new_std_menu_items() should only be called for + * standard controls without a standard menu. + */ + if (v4l2_ctrl_get_menu(id)) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, mask, def, NULL, 0, + flags, qmenu, NULL, ptr_null, ptr_null, + ptr_null, NULL); + +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items); + +/* Helper function for standard compound controls */ +struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, u32 id, + const union v4l2_ctrl_ptr p_def, + const union v4l2_ctrl_ptr p_min, + const union v4l2_ctrl_ptr p_max) +{ + const char *name; + enum v4l2_ctrl_type type; + u32 flags; + s64 min, max, step, def; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); + if (type < V4L2_CTRL_COMPOUND_TYPES) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + min, max, step, def, NULL, 0, + flags, NULL, NULL, p_def, p_min, p_max, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_std_compound); + +/* Helper function for standard integer menu controls */ +struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 _max, u8 _def, const s64 *qmenu_int) +{ + const char *name; + enum v4l2_ctrl_type type; + s64 min; + u64 step; + s64 max = _max; + s64 def = _def; + u32 flags; + + v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, 
&flags); + if (type != V4L2_CTRL_TYPE_INTEGER_MENU) { + handler_set_err(hdl, -EINVAL); + return NULL; + } + return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, + 0, max, 0, def, NULL, 0, + flags, NULL, qmenu_int, ptr_null, ptr_null, + ptr_null, NULL); +} +EXPORT_SYMBOL(v4l2_ctrl_new_int_menu); + +/* Add the controls from another handler to our own. */ +int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *add, + bool (*filter)(const struct v4l2_ctrl *ctrl), + bool from_other_dev) +{ + struct v4l2_ctrl_ref *ref; + int ret = 0; + + /* Do nothing if either handler is NULL or if they are the same */ + if (!hdl || !add || hdl == add) + return 0; + if (hdl->error) + return hdl->error; + mutex_lock(add->lock); + list_for_each_entry(ref, &add->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + + /* Skip handler-private controls. */ + if (ctrl->is_private) + continue; + /* And control classes */ + if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + continue; + /* Filter any unwanted controls */ + if (filter && !filter(ctrl)) + continue; + ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false); + if (ret) + break; + } + mutex_unlock(add->lock); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_add_handler); + +bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) +{ + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) + return true; + if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) + return true; + switch (ctrl->id) { + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_AUDIO_LOUDNESS: + return true; + default: + break; + } + return false; +} +EXPORT_SYMBOL(v4l2_ctrl_radio_filter); + +/* Cluster controls */ +void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls) +{ + bool has_volatiles = false; + int i; + + /* The first control is the master control and it must not be NULL */ + if 
(WARN_ON(ncontrols == 0 || controls[0] == NULL)) + return; + + for (i = 0; i < ncontrols; i++) { + if (controls[i]) { + controls[i]->cluster = controls; + controls[i]->ncontrols = ncontrols; + if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE) + has_volatiles = true; + } + } + controls[0]->has_volatiles = has_volatiles; +} +EXPORT_SYMBOL(v4l2_ctrl_cluster); + +void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls, + u8 manual_val, bool set_volatile) +{ + struct v4l2_ctrl *master = controls[0]; + u32 flag = 0; + int i; + + v4l2_ctrl_cluster(ncontrols, controls); + WARN_ON(ncontrols <= 1); + WARN_ON(manual_val < master->minimum || manual_val > master->maximum); + WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl)); + master->is_auto = true; + master->has_volatiles = set_volatile; + master->manual_mode_value = manual_val; + master->flags |= V4L2_CTRL_FLAG_UPDATE; + + if (!is_cur_manual(master)) + flag = V4L2_CTRL_FLAG_INACTIVE | + (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0); + + for (i = 1; i < ncontrols; i++) + if (controls[i]) + controls[i]->flags |= flag; +} +EXPORT_SYMBOL(v4l2_ctrl_auto_cluster); + +/* + * Obtain the current volatile values of an autocluster and mark them + * as new. + */ +void update_from_auto_cluster(struct v4l2_ctrl *master) +{ + int i; + + for (i = 1; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + if (!call_op(master, g_volatile_ctrl)) + for (i = 1; i < master->ncontrols; i++) + if (master->cluster[i]) + master->cluster[i]->is_new = 1; +} + +/* + * Return non-zero if one or more of the controls in the cluster has a new + * value that differs from the current value. 
+ */ +static int cluster_changed(struct v4l2_ctrl *master) +{ + bool changed = false; + int i; + + for (i = 0; i < master->ncontrols; i++) { + struct v4l2_ctrl *ctrl = master->cluster[i]; + bool ctrl_changed = false; + + if (!ctrl) + continue; + + if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) { + changed = true; + ctrl_changed = true; + } + + /* + * Set has_changed to false to avoid generating + * the event V4L2_EVENT_CTRL_CH_VALUE + */ + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + ctrl->has_changed = false; + continue; + } + + if (ctrl->elems != ctrl->new_elems) + ctrl_changed = true; + if (!ctrl_changed) + ctrl_changed = !ctrl->type_ops->equal(ctrl, + ctrl->p_cur, ctrl->p_new); + ctrl->has_changed = ctrl_changed; + changed |= ctrl->has_changed; + } + return changed; +} + +/* + * Core function that calls try/s_ctrl and ensures that the new value is + * copied to the current value on a set. + * Must be called with ctrl->handler->lock held. + */ +int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, + bool set, u32 ch_flags) +{ + bool update_flag; + int ret; + int i; + + /* + * Go through the cluster and either validate the new value or + * (if no new value was set), copy the current value to the new + * value, ensuring a consistent view for the control ops when + * called. + */ + for (i = 0; i < master->ncontrols; i++) { + struct v4l2_ctrl *ctrl = master->cluster[i]; + + if (!ctrl) + continue; + + if (!ctrl->is_new) { + cur_to_new(ctrl); + continue; + } + /* + * Check again: it may have changed since the + * previous check in try_or_set_ext_ctrls(). + */ + if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) + return -EBUSY; + } + + ret = call_op(master, try_ctrl); + + /* Don't set if there is no change */ + if (ret || !set || !cluster_changed(master)) + return ret; + ret = call_op(master, s_ctrl); + if (ret) + return ret; + + /* If OK, then make the new values permanent. 
*/ + update_flag = is_cur_manual(master) != is_new_manual(master); + + for (i = 0; i < master->ncontrols; i++) { + /* + * If we switch from auto to manual mode, and this cluster + * contains volatile controls, then all non-master controls + * have to be marked as changed. The 'new' value contains + * the volatile value (obtained by update_from_auto_cluster), + * which now has to become the current value. + */ + if (i && update_flag && is_new_manual(master) && + master->has_volatiles && master->cluster[i]) + master->cluster[i]->has_changed = true; + + new_to_cur(fh, master->cluster[i], ch_flags | + ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0)); + } + return 0; +} + +/* Activate/deactivate a control. */ +void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active) +{ + /* invert since the actual flag is called 'inactive' */ + bool inactive = !active; + bool old; + + if (ctrl == NULL) + return; + + if (inactive) + /* set V4L2_CTRL_FLAG_INACTIVE */ + old = test_and_set_bit(4, &ctrl->flags); + else + /* clear V4L2_CTRL_FLAG_INACTIVE */ + old = test_and_clear_bit(4, &ctrl->flags); + if (old != inactive) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); +} +EXPORT_SYMBOL(v4l2_ctrl_activate); + +void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) +{ + bool old; + + if (ctrl == NULL) + return; + + lockdep_assert_held(ctrl->handler->lock); + + if (grabbed) + /* set V4L2_CTRL_FLAG_GRABBED */ + old = test_and_set_bit(1, &ctrl->flags); + else + /* clear V4L2_CTRL_FLAG_GRABBED */ + old = test_and_clear_bit(1, &ctrl->flags); + if (old != grabbed) + send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); +} +EXPORT_SYMBOL(__v4l2_ctrl_grab); + +/* Call s_ctrl for all controls owned by the handler */ +int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl *ctrl; + int ret = 0; + + if (hdl == NULL) + return 0; + + lockdep_assert_held(hdl->lock); + + list_for_each_entry(ctrl, &hdl->ctrls, node) + ctrl->done = false; + + 
list_for_each_entry(ctrl, &hdl->ctrls, node) { + struct v4l2_ctrl *master = ctrl->cluster[0]; + int i; + + /* Skip if this control was already handled by a cluster. */ + /* Skip button controls and read-only controls. */ + if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + cur_to_new(master->cluster[i]); + master->cluster[i]->is_new = 1; + master->cluster[i]->done = true; + } + } + ret = call_op(master, s_ctrl); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup); + +int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) +{ + int ret; + + if (hdl == NULL) + return 0; + + mutex_lock(hdl->lock); + ret = __v4l2_ctrl_handler_setup(hdl); + mutex_unlock(hdl->lock); + + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_handler_setup); + +/* Log the control name and value */ +static void log_ctrl(const struct v4l2_ctrl *ctrl, + const char *prefix, const char *colon) +{ + if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY)) + return; + if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + return; + + pr_info("%s%s%s: ", prefix, colon, ctrl->name); + + ctrl->type_ops->log(ctrl); + + if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE | + V4L2_CTRL_FLAG_GRABBED | + V4L2_CTRL_FLAG_VOLATILE)) { + if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) + pr_cont(" inactive"); + if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED) + pr_cont(" grabbed"); + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) + pr_cont(" volatile"); + } + pr_cont("\n"); +} + +/* Log all controls owned by the handler */ +void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, + const char *prefix) +{ + struct v4l2_ctrl *ctrl; + const char *colon = ""; + int len; + + if (!hdl) + return; + if (!prefix) + prefix = ""; + len = strlen(prefix); + if (len && prefix[len - 1] != ' ') + colon = ": "; + mutex_lock(hdl->lock); + list_for_each_entry(ctrl, 
&hdl->ctrls, node) + if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED)) + log_ctrl(ctrl, prefix, colon); + mutex_unlock(hdl->lock); +} +EXPORT_SYMBOL(v4l2_ctrl_handler_log_status); + +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p) +{ + if (hdl->error) + return hdl->error; + + if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) { + u32 orientation_ctrl; + + switch (p->orientation) { + case V4L2_FWNODE_ORIENTATION_FRONT: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT; + break; + case V4L2_FWNODE_ORIENTATION_BACK: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK; + break; + case V4L2_FWNODE_ORIENTATION_EXTERNAL: + orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL; + break; + default: + return -EINVAL; + } + if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops, + V4L2_CID_CAMERA_ORIENTATION, + V4L2_CAMERA_ORIENTATION_EXTERNAL, 0, + orientation_ctrl)) + return hdl->error; + } + + if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) { + if (!v4l2_ctrl_new_std(hdl, ctrl_ops, + V4L2_CID_CAMERA_SENSOR_ROTATION, + p->rotation, p->rotation, 1, + p->rotation)) + return hdl->error; + } + + return hdl->error; +} +EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c new file mode 100644 index 0000000..ad41f65 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-defs.c @@ -0,0 +1,1685 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework control definitions. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#include +#include + +/* + * Returns NULL or a character pointer array containing the menu for + * the given control ID. The pointer array ends with a NULL pointer. + * An empty string signifies a menu entry that is invalid. This allows + * drivers to disable certain options if it is not supported. 
+ */ +const char * const *v4l2_ctrl_get_menu(u32 id) +{ + static const char * const mpeg_audio_sampling_freq[] = { + "44.1 kHz", + "48 kHz", + "32 kHz", + NULL + }; + static const char * const mpeg_audio_encoding[] = { + "MPEG-1/2 Layer I", + "MPEG-1/2 Layer II", + "MPEG-1/2 Layer III", + "MPEG-2/4 AAC", + "AC-3", + NULL + }; + static const char * const mpeg_audio_l1_bitrate[] = { + "32 kbps", + "64 kbps", + "96 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "288 kbps", + "320 kbps", + "352 kbps", + "384 kbps", + "416 kbps", + "448 kbps", + NULL + }; + static const char * const mpeg_audio_l2_bitrate[] = { + "32 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + "384 kbps", + NULL + }; + static const char * const mpeg_audio_l3_bitrate[] = { + "32 kbps", + "40 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + NULL + }; + static const char * const mpeg_audio_ac3_bitrate[] = { + "32 kbps", + "40 kbps", + "48 kbps", + "56 kbps", + "64 kbps", + "80 kbps", + "96 kbps", + "112 kbps", + "128 kbps", + "160 kbps", + "192 kbps", + "224 kbps", + "256 kbps", + "320 kbps", + "384 kbps", + "448 kbps", + "512 kbps", + "576 kbps", + "640 kbps", + NULL + }; + static const char * const mpeg_audio_mode[] = { + "Stereo", + "Joint Stereo", + "Dual", + "Mono", + NULL + }; + static const char * const mpeg_audio_mode_extension[] = { + "Bound 4", + "Bound 8", + "Bound 12", + "Bound 16", + NULL + }; + static const char * const mpeg_audio_emphasis[] = { + "No Emphasis", + "50/15 us", + "CCITT J17", + NULL + }; + static const char * const mpeg_audio_crc[] = { + "No CRC", + "16-bit CRC", + NULL + }; + static const char * const mpeg_audio_dec_playback[] = { + "Auto", + "Stereo", + "Left", + "Right", + "Mono", + "Swapped 
Stereo", + NULL + }; + static const char * const mpeg_video_encoding[] = { + "MPEG-1", + "MPEG-2", + "MPEG-4 AVC", + NULL + }; + static const char * const mpeg_video_aspect[] = { + "1x1", + "4x3", + "16x9", + "2.21x1", + NULL + }; + static const char * const mpeg_video_bitrate_mode[] = { + "Variable Bitrate", + "Constant Bitrate", + "Constant Quality", + NULL + }; + static const char * const mpeg_stream_type[] = { + "MPEG-2 Program Stream", + "MPEG-2 Transport Stream", + "MPEG-1 System Stream", + "MPEG-2 DVD-compatible Stream", + "MPEG-1 VCD-compatible Stream", + "MPEG-2 SVCD-compatible Stream", + NULL + }; + static const char * const mpeg_stream_vbi_fmt[] = { + "No VBI", + "Private Packet, IVTV Format", + NULL + }; + static const char * const camera_power_line_frequency[] = { + "Disabled", + "50 Hz", + "60 Hz", + "Auto", + NULL + }; + static const char * const camera_exposure_auto[] = { + "Auto Mode", + "Manual Mode", + "Shutter Priority Mode", + "Aperture Priority Mode", + NULL + }; + static const char * const camera_exposure_metering[] = { + "Average", + "Center Weighted", + "Spot", + "Matrix", + NULL + }; + static const char * const camera_auto_focus_range[] = { + "Auto", + "Normal", + "Macro", + "Infinity", + NULL + }; + static const char * const colorfx[] = { + "None", + "Black & White", + "Sepia", + "Negative", + "Emboss", + "Sketch", + "Sky Blue", + "Grass Green", + "Skin Whiten", + "Vivid", + "Aqua", + "Art Freeze", + "Silhouette", + "Solarization", + "Antique", + "Set Cb/Cr", + NULL + }; + static const char * const auto_n_preset_white_balance[] = { + "Manual", + "Auto", + "Incandescent", + "Fluorescent", + "Fluorescent H", + "Horizon", + "Daylight", + "Flash", + "Cloudy", + "Shade", + NULL, + }; + static const char * const camera_iso_sensitivity_auto[] = { + "Manual", + "Auto", + NULL + }; + static const char * const scene_mode[] = { + "None", + "Backlight", + "Beach/Snow", + "Candle Light", + "Dusk/Dawn", + "Fall Colors", + "Fireworks", + "Landscape", + 
"Night", + "Party/Indoor", + "Portrait", + "Sports", + "Sunset", + "Text", + NULL + }; + static const char * const tune_emphasis[] = { + "None", + "50 Microseconds", + "75 Microseconds", + NULL, + }; + static const char * const header_mode[] = { + "Separate Buffer", + "Joined With 1st Frame", + NULL, + }; + static const char * const multi_slice[] = { + "Single", + "Max Macroblocks", + "Max Bytes", + NULL, + }; + static const char * const entropy_mode[] = { + "CAVLC", + "CABAC", + NULL, + }; + static const char * const mpeg_h264_level[] = { + "1", + "1b", + "1.1", + "1.2", + "1.3", + "2", + "2.1", + "2.2", + "3", + "3.1", + "3.2", + "4", + "4.1", + "4.2", + "5", + "5.1", + "5.2", + "6.0", + "6.1", + "6.2", + NULL, + }; + static const char * const h264_loop_filter[] = { + "Enabled", + "Disabled", + "Disabled at Slice Boundary", + NULL, + }; + static const char * const h264_profile[] = { + "Baseline", + "Constrained Baseline", + "Main", + "Extended", + "High", + "High 10", + "High 422", + "High 444 Predictive", + "High 10 Intra", + "High 422 Intra", + "High 444 Intra", + "CAVLC 444 Intra", + "Scalable Baseline", + "Scalable High", + "Scalable High Intra", + "Stereo High", + "Multiview High", + "Constrained High", + NULL, + }; + static const char * const vui_sar_idc[] = { + "Unspecified", + "1:1", + "12:11", + "10:11", + "16:11", + "40:33", + "24:11", + "20:11", + "32:11", + "80:33", + "18:11", + "15:11", + "64:33", + "160:99", + "4:3", + "3:2", + "2:1", + "Extended SAR", + NULL, + }; + static const char * const h264_fp_arrangement_type[] = { + "Checkerboard", + "Column", + "Row", + "Side by Side", + "Top Bottom", + "Temporal", + NULL, + }; + static const char * const h264_fmo_map_type[] = { + "Interleaved Slices", + "Scattered Slices", + "Foreground with Leftover", + "Box Out", + "Raster Scan", + "Wipe Scan", + "Explicit", + NULL, + }; + static const char * const h264_decode_mode[] = { + "Slice-Based", + "Frame-Based", + NULL, + }; + static const char * const 
h264_start_code[] = { + "No Start Code", + "Annex B Start Code", + NULL, + }; + static const char * const h264_hierarchical_coding_type[] = { + "Hier Coding B", + "Hier Coding P", + NULL, + }; + static const char * const mpeg_mpeg2_level[] = { + "Low", + "Main", + "High 1440", + "High", + NULL, + }; + static const char * const mpeg2_profile[] = { + "Simple", + "Main", + "SNR Scalable", + "Spatially Scalable", + "High", + NULL, + }; + static const char * const mpeg_mpeg4_level[] = { + "0", + "0b", + "1", + "2", + "3", + "3b", + "4", + "5", + NULL, + }; + static const char * const mpeg4_profile[] = { + "Simple", + "Advanced Simple", + "Core", + "Simple Scalable", + "Advanced Coding Efficiency", + NULL, + }; + + static const char * const vpx_golden_frame_sel[] = { + "Use Previous Frame", + "Use Previous Specific Frame", + NULL, + }; + static const char * const vp8_profile[] = { + "0", + "1", + "2", + "3", + NULL, + }; + static const char * const vp9_profile[] = { + "0", + "1", + "2", + "3", + NULL, + }; + static const char * const vp9_level[] = { + "1", + "1.1", + "2", + "2.1", + "3", + "3.1", + "4", + "4.1", + "5", + "5.1", + "5.2", + "6", + "6.1", + "6.2", + NULL, + }; + + static const char * const flash_led_mode[] = { + "Off", + "Flash", + "Torch", + NULL, + }; + static const char * const flash_strobe_source[] = { + "Software", + "External", + NULL, + }; + + static const char * const jpeg_chroma_subsampling[] = { + "4:4:4", + "4:2:2", + "4:2:0", + "4:1:1", + "4:1:0", + "Gray", + NULL, + }; + static const char * const dv_tx_mode[] = { + "DVI-D", + "HDMI", + NULL, + }; + static const char * const dv_rgb_range[] = { + "Automatic", + "RGB Limited Range (16-235)", + "RGB Full Range (0-255)", + NULL, + }; + static const char * const dv_it_content_type[] = { + "Graphics", + "Photo", + "Cinema", + "Game", + "No IT Content", + NULL, + }; + static const char * const detect_md_mode[] = { + "Disabled", + "Global", + "Threshold Grid", + "Region Grid", + NULL, + }; + + static 
const char * const av1_profile[] = { + "Main", + "High", + "Professional", + NULL, + }; + static const char * const av1_level[] = { + "2.0", + "2.1", + "2.2", + "2.3", + "3.0", + "3.1", + "3.2", + "3.3", + "4.0", + "4.1", + "4.2", + "4.3", + "5.0", + "5.1", + "5.2", + "5.3", + "6.0", + "6.1", + "6.2", + "6.3", + "7.0", + "7.1", + "7.2", + "7.3", + NULL, + }; + + static const char * const hevc_profile[] = { + "Main", + "Main Still Picture", + "Main 10", + NULL, + }; + static const char * const hevc_level[] = { + "1", + "2", + "2.1", + "3", + "3.1", + "4", + "4.1", + "5", + "5.1", + "5.2", + "6", + "6.1", + "6.2", + NULL, + }; + static const char * const hevc_hierarchial_coding_type[] = { + "B", + "P", + NULL, + }; + static const char * const hevc_refresh_type[] = { + "None", + "CRA", + "IDR", + NULL, + }; + static const char * const hevc_size_of_length_field[] = { + "0", + "1", + "2", + "4", + NULL, + }; + static const char * const hevc_tier[] = { + "Main", + "High", + NULL, + }; + static const char * const hevc_loop_filter_mode[] = { + "Disabled", + "Enabled", + "Disabled at slice boundary", + "NULL", + }; + static const char * const hevc_decode_mode[] = { + "Slice-Based", + "Frame-Based", + NULL, + }; + static const char * const hevc_start_code[] = { + "No Start Code", + "Annex B Start Code", + NULL, + }; + static const char * const camera_orientation[] = { + "Front", + "Back", + "External", + NULL, + }; + static const char * const mpeg_video_frame_skip[] = { + "Disabled", + "Level Limit", + "VBV/CPB Limit", + NULL, + }; + static const char * const intra_refresh_period_type[] = { + "Random", + "Cyclic", + NULL, + }; + + switch (id) { + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + return mpeg_audio_sampling_freq; + case V4L2_CID_MPEG_AUDIO_ENCODING: + return mpeg_audio_encoding; + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + return mpeg_audio_l1_bitrate; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + return mpeg_audio_l2_bitrate; + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + return 
mpeg_audio_l3_bitrate; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + return mpeg_audio_ac3_bitrate; + case V4L2_CID_MPEG_AUDIO_MODE: + return mpeg_audio_mode; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + return mpeg_audio_mode_extension; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + return mpeg_audio_emphasis; + case V4L2_CID_MPEG_AUDIO_CRC: + return mpeg_audio_crc; + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: + case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: + return mpeg_audio_dec_playback; + case V4L2_CID_MPEG_VIDEO_ENCODING: + return mpeg_video_encoding; + case V4L2_CID_MPEG_VIDEO_ASPECT: + return mpeg_video_aspect; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + return mpeg_video_bitrate_mode; + case V4L2_CID_MPEG_STREAM_TYPE: + return mpeg_stream_type; + case V4L2_CID_MPEG_STREAM_VBI_FMT: + return mpeg_stream_vbi_fmt; + case V4L2_CID_POWER_LINE_FREQUENCY: + return camera_power_line_frequency; + case V4L2_CID_EXPOSURE_AUTO: + return camera_exposure_auto; + case V4L2_CID_EXPOSURE_METERING: + return camera_exposure_metering; + case V4L2_CID_AUTO_FOCUS_RANGE: + return camera_auto_focus_range; + case V4L2_CID_COLORFX: + return colorfx; + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: + return auto_n_preset_white_balance; + case V4L2_CID_ISO_SENSITIVITY_AUTO: + return camera_iso_sensitivity_auto; + case V4L2_CID_SCENE_MODE: + return scene_mode; + case V4L2_CID_TUNE_PREEMPHASIS: + return tune_emphasis; + case V4L2_CID_TUNE_DEEMPHASIS: + return tune_emphasis; + case V4L2_CID_FLASH_LED_MODE: + return flash_led_mode; + case V4L2_CID_FLASH_STROBE_SOURCE: + return flash_strobe_source; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + return header_mode; + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: + return mpeg_video_frame_skip; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + return multi_slice; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + return entropy_mode; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + return mpeg_h264_level; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + return 
h264_loop_filter; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + return h264_profile; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: + return vui_sar_idc; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: + return h264_fp_arrangement_type; + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: + return h264_fmo_map_type; + case V4L2_CID_STATELESS_H264_DECODE_MODE: + return h264_decode_mode; + case V4L2_CID_STATELESS_H264_START_CODE: + return h264_start_code; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: + return h264_hierarchical_coding_type; + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: + return mpeg_mpeg2_level; + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: + return mpeg2_profile; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + return mpeg_mpeg4_level; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + return mpeg4_profile; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: + return vpx_golden_frame_sel; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + return vp8_profile; + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + return vp9_profile; + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + return vp9_level; + case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: + return jpeg_chroma_subsampling; + case V4L2_CID_DV_TX_MODE: + return dv_tx_mode; + case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_RX_RGB_RANGE: + return dv_rgb_range; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + return dv_it_content_type; + case V4L2_CID_DETECT_MD_MODE: + return detect_md_mode; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + return hevc_profile; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + return hevc_level; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: + return hevc_hierarchial_coding_type; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: + return hevc_refresh_type; + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: + return hevc_size_of_length_field; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + return hevc_tier; + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: + return hevc_loop_filter_mode; + case 
V4L2_CID_MPEG_VIDEO_AV1_PROFILE: + return av1_profile; + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: + return av1_level; + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: + return hevc_decode_mode; + case V4L2_CID_STATELESS_HEVC_START_CODE: + return hevc_start_code; + case V4L2_CID_CAMERA_ORIENTATION: + return camera_orientation; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + return intra_refresh_period_type; + default: + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_menu); + +#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); }) +/* + * Returns NULL or an s64 type array containing the menu for given + * control ID. The total number of the menu items is returned in @len. + */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) +{ + static const s64 qmenu_int_vpx_num_partitions[] = { + 1, 2, 4, 8, + }; + + static const s64 qmenu_int_vpx_num_ref_frames[] = { + 1, 2, 3, + }; + + switch (id) { + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: + return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len); + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: + return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len); + default: + *len = 0; + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); + +/* Return the control name. */ +const char *v4l2_ctrl_get_name(u32 id) +{ + switch (id) { + /* USER controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_USER_CLASS: return "User Controls"; + case V4L2_CID_BRIGHTNESS: return "Brightness"; + case V4L2_CID_CONTRAST: return "Contrast"; + case V4L2_CID_SATURATION: return "Saturation"; + case V4L2_CID_HUE: return "Hue"; + case V4L2_CID_AUDIO_VOLUME: return "Volume"; + case V4L2_CID_AUDIO_BALANCE: return "Balance"; + case V4L2_CID_AUDIO_BASS: return "Bass"; + case V4L2_CID_AUDIO_TREBLE: return "Treble"; + case V4L2_CID_AUDIO_MUTE: return "Mute"; + case V4L2_CID_AUDIO_LOUDNESS: return "Loudness"; + case V4L2_CID_BLACK_LEVEL: return "Black Level"; + case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic"; + case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance"; + case V4L2_CID_RED_BALANCE: return "Red Balance"; + case V4L2_CID_BLUE_BALANCE: return "Blue Balance"; + case V4L2_CID_GAMMA: return "Gamma"; + case V4L2_CID_EXPOSURE: return "Exposure"; + case V4L2_CID_AUTOGAIN: return "Gain, Automatic"; + case V4L2_CID_GAIN: return "Gain"; + case V4L2_CID_HFLIP: return "Horizontal Flip"; + case V4L2_CID_VFLIP: return "Vertical Flip"; + case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency"; + case V4L2_CID_HUE_AUTO: return "Hue, Automatic"; + case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature"; + case V4L2_CID_SHARPNESS: return "Sharpness"; + case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation"; + case V4L2_CID_CHROMA_AGC: return "Chroma AGC"; + case V4L2_CID_COLOR_KILLER: return "Color Killer"; + case V4L2_CID_COLORFX: return "Color Effects"; + case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic"; + case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter"; + case V4L2_CID_ROTATE: return "Rotate"; + case V4L2_CID_BG_COLOR: return "Background Color"; + case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; + case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; + case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture 
Buffers"; + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers"; + case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; + case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr"; + case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB"; + + /* + * Codec controls + * + * The MPEG controls are applicable to all codec controls + * and the 'MPEG' part of the define is historical. + * + * Keep the order of the 'case's the same as in videodev2.h! + */ + case V4L2_CID_CODEC_CLASS: return "Codec Controls"; + case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type"; + case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID"; + case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID"; + case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID"; + case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID"; + case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID"; + case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID"; + case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format"; + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency"; + case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding"; + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate"; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate"; + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate"; + case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode"; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension"; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis"; + case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC"; + case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute"; + case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate"; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate"; + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback"; + case 
V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback"; + case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding"; + case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect"; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames"; + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size"; + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure"; + case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown"; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode"; + case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality"; + case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate"; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate"; + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation"; + case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute"; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; + case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; + case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; + case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period"; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; + case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics"; + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode"; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay"; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable"; + case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters"; + case 
V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; + case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode"; + case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period"; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset"; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode"; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. 
Frame as Frame0"; + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type"; + case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp"; + case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs"; + case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering"; + case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP: + return "H264 Set QP Value for HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: + return "H264 Constrained Intra Pred"; + case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate"; + case 
V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate"; + case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate"; + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level"; + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile"; + case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; + case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method"; + case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; + case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS"; + case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count"; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color"; + case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control"; + case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range"; + case 
V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; + case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; + case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID"; + case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count"; + case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index"; + case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames"; + case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: return "Average QP Value"; + case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value"; + case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value"; + + /* VPX controls */ + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; + case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable"; + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame"; + case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range"; + case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control"; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period"; + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator"; + case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile"; + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile"; + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level"; + + /* HEVC controls */ + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value"; + case 
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value"; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile"; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level"; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier"; + case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth"; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type"; + case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction"; + case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding"; + case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront"; + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC 
Hierarchical Layer 2 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate"; + case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB"; + case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID"; + case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing"; + case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split"; + case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction"; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs"; + case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode"; + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR"; + case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset"; + case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset"; + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field"; + case 
V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame"; + case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR"; + + /* AV1 controls */ + case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile"; + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level"; + + /* CAMERA controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_CAMERA_CLASS: return "Camera Controls"; + case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure"; + case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute"; + case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate"; + case V4L2_CID_PAN_RELATIVE: return "Pan, Relative"; + case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative"; + case V4L2_CID_PAN_RESET: return "Pan, Reset"; + case V4L2_CID_TILT_RESET: return "Tilt, Reset"; + case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute"; + case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute"; + case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute"; + case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative"; + case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous"; + case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute"; + case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative"; + case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous"; + case V4L2_CID_PRIVACY: return "Privacy"; + case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute"; + case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative"; + case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias"; + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset"; + case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range"; + case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization"; + case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity"; + case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto"; + case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode"; + case 
V4L2_CID_SCENE_MODE: return "Scene Mode"; + case V4L2_CID_3A_LOCK: return "3A Lock"; + case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start"; + case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop"; + case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status"; + case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range"; + case V4L2_CID_PAN_SPEED: return "Pan, Speed"; + case V4L2_CID_TILT_SPEED: return "Tilt, Speed"; + case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size"; + case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation"; + case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation"; + case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode"; + + /* FM Radio Modulator controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls"; + case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation"; + case V4L2_CID_RDS_TX_PI: return "RDS Program ID"; + case V4L2_CID_RDS_TX_PTY: return "RDS Program Type"; + case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name"; + case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text"; + case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo"; + case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head"; + case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed"; + case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY"; + case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; + case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; + case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music"; + case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies"; + case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies"; + case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; + case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; + case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; + case 
V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled"; + case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; + case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; + case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; + case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time"; + case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; + case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; + case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; + case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis"; + case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; + case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; + + /* Flash controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_FLASH_CLASS: return "Flash Controls"; + case V4L2_CID_FLASH_LED_MODE: return "LED Mode"; + case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source"; + case V4L2_CID_FLASH_STROBE: return "Strobe"; + case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe"; + case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status"; + case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout"; + case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode"; + case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode"; + case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator"; + case V4L2_CID_FLASH_FAULT: return "Faults"; + case V4L2_CID_FLASH_CHARGE: return "Charge"; + case V4L2_CID_FLASH_READY: return "Ready to Strobe"; + + /* JPEG encoder controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls"; + case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling"; + case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval"; + case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality"; + case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers"; + + /* Image source controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls"; + case V4L2_CID_VBLANK: return "Vertical Blanking"; + case V4L2_CID_HBLANK: return "Horizontal Blanking"; + case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain"; + case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value"; + case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value"; + case V4L2_CID_NOTIFY_GAINS: return "Notify Gains"; + + /* Image processing controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls"; + case V4L2_CID_LINK_FREQ: return "Link Frequency"; + case V4L2_CID_PIXEL_RATE: return "Pixel Rate"; + case V4L2_CID_TEST_PATTERN: return "Test Pattern"; + case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode"; + case V4L2_CID_DIGITAL_GAIN: return "Digital Gain"; + + /* DV controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_DV_CLASS: return "Digital Video Controls"; + case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present"; + case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present"; + case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present"; + case V4L2_CID_DV_TX_MODE: return "Transmit Mode"; + case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range"; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type"; + case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present"; + case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range"; + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type"; + + case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls"; + case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis"; + case V4L2_CID_RDS_RECEPTION: return "RDS Reception"; + case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls"; + case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain"; + case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto"; + case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain"; + case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto"; + case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain"; + case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto"; + case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain"; + case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto"; + case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth"; + case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock"; + case V4L2_CID_RDS_RX_PTY: return "RDS Program Type"; + case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name"; + case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text"; + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; + case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music"; + + /* Detection controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ + case V4L2_CID_DETECT_CLASS: return "Detection Controls"; + case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode"; + case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold"; + case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid"; + case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid"; + + /* Stateless Codec controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls"; + case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode"; + case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code"; + case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set"; + case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set"; + case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix"; + case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table"; + case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters"; + case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters"; + case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters"; + case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters"; + case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header"; + case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header"; + case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices"; + case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates"; + case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters"; + case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set"; + case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set"; + case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters"; + case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix"; + case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: 
return "HEVC Decode Parameters"; + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode"; + case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code"; + case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets"; + case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters"; + case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry"; + case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters"; + case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain"; + + /* Colorimetry controls */ + /* Keep the order of the 'case's the same as in v4l2-controls.h! */ + case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls"; + case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info"; + case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display"; + default: + return NULL; + } +} +EXPORT_SYMBOL(v4l2_ctrl_get_name); + +void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags) +{ + *name = v4l2_ctrl_get_name(id); + *flags = 0; + + switch (id) { + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_LOUDNESS: + case V4L2_CID_AUTO_WHITE_BALANCE: + case V4L2_CID_AUTOGAIN: + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + case V4L2_CID_HUE_AUTO: + case V4L2_CID_CHROMA_AGC: + case V4L2_CID_COLOR_KILLER: + case V4L2_CID_AUTOBRIGHTNESS: + case V4L2_CID_MPEG_AUDIO_MUTE: + case V4L2_CID_MPEG_VIDEO_MUTE: + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + case V4L2_CID_MPEG_VIDEO_PULLDOWN: + case V4L2_CID_EXPOSURE_AUTO_PRIORITY: + case V4L2_CID_FOCUS_AUTO: + case V4L2_CID_PRIVACY: + case V4L2_CID_AUDIO_LIMITER_ENABLED: + case V4L2_CID_AUDIO_COMPRESSION_ENABLED: + case V4L2_CID_PILOT_TONE_ENABLED: + case V4L2_CID_ILLUMINATORS_1: + case V4L2_CID_ILLUMINATORS_2: + case V4L2_CID_FLASH_STROBE_STATUS: + case V4L2_CID_FLASH_CHARGE: + case V4L2_CID_FLASH_READY: + case 
V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: + case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: + case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: + case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: + case V4L2_CID_WIDE_DYNAMIC_RANGE: + case V4L2_CID_IMAGE_STABILIZATION: + case V4L2_CID_RDS_RECEPTION: + case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: + case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: + case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: + case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: + case V4L2_CID_RF_TUNER_PLL_LOCK: + case V4L2_CID_RDS_TX_MONO_STEREO: + case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: + case V4L2_CID_RDS_TX_COMPRESSED: + case V4L2_CID_RDS_TX_DYNAMIC_PTY: + case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_TX_MUSIC_SPEECH: + case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_RX_MUSIC_SPEECH: + *type = V4L2_CTRL_TYPE_BOOLEAN; + *min = 0; + *max = *step = 1; + break; + case V4L2_CID_ROTATE: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; + break; + case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: + case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + case V4L2_CID_MPEG_VIDEO_LTR_COUNT: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: + *type = V4L2_CTRL_TYPE_BITMASK; + *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: + 
case V4L2_CID_PAN_RESET: + case V4L2_CID_TILT_RESET: + case V4L2_CID_FLASH_STROBE: + case V4L2_CID_FLASH_STROBE_STOP: + case V4L2_CID_AUTO_FOCUS_START: + case V4L2_CID_AUTO_FOCUS_STOP: + case V4L2_CID_DO_WHITE_BALANCE: + *type = V4L2_CTRL_TYPE_BUTTON; + *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + *min = *max = *step = *def = 0; + break; + case V4L2_CID_POWER_LINE_FREQUENCY: + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + case V4L2_CID_MPEG_AUDIO_ENCODING: + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + case V4L2_CID_MPEG_AUDIO_MODE: + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + case V4L2_CID_MPEG_AUDIO_CRC: + case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: + case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: + case V4L2_CID_MPEG_VIDEO_ENCODING: + case V4L2_CID_MPEG_VIDEO_ASPECT: + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_STREAM_TYPE: + case V4L2_CID_MPEG_STREAM_VBI_FMT: + case V4L2_CID_EXPOSURE_AUTO: + case V4L2_CID_AUTO_FOCUS_RANGE: + case V4L2_CID_COLORFX: + case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: + case V4L2_CID_TUNE_PREEMPHASIS: + case V4L2_CID_FLASH_LED_MODE: + case V4L2_CID_FLASH_STROBE_SOURCE: + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: + case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: + case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: + case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: + case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case 
V4L2_CID_JPEG_CHROMA_SUBSAMPLING: + case V4L2_CID_ISO_SENSITIVITY_AUTO: + case V4L2_CID_EXPOSURE_METERING: + case V4L2_CID_SCENE_MODE: + case V4L2_CID_DV_TX_MODE: + case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_RGB_RANGE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + case V4L2_CID_TEST_PATTERN: + case V4L2_CID_DEINTERLACING_MODE: + case V4L2_CID_TUNE_DEEMPHASIS: + case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + case V4L2_CID_DETECT_MD_MODE: + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: + case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: + case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: + case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: + case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: + case V4L2_CID_STATELESS_HEVC_DECODE_MODE: + case V4L2_CID_STATELESS_HEVC_START_CODE: + case V4L2_CID_STATELESS_H264_DECODE_MODE: + case V4L2_CID_STATELESS_H264_START_CODE: + case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + case V4L2_CID_HDR_SENSOR_MODE: + *type = V4L2_CTRL_TYPE_MENU; + break; + case V4L2_CID_LINK_FREQ: + *type = V4L2_CTRL_TYPE_INTEGER_MENU; + break; + case V4L2_CID_RDS_TX_PS_NAME: + case V4L2_CID_RDS_TX_RADIO_TEXT: + case V4L2_CID_RDS_RX_PS_NAME: + case V4L2_CID_RDS_RX_RADIO_TEXT: + *type = V4L2_CTRL_TYPE_STRING; + break; + case V4L2_CID_ISO_SENSITIVITY: + case V4L2_CID_AUTO_EXPOSURE_BIAS: + case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: + case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: + *type = V4L2_CTRL_TYPE_INTEGER_MENU; + break; + case V4L2_CID_USER_CLASS: + case V4L2_CID_CAMERA_CLASS: + case V4L2_CID_CODEC_CLASS: + case V4L2_CID_FM_TX_CLASS: + case V4L2_CID_FLASH_CLASS: + case V4L2_CID_JPEG_CLASS: + case 
V4L2_CID_IMAGE_SOURCE_CLASS: + case V4L2_CID_IMAGE_PROC_CLASS: + case V4L2_CID_DV_CLASS: + case V4L2_CID_FM_RX_CLASS: + case V4L2_CID_RF_TUNER_CLASS: + case V4L2_CID_DETECT_CLASS: + case V4L2_CID_CODEC_STATELESS_CLASS: + case V4L2_CID_COLORIMETRY_CLASS: + *type = V4L2_CTRL_TYPE_CTRL_CLASS; + /* You can neither read nor write these */ + *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; + *min = *max = *step = *def = 0; + break; + case V4L2_CID_BG_COLOR: + case V4L2_CID_COLORFX_RGB: + *type = V4L2_CTRL_TYPE_INTEGER; + *step = 1; + *min = 0; + /* Max is calculated as RGB888 that is 2^24 - 1 */ + *max = 0xffffff; + break; + case V4L2_CID_COLORFX_CBCR: + *type = V4L2_CTRL_TYPE_INTEGER; + *step = 1; + *min = 0; + *max = 0xffff; + break; + case V4L2_CID_FLASH_FAULT: + case V4L2_CID_JPEG_ACTIVE_MARKER: + case V4L2_CID_3A_LOCK: + case V4L2_CID_AUTO_FOCUS_STATUS: + case V4L2_CID_DV_TX_HOTPLUG: + case V4L2_CID_DV_TX_RXSENSE: + case V4L2_CID_DV_TX_EDID_PRESENT: + case V4L2_CID_DV_RX_POWER_PRESENT: + *type = V4L2_CTRL_TYPE_BITMASK; + break; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_MPEG_VIDEO_DEC_PTS: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; + *min = *def = 0; + *max = 0x1ffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_DEC_FRAME: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; + *min = *def = 0; + *max = 0x7fffffffffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: + *type = V4L2_CTRL_TYPE_INTEGER64; + *min = 0; + /* default for 8 bit black, luma is 16, chroma is 128 */ + *def = 0x8000800010LL; + *max = 0xffffffffffffLL; + *step = 1; + break; + case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: + *type = V4L2_CTRL_TYPE_INTEGER; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case 
V4L2_CID_PIXEL_RATE: + *type = V4L2_CTRL_TYPE_INTEGER64; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_DETECT_MD_REGION_GRID: + *type = V4L2_CTRL_TYPE_U8; + break; + case V4L2_CID_DETECT_MD_THRESHOLD_GRID: + *type = V4L2_CTRL_TYPE_U16; + break; + case V4L2_CID_RDS_TX_ALT_FREQS: + *type = V4L2_CTRL_TYPE_U32; + break; + case V4L2_CID_STATELESS_MPEG2_SEQUENCE: + *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE; + break; + case V4L2_CID_STATELESS_MPEG2_PICTURE: + *type = V4L2_CTRL_TYPE_MPEG2_PICTURE; + break; + case V4L2_CID_STATELESS_MPEG2_QUANTISATION: + *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION; + break; + case V4L2_CID_STATELESS_FWHT_PARAMS: + *type = V4L2_CTRL_TYPE_FWHT_PARAMS; + break; + case V4L2_CID_STATELESS_H264_SPS: + *type = V4L2_CTRL_TYPE_H264_SPS; + break; + case V4L2_CID_STATELESS_H264_PPS: + *type = V4L2_CTRL_TYPE_H264_PPS; + break; + case V4L2_CID_STATELESS_H264_SCALING_MATRIX: + *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX; + break; + case V4L2_CID_STATELESS_H264_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS; + break; + case V4L2_CID_STATELESS_H264_DECODE_PARAMS: + *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS; + break; + case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: + *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS; + break; + case V4L2_CID_STATELESS_VP8_FRAME: + *type = V4L2_CTRL_TYPE_VP8_FRAME; + break; + case V4L2_CID_STATELESS_HEVC_SPS: + *type = V4L2_CTRL_TYPE_HEVC_SPS; + break; + case V4L2_CID_STATELESS_HEVC_PPS: + *type = V4L2_CTRL_TYPE_HEVC_PPS; + break; + case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: + *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX; + break; + case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: + *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS; + break; + case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: + *type = V4L2_CTRL_TYPE_U32; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case 
V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: + *type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR; + break; + case V4L2_CID_STATELESS_VP9_FRAME: + *type = V4L2_CTRL_TYPE_VP9_FRAME; + break; + case V4L2_CID_STATELESS_AV1_SEQUENCE: + *type = V4L2_CTRL_TYPE_AV1_SEQUENCE; + break; + case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: + *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY; + *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; + break; + case V4L2_CID_STATELESS_AV1_FRAME: + *type = V4L2_CTRL_TYPE_AV1_FRAME; + break; + case V4L2_CID_STATELESS_AV1_FILM_GRAIN: + *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN; + break; + case V4L2_CID_UNIT_CELL_SIZE: + *type = V4L2_CTRL_TYPE_AREA; + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: + *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO; + break; + case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: + *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY; + break; + default: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + } + switch (id) { + case V4L2_CID_MPEG_AUDIO_ENCODING: + case V4L2_CID_MPEG_AUDIO_MODE: + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + case V4L2_CID_MPEG_STREAM_TYPE: + *flags |= V4L2_CTRL_FLAG_UPDATE; + break; + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_BRIGHTNESS: + case V4L2_CID_CONTRAST: + case V4L2_CID_SATURATION: + case V4L2_CID_HUE: + case V4L2_CID_RED_BALANCE: + case V4L2_CID_BLUE_BALANCE: + case V4L2_CID_GAMMA: + case V4L2_CID_SHARPNESS: + case V4L2_CID_CHROMA_GAIN: + case V4L2_CID_RDS_TX_DEVIATION: + case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: + case V4L2_CID_AUDIO_LIMITER_DEVIATION: + case V4L2_CID_AUDIO_COMPRESSION_GAIN: + case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: + case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: + case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: + case V4L2_CID_PILOT_TONE_DEVIATION: + case V4L2_CID_PILOT_TONE_FREQUENCY: + case V4L2_CID_TUNE_POWER_LEVEL: + case 
V4L2_CID_TUNE_ANTENNA_CAPACITOR: + case V4L2_CID_RF_TUNER_RF_GAIN: + case V4L2_CID_RF_TUNER_LNA_GAIN: + case V4L2_CID_RF_TUNER_MIXER_GAIN: + case V4L2_CID_RF_TUNER_IF_GAIN: + case V4L2_CID_RF_TUNER_BANDWIDTH: + case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: + *flags |= V4L2_CTRL_FLAG_SLIDER; + break; + case V4L2_CID_PAN_RELATIVE: + case V4L2_CID_TILT_RELATIVE: + case V4L2_CID_FOCUS_RELATIVE: + case V4L2_CID_IRIS_RELATIVE: + case V4L2_CID_ZOOM_RELATIVE: + *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + break; + case V4L2_CID_FLASH_STROBE_STATUS: + case V4L2_CID_AUTO_FOCUS_STATUS: + case V4L2_CID_FLASH_READY: + case V4L2_CID_DV_TX_HOTPLUG: + case V4L2_CID_DV_TX_RXSENSE: + case V4L2_CID_DV_TX_EDID_PRESENT: + case V4L2_CID_DV_RX_POWER_PRESENT: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + case V4L2_CID_RDS_RX_PTY: + case V4L2_CID_RDS_RX_PS_NAME: + case V4L2_CID_RDS_RX_RADIO_TEXT: + case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: + case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: + case V4L2_CID_RDS_RX_MUSIC_SPEECH: + case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_CAMERA_SENSOR_ROTATION: + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + case V4L2_CID_RF_TUNER_PLL_LOCK: + *flags |= V4L2_CTRL_FLAG_VOLATILE; + break; + } +} +EXPORT_SYMBOL(v4l2_ctrl_fill); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h new file mode 100644 index 0000000..f4cf273 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-priv.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 controls framework private header. + * + * Copyright (C) 2010-2021 Hans Verkuil + */ + +#ifndef _V4L2_CTRLS_PRIV_H_ +#define _V4L2_CTRLS_PRIV_H_ + +#define dprintk(vdev, fmt, arg...) 
do { \ + if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \ + printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \ + __func__, video_device_node_name(vdev), ##arg); \ +} while (0) + +#define has_op(master, op) \ + ((master)->ops && (master)->ops->op) +#define call_op(master, op) \ + (has_op(master, op) ? (master)->ops->op(master) : 0) + +static inline u32 node2id(struct list_head *node) +{ + return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id; +} + +/* + * Small helper function to determine if the autocluster is set to manual + * mode. + */ +static inline bool is_cur_manual(const struct v4l2_ctrl *master) +{ + return master->is_auto && master->cur.val == master->manual_mode_value; +} + +/* + * Small helper function to determine if the autocluster will be set to manual + * mode. + */ +static inline bool is_new_manual(const struct v4l2_ctrl *master) +{ + return master->is_auto && master->val == master->manual_mode_value; +} + +static inline u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + +/* v4l2-ctrls-core.c */ +void cur_to_new(struct v4l2_ctrl *ctrl); +void cur_to_req(struct v4l2_ctrl_ref *ref); +void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags); +void new_to_req(struct v4l2_ctrl_ref *ref); +int req_to_new(struct v4l2_ctrl_ref *ref); +void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl); +void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes); +int handler_new_ref(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req); +struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id); +struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id); +int check_range(enum v4l2_ctrl_type type, + s64 min, s64 max, u64 step, s64 def); +void update_from_auto_cluster(struct v4l2_ctrl *master); 
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, + bool set, u32 ch_flags); + +/* v4l2-ctrls-api.c */ +int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev); +int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, + struct video_device *vdev, bool set); + +/* v4l2-ctrls-request.c */ +void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl); +void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl); +int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs); +int try_set_ext_ctrls_request(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set); + +#endif diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-request.c b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-request.c new file mode 100644 index 0000000..e77f722 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-ctrls-request.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * V4L2 controls framework Request API implementation. 
+ * + * Copyright (C) 2018-2021 Hans Verkuil + */ + +#define pr_fmt(fmt) "v4l2-ctrls: " fmt + +#include +#include +#include +#include +#include + +#include "v4l2-ctrls-priv.h" + +/* Initialize the request-related fields in a control handler */ +void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl) +{ + INIT_LIST_HEAD(&hdl->requests); + INIT_LIST_HEAD(&hdl->requests_queued); + hdl->request_is_queued = false; + media_request_object_init(&hdl->req_obj); +} + +/* Free the request-related fields in a control handler */ +void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl) +{ + struct v4l2_ctrl_handler *req, *next_req; + + /* + * Do nothing if this isn't the main handler or the main + * handler is not used in any request. + * + * The main handler can be identified by having a NULL ops pointer in + * the request object. + */ + if (hdl->req_obj.ops || list_empty(&hdl->requests)) + return; + + /* + * If the main handler is freed and it is used by handler objects in + * outstanding requests, then unbind and put those objects before + * freeing the main handler. 
+ */ + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { + media_request_object_unbind(&req->req_obj); + media_request_object_put(&req->req_obj); + } +} + +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_handler *from) +{ + struct v4l2_ctrl_ref *ref; + int err = 0; + + if (WARN_ON(!hdl || hdl == from)) + return -EINVAL; + + if (hdl->error) + return hdl->error; + + WARN_ON(hdl->lock != &hdl->_lock); + + mutex_lock(from->lock); + list_for_each_entry(ref, &from->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl_ref *new_ref; + + /* Skip refs inherited from other devices */ + if (ref->from_other_dev) + continue; + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); + if (err) + break; + } + mutex_unlock(from->lock); + return err; +} + +static void v4l2_ctrl_request_queue(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + + mutex_lock(main_hdl->lock); + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); + hdl->request_is_queued = true; + mutex_unlock(main_hdl->lock); +} + +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + + mutex_lock(main_hdl->lock); + list_del_init(&hdl->requests); + if (hdl->request_is_queued) { + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + } + mutex_unlock(main_hdl->lock); +} + +static void v4l2_ctrl_request_release(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + v4l2_ctrl_handler_free(hdl); + kfree(hdl); +} + +static const struct media_request_object_ops req_ops = { + .queue = v4l2_ctrl_request_queue, + .unbind = v4l2_ctrl_request_unbind, + 
.release = v4l2_ctrl_request_release, +}; + +struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, + struct v4l2_ctrl_handler *parent) +{ + struct media_request_object *obj; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING && + req->state != MEDIA_REQUEST_STATE_QUEUED)) + return NULL; + + obj = media_request_object_find(req, &req_ops, parent); + if (obj) + return container_of(obj, struct v4l2_ctrl_handler, req_obj); + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find); + +struct v4l2_ctrl * +v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return (ref && ref->p_req_valid) ? ref->ctrl : NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find); + +static int v4l2_ctrl_request_bind(struct media_request *req, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *from) +{ + int ret; + + ret = v4l2_ctrl_request_clone(hdl, from); + + if (!ret) { + ret = media_request_object_bind(req, &req_ops, + from, false, &hdl->req_obj); + if (!ret) { + mutex_lock(from->lock); + list_add_tail(&hdl->requests, &from->requests); + mutex_unlock(from->lock); + } + } + return ret; +} + +static struct media_request_object * +v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl, + struct media_request *req, bool set) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *new_hdl; + int ret; + + if (IS_ERR(req)) + return ERR_CAST(req); + + if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) + return ERR_PTR(-EBUSY); + + obj = media_request_object_find(req, &req_ops, hdl); + if (obj) + return obj; + /* + * If there are no controls in this completed request, + * then that can only happen if: + * + * 1) no controls were present in the queued request, and + * 2) v4l2_ctrl_request_complete() could not allocate a + * control handler object to store the completed state in. 
+ * + * So return ENOMEM to indicate that there was an out-of-memory + * error. + */ + if (!set) + return ERR_PTR(-ENOMEM); + + new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL); + if (!new_hdl) + return ERR_PTR(-ENOMEM); + + obj = &new_hdl->req_obj; + ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, new_hdl, hdl); + if (ret) { + v4l2_ctrl_handler_free(new_hdl); + kfree(new_hdl); + return ERR_PTR(ret); + } + + media_request_object_get(obj); + return obj; +} + +int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *cs) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (!mdev || cs->request_fd < 0) + return -EINVAL; + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) + return PTR_ERR(req); + + if (req->state != MEDIA_REQUEST_STATE_COMPLETE) { + media_request_put(req); + return -EACCES; + } + + ret = media_request_lock_for_access(req); + if (ret) { + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, false); + if (IS_ERR(obj)) { + media_request_unlock_for_access(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev); + + media_request_unlock_for_access(req); + media_request_object_put(obj); + media_request_put(req); + return ret; +} + +int try_set_ext_ctrls_request(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (!mdev) { + dprintk(vdev, "%s: missing media device\n", + video_device_node_name(vdev)); + return -EINVAL; + } + + if (cs->request_fd < 0) { + dprintk(vdev, "%s: invalid request fd 
%d\n", + video_device_node_name(vdev), cs->request_fd); + return -EINVAL; + } + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) { + dprintk(vdev, "%s: cannot find request fd %d\n", + video_device_node_name(vdev), cs->request_fd); + return PTR_ERR(req); + } + + ret = media_request_lock_for_update(req); + if (ret) { + dprintk(vdev, "%s: cannot lock request fd %d\n", + video_device_node_name(vdev), cs->request_fd); + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, set); + if (IS_ERR(obj)) { + dprintk(vdev, + "%s: cannot find request object for request fd %d\n", + video_device_node_name(vdev), + cs->request_fd); + media_request_unlock_for_update(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); + if (ret) + dprintk(vdev, + "%s: try_set_ext_ctrls_common failed (%d)\n", + video_device_node_name(vdev), ret); + + media_request_unlock_for_update(req); + media_request_object_put(obj); + media_request_put(req); + + return ret; +} + +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + /* + * Note that it is valid if nothing was found. It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. 
+ */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) { + int ret; + + /* Create a new request so the driver can return controls */ + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) + return; + + ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, hdl, main_hdl); + if (ret) { + v4l2_ctrl_handler_free(hdl); + kfree(hdl); + return; + } + hdl->request_is_queued = true; + obj = media_request_object_find(req, &req_ops, main_hdl); + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + unsigned int i; + + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + call_op(master, g_volatile_ctrl); + new_to_req(ref); + v4l2_ctrl_unlock(master); + continue; + } + if (ref->p_req_valid) + continue; + + /* Copy the current control value into the request */ + v4l2_ctrl_lock(ctrl); + cur_to_req(ref); + v4l2_ctrl_unlock(ctrl); + } + + mutex_lock(main_hdl->lock); + WARN_ON(!hdl->request_is_queued); + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + mutex_unlock(main_hdl->lock); + media_request_object_complete(obj); + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_complete); + +int v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + int ret = 0; + + if (!req || !main_hdl) + return 0; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) + return -EBUSY; + + /* + * Note that it is valid if nothing was found. 
It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. + */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return 0; + if (obj->completed) { + media_request_object_put(obj); + return -EBUSY; + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) + ref->req_done = false; + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + bool have_new_data = false; + int i; + + /* + * Skip if this control was already handled by a cluster. + * Skip button controls and read-only controls. + */ + if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + v4l2_ctrl_lock(master); + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + if (r->p_req_valid) { + have_new_data = true; + break; + } + } + } + if (!have_new_data) { + v4l2_ctrl_unlock(master); + continue; + } + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + ret = req_to_new(r); + if (ret) { + v4l2_ctrl_unlock(master); + goto error; + } + master->cluster[i]->is_new = 1; + r->req_done = true; + } + } + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. + * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + s32 new_auto_val = *master->p_new.p_s32; + + /* + * If the new value == the manual value, then copy + * the current volatile values. 
+ */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + ret = try_or_set_cluster(NULL, master, true, 0); + v4l2_ctrl_unlock(master); + + if (ret) + break; + } + +error: + media_request_object_put(obj); + return ret; +} +EXPORT_SYMBOL(v4l2_ctrl_request_setup); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-dev.c b/6.18.0/drivers/media/v4l2-core/v4l2-dev.c new file mode 100644 index 0000000..10a126e --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-dev.c @@ -0,0 +1,1258 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video capture interface for Linux version 2 + * + * A generic video device interface for the LINUX operating system + * using a set of device structures/vectors for low level operations. + * + * Authors: Alan Cox, (version 1) + * Mauro Carvalho Chehab (version 2) + * + * Fixes: 20000516 Claudio Matsuoka + * - Added procfs support + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define VIDEO_NUM_DEVICES 256 +#define VIDEO_NAME "video4linux" + +#define dprintk(fmt, arg...) 
do { \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), \ + __func__, ##arg); \ +} while (0) + +/* + * sysfs stuff + */ + +static ssize_t index_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%i\n", vdev->index); +} +static DEVICE_ATTR_RO(index); + +static ssize_t dev_debug_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%i\n", vdev->dev_debug); +} + +static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct video_device *vdev = to_video_device(cd); + int res = 0; + u16 value; + + res = kstrtou16(buf, 0, &value); + if (res) + return res; + + vdev->dev_debug = value; + return len; +} +static DEVICE_ATTR_RW(dev_debug); + +static ssize_t name_show(struct device *cd, + struct device_attribute *attr, char *buf) +{ + struct video_device *vdev = to_video_device(cd); + + return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name); +} +static DEVICE_ATTR_RO(name); + +static struct attribute *video_device_attrs[] = { + &dev_attr_name.attr, + &dev_attr_dev_debug.attr, + &dev_attr_index.attr, + NULL, +}; +ATTRIBUTE_GROUPS(video_device); + +static struct dentry *v4l2_debugfs_root_dir; + +/* + * Active devices + */ +static struct video_device *video_devices[VIDEO_NUM_DEVICES]; +static DEFINE_MUTEX(videodev_lock); +static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES); + +/* Device node utility functions */ + +/* Note: these utility functions all assume that vfl_type is in the range + [0, VFL_TYPE_MAX-1]. */ + +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES +/* Return the bitmap corresponding to vfl_type. 
*/ +static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type) +{ + /* Any types not assigned to fixed minor ranges must be mapped to + one single bitmap for the purposes of finding a free node number + since all those unassigned types use the same minor range. */ + int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type; + + return devnode_nums[idx]; +} +#else +/* Return the bitmap corresponding to vfl_type. */ +static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type) +{ + return devnode_nums[vfl_type]; +} +#endif + +/* Mark device node number vdev->num as used */ +static inline void devnode_set(struct video_device *vdev) +{ + set_bit(vdev->num, devnode_bits(vdev->vfl_type)); +} + +/* Mark device node number vdev->num as unused */ +static inline void devnode_clear(struct video_device *vdev) +{ + clear_bit(vdev->num, devnode_bits(vdev->vfl_type)); +} + +/* Try to find a free device node number in the range [from, to> */ +static inline int devnode_find(struct video_device *vdev, int from, int to) +{ + return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from); +} + +struct video_device *video_device_alloc(void) +{ + return kzalloc(sizeof(struct video_device), GFP_KERNEL); +} +EXPORT_SYMBOL(video_device_alloc); + +void video_device_release(struct video_device *vdev) +{ + kfree(vdev); +} +EXPORT_SYMBOL(video_device_release); + +void video_device_release_empty(struct video_device *vdev) +{ + /* Do nothing */ + /* Only valid when the video_device struct is a static. */ +} +EXPORT_SYMBOL(video_device_release_empty); + +static inline void video_get(struct video_device *vdev) +{ + get_device(&vdev->dev); +} + +static inline void video_put(struct video_device *vdev) +{ + put_device(&vdev->dev); +} + +/* Called when the last user of the video device exits. 
*/ +static void v4l2_device_release(struct device *cd) +{ + struct video_device *vdev = to_video_device(cd); + struct v4l2_device *v4l2_dev = vdev->v4l2_dev; + + mutex_lock(&videodev_lock); + if (WARN_ON(video_devices[vdev->minor] != vdev)) { + /* should not happen */ + mutex_unlock(&videodev_lock); + return; + } + + /* Free up this device for reuse */ + video_devices[vdev->minor] = NULL; + + /* Delete the cdev on this minor as well */ + cdev_del(vdev->cdev); + /* Just in case some driver tries to access this from + the release() callback. */ + vdev->cdev = NULL; + + /* Mark device node number as free */ + devnode_clear(vdev); + + mutex_unlock(&videodev_lock); + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) { + /* Remove interfaces and interface links */ + media_devnode_remove(vdev->intf_devnode); + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) + media_device_unregister_entity(&vdev->entity); + } +#endif + + /* Do not call v4l2_device_put if there is no release callback set. + * Drivers that have no v4l2_device release callback might free the + * v4l2_dev instance in the video_device release callback below, so we + * must perform this check here. + * + * TODO: In the long run all drivers that use v4l2_device should use the + * v4l2_device release callback. This check will then be unnecessary. + */ + if (v4l2_dev->release == NULL) + v4l2_dev = NULL; + + /* Release video_device and perform other + cleanups as needed. 
*/ + vdev->release(vdev); + + /* Decrease v4l2_device refcount */ + if (v4l2_dev) + v4l2_device_put(v4l2_dev); +} + +static const struct class video_class = { + .name = VIDEO_NAME, + .dev_groups = video_device_groups, +}; + +struct video_device *video_devdata(struct file *file) +{ + return video_devices[iminor(file_inode(file))]; +} +EXPORT_SYMBOL(video_devdata); + + +/* Priority handling */ + +static inline bool prio_is_valid(enum v4l2_priority prio) +{ + return prio == V4L2_PRIORITY_BACKGROUND || + prio == V4L2_PRIORITY_INTERACTIVE || + prio == V4L2_PRIORITY_RECORD; +} + +void v4l2_prio_init(struct v4l2_prio_state *global) +{ + memset(global, 0, sizeof(*global)); +} +EXPORT_SYMBOL(v4l2_prio_init); + +int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, + enum v4l2_priority new) +{ + if (!prio_is_valid(new)) + return -EINVAL; + if (*local == new) + return 0; + + atomic_inc(&global->prios[new]); + if (prio_is_valid(*local)) + atomic_dec(&global->prios[*local]); + *local = new; + return 0; +} +EXPORT_SYMBOL(v4l2_prio_change); + +void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local) +{ + v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT); +} +EXPORT_SYMBOL(v4l2_prio_open); + +void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local) +{ + if (prio_is_valid(local)) + atomic_dec(&global->prios[local]); +} +EXPORT_SYMBOL(v4l2_prio_close); + +enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global) +{ + if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0) + return V4L2_PRIORITY_RECORD; + if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0) + return V4L2_PRIORITY_INTERACTIVE; + if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0) + return V4L2_PRIORITY_BACKGROUND; + return V4L2_PRIORITY_UNSET; +} +EXPORT_SYMBOL(v4l2_prio_max); + +int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local) +{ + return (local < v4l2_prio_max(global)) ? 
-EBUSY : 0; +} +EXPORT_SYMBOL(v4l2_prio_check); + + +static ssize_t v4l2_read(struct file *filp, char __user *buf, + size_t sz, loff_t *off) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->read) + return -EINVAL; + if (video_is_registered(vdev)) + ret = vdev->fops->read(filp, buf, sz, off); + if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) && + (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING)) + dprintk("%s: read: %zd (%d)\n", + video_device_node_name(vdev), sz, ret); + return ret; +} + +static ssize_t v4l2_write(struct file *filp, const char __user *buf, + size_t sz, loff_t *off) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->write) + return -EINVAL; + if (video_is_registered(vdev)) + ret = vdev->fops->write(filp, buf, sz, off); + if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) && + (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING)) + dprintk("%s: write: %zd (%d)\n", + video_device_node_name(vdev), sz, ret); + return ret; +} + +static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll) +{ + struct video_device *vdev = video_devdata(filp); + __poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI; + + if (video_is_registered(vdev)) { + if (!vdev->fops->poll) + res = DEFAULT_POLLMASK; + else + res = vdev->fops->poll(filp, poll); + } + if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL) + dprintk("%s: poll: %08x %08x\n", + video_device_node_name(vdev), res, + poll_requested_events(poll)); + return res; +} + +static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (vdev->fops->unlocked_ioctl) { + if (video_is_registered(vdev)) + ret = vdev->fops->unlocked_ioctl(filp, cmd, arg); + } else + ret = -ENOTTY; + + return ret; +} + +#ifdef CONFIG_MMU +#define v4l2_get_unmapped_area NULL +#else +static unsigned long v4l2_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, 
unsigned long pgoff, + unsigned long flags) +{ + struct video_device *vdev = video_devdata(filp); + int ret; + + if (!vdev->fops->get_unmapped_area) + return -ENOSYS; + if (!video_is_registered(vdev)) + return -ENODEV; + ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags); + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: get_unmapped_area (%d)\n", + video_device_node_name(vdev), ret); + return ret; +} +#endif + +static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm) +{ + struct video_device *vdev = video_devdata(filp); + int ret = -ENODEV; + + if (!vdev->fops->mmap) + return -ENODEV; + if (video_is_registered(vdev)) + ret = vdev->fops->mmap(filp, vm); + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: mmap (%d)\n", + video_device_node_name(vdev), ret); + return ret; +} + +/* Override for the open function */ +static int v4l2_open(struct inode *inode, struct file *filp) +{ + struct video_device *vdev; + int ret; + + /* Check if the video device is available */ + mutex_lock(&videodev_lock); + vdev = video_devdata(filp); + /* return ENODEV if the video device has already been removed. */ + if (vdev == NULL || !video_is_registered(vdev)) { + mutex_unlock(&videodev_lock); + return -ENODEV; + } + /* and increase the device refcount */ + video_get(vdev); + mutex_unlock(&videodev_lock); + + if (!video_is_registered(vdev)) { + ret = -ENODEV; + goto done; + } + + ret = vdev->fops->open(filp); + if (ret) + goto done; + + /* All drivers must use v4l2_fh. 
*/ + if (WARN_ON(!test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))) { + vdev->fops->release(filp); + ret = -ENODEV; + } + +done: + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: open (%d)\n", + video_device_node_name(vdev), ret); + + /* decrease the refcount in case of an error */ + if (ret) + video_put(vdev); + return ret; +} + +/* Override for the release function */ +static int v4l2_release(struct inode *inode, struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + int ret; + + /* + * We need to serialize the release() with queueing new requests. + * The release() may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. + */ + if (v4l2_device_supports_requests(vdev->v4l2_dev)) { + mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex); + ret = vdev->fops->release(filp); + mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex); + } else { + ret = vdev->fops->release(filp); + } + + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) + dprintk("%s: release\n", + video_device_node_name(vdev)); + + /* decrease the refcount unconditionally since the release() + return value is ignored. */ + video_put(vdev); + return ret; +} + +static const struct file_operations v4l2_fops = { + .owner = THIS_MODULE, + .read = v4l2_read, + .write = v4l2_write, + .open = v4l2_open, + .get_unmapped_area = v4l2_get_unmapped_area, + .mmap = v4l2_mmap, + .unlocked_ioctl = v4l2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = v4l2_compat_ioctl32, +#endif + .release = v4l2_release, + .poll = v4l2_poll, +}; + +/** + * get_index - assign stream index number based on v4l2_dev + * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned + * + * Note that when this is called the new device has not yet been registered + * in the video_device array, but it was able to obtain a minor number. 
+ * + * This means that we can always obtain a free stream index number since + * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots in + * use of the video_device array. + * + * Returns a free index number. + */ +static int get_index(struct video_device *vdev) +{ + /* This can be static since this function is called with the global + videodev_lock held. */ + static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES); + int i; + + bitmap_zero(used, VIDEO_NUM_DEVICES); + + for (i = 0; i < VIDEO_NUM_DEVICES; i++) { + if (video_devices[i] != NULL && + video_devices[i]->v4l2_dev == vdev->v4l2_dev) { + __set_bit(video_devices[i]->index, used); + } + } + + return find_first_zero_bit(used, VIDEO_NUM_DEVICES); +} + +#define SET_VALID_IOCTL(ops, cmd, op) \ + do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0) + +/* This determines which ioctls are actually implemented in the driver. + It's a one-time thing which simplifies video_ioctl2 as it can just do + a bit test. + + Note that drivers can override this by setting bits to 1 in + vdev->valid_ioctls. If an ioctl is marked as 1 when this function is + called, then that ioctl will actually be marked as unimplemented. 
+ + It does that by first setting up the local valid_ioctls bitmap, and + at the end do a: + + vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls) + */ +static void determine_valid_ioctls(struct video_device *vdev) +{ + const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; + const u32 meta_caps = V4L2_CAP_META_CAPTURE | + V4L2_CAP_META_OUTPUT; + DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO && + (vdev->device_caps & vid_caps); + bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI; + bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO; + bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR; + bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH; + bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO && + (vdev->device_caps & meta_caps); + bool is_rx = vdev->vfl_dir != VFL_DIR_TX; + bool is_tx = vdev->vfl_dir != VFL_DIR_RX; + bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC; + bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING; + bool is_edid = vdev->device_caps & V4L2_CAP_EDID; + + bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE); + + /* vfl_type and vfl_dir independent ioctls */ + + SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap); + __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls); + + /* Note: the control handler can also be passed through the filehandle, + and that can't be tested here. If the bit for these control ioctls + is set, then the ioctl is valid. But if it is 0, then it can still + be valid if the filehandle passed the control handler. 
*/ + if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) + __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) + __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls) + __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls); + if (vdev->ctrl_handler || ops->vidioc_querymenu) + __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls); + if (!is_tch) { + SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency); + SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency); + } + SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status); +#ifdef CONFIG_VIDEO_ADV_DEBUG + __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls); +#endif + /* yes, really vidioc_subscribe_event */ + SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event); + SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event); + SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event); + if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator) + __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls); + + if (is_vid) { + /* video specific ioctls */ + if ((is_rx && (ops->vidioc_enum_fmt_vid_cap || + ops->vidioc_enum_fmt_vid_overlay)) || + (is_tx && ops->vidioc_enum_fmt_vid_out)) + __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_g_fmt_vid_cap || + 
ops->vidioc_g_fmt_vid_cap_mplane || + ops->vidioc_g_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_g_fmt_vid_out || + ops->vidioc_g_fmt_vid_out_mplane || + ops->vidioc_g_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_s_fmt_vid_cap || + ops->vidioc_s_fmt_vid_cap_mplane || + ops->vidioc_s_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_s_fmt_vid_out || + ops->vidioc_s_fmt_vid_out_mplane || + ops->vidioc_s_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_try_fmt_vid_cap || + ops->vidioc_try_fmt_vid_cap_mplane || + ops->vidioc_try_fmt_vid_overlay)) || + (is_tx && (ops->vidioc_try_fmt_vid_out || + ops->vidioc_try_fmt_vid_out_mplane || + ops->vidioc_try_fmt_vid_out_overlay))) + __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay); + SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf); + SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf); + SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp); + SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp); + SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index); + SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); + if (ops->vidioc_g_selection && + !test_bit(_IOC_NR(VIDIOC_G_SELECTION), vdev->valid_ioctls)) { + __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls); + } + if (ops->vidioc_s_selection && + !test_bit(_IOC_NR(VIDIOC_S_SELECTION), vdev->valid_ioctls)) + __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls); + SET_VALID_IOCTL(ops, 
VIDIOC_G_SELECTION, vidioc_g_selection); + SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection); + } + if (is_meta && is_rx) { + /* metadata capture specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap); + } else if (is_meta && is_tx) { + /* metadata output specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out); + } + if (is_vbi) { + /* vbi specific ioctls */ + if ((is_rx && (ops->vidioc_g_fmt_vbi_cap || + ops->vidioc_g_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_g_fmt_vbi_out || + ops->vidioc_g_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_s_fmt_vbi_cap || + ops->vidioc_s_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_s_fmt_vbi_out || + ops->vidioc_s_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); + if ((is_rx && (ops->vidioc_try_fmt_vbi_cap || + ops->vidioc_try_fmt_sliced_vbi_cap)) || + (is_tx && (ops->vidioc_try_fmt_vbi_out || + ops->vidioc_try_fmt_sliced_vbi_out))) + __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap); + } else if (is_tch) { + /* touch specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); + 
SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm); + SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); + } else if (is_sdr && is_rx) { + /* SDR receiver specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap); + } else if (is_sdr && is_tx) { + /* SDR transmitter specific ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out); + SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out); + } + + if (has_streaming) { + /* ioctls valid for streaming I/O */ + SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs); + SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf); + SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf); + SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf); + SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf); + SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs); + SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf); + SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon); + SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff); + /* VIDIOC_CREATE_BUFS support is mandatory to enable VIDIOC_REMOVE_BUFS */ + if (ops->vidioc_create_bufs) + SET_VALID_IOCTL(ops, VIDIOC_REMOVE_BUFS, vidioc_remove_bufs); + } + + if (is_vid || is_vbi || is_meta) { + /* ioctls valid for video, vbi and metadata */ + if (ops->vidioc_s_std) + __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std); + SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std); + if (is_rx) { + SET_VALID_IOCTL(ops, 
VIDIOC_QUERYSTD, vidioc_querystd); + if (is_io_mc) { + __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls); + } else { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + } + SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio); + SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio); + SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio); + SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + if (is_tx) { + if (is_io_mc) { + __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls); + __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls); + } else { + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + } + SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout); + SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout); + SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout); + } + if (ops->vidioc_g_parm || ops->vidioc_g_std) + __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls); + SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); + SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings); + SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap); + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + } + if (is_tx && (is_radio || is_sdr)) { + /* radio transmitter only ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator); + SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator); + } + if (is_rx && !is_tch) { + /* 
receiver only ioctls */ + SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner); + SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner); + SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek); + } + if (is_edid) { + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + if (is_tx) { + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + } + if (is_rx) { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + } + + bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls, + BASE_VIDIOC_PRIVATE); +} + +static int video_register_media_controller(struct video_device *vdev) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + u32 intf_type; + int ret; + + /* Memory-to-memory devices are more complex and use + * their own function to register its mc entities. + */ + if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M) + return 0; + + vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE; + vdev->entity.function = MEDIA_ENT_F_UNKNOWN; + + switch (vdev->vfl_type) { + case VFL_TYPE_VIDEO: + intf_type = MEDIA_INTF_T_V4L_VIDEO; + vdev->entity.function = MEDIA_ENT_F_IO_V4L; + break; + case VFL_TYPE_VBI: + intf_type = MEDIA_INTF_T_V4L_VBI; + vdev->entity.function = MEDIA_ENT_F_IO_VBI; + break; + case VFL_TYPE_SDR: + intf_type = MEDIA_INTF_T_V4L_SWRADIO; + vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO; + break; + case VFL_TYPE_TOUCH: + intf_type = MEDIA_INTF_T_V4L_TOUCH; + vdev->entity.function = MEDIA_ENT_F_IO_V4L; + break; + case VFL_TYPE_RADIO: + intf_type = MEDIA_INTF_T_V4L_RADIO; + /* + * Radio doesn't have an entity at the V4L2 side to represent + * radio input or output. Instead, the audio input/output goes + * via either physical wires or ALSA. 
+ */ + break; + case VFL_TYPE_SUBDEV: + intf_type = MEDIA_INTF_T_V4L_SUBDEV; + /* Entity will be created via v4l2_device_register_subdev() */ + break; + default: + return 0; + } + + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { + vdev->entity.name = vdev->name; + + /* Needed just for backward compatibility with legacy MC API */ + vdev->entity.info.dev.major = VIDEO_MAJOR; + vdev->entity.info.dev.minor = vdev->minor; + + ret = media_device_register_entity(vdev->v4l2_dev->mdev, + &vdev->entity); + if (ret < 0) { + pr_warn("%s: media_device_register_entity failed\n", + __func__); + return ret; + } + } + + vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev, + intf_type, + 0, VIDEO_MAJOR, + vdev->minor); + if (!vdev->intf_devnode) { + media_device_unregister_entity(&vdev->entity); + return -ENOMEM; + } + + if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { + struct media_link *link; + + link = media_create_intf_link(&vdev->entity, + &vdev->intf_devnode->intf, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (!link) { + media_devnode_remove(vdev->intf_devnode); + media_device_unregister_entity(&vdev->entity); + return -ENOMEM; + } + } + + /* FIXME: how to create the other interface links? 
*/ + +#endif + return 0; +} + +int __video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr, int warn_if_nr_in_use, + struct module *owner) +{ + int i = 0; + int ret; + int minor_offset = 0; + int minor_cnt = VIDEO_NUM_DEVICES; + const char *name_base; + + /* A minor value of -1 marks this video device as never + having been registered */ + vdev->minor = -1; + + /* the release callback MUST be present */ + if (WARN_ON(!vdev->release)) + return -EINVAL; + /* the v4l2_dev pointer MUST be present */ + if (WARN_ON(!vdev->v4l2_dev)) + return -EINVAL; + /* the device_caps field MUST be set for all but subdevs */ + if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps)) + return -EINVAL; + /* the open and release file operations are mandatory */ + if (WARN_ON(!vdev->fops || !vdev->fops->open || !vdev->fops->release)) + return -EINVAL; + + /* v4l2_fh support */ + spin_lock_init(&vdev->fh_lock); + INIT_LIST_HEAD(&vdev->fh_list); + + /* Part 1: check device type */ + switch (type) { + case VFL_TYPE_VIDEO: + name_base = "video"; + break; + case VFL_TYPE_VBI: + name_base = "vbi"; + break; + case VFL_TYPE_RADIO: + name_base = "radio"; + break; + case VFL_TYPE_SUBDEV: + name_base = "v4l-subdev"; + break; + case VFL_TYPE_SDR: + /* Use device name 'swradio' because 'sdr' was already taken. */ + name_base = "swradio"; + break; + case VFL_TYPE_TOUCH: + name_base = "v4l-touch"; + break; + default: + pr_err("%s called with unknown type: %d\n", + __func__, type); + return -EINVAL; + } + + vdev->vfl_type = type; + vdev->cdev = NULL; + if (vdev->dev_parent == NULL) + vdev->dev_parent = vdev->v4l2_dev->dev; + if (vdev->ctrl_handler == NULL) + vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler; + /* If the prio state pointer is NULL, then use the v4l2_device + prio state. */ + if (vdev->prio == NULL) + vdev->prio = &vdev->v4l2_dev->prio; + + /* Part 2: find a free minor, device node number and device index. 
*/ +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES + /* Keep the ranges for the first four types for historical + * reasons. + * Newer devices (not yet in place) should use the range + * of 128-191 and just pick the first free minor there + * (new style). */ + switch (type) { + case VFL_TYPE_VIDEO: + minor_offset = 0; + minor_cnt = 64; + break; + case VFL_TYPE_RADIO: + minor_offset = 64; + minor_cnt = 64; + break; + case VFL_TYPE_VBI: + minor_offset = 224; + minor_cnt = 32; + break; + default: + minor_offset = 128; + minor_cnt = 64; + break; + } +#endif + + /* Pick a device node number */ + mutex_lock(&videodev_lock); + nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt); + if (nr == minor_cnt) + nr = devnode_find(vdev, 0, minor_cnt); + if (nr == minor_cnt) { + pr_err("could not get a free device node number\n"); + mutex_unlock(&videodev_lock); + return -ENFILE; + } +#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES + /* 1-on-1 mapping of device node number to minor number */ + i = nr; +#else + /* The device node number and minor numbers are independent, so + we just find the first free minor number. 
*/ + for (i = 0; i < VIDEO_NUM_DEVICES; i++) + if (video_devices[i] == NULL) + break; + if (i == VIDEO_NUM_DEVICES) { + mutex_unlock(&videodev_lock); + pr_err("could not get a free minor\n"); + return -ENFILE; + } +#endif + vdev->minor = i + minor_offset; + vdev->num = nr; + + /* Should not happen since we thought this minor was free */ + if (WARN_ON(video_devices[vdev->minor])) { + mutex_unlock(&videodev_lock); + pr_err("video_device not empty!\n"); + return -ENFILE; + } + devnode_set(vdev); + vdev->index = get_index(vdev); + video_devices[vdev->minor] = vdev; + mutex_unlock(&videodev_lock); + + if (vdev->ioctl_ops) + determine_valid_ioctls(vdev); + + /* Part 3: Initialize the character device */ + vdev->cdev = cdev_alloc(); + if (vdev->cdev == NULL) { + ret = -ENOMEM; + goto cleanup; + } + vdev->cdev->ops = &v4l2_fops; + vdev->cdev->owner = owner; + ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1); + if (ret < 0) { + pr_err("%s: cdev_add failed\n", __func__); + kfree(vdev->cdev); + vdev->cdev = NULL; + goto cleanup; + } + + /* Part 4: register the device with sysfs */ + vdev->dev.class = &video_class; + vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); + vdev->dev.parent = vdev->dev_parent; + vdev->dev.release = v4l2_device_release; + dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); + + /* Increase v4l2_device refcount */ + v4l2_device_get(vdev->v4l2_dev); + + mutex_lock(&videodev_lock); + ret = device_register(&vdev->dev); + if (ret < 0) { + mutex_unlock(&videodev_lock); + pr_err("%s: device_register failed\n", __func__); + put_device(&vdev->dev); + return ret; + } + + if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) + pr_warn("%s: requested %s%d, got %s\n", __func__, + name_base, nr, video_device_node_name(vdev)); + + /* Part 5: Register the entity. */ + ret = video_register_media_controller(vdev); + + /* Part 6: Activate this minor. The char device can now be used. 
*/ + set_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); + + return 0; + +cleanup: + mutex_lock(&videodev_lock); + if (vdev->cdev) + cdev_del(vdev->cdev); + video_devices[vdev->minor] = NULL; + devnode_clear(vdev); + mutex_unlock(&videodev_lock); + /* Mark this video device as never having been registered. */ + vdev->minor = -1; + return ret; +} +EXPORT_SYMBOL(__video_register_device); + +/** + * video_unregister_device - unregister a video4linux device + * @vdev: the device to unregister + * + * This unregisters the passed device. Future open calls will + * be met with errors. + */ +void video_unregister_device(struct video_device *vdev) +{ + /* Check if vdev was ever registered at all */ + if (!vdev || !video_is_registered(vdev)) + return; + + mutex_lock(&videodev_lock); + /* This must be in a critical section to prevent a race with v4l2_open. + * Once this bit has been cleared video_get may never be called again. + */ + clear_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); + v4l2_event_wake_all(vdev); + device_unregister(&vdev->dev); +} +EXPORT_SYMBOL(video_unregister_device); + +#ifdef CONFIG_DEBUG_FS +struct dentry *v4l2_debugfs_root(void) +{ + if (!v4l2_debugfs_root_dir) + v4l2_debugfs_root_dir = debugfs_create_dir("v4l2", NULL); + return v4l2_debugfs_root_dir; +} +EXPORT_SYMBOL_GPL(v4l2_debugfs_root); +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) + +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_start); + +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return __media_pipeline_start(&entity->pads[0], 
pipe); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_start); + +void video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_stop); + +void __video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return __media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_stop); + +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_alloc_start(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start); + +struct media_pipeline *video_device_pipeline(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return NULL; + + return media_pad_pipeline(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/* + * Initialise video for linux + */ +static int __init videodev_init(void) +{ + dev_t dev = MKDEV(VIDEO_MAJOR, 0); + int ret; + + pr_info("Linux video capture interface: v2.00\n"); + ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME); + if (ret < 0) { + pr_warn("videodev: unable to get major %d\n", + VIDEO_MAJOR); + return ret; + } + + ret = class_register(&video_class); + if (ret < 0) { + unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); + pr_warn("video_dev: class_register failed\n"); + return -EIO; + } + + return 0; +} + +static void __exit videodev_exit(void) +{ + dev_t dev = MKDEV(VIDEO_MAJOR, 0); + + class_unregister(&video_class); + unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); + debugfs_remove_recursive(v4l2_debugfs_root_dir); + v4l2_debugfs_root_dir = 
NULL; +} + +subsys_initcall(videodev_init); +module_exit(videodev_exit) + +MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, Justin Schoeman, Gerd Knorr"); +MODULE_DESCRIPTION("Video4Linux2 core driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-device.c b/6.18.0/drivers/media/v4l2-core/v4l2-device.c new file mode 100644 index 0000000..63b12ef --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-device.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + V4L2 device support. + + Copyright (C) 2008 Hans Verkuil + + */ + +#include +#include +#include +#include +#include +#include +#include + +int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) +{ + if (v4l2_dev == NULL) + return -EINVAL; + + INIT_LIST_HEAD(&v4l2_dev->subdevs); + spin_lock_init(&v4l2_dev->lock); + v4l2_prio_init(&v4l2_dev->prio); + kref_init(&v4l2_dev->ref); + get_device(dev); + v4l2_dev->dev = dev; + if (dev == NULL) { + /* If dev == NULL, then name must be filled in by the caller */ + if (WARN_ON(!v4l2_dev->name[0])) + return -EINVAL; + return 0; + } + + /* Set name to driver name + device name if it is empty. 
*/ + if (!v4l2_dev->name[0]) + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s", + dev->driver->name, dev_name(dev)); + if (!dev_get_drvdata(dev)) + dev_set_drvdata(dev, v4l2_dev); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_device_register); + +static void v4l2_device_release(struct kref *ref) +{ + struct v4l2_device *v4l2_dev = + container_of(ref, struct v4l2_device, ref); + + if (v4l2_dev->release) + v4l2_dev->release(v4l2_dev); +} + +int v4l2_device_put(struct v4l2_device *v4l2_dev) +{ + return kref_put(&v4l2_dev->ref, v4l2_device_release); +} +EXPORT_SYMBOL_GPL(v4l2_device_put); + +int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, + atomic_t *instance) +{ + int num = atomic_inc_return(instance) - 1; + int len = strlen(basename); + + if (basename[len - 1] >= '0' && basename[len - 1] <= '9') + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), + "%s-%d", basename, num); + else + snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), + "%s%d", basename, num); + return num; +} +EXPORT_SYMBOL_GPL(v4l2_device_set_name); + +void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) +{ + if (v4l2_dev->dev == NULL) + return; + + if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) + dev_set_drvdata(v4l2_dev->dev, NULL); + put_device(v4l2_dev->dev); + v4l2_dev->dev = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_device_disconnect); + +void v4l2_device_unregister(struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd, *next; + + /* Just return if v4l2_dev is NULL or if it was already + * unregistered before. 
*/ + if (v4l2_dev == NULL || !v4l2_dev->name[0]) + return; + v4l2_device_disconnect(v4l2_dev); + + /* Unregister subdevs */ + list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) { + v4l2_device_unregister_subdev(sd); + if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) + v4l2_i2c_subdev_unregister(sd); + else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) + v4l2_spi_subdev_unregister(sd); + } + /* Mark as unregistered, thus preventing duplicate unregistrations */ + v4l2_dev->name[0] = '\0'; +} +EXPORT_SYMBOL_GPL(v4l2_device_unregister); + +int __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, struct module *module) +{ + int err; + + /* Check for valid input */ + if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0]) + return -EINVAL; + + /* + * The reason to acquire the module here is to avoid unloading + * a module of sub-device which is registered to a media + * device. To make it possible to unload modules for media + * devices that also register sub-devices, do not + * try_module_get() such sub-device owners. + */ + sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver && + module == v4l2_dev->dev->driver->owner; + + if (!sd->owner_v4l2_dev && !try_module_get(module)) + return -ENODEV; + + sd->v4l2_dev = v4l2_dev; + /* This just returns 0 if either of the two args is NULL */ + err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, + NULL, true); + if (err) + goto error_module; + +#if defined(CONFIG_MEDIA_CONTROLLER) + /* Register the entity. 
*/ + if (v4l2_dev->mdev) { + err = media_device_register_entity(v4l2_dev->mdev, &sd->entity); + if (err < 0) + goto error_module; + } +#endif + + if (sd->internal_ops && sd->internal_ops->registered) { + err = sd->internal_ops->registered(sd); + if (err) + goto error_unregister; + } + + sd->owner = module; + + spin_lock(&v4l2_dev->lock); + list_add_tail(&sd->list, &v4l2_dev->subdevs); + spin_unlock(&v4l2_dev->lock); + + return 0; + +error_unregister: +#if defined(CONFIG_MEDIA_CONTROLLER) + media_device_unregister_entity(&sd->entity); +#endif +error_module: + if (!sd->owner_v4l2_dev) + module_put(sd->owner); + sd->v4l2_dev = NULL; + return err; +} +EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev); + +static void v4l2_subdev_release(struct v4l2_subdev *sd) +{ + struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL; + + if (sd->internal_ops && sd->internal_ops->release) + sd->internal_ops->release(sd); + sd->devnode = NULL; + module_put(owner); +} + +static void v4l2_device_release_subdev_node(struct video_device *vdev) +{ + v4l2_subdev_release(video_get_drvdata(vdev)); + kfree(vdev); +} + +int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only) +{ + struct video_device *vdev; + struct v4l2_subdev *sd; + int err; + + /* Register a device node for every subdev marked with the + * V4L2_SUBDEV_FL_HAS_DEVNODE flag. 
+ */ + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) + continue; + + if (sd->devnode) + continue; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + err = -ENOMEM; + goto clean_up; + } + + video_set_drvdata(vdev, sd); + strscpy(vdev->name, sd->name, sizeof(vdev->name)); + vdev->dev_parent = sd->dev; + vdev->v4l2_dev = v4l2_dev; + vdev->fops = &v4l2_subdev_fops; + vdev->release = v4l2_device_release_subdev_node; + vdev->ctrl_handler = sd->ctrl_handler; + if (read_only) + set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); + sd->devnode = vdev; + err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1, + sd->owner); + if (err < 0) { + sd->devnode = NULL; + kfree(vdev); + goto clean_up; + } +#if defined(CONFIG_MEDIA_CONTROLLER) + sd->entity.info.dev.major = VIDEO_MAJOR; + sd->entity.info.dev.minor = vdev->minor; + + /* Interface is created by __video_register_device() */ + if (vdev->v4l2_dev->mdev) { + struct media_link *link; + + link = media_create_intf_link(&sd->entity, + &vdev->intf_devnode->intf, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (!link) { + err = -ENOMEM; + goto clean_up; + } + } +#endif + } + return 0; + +clean_up: + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (!sd->devnode) + break; + video_unregister_device(sd->devnode); + } + + return err; +} +EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes); + +void v4l2_device_unregister_subdev(struct v4l2_subdev *sd) +{ + struct v4l2_device *v4l2_dev; + + /* return if it isn't registered */ + if (sd == NULL || sd->v4l2_dev == NULL) + return; + + v4l2_dev = sd->v4l2_dev; + + spin_lock(&v4l2_dev->lock); + list_del(&sd->list); + spin_unlock(&v4l2_dev->lock); + + if (sd->internal_ops && sd->internal_ops->unregistered) + sd->internal_ops->unregistered(sd); + sd->v4l2_dev = NULL; + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (v4l2_dev->mdev) { + /* + * No need to explicitly remove links, as both pads and + * links are 
removed by the function below, in the right order + */ + media_device_unregister_entity(&sd->entity); + } +#endif + if (sd->devnode) + video_unregister_device(sd->devnode); + else + v4l2_subdev_release(sd); +} +EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-dv-timings.c b/6.18.0/drivers/media/v4l2-core/v4l2-dv-timings.c new file mode 100644 index 0000000..346d1b0 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -0,0 +1,1275 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-dv-timings - dv-timings helper functions + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Hans Verkuil"); +MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions"); +MODULE_LICENSE("GPL"); + +const struct v4l2_dv_timings v4l2_dv_timings_presets[] = { + V4L2_DV_BT_CEA_640X480P59_94, + V4L2_DV_BT_CEA_720X480I59_94, + V4L2_DV_BT_CEA_720X480P59_94, + V4L2_DV_BT_CEA_720X576I50, + V4L2_DV_BT_CEA_720X576P50, + V4L2_DV_BT_CEA_1280X720P24, + V4L2_DV_BT_CEA_1280X720P25, + V4L2_DV_BT_CEA_1280X720P30, + V4L2_DV_BT_CEA_1280X720P50, + V4L2_DV_BT_CEA_1280X720P60, + V4L2_DV_BT_CEA_1920X1080P24, + V4L2_DV_BT_CEA_1920X1080P25, + V4L2_DV_BT_CEA_1920X1080P30, + V4L2_DV_BT_CEA_1920X1080I50, + V4L2_DV_BT_CEA_1920X1080P50, + V4L2_DV_BT_CEA_1920X1080I60, + V4L2_DV_BT_CEA_1920X1080P60, + V4L2_DV_BT_DMT_640X350P85, + V4L2_DV_BT_DMT_640X400P85, + V4L2_DV_BT_DMT_720X400P85, + V4L2_DV_BT_DMT_640X480P72, + V4L2_DV_BT_DMT_640X480P75, + V4L2_DV_BT_DMT_640X480P85, + V4L2_DV_BT_DMT_800X600P56, + V4L2_DV_BT_DMT_800X600P60, + V4L2_DV_BT_DMT_800X600P72, + V4L2_DV_BT_DMT_800X600P75, + V4L2_DV_BT_DMT_800X600P85, + V4L2_DV_BT_DMT_800X600P120_RB, + V4L2_DV_BT_DMT_848X480P60, + V4L2_DV_BT_DMT_1024X768I43, + V4L2_DV_BT_DMT_1024X768P60, + V4L2_DV_BT_DMT_1024X768P70, + 
V4L2_DV_BT_DMT_1024X768P75, + V4L2_DV_BT_DMT_1024X768P85, + V4L2_DV_BT_DMT_1024X768P120_RB, + V4L2_DV_BT_DMT_1152X864P75, + V4L2_DV_BT_DMT_1280X768P60_RB, + V4L2_DV_BT_DMT_1280X768P60, + V4L2_DV_BT_DMT_1280X768P75, + V4L2_DV_BT_DMT_1280X768P85, + V4L2_DV_BT_DMT_1280X768P120_RB, + V4L2_DV_BT_DMT_1280X800P60_RB, + V4L2_DV_BT_DMT_1280X800P60, + V4L2_DV_BT_DMT_1280X800P75, + V4L2_DV_BT_DMT_1280X800P85, + V4L2_DV_BT_DMT_1280X800P120_RB, + V4L2_DV_BT_DMT_1280X960P60, + V4L2_DV_BT_DMT_1280X960P85, + V4L2_DV_BT_DMT_1280X960P120_RB, + V4L2_DV_BT_DMT_1280X1024P60, + V4L2_DV_BT_DMT_1280X1024P75, + V4L2_DV_BT_DMT_1280X1024P85, + V4L2_DV_BT_DMT_1280X1024P120_RB, + V4L2_DV_BT_DMT_1360X768P60, + V4L2_DV_BT_DMT_1360X768P120_RB, + V4L2_DV_BT_DMT_1366X768P60, + V4L2_DV_BT_DMT_1366X768P60_RB, + V4L2_DV_BT_DMT_1400X1050P60_RB, + V4L2_DV_BT_DMT_1400X1050P60, + V4L2_DV_BT_DMT_1400X1050P75, + V4L2_DV_BT_DMT_1400X1050P85, + V4L2_DV_BT_DMT_1400X1050P120_RB, + V4L2_DV_BT_DMT_1440X900P60_RB, + V4L2_DV_BT_DMT_1440X900P60, + V4L2_DV_BT_DMT_1440X900P75, + V4L2_DV_BT_DMT_1440X900P85, + V4L2_DV_BT_DMT_1440X900P120_RB, + V4L2_DV_BT_DMT_1600X900P60_RB, + V4L2_DV_BT_DMT_1600X1200P60, + V4L2_DV_BT_DMT_1600X1200P65, + V4L2_DV_BT_DMT_1600X1200P70, + V4L2_DV_BT_DMT_1600X1200P75, + V4L2_DV_BT_DMT_1600X1200P85, + V4L2_DV_BT_DMT_1600X1200P120_RB, + V4L2_DV_BT_DMT_1680X1050P60_RB, + V4L2_DV_BT_DMT_1680X1050P60, + V4L2_DV_BT_DMT_1680X1050P75, + V4L2_DV_BT_DMT_1680X1050P85, + V4L2_DV_BT_DMT_1680X1050P120_RB, + V4L2_DV_BT_DMT_1792X1344P60, + V4L2_DV_BT_DMT_1792X1344P75, + V4L2_DV_BT_DMT_1792X1344P120_RB, + V4L2_DV_BT_DMT_1856X1392P60, + V4L2_DV_BT_DMT_1856X1392P75, + V4L2_DV_BT_DMT_1856X1392P120_RB, + V4L2_DV_BT_DMT_1920X1200P60_RB, + V4L2_DV_BT_DMT_1920X1200P60, + V4L2_DV_BT_DMT_1920X1200P75, + V4L2_DV_BT_DMT_1920X1200P85, + V4L2_DV_BT_DMT_1920X1200P120_RB, + V4L2_DV_BT_DMT_1920X1440P60, + V4L2_DV_BT_DMT_1920X1440P75, + V4L2_DV_BT_DMT_1920X1440P120_RB, + V4L2_DV_BT_DMT_2048X1152P60_RB, + 
V4L2_DV_BT_DMT_2560X1600P60_RB, + V4L2_DV_BT_DMT_2560X1600P60, + V4L2_DV_BT_DMT_2560X1600P75, + V4L2_DV_BT_DMT_2560X1600P85, + V4L2_DV_BT_DMT_2560X1600P120_RB, + V4L2_DV_BT_CEA_3840X2160P24, + V4L2_DV_BT_CEA_3840X2160P25, + V4L2_DV_BT_CEA_3840X2160P30, + V4L2_DV_BT_CEA_3840X2160P50, + V4L2_DV_BT_CEA_3840X2160P60, + V4L2_DV_BT_CEA_4096X2160P24, + V4L2_DV_BT_CEA_4096X2160P25, + V4L2_DV_BT_CEA_4096X2160P30, + V4L2_DV_BT_CEA_4096X2160P50, + V4L2_DV_BT_DMT_4096X2160P59_94_RB, + V4L2_DV_BT_CEA_4096X2160P60, + { } +}; +EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets); + +bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *dvcap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + const struct v4l2_bt_timings *bt = &t->bt; + const struct v4l2_bt_timings_cap *cap = &dvcap->bt; + u32 caps = cap->capabilities; + const u32 max_vert = 10240; + u32 max_hor = 3 * bt->width; + + if (t->type != V4L2_DV_BT_656_1120) + return false; + if (t->type != dvcap->type || + bt->height < cap->min_height || + bt->height > cap->max_height || + bt->width < cap->min_width || + bt->width > cap->max_width || + bt->pixelclock < cap->min_pixelclock || + bt->pixelclock > cap->max_pixelclock || + (!(caps & V4L2_DV_BT_CAP_CUSTOM) && + cap->standards && bt->standards && + !(bt->standards & cap->standards)) || + (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || + (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) + return false; + + /* sanity checks for the blanking timings */ + if (!bt->interlaced && + (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch)) + return false; + /* + * Some video receivers cannot properly separate the frontporch, + * backporch and sync values, and instead they only have the total + * blanking. That can be assigned to any of these three fields. + * So just check that none of these are way out of range. 
+ */ + if (bt->hfrontporch > max_hor || + bt->hsync > max_hor || bt->hbackporch > max_hor) + return false; + if (bt->vfrontporch > max_vert || + bt->vsync > max_vert || bt->vbackporch > max_vert) + return false; + if (bt->interlaced && (bt->il_vfrontporch > max_vert || + bt->il_vsync > max_vert || bt->il_vbackporch > max_vert)) + return false; + return fnc == NULL || fnc(t, fnc_handle); +} +EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings); + +int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + u32 i, idx; + + memset(t->reserved, 0, sizeof(t->reserved)); + for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, + fnc, fnc_handle) && + idx++ == t->index) { + t->timings = v4l2_dv_timings_presets[i]; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap); + +bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + unsigned pclock_delta, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle) +{ + int i; + + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle)) + return false; + + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, + fnc, fnc_handle) && + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i, + pclock_delta, false)) { + u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS; + + *t = v4l2_dv_timings_presets[i]; + if (can_reduce_fps(&t->bt)) + t->bt.flags |= flags; + + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap); + +bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic) +{ + unsigned int i; + + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + const struct v4l2_bt_timings *bt = + &v4l2_dv_timings_presets[i].bt; + + if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) && + bt->cea861_vic == vic) { + *t = 
v4l2_dv_timings_presets[i]; + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic); + +/** + * v4l2_match_dv_timings - check if two timings match + * @t1: compare this v4l2_dv_timings struct... + * @t2: with this struct. + * @pclock_delta: the allowed pixelclock deviation. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. + * + * Compare t1 with t2 with a given margin of error for the pixelclock. + */ +bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1, + const struct v4l2_dv_timings *t2, + unsigned pclock_delta, bool match_reduced_fps) +{ + if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120) + return false; + if (t1->bt.width == t2->bt.width && + t1->bt.height == t2->bt.height && + t1->bt.interlaced == t2->bt.interlaced && + t1->bt.polarities == t2->bt.polarities && + t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta && + t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta && + t1->bt.hfrontporch == t2->bt.hfrontporch && + t1->bt.hsync == t2->bt.hsync && + t1->bt.hbackporch == t2->bt.hbackporch && + t1->bt.vfrontporch == t2->bt.vfrontporch && + t1->bt.vsync == t2->bt.vsync && + t1->bt.vbackporch == t2->bt.vbackporch && + (!match_reduced_fps || + (t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) == + (t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) && + (!t1->bt.interlaced || + (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch && + t1->bt.il_vsync == t2->bt.il_vsync && + t1->bt.il_vbackporch == t2->bt.il_vbackporch))) + return true; + return false; +} +EXPORT_SYMBOL_GPL(v4l2_match_dv_timings); + +void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, + const struct v4l2_dv_timings *t, bool detailed) +{ + const struct v4l2_bt_timings *bt = &t->bt; + u32 htot, vtot; + u32 fps; + + if (t->type != V4L2_DV_BT_656_1120) + return; + + htot = V4L2_DV_BT_FRAME_WIDTH(bt); + vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); + if (bt->interlaced) + vtot /= 2; + + fps = (htot * vtot) > 0 ? 
div_u64((100 * (u64)bt->pixelclock), + (htot * vtot)) : 0; + + if (prefix == NULL) + prefix = ""; + + pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix, + bt->width, bt->height, bt->interlaced ? "i" : "p", + fps / 100, fps % 100, htot, vtot); + + if (!detailed) + return; + + pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->hfrontporch, + (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-", + bt->hsync, bt->hbackporch); + pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->vfrontporch, + (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-", + bt->vsync, bt->vbackporch); + if (bt->interlaced) + pr_info("%s: vertical bottom field: fp = %u, %ssync = %u, bp = %u\n", + dev_prefix, bt->il_vfrontporch, + (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-", + bt->il_vsync, bt->il_vbackporch); + pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock); + pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n", + dev_prefix, bt->flags, + (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ? + " REDUCED_BLANKING" : "", + ((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) && + bt->vsync == 8) ? " (V2)" : "", + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ? + " CAN_REDUCE_FPS" : "", + (bt->flags & V4L2_DV_FL_REDUCED_FPS) ? + " REDUCED_FPS" : "", + (bt->flags & V4L2_DV_FL_HALF_LINE) ? + " HALF_LINE" : "", + (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ? + " CE_VIDEO" : "", + (bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ? + " FIRST_FIELD_EXTRA_LINE" : "", + (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ? + " HAS_PICTURE_ASPECT" : "", + (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ? + " HAS_CEA861_VIC" : "", + (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ? + " HAS_HDMI_VIC" : ""); + pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards, + (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "", + (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "", + (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "", + (bt->standards & V4L2_DV_BT_STD_GTF) ? 
" GTF" : "", + (bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : ""); + if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) + pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix, + bt->picture_aspect.numerator, + bt->picture_aspect.denominator); + if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) + pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic); + if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) + pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic); +} +EXPORT_SYMBOL_GPL(v4l2_print_dv_timings); + +struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t) +{ + struct v4l2_fract ratio = { 1, 1 }; + unsigned long n, d; + + if (t->type != V4L2_DV_BT_656_1120) + return ratio; + if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)) + return ratio; + + ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator; + ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator; + + rational_best_approximation(ratio.numerator, ratio.denominator, + ratio.numerator, ratio.denominator, &n, &d); + ratio.numerator = n; + ratio.denominator = d; + return ratio; +} +EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio); + +/** v4l2_calc_timeperframe - helper function to calculate timeperframe based + * v4l2_dv_timings fields. + * @t - Timings for the video mode. + * + * Calculates the expected timeperframe using the pixel clock value and + * horizontal/vertical measures. This means that v4l2_dv_timings structure + * must be correctly and fully filled. 
+ */ +struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t) +{ + const struct v4l2_bt_timings *bt = &t->bt; + struct v4l2_fract fps_fract = { 1, 1 }; + unsigned long n, d; + u32 htot, vtot, fps; + u64 pclk; + + if (t->type != V4L2_DV_BT_656_1120) + return fps_fract; + + htot = V4L2_DV_BT_FRAME_WIDTH(bt); + vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); + pclk = bt->pixelclock; + + if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) && + (bt->flags & V4L2_DV_FL_REDUCED_FPS)) + pclk = div_u64(pclk * 1000ULL, 1001); + + fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0; + if (!fps) + return fps_fract; + + rational_best_approximation(fps, 100, fps, 100, &n, &d); + + fps_fract.numerator = d; + fps_fract.denominator = n; + return fps_fract; +} +EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe); + +/* + * CVT defines + * Based on Coordinated Video Timings Standard + * version 1.1 September 10, 2003 + */ + +#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */ +#define CVT_PXL_CLK_GRAN_RB_V2 1000 /* granularity for reduced blanking v2*/ + +/* Normal blanking */ +#define CVT_MIN_V_BPORCH 7 /* lines */ +#define CVT_MIN_V_PORCH_RND 3 /* lines */ +#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */ +#define CVT_HSYNC_PERCENT 8 /* nominal hsync as percentage of line */ + +/* Normal blanking for CVT uses GTF to calculate horizontal blanking */ +#define CVT_CELL_GRAN 8 /* character cell granularity */ +#define CVT_M 600 /* blanking formula gradient */ +#define CVT_C 40 /* blanking formula offset */ +#define CVT_K 128 /* blanking formula scaling factor */ +#define CVT_J 20 /* blanking formula scaling factor */ +#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J) +#define CVT_M_PRIME (CVT_K * CVT_M / 256) + +/* Reduced Blanking */ +#define CVT_RB_MIN_V_BPORCH 7 /* lines */ +#define CVT_RB_V_FPORCH 3 /* lines */ +#define CVT_RB_MIN_V_BLANK 460 /* us */ +#define CVT_RB_H_SYNC 32 /* pixels */ +#define CVT_RB_H_BLANK 160 /* pixels 
*/ +/* Reduce blanking Version 2 */ +#define CVT_RB_V2_H_BLANK 80 /* pixels */ +#define CVT_RB_MIN_V_FPORCH 3 /* lines */ +#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */ +#define CVT_RB_V_BPORCH 6 /* lines */ + +/** v4l2_detect_cvt - detect if the given timings follow the CVT standard + * @frame_height - the total height of the frame (including blanking) in lines. + * @hfreq - the horizontal frequency in Hz. + * @vsync - the height of the vertical sync in lines. + * @active_width - active width of image (does not include blanking). This + * information is needed only in case of version 2 of reduced blanking. + * In other cases, this parameter does not have any effect on timings. + * @polarities - the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced - if this flag is true, it indicates interlaced format + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid CVT format. If so, then it will return true, and fmt will be filled + * in with the found CVT timings. 
+ */ +bool v4l2_detect_cvt(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + unsigned int active_width, + u32 polarities, + bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) +{ + struct v4l2_dv_timings t = {}; + int v_fp, v_bp, h_fp, h_bp, hsync; + int frame_width, image_height, image_width; + bool reduced_blanking; + bool rb_v2 = false; + unsigned int pix_clk; + + if (vsync < 4 || vsync > 8) + return false; + + if (polarities == V4L2_DV_VSYNC_POS_POL) + reduced_blanking = false; + else if (polarities == V4L2_DV_HSYNC_POS_POL) + reduced_blanking = true; + else + return false; + + if (reduced_blanking && vsync == 8) + rb_v2 = true; + + if (rb_v2 && active_width == 0) + return false; + + if (!rb_v2 && vsync > 7) + return false; + + if (hfreq == 0) + return false; + + /* Vertical */ + if (reduced_blanking) { + if (rb_v2) { + v_bp = CVT_RB_V_BPORCH; + v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1; + v_fp -= vsync + v_bp; + + if (v_fp < CVT_RB_V2_MIN_V_FPORCH) + v_fp = CVT_RB_V2_MIN_V_FPORCH; + } else { + v_fp = CVT_RB_V_FPORCH; + v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1; + v_bp -= vsync + v_fp; + + if (v_bp < CVT_RB_MIN_V_BPORCH) + v_bp = CVT_RB_MIN_V_BPORCH; + } + } else { + v_fp = CVT_MIN_V_PORCH_RND; + v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync; + + if (v_bp < CVT_MIN_V_BPORCH) + v_bp = CVT_MIN_V_BPORCH; + } + + if (interlaced) + image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1; + else + image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1; + + if (image_height < 0) + return false; + + /* Aspect ratio based on vsync */ + switch (vsync) { + case 4: + image_width = (image_height * 4) / 3; + break; + case 5: + image_width = (image_height * 16) / 9; + break; + case 6: + image_width = (image_height * 16) / 10; + break; + case 7: + /* special case */ + if (image_height == 1024) + image_width = (image_height * 5) / 4; + else if (image_height == 768) + 
image_width = (image_height * 15) / 9; + else + return false; + break; + case 8: + image_width = active_width; + break; + default: + return false; + } + + if (!rb_v2) + image_width = image_width & ~7; + + /* Horizontal */ + if (reduced_blanking) { + int h_blank; + int clk_gran; + + h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK; + clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = (pix_clk / clk_gran) * clk_gran; + + h_bp = h_blank / 2; + hsync = CVT_RB_H_SYNC; + h_fp = h_blank - h_bp - hsync; + + frame_width = image_width + h_blank; + } else { + unsigned ideal_duty_cycle_per_myriad = + 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq; + int h_blank; + + if (ideal_duty_cycle_per_myriad < 2000) + ideal_duty_cycle_per_myriad = 2000; + + h_blank = image_width * ideal_duty_cycle_per_myriad / + (10000 - ideal_duty_cycle_per_myriad); + h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN; + + h_bp = h_blank / 2; + frame_width = image_width + h_blank; + + hsync = frame_width * CVT_HSYNC_PERCENT / 100; + hsync = (hsync / CVT_CELL_GRAN) * CVT_CELL_GRAN; + h_fp = h_blank - hsync - h_bp; + } + + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; + + if (!interlaced) { + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; + } else { + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - + 2 * vsync) / 2; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + 
t.bt.interlaced = V4L2_DV_INTERLACED; + } + + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_CVT; + + if (reduced_blanking) + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; + return true; +} +EXPORT_SYMBOL_GPL(v4l2_detect_cvt); + +/* + * GTF defines + * Based on Generalized Timing Formula Standard + * Version 1.1 September 2, 1999 + */ + +#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */ + +#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */ +#define GTF_V_FP 1 /* vertical front porch (lines) */ +#define GTF_CELL_GRAN 8 /* character cell granularity */ + +/* Default */ +#define GTF_D_M 600 /* blanking formula gradient */ +#define GTF_D_C 40 /* blanking formula offset */ +#define GTF_D_K 128 /* blanking formula scaling factor */ +#define GTF_D_J 20 /* blanking formula scaling factor */ +#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J) +#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256) + +/* Secondary */ +#define GTF_S_M 3600 /* blanking formula gradient */ +#define GTF_S_C 40 /* blanking formula offset */ +#define GTF_S_K 128 /* blanking formula scaling factor */ +#define GTF_S_J 35 /* blanking formula scaling factor */ +#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J) +#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256) + +/** v4l2_detect_gtf - detect if the given timings follow the GTF standard + * @frame_height - the total height of the frame (including blanking) in lines. + * @hfreq - the horizontal frequency in Hz. + * @vsync - the height of the vertical sync in lines. + * @polarities - the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced - if this flag is true, it indicates interlaced format + * @aspect - preferred aspect ratio. 
GTF has no method of determining the + * aspect ratio in order to derive the image width from the + * image height, so it has to be passed explicitly. Usually + * the native screen aspect ratio is used for this. If it + * is not filled in correctly, then 16:9 will be assumed. + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid GTF format. If so, then it will return true, and fmt will be filled + * in with the found GTF timings. + */ +bool v4l2_detect_gtf(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + u32 polarities, + bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) +{ + struct v4l2_dv_timings t = {}; + int pix_clk; + int v_fp, v_bp, h_fp, hsync; + int frame_width, image_height, image_width; + bool default_gtf; + int h_blank; + + if (vsync != 3) + return false; + + if (polarities == V4L2_DV_VSYNC_POS_POL) + default_gtf = true; + else if (polarities == V4L2_DV_HSYNC_POS_POL) + default_gtf = false; + else + return false; + + if (hfreq == 0) + return false; + + /* Vertical */ + v_fp = GTF_V_FP; + v_bp = (GTF_MIN_VSYNC_BP * hfreq + 500000) / 1000000 - vsync; + if (interlaced) + image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1; + else + image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1; + + if (image_height < 0) + return false; + + if (aspect.numerator == 0 || aspect.denominator == 0) { + aspect.numerator = 16; + aspect.denominator = 9; + } + image_width = ((image_height * aspect.numerator) / aspect.denominator); + image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1); + + /* Horizontal */ + if (default_gtf) { + u64 num; + u32 den; + + num = (((u64)image_width * GTF_D_C_PRIME * hfreq) - + ((u64)image_width * GTF_D_M_PRIME * 1000)); + den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) * + (2 
* GTF_CELL_GRAN); + h_blank = div_u64((num + (den >> 1)), den); + h_blank *= (2 * GTF_CELL_GRAN); + } else { + u64 num; + u32 den; + + num = (((u64)image_width * GTF_S_C_PRIME * hfreq) - + ((u64)image_width * GTF_S_M_PRIME * 1000)); + den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) * + (2 * GTF_CELL_GRAN); + h_blank = div_u64((num + (den >> 1)), den); + h_blank *= (2 * GTF_CELL_GRAN); + } + + frame_width = image_width + h_blank; + + pix_clk = (image_width + h_blank) * hfreq; + pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN; + + hsync = (frame_width * 8 + 50) / 100; + hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN; + + h_fp = h_blank / 2 - hsync; + + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; + + if (!interlaced) { + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; + } else { + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - + 2 * vsync) / 2; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + t.bt.interlaced = V4L2_DV_INTERLACED; + } + + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_GTF; + + if (!default_gtf) + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; + return true; +} +EXPORT_SYMBOL_GPL(v4l2_detect_gtf); + +/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes + * 0x15 and 0x16 from the EDID. + * @hor_landscape - byte 0x15 from the EDID. + * @vert_portrait - byte 0x16 from the EDID. + * + * Determines the aspect ratio from the EDID. 
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: + * "Horizontal and Vertical Screen Size or Aspect Ratio" + */ +struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait) +{ + struct v4l2_fract aspect = { 16, 9 }; + u8 ratio; + + /* Nothing filled in, fallback to 16:9 */ + if (!hor_landscape && !vert_portrait) + return aspect; + /* Both filled in, so they are interpreted as the screen size in cm */ + if (hor_landscape && vert_portrait) { + aspect.numerator = hor_landscape; + aspect.denominator = vert_portrait; + return aspect; + } + /* Only one is filled in, so interpret them as a ratio: + (val + 99) / 100 */ + ratio = hor_landscape | vert_portrait; + /* Change some rounded values into the exact aspect ratio */ + if (ratio == 79) { + aspect.numerator = 16; + aspect.denominator = 9; + } else if (ratio == 34) { + aspect.numerator = 4; + aspect.denominator = 3; + } else if (ratio == 68) { + aspect.numerator = 15; + aspect.denominator = 9; + } else { + aspect.numerator = hor_landscape + 99; + aspect.denominator = 100; + } + if (hor_landscape) + return aspect; + /* The aspect ratio is for portrait, so swap numerator and denominator */ + swap(aspect.denominator, aspect.numerator); + return aspect; +} +EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio); + +/** v4l2_hdmi_rx_colorimetry - determine HDMI colorimetry information + * based on various InfoFrames. + * @avi: the AVI InfoFrame + * @hdmi: the HDMI Vendor InfoFrame, may be NULL + * @height: the frame height + * + * Determines the HDMI colorimetry information, i.e. how the HDMI + * pixel color data should be interpreted. + * + * Note that some of the newer features (DCI-P3, HDR) are not yet + * implemented: the hdmi.h header needs to be updated to the HDMI 2.0 + * and CTA-861-G standards. 
+ */ +struct v4l2_hdmi_colorimetry +v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, + const struct hdmi_vendor_infoframe *hdmi, + unsigned int height) +{ + struct v4l2_hdmi_colorimetry c = { + V4L2_COLORSPACE_SRGB, + V4L2_YCBCR_ENC_DEFAULT, + V4L2_QUANTIZATION_FULL_RANGE, + V4L2_XFER_FUNC_SRGB + }; + bool is_ce = avi->video_code || (hdmi && hdmi->vic); + bool is_sdtv = height <= 576; + bool default_is_lim_range_rgb = avi->video_code > 1; + + switch (avi->colorspace) { + case HDMI_COLORSPACE_RGB: + /* RGB pixel encoding */ + switch (avi->colorimetry) { + case HDMI_COLORIMETRY_EXTENDED: + switch (avi->extended_colorimetry) { + case HDMI_EXTENDED_COLORIMETRY_OPRGB: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + default: + break; + } + break; + default: + break; + } + switch (avi->quantization_range) { + case HDMI_QUANTIZATION_RANGE_LIMITED: + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + break; + case HDMI_QUANTIZATION_RANGE_FULL: + break; + default: + if (default_is_lim_range_rgb) + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + break; + } + break; + + default: + /* YCbCr pixel encoding */ + c.quantization = V4L2_QUANTIZATION_LIM_RANGE; + switch (avi->colorimetry) { + case HDMI_COLORIMETRY_NONE: + if (!is_ce) + break; + if (is_sdtv) { + c.colorspace = V4L2_COLORSPACE_SMPTE170M; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + } else { + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + } + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_COLORIMETRY_ITU_601: + c.colorspace = V4L2_COLORSPACE_SMPTE170M; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_COLORIMETRY_ITU_709: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case 
HDMI_COLORIMETRY_EXTENDED: + switch (avi->extended_colorimetry) { + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_XV709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709: + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_XV601; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_S_YCC_601: + c.colorspace = V4L2_COLORSPACE_SRGB; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_SRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.ycbcr_enc = V4L2_YCBCR_ENC_601; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM: + c.colorspace = V4L2_COLORSPACE_BT2020; + c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + default: /* fall back to ITU_709 */ + c.colorspace = V4L2_COLORSPACE_REC709; + c.ycbcr_enc = V4L2_YCBCR_ENC_709; + c.xfer_func = V4L2_XFER_FUNC_709; + break; + } + break; + default: + break; + } + /* + * YCC Quantization Range signaling is more-or-less broken, + * let's just ignore this. + */ + break; + } + return c; +} +EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry); + +/** + * v4l2_num_edid_blocks() - return the number of EDID blocks + * + * @edid: pointer to the EDID data + * @max_blocks: maximum number of supported EDID blocks + * + * Return: the number of EDID blocks based on the contents of the EDID. + * This supports the HDMI Forum EDID Extension Override Data Block. 
+ */ +unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks) +{ + unsigned int blocks; + + if (!edid || !max_blocks) + return 0; + + // The number of extension blocks is recorded at byte 126 of the + // first 128-byte block in the EDID. + // + // If there is an HDMI Forum EDID Extension Override Data Block + // present, then it is in bytes 4-6 of the first CTA-861 extension + // block of the EDID. + blocks = edid[126] + 1; + // Check for HDMI Forum EDID Extension Override Data Block + if (blocks >= 2 && // The EDID must be at least 2 blocks + max_blocks >= 3 && // The caller supports at least 3 blocks + edid[128] == 2 && // The first extension block is type CTA-861 + edid[133] == 0x78 && // Identifier for the EEODB + (edid[132] & 0xe0) == 0xe0 && // Tag Code == 7 + (edid[132] & 0x1f) >= 2 && // Length >= 2 + edid[134] > 1) // Number of extension blocks is sane + blocks = edid[134] + 1; + return blocks > max_blocks ? max_blocks : blocks; +} +EXPORT_SYMBOL_GPL(v4l2_num_edid_blocks); + +/** + * v4l2_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. 
+ */ +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + + if (offset) + *offset = loc; + if (loc == 0) + return CEC_PHYS_ADDR_INVALID; + return (edid[loc] << 8) | edid[loc + 1]; +} +EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr); + +/** + * v4l2_set_edid_phys_addr() - find and set the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @phys_addr: the new physical address + * + * This function finds the location of the physical address in the EDID + * and fills in the given physical address and updates the checksum + * at the end of the EDID block. It does nothing if the EDID doesn't + * contain a physical address. + */ +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + u8 sum = 0; + unsigned int i; + + if (loc == 0) + return; + edid[loc] = phys_addr >> 8; + edid[loc + 1] = phys_addr & 0xff; + loc &= ~0x7f; + + /* update the checksum */ + for (i = loc; i < loc + 127; i++) + sum += edid[i]; + edid[i] = 256 - sum; +} +EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr); + +/** + * v4l2_phys_addr_for_input() - calculate the PA for an input + * + * @phys_addr: the physical address of the parent + * @input: the number of the input port, must be between 1 and 15 + * + * This function calculates a new physical address based on the input + * port number. For example: + * + * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0 + * + * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0 + * + * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5 + * + * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth. + * + * Return: the new physical address or CEC_PHYS_ADDR_INVALID. 
+ */ +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input) +{ + /* Check if input is sane */ + if (WARN_ON(input == 0 || input > 0xf)) + return CEC_PHYS_ADDR_INVALID; + + if (phys_addr == 0) + return input << 12; + + if ((phys_addr & 0x0fff) == 0) + return phys_addr | (input << 8); + + if ((phys_addr & 0x00ff) == 0) + return phys_addr | (input << 4); + + if ((phys_addr & 0x000f) == 0) + return phys_addr | input; + + /* + * All nibbles are used so no valid physical addresses can be assigned + * to the input. + */ + return CEC_PHYS_ADDR_INVALID; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input); + +/** + * v4l2_phys_addr_validate() - validate a physical address from an EDID + * + * @phys_addr: the physical address to validate + * @parent: if not %NULL, then this is filled with the parents PA. + * @port: if not %NULL, then this is filled with the input port. + * + * This validates a physical address as read from an EDID. If the + * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end), + * then it will return -EINVAL. + * + * The parent PA is passed into %parent and the input port is passed into + * %port. For example: + * + * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0. + * + * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1. + * + * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2. + * + * PA = f.f.f.f: has parent f.f.f.f and input port 0. + * + * Return: 0 if the PA is valid, -EINVAL if not. 
+ */ +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port) +{ + int i; + + if (parent) + *parent = phys_addr; + if (port) + *port = 0; + if (phys_addr == CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + if (parent) + *parent = phys_addr & (0xfff0 << i); + if (port) + *port = (phys_addr >> i) & 0xf; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate); + +#ifdef CONFIG_DEBUG_FS + +#define DEBUGFS_FOPS(type, flag) \ +static ssize_t \ +infoframe_read_##type(struct file *filp, \ + char __user *ubuf, size_t count, loff_t *ppos) \ +{ \ + struct v4l2_debugfs_if *infoframes = filp->private_data; \ + \ + return infoframes->if_read((flag), infoframes->priv, filp, \ + ubuf, count, ppos); \ +} \ + \ +static const struct file_operations infoframe_##type##_fops = { \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = infoframe_read_##type, \ +} + +DEBUGFS_FOPS(avi, V4L2_DEBUGFS_IF_AVI); +DEBUGFS_FOPS(audio, V4L2_DEBUGFS_IF_AUDIO); +DEBUGFS_FOPS(spd, V4L2_DEBUGFS_IF_SPD); +DEBUGFS_FOPS(hdmi, V4L2_DEBUGFS_IF_HDMI); +DEBUGFS_FOPS(drm, V4L2_DEBUGFS_IF_DRM); + +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read) +{ + struct v4l2_debugfs_if *infoframes; + + if (IS_ERR_OR_NULL(root) || !if_types || !if_read) + return NULL; + + infoframes = kzalloc(sizeof(*infoframes), GFP_KERNEL); + if (!infoframes) + return NULL; + + infoframes->if_dir = debugfs_create_dir("infoframes", root); + infoframes->priv = priv; + infoframes->if_read = if_read; + if (if_types & V4L2_DEBUGFS_IF_AVI) + debugfs_create_file("avi", 0400, infoframes->if_dir, + infoframes, &infoframe_avi_fops); + if (if_types & V4L2_DEBUGFS_IF_AUDIO) + debugfs_create_file("audio", 0400, infoframes->if_dir, + infoframes, &infoframe_audio_fops); + if (if_types & 
V4L2_DEBUGFS_IF_SPD) + debugfs_create_file("spd", 0400, infoframes->if_dir, + infoframes, &infoframe_spd_fops); + if (if_types & V4L2_DEBUGFS_IF_HDMI) + debugfs_create_file("hdmi", 0400, infoframes->if_dir, + infoframes, &infoframe_hdmi_fops); + if (if_types & V4L2_DEBUGFS_IF_DRM) + debugfs_create_file("hdr_drm", 0400, infoframes->if_dir, + infoframes, &infoframe_drm_fops); + return infoframes; +} +EXPORT_SYMBOL_GPL(v4l2_debugfs_if_alloc); + +void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes) +{ + if (infoframes) { + debugfs_remove_recursive(infoframes->if_dir); + kfree(infoframes); + } +} +EXPORT_SYMBOL_GPL(v4l2_debugfs_if_free); + +#endif diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-event.c b/6.18.0/drivers/media/v4l2-core/v4l2-event.c new file mode 100644 index 0000000..3898ff7 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-event.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-event.c + * + * V4L2 events. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#include +#include +#include + +#include +#include +#include +#include + +static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx) +{ + idx += sev->first; + return idx >= sev->elems ? 
idx - sev->elems : idx; +} + +static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) +{ + struct v4l2_kevent *kev; + struct timespec64 ts; + unsigned long flags; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + + if (list_empty(&fh->available)) { + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + return -ENOENT; + } + + WARN_ON(fh->navailable == 0); + + kev = list_first_entry(&fh->available, struct v4l2_kevent, list); + list_del(&kev->list); + fh->navailable--; + + kev->event.pending = fh->navailable; + *event = kev->event; + ts = ns_to_timespec64(kev->ts); + event->timestamp.tv_sec = ts.tv_sec; + event->timestamp.tv_nsec = ts.tv_nsec; + kev->sev->first = sev_pos(kev->sev, 1); + kev->sev->in_use--; + + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + return 0; +} + +int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, + int nonblocking) +{ + int ret; + + if (nonblocking) + return __v4l2_event_dequeue(fh, event); + + /* Release the vdev lock while waiting */ + if (fh->vdev->lock) + mutex_unlock(fh->vdev->lock); + + do { + ret = wait_event_interruptible(fh->wait, + fh->navailable != 0); + if (ret < 0) + break; + + ret = __v4l2_event_dequeue(fh, event); + } while (ret == -ENOENT); + + if (fh->vdev->lock) + mutex_lock(fh->vdev->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_event_dequeue); + +/* Caller must hold fh->vdev->fh_lock! */ +static struct v4l2_subscribed_event *v4l2_event_subscribed( + struct v4l2_fh *fh, u32 type, u32 id) +{ + struct v4l2_subscribed_event *sev; + + assert_spin_locked(&fh->vdev->fh_lock); + + list_for_each_entry(sev, &fh->subscribed, list) + if (sev->type == type && sev->id == id) + return sev; + + return NULL; +} + +static void __v4l2_event_queue_fh(struct v4l2_fh *fh, + const struct v4l2_event *ev, u64 ts) +{ + struct v4l2_subscribed_event *sev; + struct v4l2_kevent *kev; + bool copy_payload = true; + + /* Are we subscribed? 
*/ + sev = v4l2_event_subscribed(fh, ev->type, ev->id); + if (sev == NULL) + return; + + /* Increase event sequence number on fh. */ + fh->sequence++; + + /* Do we have any free events? */ + if (sev->in_use == sev->elems) { + /* no, remove the oldest one */ + kev = sev->events + sev_pos(sev, 0); + list_del(&kev->list); + sev->in_use--; + sev->first = sev_pos(sev, 1); + fh->navailable--; + if (sev->elems == 1) { + if (sev->ops && sev->ops->replace) { + sev->ops->replace(&kev->event, ev); + copy_payload = false; + } + } else if (sev->ops && sev->ops->merge) { + struct v4l2_kevent *second_oldest = + sev->events + sev_pos(sev, 0); + sev->ops->merge(&kev->event, &second_oldest->event); + } + } + + /* Take one and fill it. */ + kev = sev->events + sev_pos(sev, sev->in_use); + kev->event.type = ev->type; + if (copy_payload) + kev->event.u = ev->u; + kev->event.id = ev->id; + kev->ts = ts; + kev->event.sequence = fh->sequence; + sev->in_use++; + list_add_tail(&kev->list, &fh->available); + + fh->navailable++; + + wake_up_all(&fh->wait); +} + +void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) +{ + struct v4l2_fh *fh; + unsigned long flags; + u64 ts; + + if (vdev == NULL) + return; + + ts = ktime_get_ns(); + + spin_lock_irqsave(&vdev->fh_lock, flags); + + list_for_each_entry(fh, &vdev->fh_list, list) + __v4l2_event_queue_fh(fh, ev, ts); + + spin_unlock_irqrestore(&vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_queue); + +void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) +{ + unsigned long flags; + u64 ts = ktime_get_ns(); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_queue_fh(fh, ev, ts); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_queue_fh); + +int v4l2_event_pending(struct v4l2_fh *fh) +{ + return fh->navailable; +} +EXPORT_SYMBOL_GPL(v4l2_event_pending); + +void v4l2_event_wake_all(struct video_device *vdev) +{ + struct v4l2_fh *fh; + unsigned long 
flags; + + if (!vdev) + return; + + spin_lock_irqsave(&vdev->fh_lock, flags); + + list_for_each_entry(fh, &vdev->fh_list, list) + wake_up_all(&fh->wait); + + spin_unlock_irqrestore(&vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_event_wake_all); + +static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev) +{ + struct v4l2_fh *fh = sev->fh; + unsigned int i; + + lockdep_assert_held(&fh->subscribe_lock); + assert_spin_locked(&fh->vdev->fh_lock); + + /* Remove any pending events for this subscription */ + for (i = 0; i < sev->in_use; i++) { + list_del(&sev->events[sev_pos(sev, i)].list); + fh->navailable--; + } + list_del(&sev->list); +} + +int v4l2_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub, unsigned int elems, + const struct v4l2_subscribed_event_ops *ops) +{ + struct v4l2_subscribed_event *sev, *found_ev; + unsigned long flags; + unsigned int i; + int ret = 0; + + if (sub->type == V4L2_EVENT_ALL) + return -EINVAL; + + if (elems < 1) + elems = 1; + + sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL); + if (!sev) + return -ENOMEM; + sev->elems = elems; + for (i = 0; i < elems; i++) + sev->events[i].sev = sev; + sev->type = sub->type; + sev->id = sub->id; + sev->flags = sub->flags; + sev->fh = fh; + sev->ops = ops; + + mutex_lock(&fh->subscribe_lock); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (!found_ev) + list_add(&sev->list, &fh->subscribed); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + if (found_ev) { + /* Already listening */ + kvfree(sev); + } else if (sev->ops && sev->ops->add) { + ret = sev->ops->add(sev, elems); + if (ret) { + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_unsubscribe(sev); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + kvfree(sev); + } + } + + mutex_unlock(&fh->subscribe_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_event_subscribe); + +void 
v4l2_event_unsubscribe_all(struct v4l2_fh *fh) +{ + struct v4l2_event_subscription sub; + struct v4l2_subscribed_event *sev; + unsigned long flags; + + do { + sev = NULL; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + if (!list_empty(&fh->subscribed)) { + sev = list_first_entry(&fh->subscribed, + struct v4l2_subscribed_event, list); + sub.type = sev->type; + sub.id = sev->id; + } + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + if (sev) + v4l2_event_unsubscribe(fh, &sub); + } while (sev); +} +EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all); + +int v4l2_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct v4l2_subscribed_event *sev; + unsigned long flags; + + if (sub->type == V4L2_EVENT_ALL) { + v4l2_event_unsubscribe_all(fh); + return 0; + } + + mutex_lock(&fh->subscribe_lock); + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + + sev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (sev != NULL) + __v4l2_event_unsubscribe(sev); + + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + + if (sev && sev->ops && sev->ops->del) + sev->ops->del(sev); + + mutex_unlock(&fh->subscribe_lock); + + kvfree(sev); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe); + +int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return v4l2_event_unsubscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe); + +static void v4l2_event_src_replace(struct v4l2_event *old, + const struct v4l2_event *new) +{ + u32 old_changes = old->u.src_change.changes; + + old->u.src_change = new->u.src_change; + old->u.src_change.changes |= old_changes; +} + +static void v4l2_event_src_merge(const struct v4l2_event *old, + struct v4l2_event *new) +{ + new->u.src_change.changes |= old->u.src_change.changes; +} + +static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = { + .replace = v4l2_event_src_replace, + .merge = v4l2_event_src_merge, +}; + +int 
v4l2_src_change_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_SOURCE_CHANGE) + return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe); + +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, struct v4l2_event_subscription *sub) +{ + return v4l2_src_change_event_subscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-fh.c b/6.18.0/drivers/media/v4l2-core/v4l2-fh.c new file mode 100644 index 0000000..df3ba9d --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-fh.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-fh.c + * + * V4L2 file handles. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev) +{ + fh->vdev = vdev; + /* Inherit from video_device. May be overridden by the driver. */ + fh->ctrl_handler = vdev->ctrl_handler; + INIT_LIST_HEAD(&fh->list); + set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags); + /* + * determine_valid_ioctls() does not know if struct v4l2_fh + * is used by this driver, but here we do. So enable the + * prio ioctls here. 
+ */ + set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls); + set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls); + fh->prio = V4L2_PRIORITY_UNSET; + init_waitqueue_head(&fh->wait); + INIT_LIST_HEAD(&fh->available); + INIT_LIST_HEAD(&fh->subscribed); + fh->sequence = -1; + mutex_init(&fh->subscribe_lock); +} +EXPORT_SYMBOL_GPL(v4l2_fh_init); + +void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp) +{ + unsigned long flags; + + filp->private_data = fh; + + v4l2_prio_open(fh->vdev->prio, &fh->prio); + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_add(&fh->list, &fh->vdev->fh_list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_fh_add); + +int v4l2_fh_open(struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL); + + if (fh == NULL) + return -ENOMEM; + v4l2_fh_init(fh, vdev); + v4l2_fh_add(fh, filp); + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fh_open); + +void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp) +{ + unsigned long flags; + + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_del_init(&fh->list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + v4l2_prio_close(fh->vdev->prio, fh->prio); + + filp->private_data = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_fh_del); + +void v4l2_fh_exit(struct v4l2_fh *fh) +{ + if (fh->vdev == NULL) + return; + v4l_disable_media_source(fh->vdev); + v4l2_event_unsubscribe_all(fh); + mutex_destroy(&fh->subscribe_lock); + fh->vdev = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_fh_exit); + +int v4l2_fh_release(struct file *filp) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(filp); + + if (fh) { + v4l2_fh_del(fh, filp); + v4l2_fh_exit(fh); + kfree(fh); + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fh_release); + +int v4l2_fh_is_singular(struct v4l2_fh *fh) +{ + unsigned long flags; + int is_singular; + + if (fh == NULL || fh->vdev == NULL) + return 0; + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + is_singular = 
list_is_singular(&fh->list); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); + return is_singular; +} +EXPORT_SYMBOL_GPL(v4l2_fh_is_singular); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-flash-led-class.c b/6.18.0/drivers/media/v4l2-core/v4l2-flash-led-class.c new file mode 100644 index 0000000..355595a --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 flash LED sub-device registration helpers. + * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski + */ + +#include +#include +#include +#include +#include +#include +#include + +#define has_flash_op(v4l2_flash, op) \ + (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op) + +#define call_flash_op(v4l2_flash, op, arg) \ + (has_flash_op(v4l2_flash, op) ? \ + v4l2_flash->ops->op(v4l2_flash, arg) : \ + -EINVAL) + +enum ctrl_init_data_id { + LED_MODE, + TORCH_INTENSITY, + FLASH_INTENSITY, + INDICATOR_INTENSITY, + FLASH_TIMEOUT, + STROBE_SOURCE, + /* + * Only above values are applicable to + * the 'ctrls' array in the struct v4l2_flash. + */ + FLASH_STROBE, + STROBE_STOP, + STROBE_STATUS, + FLASH_FAULT, + NUM_FLASH_CTRLS, +}; + +static enum led_brightness __intensity_to_led_brightness( + struct v4l2_ctrl *ctrl, s32 intensity) +{ + intensity -= ctrl->minimum; + intensity /= (u32) ctrl->step; + + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Therefore it must be possible to set it to 0 level which in + * the LED subsystem reflects LED_OFF state. + */ + if (ctrl->minimum) + ++intensity; + + return intensity; +} + +static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl, + enum led_brightness brightness) +{ + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. 
+ * Do not decrement brightness read from the LED subsystem for + * indicator LED as it may equal 0. For torch LEDs this function + * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the + * brightness read is guaranteed to be greater than 0. In the mode + * V4L2_FLASH_LED_MODE_NONE the cached torch intensity value is used. + */ + if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY) + --brightness; + + return (brightness * ctrl->step) + ctrl->minimum; +} + +static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + enum led_brightness brightness; + + if (has_flash_op(v4l2_flash, intensity_to_led_brightness)) + brightness = call_flash_op(v4l2_flash, + intensity_to_led_brightness, + ctrl->val); + else + brightness = __intensity_to_led_brightness(ctrl, ctrl->val); + /* + * In case a LED Flash class driver provides ops for custom + * brightness <-> intensity conversion, it also must have defined + * related v4l2 control step == 1. In such a case a backward conversion + * from led brightness to v4l2 intensity is required to find out the + * aligned intensity value. 
+ */ + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + brightness); + + if (ctrl == ctrls[TORCH_INTENSITY]) { + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + + if (WARN_ON_ONCE(!v4l2_flash->fled_cdev)) + return -EINVAL; + + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + if (WARN_ON_ONCE(!v4l2_flash->iled_cdev)) + return -EINVAL; + + led_cdev = v4l2_flash->iled_cdev; + } + + return led_set_brightness_sync(led_cdev, brightness); +} + +static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + int ret; + + if (ctrl == ctrls[TORCH_INTENSITY]) { + /* + * Update torch brightness only if in TORCH_MODE. In other modes + * torch led is turned off, which would spuriously inform the + * user space that V4L2_CID_FLASH_TORCH_INTENSITY control value + * has changed to 0. 
+ */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + + if (WARN_ON_ONCE(!v4l2_flash->fled_cdev)) + return -EINVAL; + + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + if (WARN_ON_ONCE(!v4l2_flash->iled_cdev)) + return -EINVAL; + + led_cdev = v4l2_flash->iled_cdev; + } + + ret = led_update_brightness(led_cdev); + if (ret < 0) + return ret; + + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + led_cdev->brightness); + else + ctrl->val = __led_brightness_to_intensity(ctrl, + led_cdev->brightness); + + return 0; +} + +static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + bool is_strobing; + int ret; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_update_led_brightness(v4l2_flash, c); + } + + if (!fled_cdev) + return -EINVAL; + + switch (c->id) { + case V4L2_CID_FLASH_INTENSITY: + ret = led_update_flash_brightness(fled_cdev); + if (ret < 0) + return ret; + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. 
+ */ + c->val = fled_cdev->brightness.val; + return 0; + case V4L2_CID_FLASH_STROBE_STATUS: + ret = led_get_flash_strobe(fled_cdev, &is_strobing); + if (ret < 0) + return ret; + c->val = is_strobing; + return 0; + case V4L2_CID_FLASH_FAULT: + /* LED faults map directly to V4L2 flash faults */ + return led_get_flash_fault(fled_cdev, &c->val); + default: + return -EINVAL; + } +} + +static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls) +{ + return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) || + (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val != + V4L2_FLASH_STROBE_SOURCE_SOFTWARE))); +} + +static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + bool external_strobe; + int ret = 0; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_set_led_brightness(v4l2_flash, c); + } + + if (!fled_cdev) + return -EINVAL; + + led_cdev = &fled_cdev->led_cdev; + + switch (c->id) { + case V4L2_CID_FLASH_LED_MODE: + switch (c->val) { + case V4L2_FLASH_LED_MODE_NONE: + led_set_brightness_sync(led_cdev, LED_OFF); + return led_set_flash_strobe(fled_cdev, false); + case V4L2_FLASH_LED_MODE_FLASH: + /* Turn the torch LED off */ + led_set_brightness_sync(led_cdev, LED_OFF); + if (ctrls[STROBE_SOURCE]) { + external_strobe = (ctrls[STROBE_SOURCE]->val == + V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + + ret = call_flash_op(v4l2_flash, + external_strobe_set, + external_strobe); + } + return ret; + case V4L2_FLASH_LED_MODE_TORCH: + if (ctrls[STROBE_SOURCE]) { + ret = call_flash_op(v4l2_flash, + external_strobe_set, + false); + if (ret < 0) + return ret; + } + /* Stop flash strobing */ + ret = led_set_flash_strobe(fled_cdev, false); + if (ret < 0) + return ret; + + return 
v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + } + break; + case V4L2_CID_FLASH_STROBE_SOURCE: + external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + /* + * For some hardware arrangements setting strobe source may + * affect torch mode. Therefore, if not in the flash mode, + * cache only this setting. It will be applied upon switching + * to flash mode. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) + return 0; + + return call_flash_op(v4l2_flash, external_strobe_set, + external_strobe); + case V4L2_CID_FLASH_STROBE: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, true); + case V4L2_CID_FLASH_STROBE_STOP: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, false); + case V4L2_CID_FLASH_TIMEOUT: + /* + * No conversion is needed as LED Flash class also uses + * microseconds for flash timeout units. + */ + return led_set_flash_timeout(fled_cdev, c->val); + case V4L2_CID_FLASH_INTENSITY: + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. + */ + return led_set_flash_brightness(fled_cdev, c->val); + } + + return -EINVAL; +} + +static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = { + .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl, + .s_ctrl = v4l2_flash_s_ctrl, +}; + +static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s, + struct v4l2_ctrl_config *c) +{ + c->min = s->min; + c->max = s->max; + c->step = s->step; + c->def = s->val; +} + +static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg, + struct v4l2_flash_ctrl_data *ctrl_init_data) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct v4l2_ctrl_config *ctrl_cfg; + u32 mask; + + /* Init INDICATOR_INTENSITY ctrl data */ + if (v4l2_flash->iled_cdev) { + ctrl_init_data[INDICATOR_INTENSITY].cid = + V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, + ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg->min = 0; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } + + if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH))) + return; + + /* Init FLASH_FAULT ctrl data */ + if (flash_cfg->flash_faults) { + ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT; + ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config; + ctrl_cfg->id = V4L2_CID_FLASH_FAULT; + ctrl_cfg->max = flash_cfg->flash_faults; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_LED_MODE ctrl data */ + mask = 1 << V4L2_FLASH_LED_MODE_NONE | + 1 << V4L2_FLASH_LED_MODE_TORCH; + if (led_cdev->flags & LED_DEV_CAP_FLASH) + mask |= 1 << V4L2_FLASH_LED_MODE_FLASH; + + ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg = &ctrl_init_data[LED_MODE].config; + ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE; + ctrl_cfg->flags = 0; + + /* Init TORCH_INTENSITY ctrl data */ + ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + + /* Init FLASH_STROBE ctrl data */ + ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE; + ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE; + + /* Init 
STROBE_STOP ctrl data */ + ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP; + ctrl_cfg = &ctrl_init_data[STROBE_STOP].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP; + + /* Init FLASH_STROBE_SOURCE ctrl data */ + if (flash_cfg->has_external_strobe) { + mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) | + (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + ctrl_init_data[STROBE_SOURCE].cid = + V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE; + } + + /* Init STROBE_STATUS ctrl data */ + if (has_flash_op(fled_cdev, strobe_get)) { + ctrl_init_data[STROBE_STATUS].cid = + V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_TIMEOUT ctrl data */ + if (has_flash_op(fled_cdev, timeout_set)) { + ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT; + ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT; + } + + /* Init FLASH_INTENSITY ctrl data */ + if (has_flash_op(fled_cdev, flash_brightness_set)) { + ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } +} + +static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg) + +{ + struct v4l2_flash_ctrl_data *ctrl_init_data; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_config *ctrl_cfg; + int i, ret, num_ctrls = 0; + + v4l2_flash->ctrls 
= devm_kcalloc(v4l2_flash->sd.dev, + STROBE_SOURCE + 1, + sizeof(*v4l2_flash->ctrls), + GFP_KERNEL); + if (!v4l2_flash->ctrls) + return -ENOMEM; + + /* allocate memory dynamically so as not to exceed stack frame size */ + ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data), + GFP_KERNEL); + if (!ctrl_init_data) + return -ENOMEM; + + __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) + if (ctrl_init_data[i].cid) + ++num_ctrls; + + v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) { + ctrl_cfg = &ctrl_init_data[i].config; + if (!ctrl_init_data[i].cid) + continue; + + if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE || + ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE) + ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->max, + ctrl_cfg->menu_skip_mask, + ctrl_cfg->def); + else + ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->min, + ctrl_cfg->max, + ctrl_cfg->step, + ctrl_cfg->def); + + if (ctrl) + ctrl->flags |= ctrl_cfg->flags; + + if (i <= STROBE_SOURCE) + v4l2_flash->ctrls[i] = ctrl; + } + + kfree(ctrl_init_data); + + if (v4l2_flash->hdl.error) { + ret = v4l2_flash->hdl.error; + goto error_free_handler; + } + + v4l2_ctrl_handler_setup(&v4l2_flash->hdl); + + v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl; + + return 0; + +error_free_handler: + v4l2_ctrl_handler_free(&v4l2_flash->hdl); + return ret; +} + +static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + int ret = 0; + + if (ctrls[TORCH_INTENSITY]) { + ret = v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + if (ret < 0) + return ret; + } + + if (ctrls[INDICATOR_INTENSITY]) { + ret = v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[INDICATOR_INTENSITY]); + if 
(ret < 0) + return ret; + } + + if (ctrls[FLASH_TIMEOUT]) { + if (WARN_ON_ONCE(!fled_cdev)) + return -EINVAL; + + ret = led_set_flash_timeout(fled_cdev, + ctrls[FLASH_TIMEOUT]->val); + if (ret < 0) + return ret; + } + + if (ctrls[FLASH_INTENSITY]) { + if (WARN_ON_ONCE(!fled_cdev)) + return -EINVAL; + + ret = led_set_flash_brightness(fled_cdev, + ctrls[FLASH_INTENSITY]->val); + if (ret < 0) + return ret; + } + + /* + * For some hardware arrangements setting strobe source may affect + * torch mode. Synchronize strobe source setting only if not in torch + * mode. For torch mode case it will get synchronized upon switching + * to flash mode. + */ + if (ctrls[STROBE_SOURCE] && + ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + ret = call_flash_op(v4l2_flash, external_strobe_set, + ctrls[STROBE_SOURCE]->val); + + return ret; +} + +/* + * V4L2 subdev internal operations + */ + +static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + + led_sysfs_disable(led_cdev); + led_trigger_remove(led_cdev); + + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + + led_sysfs_disable(led_cdev_ind); + led_trigger_remove(led_cdev_ind); + + mutex_unlock(&led_cdev_ind->led_access); + } + + ret = __sync_device_with_v4l2_controls(v4l2_flash); + if (ret < 0) + goto out_sync_device; + + return 0; +out_sync_device: + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + + if (v4l2_flash->ctrls[STROBE_SOURCE]) + ret = v4l2_ctrl_s_ctrl( + v4l2_flash->ctrls[STROBE_SOURCE], + V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + led_sysfs_enable(led_cdev); + + mutex_unlock(&led_cdev->led_access); + } + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = { + .open = v4l2_flash_open, + .close = v4l2_flash_close, +}; + +static const struct v4l2_subdev_ops v4l2_flash_subdev_ops; + +static struct v4l2_flash *__v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) +{ + struct v4l2_flash *v4l2_flash; + struct v4l2_subdev *sd; + int ret; + + if (!config) + return ERR_PTR(-EINVAL); + + v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL); + if (!v4l2_flash) + return ERR_PTR(-ENOMEM); + + sd = &v4l2_flash->sd; + v4l2_flash->fled_cdev = fled_cdev; + v4l2_flash->iled_cdev = iled_cdev; + v4l2_flash->ops = ops; + sd->dev = dev; + sd->fwnode = fwn ? 
fwn : dev_fwnode(dev); + v4l2_subdev_init(sd, &v4l2_flash_subdev_ops); + sd->internal_ops = &v4l2_flash_subdev_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + strscpy(sd->name, config->dev_name, sizeof(sd->name)); + + ret = media_entity_pads_init(&sd->entity, 0, NULL); + if (ret < 0) + return ERR_PTR(ret); + + sd->entity.function = MEDIA_ENT_F_FLASH; + + ret = v4l2_flash_init_controls(v4l2_flash, config); + if (ret < 0) + goto err_init_controls; + + fwnode_handle_get(sd->fwnode); + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto err_async_register_sd; + + return v4l2_flash; + +err_async_register_sd: + fwnode_handle_put(sd->fwnode); + v4l2_ctrl_handler_free(sd->ctrl_handler); +err_init_controls: + media_entity_cleanup(&sd->entity); + + return ERR_PTR(ret); +} + +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_init); + +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init); + +void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ + struct v4l2_subdev *sd; + + if (IS_ERR_OR_NULL(v4l2_flash)) + return; + + sd = &v4l2_flash->sd; + + v4l2_async_unregister_subdev(sd); + + fwnode_handle_put(sd->fwnode); + + v4l2_ctrl_handler_free(sd->ctrl_handler); + media_entity_cleanup(&sd->entity); +} +EXPORT_SYMBOL_GPL(v4l2_flash_release); + +MODULE_AUTHOR("Jacek Anaszewski "); +MODULE_DESCRIPTION("V4L2 Flash sub-device helpers"); +MODULE_LICENSE("GPL v2"); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-fwnode.c b/6.18.0/drivers/media/v4l2-core/v4l2-fwnode.c new file mode 100644 
index 0000000..cb153ce --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-fwnode.c @@ -0,0 +1,1299 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 fwnode binding parsing library + * + * The origins of the V4L2 fwnode library are in V4L2 OF library that + * formerly was located in v4l2-of.c. + * + * Copyright (c) 2016 Intel Corporation. + * Author: Sakari Ailus + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "v4l2-subdev-priv.h" + +static const struct v4l2_fwnode_bus_conv { + enum v4l2_fwnode_bus_type fwnode_bus_type; + enum v4l2_mbus_type mbus_type; + const char *name; +} buses[] = { + { + V4L2_FWNODE_BUS_TYPE_GUESS, + V4L2_MBUS_UNKNOWN, + "not specified", + }, { + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_MBUS_CSI2_CPHY, + "MIPI CSI-2 C-PHY", + }, { + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_MBUS_CSI1, + "MIPI CSI-1", + }, { + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_MBUS_CCP2, + "compact camera port 2", + }, { + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_MBUS_CSI2_DPHY, + "MIPI CSI-2 D-PHY", + }, { + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_MBUS_PARALLEL, + "parallel", + }, { + V4L2_FWNODE_BUS_TYPE_BT656, + V4L2_MBUS_BT656, + "Bt.656", + }, { + V4L2_FWNODE_BUS_TYPE_DPI, + V4L2_MBUS_DPI, + "DPI", + } +}; + +static const struct v4l2_fwnode_bus_conv * +get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].fwnode_bus_type == type) + return &buses[i]; + + return NULL; +} + +static enum v4l2_mbus_type +v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + + return conv ? 
conv->mbus_type : V4L2_MBUS_INVALID; +} + +static const char * +v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + + return conv ? conv->name : "not found"; +} + +static const struct v4l2_fwnode_bus_conv * +get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].mbus_type == type) + return &buses[i]; + + return NULL; +} + +static const char * +v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type) +{ + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_mbus(type); + + return conv ? conv->name : "not found"; +} + +static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2; + bool have_clk_lane = false, have_data_lanes = false, + have_lane_polarities = false, have_line_orders = false; + unsigned int flags = 0, lanes_used = 0; + u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; + u32 clock_lane = 0; + unsigned int num_data_lanes = 0; + bool use_default_lane_mapping = false; + unsigned int i; + u32 v; + int rval; + + if (bus_type == V4L2_MBUS_CSI2_DPHY || + bus_type == V4L2_MBUS_CSI2_CPHY) { + use_default_lane_mapping = true; + + num_data_lanes = min_t(u32, bus->num_data_lanes, + V4L2_MBUS_CSI2_MAX_DATA_LANES); + + clock_lane = bus->clock_lane; + if (clock_lane) + use_default_lane_mapping = false; + + for (i = 0; i < num_data_lanes; i++) { + array[i] = bus->data_lanes[i]; + if (array[i]) + use_default_lane_mapping = false; + } + + if (use_default_lane_mapping) + pr_debug("no lane mapping given, using defaults\n"); + } + + rval = fwnode_property_count_u32(fwnode, "data-lanes"); + if (rval > 0) { + num_data_lanes = + min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval); + + fwnode_property_read_u32_array(fwnode, 
"data-lanes", array, + num_data_lanes); + + have_data_lanes = true; + if (use_default_lane_mapping) { + pr_debug("data-lanes property exists; disabling default mapping\n"); + use_default_lane_mapping = false; + } + } + + for (i = 0; i < num_data_lanes; i++) { + if (lanes_used & BIT(array[i])) { + if (have_data_lanes || !use_default_lane_mapping) + pr_warn("duplicated lane %u in data-lanes, using defaults\n", + array[i]); + use_default_lane_mapping = true; + } + lanes_used |= BIT(array[i]); + + if (have_data_lanes) + pr_debug("lane %u position %u\n", i, array[i]); + } + + rval = fwnode_property_count_u32(fwnode, "lane-polarities"); + if (rval > 0) { + if (rval != 1 + num_data_lanes /* clock+data */) { + pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n", + 1 + num_data_lanes, rval); + return -EINVAL; + } + + have_lane_polarities = true; + } + + rval = fwnode_property_count_u32(fwnode, "line-orders"); + if (rval > 0) { + if (rval != num_data_lanes) { + pr_warn("invalid number of line-orders entries (need %u, got %u)\n", + num_data_lanes, rval); + return -EINVAL; + } + + have_line_orders = true; + } + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { + clock_lane = v; + pr_debug("clock lane position %u\n", v); + have_clk_lane = true; + } + + if (have_clk_lane && lanes_used & BIT(clock_lane) && + !use_default_lane_mapping) { + pr_warn("duplicated lane %u in clock-lanes, using defaults\n", + v); + use_default_lane_mapping = true; + } + + if (fwnode_property_present(fwnode, "clock-noncontinuous")) { + flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK; + pr_debug("non-continuous clock\n"); + } + + if (bus_type == V4L2_MBUS_CSI2_DPHY || + bus_type == V4L2_MBUS_CSI2_CPHY || + lanes_used || have_clk_lane || flags) { + /* Only D-PHY has a clock lane. 
*/ + unsigned int dfl_data_lane_index = + bus_type == V4L2_MBUS_CSI2_DPHY; + + bus->flags = flags; + if (bus_type == V4L2_MBUS_UNKNOWN) + vep->bus_type = V4L2_MBUS_CSI2_DPHY; + bus->num_data_lanes = num_data_lanes; + + if (use_default_lane_mapping) { + bus->clock_lane = 0; + for (i = 0; i < num_data_lanes; i++) + bus->data_lanes[i] = dfl_data_lane_index + i; + } else { + bus->clock_lane = clock_lane; + for (i = 0; i < num_data_lanes; i++) + bus->data_lanes[i] = array[i]; + } + + if (have_lane_polarities) { + fwnode_property_read_u32_array(fwnode, + "lane-polarities", array, + 1 + num_data_lanes); + + for (i = 0; i < 1 + num_data_lanes; i++) { + bus->lane_polarities[i] = array[i]; + pr_debug("lane %u polarity %sinverted", + i, array[i] ? "" : "not "); + } + } else { + pr_debug("no lane polarities defined, assuming not inverted\n"); + } + + if (have_line_orders) { + fwnode_property_read_u32_array(fwnode, + "line-orders", array, + num_data_lanes); + + for (i = 0; i < num_data_lanes; i++) { + static const char * const orders[] = { + "ABC", "ACB", "BAC", "BCA", "CAB", "CBA" + }; + + if (array[i] >= ARRAY_SIZE(orders)) { + pr_warn("lane %u invalid line-order assuming ABC (got %u)\n", + i, array[i]); + bus->line_orders[i] = + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC; + continue; + } + + bus->line_orders[i] = array[i]; + pr_debug("lane %u line order %s", i, + orders[array[i]]); + } + } else { + for (i = 0; i < num_data_lanes; i++) + bus->line_orders[i] = + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC; + + pr_debug("no line orders defined, assuming ABC\n"); + } + } + + return 0; +} + +#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ + V4L2_MBUS_HSYNC_ACTIVE_LOW | \ + V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ + V4L2_MBUS_VSYNC_ACTIVE_LOW | \ + V4L2_MBUS_FIELD_EVEN_HIGH | \ + V4L2_MBUS_FIELD_EVEN_LOW) + +static void +v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_parallel 
*bus = &vep->bus.parallel; + unsigned int flags = 0; + u32 v; + + if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656) + flags = bus->flags; + + if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) { + flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH | + V4L2_MBUS_HSYNC_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH : + V4L2_MBUS_HSYNC_ACTIVE_LOW; + pr_debug("hsync-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) { + flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH | + V4L2_MBUS_VSYNC_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH : + V4L2_MBUS_VSYNC_ACTIVE_LOW; + pr_debug("vsync-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) { + flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH | + V4L2_MBUS_FIELD_EVEN_LOW); + flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH : + V4L2_MBUS_FIELD_EVEN_LOW; + pr_debug("field-even-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) { + flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING | + V4L2_MBUS_PCLK_SAMPLE_FALLING | + V4L2_MBUS_PCLK_SAMPLE_DUALEDGE); + switch (v) { + case 0: + flags |= V4L2_MBUS_PCLK_SAMPLE_FALLING; + pr_debug("pclk-sample low\n"); + break; + case 1: + flags |= V4L2_MBUS_PCLK_SAMPLE_RISING; + pr_debug("pclk-sample high\n"); + break; + case 2: + flags |= V4L2_MBUS_PCLK_SAMPLE_DUALEDGE; + pr_debug("pclk-sample dual edge\n"); + break; + default: + pr_warn("invalid argument for pclk-sample"); + break; + } + } + + if (!fwnode_property_read_u32(fwnode, "data-active", &v)) { + flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH | + V4L2_MBUS_DATA_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH : + V4L2_MBUS_DATA_ACTIVE_LOW; + pr_debug("data-active %s\n", v ? 
"high" : "low"); + } + + if (fwnode_property_present(fwnode, "slave-mode")) { + pr_debug("slave mode\n"); + flags &= ~V4L2_MBUS_MASTER; + flags |= V4L2_MBUS_SLAVE; + } else { + flags &= ~V4L2_MBUS_SLAVE; + flags |= V4L2_MBUS_MASTER; + } + + if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) { + bus->bus_width = v; + pr_debug("bus-width %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) { + bus->data_shift = v; + pr_debug("data-shift %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) { + flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH | + V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW); + flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH : + V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW; + pr_debug("sync-on-green-active %s\n", v ? "high" : "low"); + } + + if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) { + flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH | + V4L2_MBUS_DATA_ENABLE_LOW); + flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH : + V4L2_MBUS_DATA_ENABLE_LOW; + pr_debug("data-enable-active %s\n", v ? 
"high" : "low"); + } + + switch (bus_type) { + default: + bus->flags = flags; + if (flags & PARALLEL_MBUS_FLAGS) + vep->bus_type = V4L2_MBUS_PARALLEL; + else + vep->bus_type = V4L2_MBUS_BT656; + break; + case V4L2_MBUS_PARALLEL: + vep->bus_type = V4L2_MBUS_PARALLEL; + bus->flags = flags; + break; + case V4L2_MBUS_BT656: + vep->bus_type = V4L2_MBUS_BT656; + bus->flags = flags & ~PARALLEL_MBUS_FLAGS; + break; + } +} + +static void +v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + enum v4l2_mbus_type bus_type) +{ + struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1; + u32 v; + + if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) { + bus->clock_inv = v; + pr_debug("clock-inv %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "strobe", &v)) { + bus->strobe = v; + pr_debug("strobe %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) { + bus->data_lane = v; + pr_debug("data-lanes %u\n", v); + } + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { + bus->clock_lane = v; + pr_debug("clock-lanes %u\n", v); + } + + if (bus_type == V4L2_MBUS_CCP2) + vep->bus_type = V4L2_MBUS_CCP2; + else + vep->bus_type = V4L2_MBUS_CSI1; +} + +static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS; + enum v4l2_mbus_type mbus_type; + int rval; + + pr_debug("===== begin parsing endpoint %pfw\n", fwnode); + + fwnode_property_read_u32(fwnode, "bus-type", &bus_type); + pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n", + v4l2_fwnode_bus_type_to_string(bus_type), bus_type, + v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type); + if (mbus_type == V4L2_MBUS_INVALID) { + pr_debug("unsupported bus type %u\n", bus_type); + return -EINVAL; + } + + if (vep->bus_type != V4L2_MBUS_UNKNOWN) { + if (mbus_type != 
V4L2_MBUS_UNKNOWN && + vep->bus_type != mbus_type) { + pr_debug("expecting bus type %s\n", + v4l2_fwnode_mbus_type_to_string(vep->bus_type)); + return -ENXIO; + } + } else { + vep->bus_type = mbus_type; + } + + switch (vep->bus_type) { + case V4L2_MBUS_UNKNOWN: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep, + V4L2_MBUS_UNKNOWN); + if (rval) + return rval; + + if (vep->bus_type == V4L2_MBUS_UNKNOWN) + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep, + V4L2_MBUS_UNKNOWN); + + pr_debug("assuming media bus type %s (%u)\n", + v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + + break; + case V4L2_MBUS_CCP2: + case V4L2_MBUS_CSI1: + v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type); + + break; + case V4L2_MBUS_CSI2_DPHY: + case V4L2_MBUS_CSI2_CPHY: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep, + vep->bus_type); + if (rval) + return rval; + + break; + case V4L2_MBUS_PARALLEL: + case V4L2_MBUS_BT656: + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep, + vep->bus_type); + + break; + default: + pr_warn("unsupported bus type %u\n", mbus_type); + return -EINVAL; + } + + fwnode_graph_parse_endpoint(fwnode, &vep->base); + + return 0; +} + +int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + int ret; + + ret = __v4l2_fwnode_endpoint_parse(fwnode, vep); + + pr_debug("===== end parsing endpoint %pfw\n", fwnode); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse); + +void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep) +{ + if (IS_ERR_OR_NULL(vep)) + return; + + kfree(vep->link_frequencies); + vep->link_frequencies = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free); + +int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) +{ + int rval; + + rval = __v4l2_fwnode_endpoint_parse(fwnode, vep); + if (rval < 0) + return rval; + + rval = fwnode_property_count_u64(fwnode, "link-frequencies"); + if 
(rval > 0) { + unsigned int i; + + vep->link_frequencies = + kmalloc_array(rval, sizeof(*vep->link_frequencies), + GFP_KERNEL); + if (!vep->link_frequencies) + return -ENOMEM; + + vep->nr_of_link_frequencies = rval; + + rval = fwnode_property_read_u64_array(fwnode, + "link-frequencies", + vep->link_frequencies, + vep->nr_of_link_frequencies); + if (rval < 0) { + v4l2_fwnode_endpoint_free(vep); + return rval; + } + + for (i = 0; i < vep->nr_of_link_frequencies; i++) + pr_debug("link-frequencies %u value %llu\n", i, + vep->link_frequencies[i]); + } + + pr_debug("===== end parsing endpoint %pfw\n", fwnode); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse); + +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_link *link) +{ + struct fwnode_endpoint fwep; + + memset(link, 0, sizeof(*link)); + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->local_id = fwep.id; + link->local_port = fwep.port; + link->local_node = fwnode_graph_get_port_parent(fwnode); + if (!link->local_node) + return -ENOLINK; + + fwnode = fwnode_graph_get_remote_endpoint(fwnode); + if (!fwnode) + goto err_put_local_node; + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->remote_id = fwep.id; + link->remote_port = fwep.port; + link->remote_node = fwnode_graph_get_port_parent(fwnode); + if (!link->remote_node) + goto err_put_remote_endpoint; + + return 0; + +err_put_remote_endpoint: + fwnode_handle_put(fwnode); + +err_put_local_node: + fwnode_handle_put(link->local_node); + + return -ENOLINK; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link); + +void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link) +{ + fwnode_handle_put(link->local_node); + fwnode_handle_put(link->remote_node); +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link); + +static const struct v4l2_fwnode_connector_conv { + enum v4l2_connector_type type; + const char *compatible; +} connectors[] = { + { + .type = V4L2_CONN_COMPOSITE, + .compatible = "composite-video-connector", + }, { + .type = 
V4L2_CONN_SVIDEO, + .compatible = "svideo-connector", + }, +}; + +static enum v4l2_connector_type +v4l2_fwnode_string_to_connector_type(const char *con_str) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(connectors); i++) + if (!strcmp(con_str, connectors[i].compatible)) + return connectors[i].type; + + return V4L2_CONN_UNKNOWN; +} + +static void +v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *vc) +{ + u32 stds; + int ret; + + ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds); + + /* The property is optional. */ + vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds; +} + +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector) +{ + struct v4l2_connector_link *link, *tmp; + + if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN) + return; + + list_for_each_entry_safe(link, tmp, &connector->links, head) { + v4l2_fwnode_put_link(&link->fwnode_link); + list_del(&link->head); + kfree(link); + } + + kfree(connector->label); + connector->label = NULL; + connector->type = V4L2_CONN_UNKNOWN; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free); + +static enum v4l2_connector_type +v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode) +{ + const char *type_name; + int err; + + if (!fwnode) + return V4L2_CONN_UNKNOWN; + + /* The connector-type is stored within the compatible string. 
*/ + err = fwnode_property_read_string(fwnode, "compatible", &type_name); + if (err) + return V4L2_CONN_UNKNOWN; + + return v4l2_fwnode_string_to_connector_type(type_name); +} + +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_node; + enum v4l2_connector_type connector_type; + const char *label; + int err; + + if (!fwnode) + return -EINVAL; + + memset(connector, 0, sizeof(*connector)); + + INIT_LIST_HEAD(&connector->links); + + connector_node = fwnode_graph_get_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + if (connector_type == V4L2_CONN_UNKNOWN) { + fwnode_handle_put(connector_node); + connector_node = fwnode_graph_get_remote_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + } + + if (connector_type == V4L2_CONN_UNKNOWN) { + pr_err("Unknown connector type\n"); + err = -ENOTCONN; + goto out; + } + + connector->type = connector_type; + connector->name = fwnode_get_name(connector_node); + err = fwnode_property_read_string(connector_node, "label", &label); + connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL); + + /* Parse the connector specific properties. 
*/ + switch (connector->type) { + case V4L2_CONN_COMPOSITE: + case V4L2_CONN_SVIDEO: + v4l2_fwnode_connector_parse_analog(connector_node, connector); + break; + /* Avoid compiler warnings */ + case V4L2_CONN_UNKNOWN: + break; + } + +out: + fwnode_handle_put(connector_node); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse); + +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_ep; + struct v4l2_connector_link *link; + int err; + + if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN) + return -EINVAL; + + connector_ep = fwnode_graph_get_remote_endpoint(fwnode); + if (!connector_ep) + return -ENOTCONN; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto err; + } + + err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link); + if (err) + goto err; + + fwnode_handle_put(connector_ep); + + list_add(&link->head, &connector->links); + connector->nr_of_links++; + + return 0; + +err: + kfree(link); + fwnode_handle_put(connector_ep); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link); + +int v4l2_fwnode_device_parse(struct device *dev, + struct v4l2_fwnode_device_properties *props) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + u32 val; + int ret; + + memset(props, 0, sizeof(*props)); + + props->orientation = V4L2_FWNODE_PROPERTY_UNSET; + ret = fwnode_property_read_u32(fwnode, "orientation", &val); + if (!ret) { + switch (val) { + case V4L2_FWNODE_ORIENTATION_FRONT: + case V4L2_FWNODE_ORIENTATION_BACK: + case V4L2_FWNODE_ORIENTATION_EXTERNAL: + break; + default: + dev_warn(dev, "Unsupported device orientation: %u\n", val); + return -EINVAL; + } + + props->orientation = val; + dev_dbg(dev, "device orientation: %u\n", val); + } + + props->rotation = V4L2_FWNODE_PROPERTY_UNSET; + ret = fwnode_property_read_u32(fwnode, "rotation", &val); + if (!ret) { + if (val >= 360) { + dev_warn(dev, 
"Unsupported device rotation: %u\n", val); + return -EINVAL; + } + + props->rotation = val; + dev_dbg(dev, "device rotation: %u\n", val); + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse); + +/* + * v4l2_fwnode_reference_parse - parse references for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * @prop: the name of the property + * + * Return: 0 on success + * -ENOENT if no entries were found + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int v4l2_fwnode_reference_parse(struct device *dev, + struct v4l2_async_notifier *notifier, + const char *prop) +{ + struct fwnode_reference_args args; + unsigned int index; + int ret; + + for (index = 0; + !(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop, + NULL, 0, index, &args)); + index++) { + struct v4l2_async_connection *asd; + + asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode, + struct v4l2_async_connection); + fwnode_handle_put(args.fwnode); + if (IS_ERR(asd)) { + /* not an error if asd already exists */ + if (PTR_ERR(asd) == -EEXIST) + continue; + + return PTR_ERR(asd); + } + } + + /* -ENOENT here means successful parsing */ + if (ret != -ENOENT) + return ret; + + /* Return -ENOENT if no references were found */ + return index ? 0 : -ENOENT; +} + +/* + * v4l2_fwnode_reference_get_int_prop - parse a reference with integer + * arguments + * @fwnode: fwnode to read @prop from + * @notifier: notifier for @dev + * @prop: the name of the property + * @index: the index of the reference to get + * @props: the array of integer property names + * @nprops: the number of integer property names in @nprops + * + * First find an fwnode referred to by the reference at @index in @prop. 
+ * + * Then under that fwnode, @nprops times, for each property in @props, + * iteratively follow child nodes starting from fwnode such that they have the + * property in @props array at the index of the child node distance from the + * root node and the value of that property matching with the integer argument + * of the reference, at the same index. + * + * The child fwnode reached at the end of the iteration is then returned to the + * caller. + * + * The core reason for this is that you cannot refer to just any node in ACPI. + * So to refer to an endpoint (easy in DT) you need to refer to a device, then + * provide a list of (property name, property value) tuples where each tuple + * uniquely identifies a child node. The first tuple identifies a child directly + * underneath the device fwnode, the next tuple identifies a child node + * underneath the fwnode identified by the previous tuple, etc. until you + * reached the fwnode you need. + * + * THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A + * REFERENCE IN HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under + * Documentation/firmware-guide/acpi/dsd/ instead and especially graph.txt, + * data-node-references.txt and leds.txt . 
+ * + * Scope (\_SB.PCI0.I2C2) + * { + * Device (CAM0) + * { + * Name (_DSD, Package () { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { + * "compatible", + * Package () { "nokia,smia" } + * }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port0", "PRT0" }, + * } + * }) + * Name (PRT0, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 0 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP00" }, + * } + * }) + * Name (EP00, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package() { + * \_SB.PCI0.ISP, 4, 0 + * } + * }, + * } + * }) + * } + * } + * + * Scope (\_SB.PCI0) + * { + * Device (ISP) + * { + * Name (_DSD, Package () { + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port4", "PRT4" }, + * } + * }) + * + * Name (PRT4, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 4 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP40" }, + * } + * }) + * + * Name (EP40, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package () { + * \_SB.PCI0.I2C2.CAM0, + * 0, 0 + * } + * }, + * } + * }) + * } + * } + * + * From the EP40 node under ISP device, you could parse the graph remote + * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments: + * + * @fwnode: fwnode referring to EP40 under ISP. + * @prop: "remote-endpoint" + * @index: 0 + * @props: "port", "endpoint" + * @nprops: 2 + * + * And you'd get back fwnode referring to EP00 under CAM0. 
+ * + * The same works the other way around: if you use EP00 under CAM0 as the + * fwnode, you'll get fwnode referring to EP40 under ISP. + * + * The same example in DT syntax would look like this: + * + * cam: cam0 { + * compatible = "nokia,smia"; + * + * port { + * port = <0>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&isp 4 0>; + * }; + * }; + * }; + * + * isp: isp { + * ports { + * port@4 { + * port = <4>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&cam 0 0>; + * }; + * }; + * }; + * }; + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwise failed + * -ENOMEM if memory allocation failed + */ +static struct fwnode_handle * +v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode, + const char *prop, + unsigned int index, + const char * const *props, + unsigned int nprops) +{ + struct fwnode_reference_args fwnode_args; + u64 *args = fwnode_args.args; + struct fwnode_handle *child; + int ret; + + /* + * Obtain remote fwnode as well as the integer arguments. + * + * Note that right now both -ENODATA and -ENOENT may signal + * out-of-bounds access. Return -ENOENT in that case. + */ + ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops, + index, &fwnode_args); + if (ret) + return ERR_PTR(ret == -ENODATA ? -ENOENT : ret); + + /* + * Find a node in the tree under the referred fwnode corresponding to + * the integer arguments. + */ + fwnode = fwnode_args.fwnode; + while (nprops--) { + u32 val; + + /* Loop over all child nodes under fwnode. */ + fwnode_for_each_child_node(fwnode, child) { + if (fwnode_property_read_u32(child, *props, &val)) + continue; + + /* Found property, see if its value matches. */ + if (val == *args) + break; + } + + fwnode_handle_put(fwnode); + + /* No property found; return an error here. 
*/ + if (!child) { + fwnode = ERR_PTR(-ENOENT); + break; + } + + props++; + args++; + fwnode = child; + } + + return fwnode; +} + +struct v4l2_fwnode_int_props { + const char *name; + const char * const *props; + unsigned int nprops; +}; + +/* + * v4l2_fwnode_reference_parse_int_props - parse references for async + * sub-devices + * @dev: struct device pointer + * @notifier: notifier for @dev + * @prop: the name of the property + * @props: the array of integer property names + * @nprops: the number of integer properties + * + * Use v4l2_fwnode_reference_get_int_prop to find fwnodes through reference in + * property @prop with integer arguments with child nodes matching in properties + * @props. Then, set up V4L2 async sub-devices for those fwnodes in the notifier + * accordingly. + * + * While it is technically possible to use this function on DT, it is only + * meaningful on ACPI. On Device tree you can refer to any node in the tree but + * on ACPI the references are limited to devices. + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwisefailed + * -ENOMEM if memory allocation failed + */ +static int +v4l2_fwnode_reference_parse_int_props(struct device *dev, + struct v4l2_async_notifier *notifier, + const struct v4l2_fwnode_int_props *p) +{ + struct fwnode_handle *fwnode; + unsigned int index; + int ret; + const char *prop = p->name; + const char * const *props = p->props; + unsigned int nprops = p->nprops; + + index = 0; + do { + fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev), + prop, index, + props, nprops); + if (IS_ERR(fwnode)) { + /* + * Note that right now both -ENODATA and -ENOENT may + * signal out-of-bounds access. Return the error in + * cases other than that. 
+ */ + if (PTR_ERR(fwnode) != -ENOENT && + PTR_ERR(fwnode) != -ENODATA) + return PTR_ERR(fwnode); + break; + } + fwnode_handle_put(fwnode); + index++; + } while (1); + + for (index = 0; + !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev), + prop, index, + props, + nprops))); + index++) { + struct v4l2_async_connection *asd; + + asd = v4l2_async_nf_add_fwnode(notifier, fwnode, + struct v4l2_async_connection); + fwnode_handle_put(fwnode); + if (IS_ERR(asd)) { + ret = PTR_ERR(asd); + /* not an error if asd already exists */ + if (ret == -EEXIST) + continue; + + return PTR_ERR(asd); + } + } + + return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode); +} + +/** + * v4l2_async_nf_parse_fwnode_sensor - parse common references on + * sensors for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * + * Parse common sensor properties for remote devices related to the + * sensor and set up async sub-devices for them. + * + * Any notifier populated using this function must be released with a call to + * v4l2_async_nf_release() after it has been unregistered and the async + * sub-devices are no longer in use, even in the case the function returned an + * error. 
+ * + * Return: 0 on success + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int +v4l2_async_nf_parse_fwnode_sensor(struct device *dev, + struct v4l2_async_notifier *notifier) +{ + static const char * const led_props[] = { "led" }; + static const struct v4l2_fwnode_int_props props[] = { + { "flash-leds", led_props, ARRAY_SIZE(led_props) }, + { "mipi-img-flash-leds", }, + { "lens-focus", }, + { "mipi-img-lens-focus", }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + if (props[i].props && is_acpi_node(dev_fwnode(dev))) + ret = v4l2_fwnode_reference_parse_int_props(dev, + notifier, + &props[i]); + else + ret = v4l2_fwnode_reference_parse(dev, notifier, + props[i].name); + if (ret && ret != -ENOENT) { + dev_warn(dev, "parsing property \"%s\" failed (%d)\n", + props[i].name, ret); + return ret; + } + } + + return 0; +} + +int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd) +{ + struct v4l2_async_notifier *notifier; + int ret; + + if (WARN_ON(!sd->dev)) + return -ENODEV; + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) + return -ENOMEM; + + v4l2_async_subdev_nf_init(notifier, sd); + + ret = v4l2_subdev_get_privacy_led(sd); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_nf_register(notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto out_unregister; + + sd->subdev_notifier = notifier; + + return 0; + +out_unregister: + v4l2_async_nf_unregister(notifier); + +out_cleanup: + v4l2_subdev_put_privacy_led(sd); + v4l2_async_nf_cleanup(notifier); + kfree(notifier); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor); + +MODULE_DESCRIPTION("V4L2 fwnode binding parsing library"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Sylwester Nawrocki "); 
+MODULE_AUTHOR("Guennadi Liakhovetski "); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-h264.c b/6.18.0/drivers/media/v4l2-core/v4l2-h264.c new file mode 100644 index 0000000..c00197d --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-h264.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * V4L2 H264 helpers. + * + * Copyright (C) 2019 Collabora, Ltd. + * + * Author: Boris Brezillon + */ + +#include +#include + +#include + +/* + * Size of the tempory buffer allocated when printing reference lists. The + * output will be truncated if the size is too small. + */ +static const int tmp_str_size = 1024; + +/** + * v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list + * builder + * + * @b: the builder context to initialize + * @dec_params: decode parameters control + * @sps: SPS control + * @dpb: DPB to use when creating the reference list + */ +void +v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b, + const struct v4l2_ctrl_h264_decode_params *dec_params, + const struct v4l2_ctrl_h264_sps *sps, + const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]) +{ + int cur_frame_num, max_frame_num; + unsigned int i; + + max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4); + cur_frame_num = dec_params->frame_num; + + memset(b, 0, sizeof(*b)); + if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) { + b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt, + dec_params->top_field_order_cnt); + b->cur_pic_fields = V4L2_H264_FRAME_REF; + } else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD) { + b->cur_pic_order_count = dec_params->bottom_field_order_cnt; + b->cur_pic_fields = V4L2_H264_BOTTOM_FIELD_REF; + } else { + b->cur_pic_order_count = dec_params->top_field_order_cnt; + b->cur_pic_fields = V4L2_H264_TOP_FIELD_REF; + } + + for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { + if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) + continue; + + if (dpb[i].flags & 
V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) + b->refs[i].longterm = true; + + /* + * Handle frame_num wraparound as described in section + * '8.2.4.1 Decoding process for picture numbers' of the spec. + * For long term references, frame_num is set to + * long_term_frame_idx which requires no wrapping. + */ + if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num) + b->refs[i].frame_num = (int)dpb[i].frame_num - + max_frame_num; + else + b->refs[i].frame_num = dpb[i].frame_num; + + b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt; + b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt; + + if (b->cur_pic_fields == V4L2_H264_FRAME_REF) { + u8 fields = V4L2_H264_FRAME_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + continue; + } + + if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF) { + u8 fields = V4L2_H264_TOP_FIELD_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + } + + if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF) { + u8 fields = V4L2_H264_BOTTOM_FIELD_REF; + + b->unordered_reflist[b->num_valid].index = i; + b->unordered_reflist[b->num_valid].fields = fields; + b->num_valid++; + } + } + + for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++) + b->unordered_reflist[i].index = i; +} +EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder); + +static s32 v4l2_h264_get_poc(const struct v4l2_h264_reflist_builder *b, + const struct v4l2_h264_reference *ref) +{ + switch (ref->fields) { + case V4L2_H264_FRAME_REF: + return min(b->refs[ref->index].top_field_order_cnt, + b->refs[ref->index].bottom_field_order_cnt); + case V4L2_H264_TOP_FIELD_REF: + return b->refs[ref->index].top_field_order_cnt; + case V4L2_H264_BOTTOM_FIELD_REF: + return b->refs[ref->index].bottom_field_order_cnt; + } + + /* not reached */ + return 0; +} + +static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void 
*ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* + * For frames, short term pics are in descending pic num order and long + * term ones in ascending order. For fields, the same direction is used + * but with frame_num (wrapped). For frames, the value of pic_num and + * frame_num are the same (see formula (8-28) and (8-29)). For this + * reason we can use frame_num only and share this function between + * frames and fields reflist. + */ + if (!builder->refs[idxa].longterm) + return builder->refs[idxb].frame_num < + builder->refs[idxa].frame_num ? + -1 : 1; + + return builder->refs[idxa].frame_num < builder->refs[idxb].frame_num ? + -1 : 1; +} + +static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + s32 poca, pocb; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* Long term pics in ascending frame num order. */ + if (builder->refs[idxa].longterm) + return builder->refs[idxa].frame_num < + builder->refs[idxb].frame_num ? 
+ -1 : 1; + + poca = v4l2_h264_get_poc(builder, ptra); + pocb = v4l2_h264_get_poc(builder, ptrb); + + /* + * Short term pics with POC < cur POC first in POC descending order + * followed by short term pics with POC > cur POC in POC ascending + * order. + */ + if ((poca < builder->cur_pic_order_count) != + (pocb < builder->cur_pic_order_count)) + return poca < pocb ? -1 : 1; + else if (poca < builder->cur_pic_order_count) + return pocb < poca ? -1 : 1; + + return poca < pocb ? -1 : 1; +} + +static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb, + const void *data) +{ + const struct v4l2_h264_reflist_builder *builder = data; + s32 poca, pocb; + u8 idxa, idxb; + + idxa = ((struct v4l2_h264_reference *)ptra)->index; + idxb = ((struct v4l2_h264_reference *)ptrb)->index; + + if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES || + idxb >= V4L2_H264_NUM_DPB_ENTRIES)) + return 1; + + if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) { + /* Short term pics first. */ + if (!builder->refs[idxa].longterm) + return -1; + else + return 1; + } + + /* Long term pics in ascending frame num order. */ + if (builder->refs[idxa].longterm) + return builder->refs[idxa].frame_num < + builder->refs[idxb].frame_num ? + -1 : 1; + + poca = v4l2_h264_get_poc(builder, ptra); + pocb = v4l2_h264_get_poc(builder, ptrb); + + /* + * Short term pics with POC > cur POC first in POC ascending order + * followed by short term pics with POC < cur POC in POC descending + * order. + */ + if ((poca < builder->cur_pic_order_count) != + (pocb < builder->cur_pic_order_count)) + return pocb < poca ? -1 : 1; + else if (poca < builder->cur_pic_order_count) + return pocb < poca ? -1 : 1; + + return poca < pocb ? -1 : 1; +} + +/* + * The references need to be reordered so that references are alternating + * between top and bottom field references starting with the current picture + * parity. This has to be done for short term and long term references + * separately. 
+ */ +static void reorder_field_reflist(const struct v4l2_h264_reflist_builder *b, + struct v4l2_h264_reference *reflist) +{ + struct v4l2_h264_reference tmplist[V4L2_H264_REF_LIST_LEN]; + u8 lt, i = 0, j = 0, k = 0; + + memcpy(tmplist, reflist, sizeof(tmplist[0]) * b->num_valid); + + for (lt = 0; lt <= 1; lt++) { + do { + for (; i < b->num_valid && b->refs[tmplist[i].index].longterm == lt; i++) { + if (tmplist[i].fields == b->cur_pic_fields) { + reflist[k++] = tmplist[i++]; + break; + } + } + + for (; j < b->num_valid && b->refs[tmplist[j].index].longterm == lt; j++) { + if (tmplist[j].fields != b->cur_pic_fields) { + reflist[k++] = tmplist[j++]; + break; + } + } + } while ((i < b->num_valid && b->refs[tmplist[i].index].longterm == lt) || + (j < b->num_valid && b->refs[tmplist[j].index].longterm == lt)); + } +} + +static char ref_type_to_char(u8 ref_type) +{ + switch (ref_type) { + case V4L2_H264_FRAME_REF: + return 'f'; + case V4L2_H264_TOP_FIELD_REF: + return 't'; + case V4L2_H264_BOTTOM_FIELD_REF: + return 'b'; + } + + return '?'; +} + +static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, + char **out_str) +{ + int n = 0, i; + + *out_str = kmalloc(tmp_str_size, GFP_KERNEL); + if (!(*out_str)) + return NULL; + + n += snprintf(*out_str + n, tmp_str_size - n, "|"); + + for (i = 0; i < builder->num_valid; i++) { + /* this is pic_num for frame and frame_num (wrapped) for field, + * but for frame pic_num is equal to frame_num (wrapped). + */ + int frame_num = builder->refs[reflist[i].index].frame_num; + bool longterm = builder->refs[reflist[i].index].longterm; + + n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|", + frame_num, longterm ? 
'l' : 's', + ref_type_to_char(reflist[i].fields)); + } + + return *out_str; +} + +static void print_ref_list_p(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist) +{ + char *buf = NULL; + + pr_debug("ref_pic_list_p (cur_poc %u%c) %s\n", + builder->cur_pic_order_count, + ref_type_to_char(builder->cur_pic_fields), + format_ref_list_p(builder, reflist, &buf)); + + kfree(buf); +} + +static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, + char **out_str) +{ + int n = 0, i; + + *out_str = kmalloc(tmp_str_size, GFP_KERNEL); + if (!(*out_str)) + return NULL; + + n += snprintf(*out_str + n, tmp_str_size - n, "|"); + + for (i = 0; i < builder->num_valid; i++) { + int frame_num = builder->refs[reflist[i].index].frame_num; + u32 poc = v4l2_h264_get_poc(builder, reflist + i); + bool longterm = builder->refs[reflist[i].index].longterm; + + n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|", + longterm ? frame_num : poc, + longterm ? 'l' : 's', + ref_type_to_char(reflist[i].fields)); + } + + return *out_str; +} + +static void print_ref_list_b(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist, u8 list_num) +{ + char *buf = NULL; + + pr_debug("ref_pic_list_b%u (cur_poc %u%c) %s", + list_num, builder->cur_pic_order_count, + ref_type_to_char(builder->cur_pic_fields), + format_ref_list_b(builder, reflist, &buf)); + + kfree(buf); +} + +/** + * v4l2_h264_build_p_ref_list() - Build the P reference list + * + * @builder: reference list builder context + * @reflist: 32 sized array used to store the P reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the P reference lists. This procedure is describe in + * section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. 
This function can be used by H264 decoder drivers that + * need to pass a P reference list to the hardware. + */ +void +v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *reflist) +{ + memcpy(reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(reflist, builder->num_valid, sizeof(*reflist), + v4l2_h264_p_ref_list_cmp, NULL, builder); + + if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) + reorder_field_reflist(builder, reflist); + + print_ref_list_p(builder, reflist); +} +EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list); + +/** + * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists + * + * @builder: reference list builder context + * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry + * is a v4l2_h264_reference structure + * @b1_reflist: 32 sized array used to store the B1 reference list. Each entry + * is a v4l2_h264_reference structure + * + * This functions builds the B0/B1 reference lists. This procedure is described + * in section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass B0/B1 reference lists to the hardware. 
+ */ +void +v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, + struct v4l2_h264_reference *b0_reflist, + struct v4l2_h264_reference *b1_reflist) +{ + memcpy(b0_reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist), + v4l2_h264_b0_ref_list_cmp, NULL, builder); + + memcpy(b1_reflist, builder->unordered_reflist, + sizeof(builder->unordered_reflist[0]) * builder->num_valid); + sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist), + v4l2_h264_b1_ref_list_cmp, NULL, builder); + + if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) { + reorder_field_reflist(builder, b0_reflist); + reorder_field_reflist(builder, b1_reflist); + } + + if (builder->num_valid > 1 && + !memcmp(b1_reflist, b0_reflist, builder->num_valid)) + swap(b1_reflist[0], b1_reflist[1]); + + print_ref_list_b(builder, b0_reflist, 0); + print_ref_list_b(builder, b1_reflist, 1); +} +EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V4L2 H264 Helpers"); +MODULE_AUTHOR("Boris Brezillon "); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-i2c.c b/6.18.0/drivers/media/v4l2-core/v4l2-i2c.c new file mode 100644 index 0000000..ffc64e1 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-i2c.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * v4l2-i2c - I2C helpers for Video4Linux2 + */ + +#include +#include +#include +#include +#include + +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + /* + * We need to unregister the i2c client + * explicitly. We cannot rely on + * i2c_del_adapter to always unregister + * clients for us, since if the i2c bus is a + * platform bus, then it is never deleted. 
+ * + * Device tree or ACPI based devices must not + * be unregistered as they have not been + * registered by us, and would not be + * re-created by just probing the V4L2 driver. + */ + if (client && !dev_fwnode(&client->dev)) + i2c_unregister_device(client); +} + +void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, + struct i2c_client *client, + const char *devname, const char *postfix) +{ + if (!devname) + devname = client->dev.driver->name; + if (!postfix) + postfix = ""; + + snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix, + i2c_adapter_id(client->adapter), client->addr); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name); + +void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_IS_I2C; + /* the owner is the same as the i2c_client's driver owner */ + sd->owner = client->dev.driver->owner; + sd->dev = &client->dev; + /* i2c_client and v4l2_subdev point to one another */ + v4l2_set_subdevdata(sd, client); + i2c_set_clientdata(client, sd); + v4l2_i2c_subdev_set_name(sd, client, NULL, NULL); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); + +/* Load an i2c sub-device. */ +struct v4l2_subdev +*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + struct v4l2_subdev *sd = NULL; + struct i2c_client *client; + + if (!v4l2_dev) + return NULL; + + request_module(I2C_MODULE_PREFIX "%s", info->type); + + /* Create the i2c client */ + if (info->addr == 0 && probe_addrs) + client = i2c_new_scanned_device(adapter, info, probe_addrs, + NULL); + else + client = i2c_new_client_device(adapter, info); + + /* + * Note: by loading the module first we are certain that c->driver + * will be set if the driver was found. 
If the module was not loaded + * first, then the i2c core tries to delay-load the module for us, + * and then c->driver is still NULL until the module is finally + * loaded. This delay-load mechanism doesn't work if other drivers + * want to use the i2c device, so explicitly loading the module + * is the best alternative. + */ + if (!i2c_client_has_driver(client)) + goto error; + + /* Lock the module so we can safely get the v4l2_subdev pointer */ + if (!try_module_get(client->dev.driver->owner)) + goto error; + sd = i2c_get_clientdata(client); + + /* + * Register with the v4l2_device which increases the module's + * use count as well. + */ + if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) + sd = NULL; + /* Decrease the module use count to match the first try_module_get. */ + module_put(client->dev.driver->owner); + +error: + /* + * If we have a client but no subdev, then something went wrong and + * we must unregister the client. + */ + if (!IS_ERR(client) && !sd) + i2c_unregister_device(client); + return sd; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); + +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + const char *client_type, + u8 addr, + const unsigned short *probe_addrs) +{ + struct i2c_board_info info; + + /* + * Setup the i2c board info with the device type and + * the device address. + */ + memset(&info, 0, sizeof(info)); + strscpy(info.type, client_type, sizeof(info.type)); + info.addr = addr; + + return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, + probe_addrs); +} +EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev); + +/* Return i2c client address of v4l2_subdev. */ +unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + return client ? client->addr : I2C_CLIENT_END; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr); + +/* + * Return a list of I2C tuner addresses to probe. Use only if the tuner + * addresses are unknown. 
+ */ +const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + static const unsigned short radio_addrs[] = { +#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761) + 0x10, +#endif + 0x60, + I2C_CLIENT_END + }; + static const unsigned short demod_addrs[] = { + 0x42, 0x43, 0x4a, 0x4b, + I2C_CLIENT_END + }; + static const unsigned short tv_addrs[] = { + 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */ + 0x60, 0x61, 0x62, 0x63, 0x64, + I2C_CLIENT_END + }; + + switch (type) { + case ADDRS_RADIO: + return radio_addrs; + case ADDRS_DEMOD: + return demod_addrs; + case ADDRS_TV: + return tv_addrs; + case ADDRS_TV_WITH_DEMOD: + return tv_addrs + 4; + } + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-ioctl.c b/6.18.0/drivers/media/v4l2-core/v4l2-ioctl.c new file mode 100644 index 0000000..01cf52c --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-ioctl.c @@ -0,0 +1,3530 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Video capture interface for Linux version 2 + * + * A generic framework to process V4L2 ioctl commands. 
+ * + * Authors: Alan Cox, (version 1) + * Mauro Carvalho Chehab (version 2) + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include /* for media_set_bus_info() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls) + +struct std_descr { + v4l2_std_id std; + const char *descr; +}; + +static const struct std_descr standards[] = { + { V4L2_STD_NTSC, "NTSC" }, + { V4L2_STD_NTSC_M, "NTSC-M" }, + { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" }, + { V4L2_STD_NTSC_M_KR, "NTSC-M-KR" }, + { V4L2_STD_NTSC_443, "NTSC-443" }, + { V4L2_STD_PAL, "PAL" }, + { V4L2_STD_PAL_BG, "PAL-BG" }, + { V4L2_STD_PAL_B, "PAL-B" }, + { V4L2_STD_PAL_B1, "PAL-B1" }, + { V4L2_STD_PAL_G, "PAL-G" }, + { V4L2_STD_PAL_H, "PAL-H" }, + { V4L2_STD_PAL_I, "PAL-I" }, + { V4L2_STD_PAL_DK, "PAL-DK" }, + { V4L2_STD_PAL_D, "PAL-D" }, + { V4L2_STD_PAL_D1, "PAL-D1" }, + { V4L2_STD_PAL_K, "PAL-K" }, + { V4L2_STD_PAL_M, "PAL-M" }, + { V4L2_STD_PAL_N, "PAL-N" }, + { V4L2_STD_PAL_Nc, "PAL-Nc" }, + { V4L2_STD_PAL_60, "PAL-60" }, + { V4L2_STD_SECAM, "SECAM" }, + { V4L2_STD_SECAM_B, "SECAM-B" }, + { V4L2_STD_SECAM_G, "SECAM-G" }, + { V4L2_STD_SECAM_H, "SECAM-H" }, + { V4L2_STD_SECAM_DK, "SECAM-DK" }, + { V4L2_STD_SECAM_D, "SECAM-D" }, + { V4L2_STD_SECAM_K, "SECAM-K" }, + { V4L2_STD_SECAM_K1, "SECAM-K1" }, + { V4L2_STD_SECAM_L, "SECAM-L" }, + { V4L2_STD_SECAM_LC, "SECAM-Lc" }, + { 0, "Unknown" } +}; + +/* video4linux standard ID conversion to standard name + */ +const char *v4l2_norm_to_name(v4l2_std_id id) +{ + u32 myid = id; + int i; + + /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle + 64 bit comparisons. So, on that architecture, with some gcc + variants, compilation fails. Currently, the max value is 30bit wide. 
+ */ + BUG_ON(myid != id); + + for (i = 0; standards[i].std; i++) + if (myid == standards[i].std) + break; + return standards[i].descr; +} +EXPORT_SYMBOL(v4l2_norm_to_name); + +/* Returns frame period for the given standard */ +void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod) +{ + if (id & V4L2_STD_525_60) { + frameperiod->numerator = 1001; + frameperiod->denominator = 30000; + } else { + frameperiod->numerator = 1; + frameperiod->denominator = 25; + } +} +EXPORT_SYMBOL(v4l2_video_std_frame_period); + +/* Fill in the fields of a v4l2_standard structure according to the + 'id' and 'transmission' parameters. Returns negative on error. */ +int v4l2_video_std_construct(struct v4l2_standard *vs, + int id, const char *name) +{ + vs->id = id; + v4l2_video_std_frame_period(id, &vs->frameperiod); + vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625; + strscpy(vs->name, name, sizeof(vs->name)); + return 0; +} +EXPORT_SYMBOL(v4l2_video_std_construct); + +/* Fill in the fields of a v4l2_standard structure according to the + * 'id' and 'vs->index' parameters. Returns negative on error. */ +int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id) +{ + v4l2_std_id curr_id = 0; + unsigned int index = vs->index, i, j = 0; + const char *descr = ""; + + /* Return -ENODATA if the id for the current input + or output is 0, meaning that it doesn't support this API. */ + if (id == 0) + return -ENODATA; + + /* Return norm array in a canonical way */ + for (i = 0; i <= index && id; i++) { + /* last std value in the standards array is 0, so this + while always ends there since (id & 0) == 0. 
*/ + while ((id & standards[j].std) != standards[j].std) + j++; + curr_id = standards[j].std; + descr = standards[j].descr; + j++; + if (curr_id == 0) + break; + if (curr_id != V4L2_STD_PAL && + curr_id != V4L2_STD_SECAM && + curr_id != V4L2_STD_NTSC) + id &= ~curr_id; + } + if (i <= index) + return -EINVAL; + + v4l2_video_std_construct(vs, curr_id, descr); + return 0; +} + +/* ----------------------------------------------------------------- */ +/* some arrays for pretty-printing debug messages of enum types */ + +const char *v4l2_field_names[] = { + [V4L2_FIELD_ANY] = "any", + [V4L2_FIELD_NONE] = "none", + [V4L2_FIELD_TOP] = "top", + [V4L2_FIELD_BOTTOM] = "bottom", + [V4L2_FIELD_INTERLACED] = "interlaced", + [V4L2_FIELD_SEQ_TB] = "seq-tb", + [V4L2_FIELD_SEQ_BT] = "seq-bt", + [V4L2_FIELD_ALTERNATE] = "alternate", + [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb", + [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt", +}; +EXPORT_SYMBOL(v4l2_field_names); + +const char *v4l2_type_names[] = { + [0] = "0", + [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap", + [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay", + [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out", + [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", + [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", + [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap", + [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay", + [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane", + [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap", + [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out", + [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap", + [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out", +}; +EXPORT_SYMBOL(v4l2_type_names); + +static const char *v4l2_memory_names[] = { + [V4L2_MEMORY_MMAP] = "mmap", + [V4L2_MEMORY_USERPTR] = "userptr", + [V4L2_MEMORY_OVERLAY] = "overlay", + [V4L2_MEMORY_DMABUF] = "dmabuf", +}; + +#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? 
arr[a] : "unknown") + +/* ------------------------------------------------------------------ */ +/* debug help functions */ + +static void v4l_print_querycap(const void *arg, bool write_only) +{ + const struct v4l2_capability *p = arg; + + pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n", + (int)sizeof(p->driver), p->driver, + (int)sizeof(p->card), p->card, + (int)sizeof(p->bus_info), p->bus_info, + p->version, p->capabilities, p->device_caps); +} + +static void v4l_print_enuminput(const void *arg, bool write_only) +{ + const struct v4l2_input *p = arg; + + pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, + p->tuner, (unsigned long long)p->std, p->status, + p->capabilities); +} + +static void v4l_print_enumoutput(const void *arg, bool write_only) +{ + const struct v4l2_output *p = arg; + + pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, + p->modulator, (unsigned long long)p->std, p->capabilities); +} + +static void v4l_print_audio(const void *arg, bool write_only) +{ + const struct v4l2_audio *p = arg; + + if (write_only) + pr_cont("index=%u, mode=0x%x\n", p->index, p->mode); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + p->capability, p->mode); +} + +static void v4l_print_audioout(const void *arg, bool write_only) +{ + const struct v4l2_audioout *p = arg; + + if (write_only) + pr_cont("index=%u\n", p->index); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + p->capability, p->mode); +} + +static void v4l_print_fmtdesc(const void *arg, bool write_only) +{ + const struct v4l2_fmtdesc *p = arg; + + pr_cont("index=%u, type=%s, flags=0x%x, 
pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n", + p->index, prt_names(p->type, v4l2_type_names), + p->flags, &p->pixelformat, p->mbus_code, + (int)sizeof(p->description), p->description); +} + +static void v4l_print_format(const void *arg, bool write_only) +{ + const struct v4l2_format *p = arg; + const struct v4l2_pix_format *pix; + const struct v4l2_pix_format_mplane *mp; + const struct v4l2_vbi_format *vbi; + const struct v4l2_sliced_vbi_format *sliced; + const struct v4l2_window *win; + const struct v4l2_meta_format *meta; + u32 pixelformat; + u32 planes; + unsigned i; + + pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + pix = &p->fmt.pix; + pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", + pix->width, pix->height, &pix->pixelformat, + prt_names(pix->field, v4l2_field_names), + pix->bytesperline, pix->sizeimage, + pix->colorspace, pix->flags, pix->ycbcr_enc, + pix->quantization, pix->xfer_func); + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + mp = &p->fmt.pix_mp; + pixelformat = mp->pixelformat; + pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", + mp->width, mp->height, &pixelformat, + prt_names(mp->field, v4l2_field_names), + mp->colorspace, mp->num_planes, mp->flags, + mp->ycbcr_enc, mp->quantization, mp->xfer_func); + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) + printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, + mp->plane_fmt[i].bytesperline, + mp->plane_fmt[i].sizeimage); + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + win = &p->fmt.win; + pr_cont(", (%d,%d)/%ux%u, field=%s, 
chromakey=0x%08x, global_alpha=0x%02x\n", + win->w.left, win->w.top, win->w.width, win->w.height, + prt_names(win->field, v4l2_field_names), + win->chromakey, win->global_alpha); + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + vbi = &p->fmt.vbi; + pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n", + vbi->sampling_rate, vbi->offset, + vbi->samples_per_line, &vbi->sample_format, + vbi->start[0], vbi->start[1], + vbi->count[0], vbi->count[1]); + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + sliced = &p->fmt.sliced; + pr_cont(", service_set=0x%08x, io_size=%d\n", + sliced->service_set, sliced->io_size); + for (i = 0; i < 24; i++) + printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, + sliced->service_lines[0][i], + sliced->service_lines[1][i]); + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + pixelformat = p->fmt.sdr.pixelformat; + pr_cont(", pixelformat=%p4cc\n", &pixelformat); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + meta = &p->fmt.meta; + pixelformat = meta->dataformat; + pr_cont(", dataformat=%p4cc, buffersize=%u, width=%u, height=%u, bytesperline=%u\n", + &pixelformat, meta->buffersize, meta->width, + meta->height, meta->bytesperline); + break; + } +} + +static void v4l_print_framebuffer(const void *arg, bool write_only) +{ + const struct v4l2_framebuffer *p = arg; + + pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n", + p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, + &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage, + p->fmt.colorspace); +} + +static void v4l_print_buftype(const void *arg, bool write_only) +{ + pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names)); +} + +static void v4l_print_modulator(const void *arg, bool write_only) +{ + 
const struct v4l2_modulator *p = arg; + + if (write_only) + pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); + else + pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", + p->index, (int)sizeof(p->name), p->name, p->capability, + p->rangelow, p->rangehigh, p->txsubchans); +} + +static void v4l_print_tuner(const void *arg, bool write_only) +{ + const struct v4l2_tuner *p = arg; + + if (write_only) + pr_cont("index=%u, audmode=%u\n", p->index, p->audmode); + else + pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n", + p->index, (int)sizeof(p->name), p->name, p->type, + p->capability, p->rangelow, + p->rangehigh, p->signal, p->afc, + p->rxsubchans, p->audmode); +} + +static void v4l_print_frequency(const void *arg, bool write_only) +{ + const struct v4l2_frequency *p = arg; + + pr_cont("tuner=%u, type=%u, frequency=%u\n", + p->tuner, p->type, p->frequency); +} + +static void v4l_print_standard(const void *arg, bool write_only) +{ + const struct v4l2_standard *p = arg; + + pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n", + p->index, + (unsigned long long)p->id, (int)sizeof(p->name), p->name, + p->frameperiod.numerator, + p->frameperiod.denominator, + p->framelines); +} + +static void v4l_print_std(const void *arg, bool write_only) +{ + pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg); +} + +static void v4l_print_hw_freq_seek(const void *arg, bool write_only) +{ + const struct v4l2_hw_freq_seek *p = arg; + + pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n", + p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, + p->rangelow, p->rangehigh); +} + +static void v4l_print_requestbuffers(const void *arg, bool write_only) +{ + const struct v4l2_requestbuffers *p = arg; + + pr_cont("count=%d, type=%s, memory=%s\n", + p->count, + prt_names(p->type, 
v4l2_type_names), + prt_names(p->memory, v4l2_memory_names)); +} + +static void v4l_print_buffer(const void *arg, bool write_only) +{ + const struct v4l2_buffer *p = arg; + const struct v4l2_timecode *tc = &p->timecode; + const struct v4l2_plane *plane; + int i; + + pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s", + (int)p->timestamp.tv_sec / 3600, + ((int)p->timestamp.tv_sec / 60) % 60, + ((int)p->timestamp.tv_sec % 60), + (long)p->timestamp.tv_usec, + p->index, + prt_names(p->type, v4l2_type_names), p->request_fd, + p->flags, prt_names(p->field, v4l2_field_names), + p->sequence, prt_names(p->memory, v4l2_memory_names)); + + if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) { + pr_cont("\n"); + for (i = 0; i < p->length; ++i) { + plane = &p->m.planes[i]; + printk(KERN_DEBUG + "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n", + i, plane->bytesused, plane->data_offset, + plane->m.userptr, plane->length); + } + } else { + pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", + p->bytesused, p->m.userptr, p->length); + } + + printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n", + tc->hours, tc->minutes, tc->seconds, + tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits); +} + +static void v4l_print_exportbuffer(const void *arg, bool write_only) +{ + const struct v4l2_exportbuffer *p = arg; + + pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n", + p->fd, prt_names(p->type, v4l2_type_names), + p->index, p->plane, p->flags); +} + +static void v4l_print_create_buffers(const void *arg, bool write_only) +{ + const struct v4l2_create_buffers *p = arg; + + pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ", + p->index, p->count, prt_names(p->memory, v4l2_memory_names), + p->capabilities, p->max_num_buffers); + v4l_print_format(&p->format, write_only); +} + +static void 
v4l_print_remove_buffers(const void *arg, bool write_only) +{ + const struct v4l2_remove_buffers *p = arg; + + pr_cont("type=%s, index=%u, count=%u\n", + prt_names(p->type, v4l2_type_names), p->index, p->count); +} + +static void v4l_print_streamparm(const void *arg, bool write_only) +{ + const struct v4l2_streamparm *p = arg; + + pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + const struct v4l2_captureparm *c = &p->parm.capture; + + pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n", + c->capability, c->capturemode, + c->timeperframe.numerator, c->timeperframe.denominator, + c->extendedmode, c->readbuffers); + } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + const struct v4l2_outputparm *c = &p->parm.output; + + pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n", + c->capability, c->outputmode, + c->timeperframe.numerator, c->timeperframe.denominator, + c->extendedmode, c->writebuffers); + } else { + pr_cont("\n"); + } +} + +static void v4l_print_queryctrl(const void *arg, bool write_only) +{ + const struct v4l2_queryctrl *p = arg; + + pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n", + p->id, p->type, (int)sizeof(p->name), p->name, + p->minimum, p->maximum, + p->step, p->default_value, p->flags); +} + +static void v4l_print_query_ext_ctrl(const void *arg, bool write_only) +{ + const struct v4l2_query_ext_ctrl *p = arg; + + pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n", + p->id, p->type, (int)sizeof(p->name), p->name, + p->minimum, p->maximum, + p->step, p->default_value, p->flags, + p->elem_size, p->elems, p->nr_of_dims, + p->dims[0], p->dims[1], 
p->dims[2], p->dims[3]); +} + +static void v4l_print_querymenu(const void *arg, bool write_only) +{ + const struct v4l2_querymenu *p = arg; + + pr_cont("id=0x%x, index=%d\n", p->id, p->index); +} + +static void v4l_print_control(const void *arg, bool write_only) +{ + const struct v4l2_control *p = arg; + const char *name = v4l2_ctrl_get_name(p->id); + + if (name) + pr_cont("name=%s, ", name); + pr_cont("id=0x%x, value=%d\n", p->id, p->value); +} + +static void v4l_print_ext_controls(const void *arg, bool write_only) +{ + const struct v4l2_ext_controls *p = arg; + int i; + + pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d", + p->which, p->count, p->error_idx, p->request_fd); + for (i = 0; i < p->count; i++) { + unsigned int id = p->controls[i].id; + const char *name = v4l2_ctrl_get_name(id); + + if (name) + pr_cont(", name=%s", name); + if (!p->controls[i].size) + pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value); + else + pr_cont(", id/size=0x%x/%u", id, p->controls[i].size); + } + pr_cont("\n"); +} + +static void v4l_print_cropcap(const void *arg, bool write_only) +{ + const struct v4l2_cropcap *p = arg; + + pr_cont("type=%s, bounds (%d,%d)/%ux%u, defrect (%d,%d)/%ux%u, pixelaspect %d/%d\n", + prt_names(p->type, v4l2_type_names), + p->bounds.left, p->bounds.top, + p->bounds.width, p->bounds.height, + p->defrect.left, p->defrect.top, + p->defrect.width, p->defrect.height, + p->pixelaspect.numerator, p->pixelaspect.denominator); +} + +static void v4l_print_crop(const void *arg, bool write_only) +{ + const struct v4l2_crop *p = arg; + + pr_cont("type=%s, crop=(%d,%d)/%ux%u\n", + prt_names(p->type, v4l2_type_names), + p->c.left, p->c.top, + p->c.width, p->c.height); +} + +static void v4l_print_selection(const void *arg, bool write_only) +{ + const struct v4l2_selection *p = arg; + + pr_cont("type=%s, target=%d, flags=0x%x, rect=(%d,%d)/%ux%u\n", + prt_names(p->type, v4l2_type_names), + p->target, p->flags, + p->r.left, p->r.top, p->r.width, 
p->r.height); +} + +static void v4l_print_jpegcompression(const void *arg, bool write_only) +{ + const struct v4l2_jpegcompression *p = arg; + + pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n", + p->quality, p->APPn, p->APP_len, + p->COM_len, p->jpeg_markers); +} + +static void v4l_print_enc_idx(const void *arg, bool write_only) +{ + const struct v4l2_enc_idx *p = arg; + + pr_cont("entries=%d, entries_cap=%d\n", + p->entries, p->entries_cap); +} + +static void v4l_print_encoder_cmd(const void *arg, bool write_only) +{ + const struct v4l2_encoder_cmd *p = arg; + + pr_cont("cmd=%d, flags=0x%x\n", + p->cmd, p->flags); +} + +static void v4l_print_decoder_cmd(const void *arg, bool write_only) +{ + const struct v4l2_decoder_cmd *p = arg; + + pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags); + + if (p->cmd == V4L2_DEC_CMD_START) + pr_info("speed=%d, format=%u\n", + p->start.speed, p->start.format); + else if (p->cmd == V4L2_DEC_CMD_STOP) + pr_info("pts=%llu\n", p->stop.pts); +} + +static void v4l_print_dbg_chip_info(const void *arg, bool write_only) +{ + const struct v4l2_dbg_chip_info *p = arg; + + pr_cont("type=%u, ", p->match.type); + if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER) + pr_cont("name=%.*s, ", + (int)sizeof(p->match.name), p->match.name); + else + pr_cont("addr=%u, ", p->match.addr); + pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name); +} + +static void v4l_print_dbg_register(const void *arg, bool write_only) +{ + const struct v4l2_dbg_register *p = arg; + + pr_cont("type=%u, ", p->match.type); + if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER) + pr_cont("name=%.*s, ", + (int)sizeof(p->match.name), p->match.name); + else + pr_cont("addr=%u, ", p->match.addr); + pr_cont("reg=0x%llx, val=0x%llx\n", + p->reg, p->val); +} + +static void v4l_print_dv_timings(const void *arg, bool write_only) +{ + const struct v4l2_dv_timings *p = arg; + + switch (p->type) { + case V4L2_DV_BT_656_1120: + pr_cont("type=bt-656/1120, interlaced=%u, 
pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n", + p->bt.interlaced, p->bt.pixelclock, + p->bt.width, p->bt.height, + p->bt.polarities, p->bt.hfrontporch, + p->bt.hsync, p->bt.hbackporch, + p->bt.vfrontporch, p->bt.vsync, + p->bt.vbackporch, p->bt.il_vfrontporch, + p->bt.il_vsync, p->bt.il_vbackporch, + p->bt.standards, p->bt.flags); + break; + default: + pr_cont("type=%d\n", p->type); + break; + } +} + +static void v4l_print_enum_dv_timings(const void *arg, bool write_only) +{ + const struct v4l2_enum_dv_timings *p = arg; + + pr_cont("index=%u, ", p->index); + v4l_print_dv_timings(&p->timings, write_only); +} + +static void v4l_print_dv_timings_cap(const void *arg, bool write_only) +{ + const struct v4l2_dv_timings_cap *p = arg; + + switch (p->type) { + case V4L2_DV_BT_656_1120: + pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n", + p->bt.min_width, p->bt.max_width, + p->bt.min_height, p->bt.max_height, + p->bt.min_pixelclock, p->bt.max_pixelclock, + p->bt.standards, p->bt.capabilities); + break; + default: + pr_cont("type=%u\n", p->type); + break; + } +} + +static void v4l_print_frmsizeenum(const void *arg, bool write_only) +{ + const struct v4l2_frmsizeenum *p = arg; + + pr_cont("index=%u, pixelformat=%p4cc, type=%u", + p->index, &p->pixel_format, p->type); + switch (p->type) { + case V4L2_FRMSIZE_TYPE_DISCRETE: + pr_cont(", wxh=%ux%u\n", + p->discrete.width, p->discrete.height); + break; + case V4L2_FRMSIZE_TYPE_STEPWISE: + pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", + p->stepwise.min_width, + p->stepwise.min_height, + p->stepwise.max_width, + p->stepwise.max_height, + p->stepwise.step_width, + p->stepwise.step_height); + break; + case V4L2_FRMSIZE_TYPE_CONTINUOUS: + default: + pr_cont("\n"); + break; + } +} + +static void 
v4l_print_frmivalenum(const void *arg, bool write_only) +{ + const struct v4l2_frmivalenum *p = arg; + + pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u", + p->index, &p->pixel_format, p->width, p->height, p->type); + switch (p->type) { + case V4L2_FRMIVAL_TYPE_DISCRETE: + pr_cont(", fps=%d/%d\n", + p->discrete.numerator, + p->discrete.denominator); + break; + case V4L2_FRMIVAL_TYPE_STEPWISE: + pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n", + p->stepwise.min.numerator, + p->stepwise.min.denominator, + p->stepwise.max.numerator, + p->stepwise.max.denominator, + p->stepwise.step.numerator, + p->stepwise.step.denominator); + break; + case V4L2_FRMIVAL_TYPE_CONTINUOUS: + default: + pr_cont("\n"); + break; + } +} + +static void v4l_print_event(const void *arg, bool write_only) +{ + const struct v4l2_event *p = arg; + const struct v4l2_event_ctrl *c; + + pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n", + p->type, p->pending, p->sequence, p->id, + p->timestamp.tv_sec, p->timestamp.tv_nsec); + switch (p->type) { + case V4L2_EVENT_VSYNC: + printk(KERN_DEBUG "field=%s\n", + prt_names(p->u.vsync.field, v4l2_field_names)); + break; + case V4L2_EVENT_CTRL: + c = &p->u.ctrl; + printk(KERN_DEBUG "changes=0x%x, type=%u, ", + c->changes, c->type); + if (c->type == V4L2_CTRL_TYPE_INTEGER64) + pr_cont("value64=%lld, ", c->value64); + else + pr_cont("value=%d, ", c->value); + pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n", + c->flags, c->minimum, c->maximum, + c->step, c->default_value); + break; + case V4L2_EVENT_FRAME_SYNC: + pr_cont("frame_sequence=%u\n", + p->u.frame_sync.frame_sequence); + break; + } +} + +static void v4l_print_event_subscription(const void *arg, bool write_only) +{ + const struct v4l2_event_subscription *p = arg; + + pr_cont("type=0x%x, id=0x%x, flags=0x%x\n", + p->type, p->id, p->flags); +} + +static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only) +{ + const struct 
v4l2_sliced_vbi_cap *p = arg; + int i; + + pr_cont("type=%s, service_set=0x%08x\n", + prt_names(p->type, v4l2_type_names), p->service_set); + for (i = 0; i < 24; i++) + printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, + p->service_lines[0][i], + p->service_lines[1][i]); +} + +static void v4l_print_freq_band(const void *arg, bool write_only) +{ + const struct v4l2_frequency_band *p = arg; + + pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n", + p->tuner, p->type, p->index, + p->capability, p->rangelow, + p->rangehigh, p->modulation); +} + +static void v4l_print_edid(const void *arg, bool write_only) +{ + const struct v4l2_edid *p = arg; + + pr_cont("pad=%u, start_block=%u, blocks=%u\n", + p->pad, p->start_block, p->blocks); +} + +static void v4l_print_u32(const void *arg, bool write_only) +{ + pr_cont("value=%u\n", *(const u32 *)arg); +} + +static void v4l_print_newline(const void *arg, bool write_only) +{ + pr_cont("\n"); +} + +static void v4l_print_default(const void *arg, bool write_only) +{ + pr_cont("driver-specific ioctl\n"); +} + +static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl) +{ + __u32 i; + + /* zero the reserved fields */ + c->reserved[0] = 0; + for (i = 0; i < c->count; i++) + c->controls[i].reserved2[0] = 0; + + switch (c->which) { + case V4L2_CID_PRIVATE_BASE: + /* + * V4L2_CID_PRIVATE_BASE cannot be used as control class + * when using extended controls. + * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL + * is it allowed for backwards compatibility. 
+ */ + if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL) + return false; + break; + case V4L2_CTRL_WHICH_DEF_VAL: + case V4L2_CTRL_WHICH_MIN_VAL: + case V4L2_CTRL_WHICH_MAX_VAL: + /* Default, minimum or maximum value cannot be changed */ + if (ioctl == VIDIOC_S_EXT_CTRLS || + ioctl == VIDIOC_TRY_EXT_CTRLS) { + c->error_idx = c->count; + return false; + } + return true; + case V4L2_CTRL_WHICH_CUR_VAL: + return true; + case V4L2_CTRL_WHICH_REQUEST_VAL: + c->error_idx = c->count; + return false; + } + + /* Check that all controls are from the same control class. */ + for (i = 0; i < c->count; i++) { + if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) { + c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i : + c->count; + return false; + } + } + return true; +} + +static int check_fmt(struct file *file, enum v4l2_buf_type type) +{ + const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; + const u32 meta_caps = V4L2_CAP_META_CAPTURE | + V4L2_CAP_META_OUTPUT; + struct video_device *vfd = video_devdata(file); + const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; + bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO && + (vfd->device_caps & vid_caps); + bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI; + bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; + bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; + bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO && + (vfd->device_caps & meta_caps); + bool is_rx = vfd->vfl_dir != VFL_DIR_TX; + bool is_tx = vfd->vfl_dir != VFL_DIR_RX; + + if (ops == NULL) + return -EINVAL; + + switch (type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if ((is_vid || is_tch) && is_rx && + (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane)) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane) + return 0; + break; + case 
V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (is_vid && is_tx && + (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane)) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane) + return 0; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay) + return 0; + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap) + return 0; + break; + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out) + return 0; + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap) + return 0; + break; + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out) + return 0; + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap) + return 0; + break; + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out) + return 0; + break; + case V4L2_BUF_TYPE_META_CAPTURE: + if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap) + return 0; + break; + case V4L2_BUF_TYPE_META_OUTPUT: + if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out) + return 0; + break; + default: + break; + } + return -EINVAL; +} + +static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace, + u32 *encoding, u32 *quantization, + u32 *xfer_func) +{ + bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 || + pixelformat == V4L2_PIX_FMT_HSV32; + + if (!v4l2_is_colorspace_valid(*colorspace)) { + *colorspace = V4L2_COLORSPACE_DEFAULT; + *encoding = V4L2_YCBCR_ENC_DEFAULT; + *quantization = V4L2_QUANTIZATION_DEFAULT; + *xfer_func = V4L2_XFER_FUNC_DEFAULT; + } + + if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) || + (is_hsv && !v4l2_is_hsv_enc_valid(*encoding))) + *encoding = 
V4L2_YCBCR_ENC_DEFAULT; + + if (!v4l2_is_quant_valid(*quantization)) + *quantization = V4L2_QUANTIZATION_DEFAULT; + + if (!v4l2_is_xfer_func_valid(*xfer_func)) + *xfer_func = V4L2_XFER_FUNC_DEFAULT; +} + +static void v4l_sanitize_format(struct v4l2_format *fmt) +{ + unsigned int offset; + + /* Make sure num_planes is not bogus */ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes, + VIDEO_MAX_PLANES); + + /* + * The v4l2_pix_format structure has been extended with fields that were + * not previously required to be set to zero by applications. The priv + * field, when set to a magic value, indicates that the extended fields + * are valid. Otherwise they will contain undefined values. To simplify + * the API towards drivers zero the extended fields and set the priv + * field to the magic value when the extended pixel format structure + * isn't used by applications. + */ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) { + fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + + offset = offsetof(struct v4l2_pix_format, priv) + + sizeof(fmt->fmt.pix.priv); + memset(((void *)&fmt->fmt.pix) + offset, 0, + sizeof(fmt->fmt.pix) - offset); + } + } + + /* Replace invalid colorspace values with defaults. 
*/ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat, + &fmt->fmt.pix.colorspace, + &fmt->fmt.pix.ycbcr_enc, + &fmt->fmt.pix.quantization, + &fmt->fmt.pix.xfer_func); + } else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc; + u32 quantization = fmt->fmt.pix_mp.quantization; + u32 xfer_func = fmt->fmt.pix_mp.xfer_func; + + v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat, + &fmt->fmt.pix_mp.colorspace, &ycbcr_enc, + &quantization, &xfer_func); + + fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc; + fmt->fmt.pix_mp.quantization = quantization; + fmt->fmt.pix_mp.xfer_func = xfer_func; + } +} + +static int v4l_querycap(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_capability *cap = (struct v4l2_capability *)arg; + struct video_device *vfd = video_devdata(file); + int ret; + + cap->version = LINUX_VERSION_CODE; + cap->device_caps = vfd->device_caps; + cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS; + + media_set_bus_info(cap->bus_info, sizeof(cap->bus_info), + vfd->dev_parent); + + ret = ops->vidioc_querycap(file, NULL, cap); + + /* + * Drivers must not change device_caps, so check for this and + * warn if this happened. 
+ */ + WARN_ON(cap->device_caps != vfd->device_caps); + /* + * Check that capabilities is a superset of + * vfd->device_caps | V4L2_CAP_DEVICE_CAPS + */ + WARN_ON((cap->capabilities & + (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) != + (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)); + cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT; + cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT; + + return ret; +} + +static int v4l_g_input(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + *(int *)arg = 0; + return 0; + } + + return ops->vidioc_g_input(file, NULL, arg); +} + +static int v4l_g_output(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + *(int *)arg = 0; + return 0; + } + + return ops->vidioc_g_output(file, NULL, arg); +} + +static int v4l_s_input(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + + if (vfd->device_caps & V4L2_CAP_IO_MC) + return *(int *)arg ? -EINVAL : 0; + + return ops->vidioc_s_input(file, NULL, *(unsigned int *)arg); +} + +static int v4l_s_output(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + + if (vfd->device_caps & V4L2_CAP_IO_MC) + return *(int *)arg ? 
-EINVAL : 0; + + return ops->vidioc_s_output(file, NULL, *(unsigned int *)arg); +} + +static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd; + u32 *p = arg; + + vfd = video_devdata(file); + *p = v4l2_prio_max(vfd->prio); + return 0; +} + +static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd; + struct v4l2_fh *vfh; + u32 *p = arg; + + vfd = video_devdata(file); + vfh = file_to_v4l2_fh(file); + return v4l2_prio_change(vfd->prio, &vfh->prio, *p); +} + +static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_input *p = arg; + + /* + * We set the flags for CAP_DV_TIMINGS & + * CAP_STD here based on ioctl handler provided by the + * driver. If the driver doesn't support these + * for a specific input, it must override these flags. + */ + if (is_valid_ioctl(vfd, VIDIOC_S_STD)) + p->capabilities |= V4L2_IN_CAP_STD; + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + if (p->index) + return -EINVAL; + strscpy(p->name, vfd->name, sizeof(p->name)); + p->type = V4L2_INPUT_TYPE_CAMERA; + return 0; + } + + return ops->vidioc_enum_input(file, NULL, p); +} + +static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_output *p = arg; + + /* + * We set the flags for CAP_DV_TIMINGS & + * CAP_STD here based on ioctl handler provided by the + * driver. If the driver doesn't support these + * for a specific output, it must override these flags. 
+ */ + if (is_valid_ioctl(vfd, VIDIOC_S_STD)) + p->capabilities |= V4L2_OUT_CAP_STD; + + if (vfd->device_caps & V4L2_CAP_IO_MC) { + if (p->index) + return -EINVAL; + strscpy(p->name, vfd->name, sizeof(p->name)); + p->type = V4L2_OUTPUT_TYPE_ANALOG; + return 0; + } + + return ops->vidioc_enum_output(file, NULL, p); +} + +static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) +{ + const unsigned sz = sizeof(fmt->description); + const char *descr = NULL; + u32 flags = 0; + + /* + * We depart from the normal coding style here since the descriptions + * should be aligned so it is easy to see which descriptions will be + * longer than 31 characters (the max length for a description). + * And frankly, this is easier to read anyway. + * + * Note that gcc will use O(log N) comparisons to find the right case. + */ + switch (fmt->pixelformat) { + /* Max description length mask: descr = "0123456789012345678901234567890" */ + case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break; + case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break; + case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break; + case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break; + case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break; + case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break; + case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break; + case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break; + case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break; + case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break; + case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break; + case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break; + case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break; + case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break; + case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break; + case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break; + 
case V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break; + case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break; + case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break; + case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break; + case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break; + case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break; + case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break; + case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break; + case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break; + case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break; + case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break; + case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break; + case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break; + case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break; + case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break; + case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break; + case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break; + case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break; + case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break; + case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break; + case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break; + case V4L2_PIX_FMT_RGB48: descr = "48-bit RGB 16-16-16"; break; + case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break; + case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break; + case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break; + case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break; + 
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; + case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break; + case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break; + case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; + case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break; + case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break; + case V4L2_PIX_FMT_Y12P: descr = "12-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_Y14P: descr = "14-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break; + case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y16I: descr = "Interleaved 16-bit Greyscale"; break; + case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break; + case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break; + case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break; + case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break; + case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break; + case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break; + case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break; + case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break; + case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break; + case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break; + case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break; + case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break; + case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break; + case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break; + case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break; + case V4L2_PIX_FMT_YUV444: descr = "16-bit 
A/XYUV 4-4-4-4"; break; + case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break; + case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break; + case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break; + case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break; + case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break; + case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break; + case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break; + case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break; + case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break; + case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break; + case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break; + case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break; + case V4L2_PIX_FMT_NV15: descr = "10-bit Y/UV 4:2:0 (Packed)"; break; + case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break; + case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break; + case V4L2_PIX_FMT_NV20: descr = "10-bit Y/UV 4:2:2 (Packed)"; break; + case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break; + case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break; + case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break; + case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break; + case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; + case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; + 
case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break; + case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break; + case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_IPU3_SBGGR10: descr = "10-bit bayer BGGR IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SGBRG10: descr = "10-bit bayer GBRG IPU3 Packed"; break; + case V4L2_PIX_FMT_IPU3_SGRBG10: descr = "10-bit bayer GRBG IPU3 Packed"; break; + 
case V4L2_PIX_FMT_IPU3_SRGGB10: descr = "10-bit bayer RGGB IPU3 Packed"; break; + case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break; + case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer GBGB/RGRG (A-law)"; break; + case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break; + case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break; + case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break; + case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; + case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; + case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; + case V4L2_PIX_FMT_RAW_CRU10: descr = "10-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_RAW_CRU12: descr = "12-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break; + case 
V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_RAW_CRU14: descr = "14-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_RAW_CRU20: descr = "14-bit Raw CRU Packed"; break; + case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break; + case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break; + case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break; + case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break; + case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break; + case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break; + case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break; + case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break; + case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break; + case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break; + case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break; + case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break; + case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break; + case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break; + case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break; + case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break; + case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break; + case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break; + case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break; + case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break; + case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break; + case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break; + case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break; + case 
V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break; + case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break; + case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break; + case V4L2_META_FMT_UVC_MSXU_1_5: descr = "UVC MSXU Metadata"; break; + case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break; + case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break; + case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break; + case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break; + case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break; + case V4L2_META_FMT_C3ISP_PARAMS: descr = "Amlogic C3 ISP Parameters"; break; + case V4L2_META_FMT_C3ISP_STATS: descr = "Amlogic C3 ISP Statistics"; break; + case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break; + case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break; + case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break; + case V4L2_META_FMT_RPI_BE_CFG: descr = "RPi PiSP BE Config format"; break; + case V4L2_META_FMT_RPI_FE_CFG: descr = "RPi PiSP FE Config format"; break; + case V4L2_META_FMT_RPI_FE_STATS: descr = "RPi PiSP FE Statistics format"; break; + case V4L2_META_FMT_GENERIC_8: descr = "8-bit Generic Metadata"; break; + case V4L2_META_FMT_GENERIC_CSI2_10: descr = "8-bit Generic Meta, 10b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_12: descr = "8-bit Generic Meta, 12b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_14: descr = "8-bit Generic Meta, 14b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_16: descr = "8-bit Generic Meta, 16b CSI-2"; break; + case 
V4L2_META_FMT_GENERIC_CSI2_20: descr = "8-bit Generic Meta, 20b CSI-2"; break; + case V4L2_META_FMT_GENERIC_CSI2_24: descr = "8-bit Generic Meta, 24b CSI-2"; break; + + default: + /* Compressed formats */ + flags = V4L2_FMT_FLAG_COMPRESSED; + switch (fmt->pixelformat) { + /* Max description length mask: descr = "0123456789012345678901234567890" */ + case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break; + case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break; + case V4L2_PIX_FMT_DV: descr = "1394"; break; + case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break; + case V4L2_PIX_FMT_H264: descr = "H.264"; break; + case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break; + case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break; + case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break; + case V4L2_PIX_FMT_H263: descr = "H.263"; break; + case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break; + case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break; + case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break; + case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break; + case V4L2_PIX_FMT_XVID: descr = "Xvid"; break; + case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; + case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break; + case V4L2_PIX_FMT_VP8: descr = "VP8"; break; + case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break; + case V4L2_PIX_FMT_VP9: descr = "VP9"; break; + case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break; + case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */ + case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break; + case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */ + case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */ + case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break; + case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break; + case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; 
break; + case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break; + case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break; + case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break; + case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break; + case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break; + case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break; + case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break; + case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break; + case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break; + case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break; + case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break; + case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break; + case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break; + case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break; + case V4L2_PIX_FMT_OV518: descr = "GSPCA OV518"; break; + case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break; + case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; + case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; + case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; + case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break; + case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break; + case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break; + case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break; + case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break; + case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break; + case V4L2_PIX_FMT_HEXTILE: descr = "Hextile Compressed Format"; break; + case V4L2_PIX_FMT_PISP_COMP1_RGGB: descr = "PiSP 8b RGRG/GBGB mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_GRBG: descr = "PiSP 8b GRGR/BGBG mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_GBRG: descr = "PiSP 8b GBGB/RGRG mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_BGGR: descr = "PiSP 8b BGBG/GRGR mode1 
compr"; break; + case V4L2_PIX_FMT_PISP_COMP1_MONO: descr = "PiSP 8b monochrome mode1 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_RGGB: descr = "PiSP 8b RGRG/GBGB mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_GRBG: descr = "PiSP 8b GRGR/BGBG mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_GBRG: descr = "PiSP 8b GBGB/RGRG mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_BGGR: descr = "PiSP 8b BGBG/GRGR mode2 compr"; break; + case V4L2_PIX_FMT_PISP_COMP2_MONO: descr = "PiSP 8b monochrome mode2 compr"; break; + default: + if (fmt->description[0]) + return; + WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat); + flags = 0; + snprintf(fmt->description, sz, "%p4cc", + &fmt->pixelformat); + break; + } + } + + if (fmt->type == V4L2_BUF_TYPE_META_CAPTURE) { + switch (fmt->pixelformat) { + case V4L2_META_FMT_GENERIC_8: + case V4L2_META_FMT_GENERIC_CSI2_10: + case V4L2_META_FMT_GENERIC_CSI2_12: + case V4L2_META_FMT_GENERIC_CSI2_14: + case V4L2_META_FMT_GENERIC_CSI2_16: + case V4L2_META_FMT_GENERIC_CSI2_20: + case V4L2_META_FMT_GENERIC_CSI2_24: + fmt->flags |= V4L2_FMT_FLAG_META_LINE_BASED; + break; + default: + fmt->flags &= ~V4L2_FMT_FLAG_META_LINE_BASED; + } + } + + if (descr) + WARN_ON(strscpy(fmt->description, descr, sz) < 0); + fmt->flags |= flags; +} + +static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_fmtdesc *p = arg; + int ret = check_fmt(file, p->type); + u32 mbus_code; + u32 cap_mask; + + if (ret) + return ret; + ret = -EINVAL; + + if (!(vdev->device_caps & V4L2_CAP_IO_MC)) + p->mbus_code = 0; + + mbus_code = p->mbus_code; + memset_after(p, 0, type); + p->mbus_code = mbus_code; + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE; + if (!!(vdev->device_caps & cap_mask) != + (p->type == 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) + break; + + if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) + break; + ret = ops->vidioc_enum_fmt_vid_cap(file, NULL, arg); + break; + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) + break; + ret = ops->vidioc_enum_fmt_vid_overlay(file, NULL, arg); + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE; + if (!!(vdev->device_caps & cap_mask) != + (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) + break; + + if (unlikely(!ops->vidioc_enum_fmt_vid_out)) + break; + ret = ops->vidioc_enum_fmt_vid_out(file, NULL, arg); + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) + break; + ret = ops->vidioc_enum_fmt_sdr_cap(file, NULL, arg); + break; + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) + break; + ret = ops->vidioc_enum_fmt_sdr_out(file, NULL, arg); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) + break; + ret = ops->vidioc_enum_fmt_meta_cap(file, NULL, arg); + break; + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_enum_fmt_meta_out)) + break; + ret = ops->vidioc_enum_fmt_meta_out(file, NULL, arg); + break; + } + if (ret == 0) + v4l_fill_fmtdesc(p); + return ret; +} + +static void v4l_pix_format_touch(struct v4l2_pix_format *p) +{ + /* + * The v4l2_pix_format structure contains fields that make no sense for + * touch. Set them to default values in this case. 
+ */ + + p->field = V4L2_FIELD_NONE; + p->colorspace = V4L2_COLORSPACE_RAW; + p->flags = 0; + p->ycbcr_enc = 0; + p->quantization = 0; + p->xfer_func = 0; +} + +static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + memset(&p->fmt, 0, sizeof(p->fmt)); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_g_fmt_vid_cap)) + break; + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + ret = ops->vidioc_g_fmt_vid_cap(file, NULL, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + return ops->vidioc_g_fmt_vid_cap_mplane(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + return ops->vidioc_g_fmt_vid_overlay(file, NULL, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + return ops->vidioc_g_fmt_vbi_cap(file, NULL, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + return ops->vidioc_g_fmt_sliced_vbi_cap(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_g_fmt_vid_out)) + break; + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + ret = ops->vidioc_g_fmt_vid_out(file, NULL, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return ops->vidioc_g_fmt_vid_out_mplane(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + return ops->vidioc_g_fmt_vid_out_overlay(file, NULL, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + return ops->vidioc_g_fmt_vbi_out(file, NULL, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + return ops->vidioc_g_fmt_sliced_vbi_out(file, NULL, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + return ops->vidioc_g_fmt_sdr_cap(file, NULL, arg); + case 
V4L2_BUF_TYPE_SDR_OUTPUT: + return ops->vidioc_g_fmt_sdr_out(file, NULL, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + return ops->vidioc_g_fmt_meta_cap(file, NULL, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + return ops->vidioc_g_fmt_meta_out(file, NULL, arg); + } + return -EINVAL; +} + +static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + unsigned int i; + + if (ret) + return ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + v4l_sanitize_format(p); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_vid_cap)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_s_fmt_vid_cap(file, NULL, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_s_fmt_vid_cap_mplane(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_s_fmt_vid_overlay(file, NULL, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_s_fmt_vbi_cap(file, NULL, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_s_fmt_sliced_vbi_cap(file, NULL, arg); + case 
V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_vid_out)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_s_fmt_vid_out(file, NULL, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_s_fmt_vid_out_mplane(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_s_fmt_vid_out_overlay(file, NULL, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_vbi_out)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_s_fmt_vbi_out(file, NULL, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_s_fmt_sliced_vbi_out(file, NULL, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_s_fmt_sdr_cap(file, NULL, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_sdr_out)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_s_fmt_sdr_out(file, NULL, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_s_fmt_meta_cap)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_s_fmt_meta_cap(file, NULL, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_s_fmt_meta_out)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_s_fmt_meta_out(file, NULL, arg); + } + return -EINVAL; +} + +static int 
v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); + int ret = check_fmt(file, p->type); + unsigned int i; + + if (ret) + return ret; + + v4l_sanitize_format(p); + + switch (p->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_vid_cap)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_try_fmt_vid_cap(file, NULL, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); + return ret; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_try_fmt_vid_cap_mplane(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_try_fmt_vid_overlay(file, NULL, arg); + case V4L2_BUF_TYPE_VBI_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_try_fmt_vbi_cap(file, NULL, arg); + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_try_fmt_sliced_vbi_cap(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_vid_out)) + break; + memset_after(p, 0, fmt.pix); + ret = ops->vidioc_try_fmt_vid_out(file, NULL, arg); + /* just in case the driver zeroed it again */ + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + return ret; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if 
(unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) + break; + memset_after(p, 0, fmt.pix_mp.xfer_func); + for (i = 0; i < p->fmt.pix_mp.num_planes; i++) + memset_after(&p->fmt.pix_mp.plane_fmt[i], + 0, bytesperline); + return ops->vidioc_try_fmt_vid_out_mplane(file, NULL, arg); + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) + break; + memset_after(p, 0, fmt.win); + p->fmt.win.clips = NULL; + p->fmt.win.clipcount = 0; + p->fmt.win.bitmap = NULL; + return ops->vidioc_try_fmt_vid_out_overlay(file, NULL, arg); + case V4L2_BUF_TYPE_VBI_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_vbi_out)) + break; + memset_after(p, 0, fmt.vbi.flags); + return ops->vidioc_try_fmt_vbi_out(file, NULL, arg); + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) + break; + memset_after(p, 0, fmt.sliced.io_size); + return ops->vidioc_try_fmt_sliced_vbi_out(file, NULL, arg); + case V4L2_BUF_TYPE_SDR_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_try_fmt_sdr_cap(file, NULL, arg); + case V4L2_BUF_TYPE_SDR_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_sdr_out)) + break; + memset_after(p, 0, fmt.sdr.buffersize); + return ops->vidioc_try_fmt_sdr_out(file, NULL, arg); + case V4L2_BUF_TYPE_META_CAPTURE: + if (unlikely(!ops->vidioc_try_fmt_meta_cap)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_try_fmt_meta_cap(file, NULL, arg); + case V4L2_BUF_TYPE_META_OUTPUT: + if (unlikely(!ops->vidioc_try_fmt_meta_out)) + break; + memset_after(p, 0, fmt.meta); + return ops->vidioc_try_fmt_meta_out(file, NULL, arg); + } + return -EINVAL; +} + +static int v4l_streamon(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + return ops->vidioc_streamon(file, NULL, *(unsigned int *)arg); +} + +static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + return ops->vidioc_streamoff(file, 
NULL, *(unsigned int *)arg); +} + +static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_tuner *p = arg; + int err; + + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + err = ops->vidioc_g_tuner(file, NULL, p); + if (!err) + p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; + return err; +} + +static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_tuner *p = arg; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + return ops->vidioc_s_tuner(file, NULL, p); +} + +static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_modulator *p = arg; + int err; + + if (vfd->vfl_type == VFL_TYPE_RADIO) + p->type = V4L2_TUNER_RADIO; + + err = ops->vidioc_g_modulator(file, NULL, p); + if (!err) + p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; + return err; +} + +static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_modulator *p = arg; + + if (vfd->vfl_type == VFL_TYPE_RADIO) + p->type = V4L2_TUNER_RADIO; + + return ops->vidioc_s_modulator(file, NULL, p); +} + +static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_frequency *p = arg; + + if (vfd->vfl_type == VFL_TYPE_SDR) + p->type = V4L2_TUNER_SDR; + else + p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + return ops->vidioc_g_frequency(file, NULL, p); +} + +static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + const struct v4l2_frequency *p = arg; + enum v4l2_tuner_type type; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + if (vfd->vfl_type == VFL_TYPE_SDR) { + if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) + return -EINVAL; + } else { + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (type != p->type) + return -EINVAL; + } + return ops->vidioc_s_frequency(file, NULL, p); +} + +static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_standard *p = arg; + + return v4l_video_std_enumstd(p, vfd->tvnorms); +} + +static int v4l_s_std(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + v4l2_std_id id = *(v4l2_std_id *)arg, norm; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + norm = id & vfd->tvnorms; + if (vfd->tvnorms && !norm) /* Check if std is supported */ + return -EINVAL; + + /* Calls the specific handler */ + return ops->vidioc_s_std(file, NULL, norm); +} + +static int v4l_querystd(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + v4l2_std_id *p = arg; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + /* + * If no signal is detected, then the driver should return + * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with + * any standards that do not apply removed. + * + * This means that tuners, audio and video decoders can join + * their efforts to improve the standards detection. 
+ */ + *p = vfd->tvnorms; + return ops->vidioc_querystd(file, NULL, arg); +} + +static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_hw_freq_seek *p = arg; + enum v4l2_tuner_type type; + int ret; + + ret = v4l_enable_media_source(vfd); + if (ret) + return ret; + /* s_hw_freq_seek is not supported for SDR for now */ + if (vfd->vfl_type == VFL_TYPE_SDR) + return -EINVAL; + + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (p->type != type) + return -EINVAL; + return ops->vidioc_s_hw_freq_seek(file, NULL, p); +} + +static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_framebuffer *p = arg; + + p->base = NULL; + return ops->vidioc_s_fbuf(file, NULL, p); +} + +static int v4l_overlay(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + return ops->vidioc_overlay(file, NULL, *(unsigned int *)arg); +} + +static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_requestbuffers *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + memset_after(p, 0, flags); + + p->capabilities = 0; + if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS)) + p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS; + + return ops->vidioc_reqbufs(file, NULL, p); +} + +static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? ret : ops->vidioc_querybuf(file, NULL, p); +} + +static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? 
ret : ops->vidioc_qbuf(file, NULL, p); +} + +static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_buffer *p = arg; + int ret = check_fmt(file, p->type); + + return ret ? ret : ops->vidioc_dqbuf(file, NULL, p); +} + +static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_create_buffers *create = arg; + int ret = check_fmt(file, create->format.type); + + if (ret) + return ret; + + memset_after(create, 0, flags); + + v4l_sanitize_format(&create->format); + + create->capabilities = 0; + if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS)) + create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS; + + ret = ops->vidioc_create_bufs(file, NULL, create); + + if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) + create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + + return ret; +} + +static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_buffer *b = arg; + int ret = check_fmt(file, b->type); + + return ret ? 
ret : ops->vidioc_prepare_buf(file, NULL, b); +} + +static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_remove_buffers *remove = arg; + + if (ops->vidioc_remove_bufs) + return ops->vidioc_remove_bufs(file, NULL, remove); + + return -ENOTTY; +} + +static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_streamparm *p = arg; + v4l2_std_id std; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + if (ops->vidioc_g_parm) + return ops->vidioc_g_parm(file, NULL, p); + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + return -EINVAL; + if (vfd->device_caps & V4L2_CAP_READWRITE) + p->parm.capture.readbuffers = 2; + ret = ops->vidioc_g_std(file, NULL, &std); + if (ret == 0) + v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe); + return ret; +} + +static int v4l_s_parm(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_streamparm *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + /* Note: extendedmode is never used in drivers */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) { + memset(p->parm.output.reserved, 0, + sizeof(p->parm.output.reserved)); + p->parm.output.extendedmode = 0; + p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY; + } else { + memset(p->parm.capture.reserved, 0, + sizeof(p->parm.capture.reserved)); + p->parm.capture.extendedmode = 0; + p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY; + } + return ops->vidioc_s_parm(file, NULL, p); +} + +static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_query_ext_ctrl qec = {}; + struct v4l2_queryctrl *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + int ret; + + if (vfh && vfh->ctrl_handler) + return 
v4l2_queryctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_queryctrl(vfd->ctrl_handler, p); + if (!ops->vidioc_query_ext_ctrl) + return -ENOTTY; + + /* Simulate query_ext_ctr using query_ctrl. */ + qec.id = p->id; + ret = ops->vidioc_query_ext_ctrl(file, NULL, &qec); + if (ret) + return ret; + v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec); + return ret; +} + +static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_query_ext_ctrl *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + if (vfh && vfh->ctrl_handler) + return v4l2_query_ext_ctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_query_ext_ctrl(vfd->ctrl_handler, p); + if (ops->vidioc_query_ext_ctrl) + return ops->vidioc_query_ext_ctrl(file, NULL, p); + return -ENOTTY; +} + +static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_querymenu *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + if (vfh && vfh->ctrl_handler) + return v4l2_querymenu(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_querymenu(vfd->ctrl_handler, p); + if (ops->vidioc_querymenu) + return ops->vidioc_querymenu(file, NULL, p); + return -ENOTTY; +} + +static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_control *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + struct v4l2_ext_controls ctrls; + struct v4l2_ext_control ctrl; + + if (vfh && vfh->ctrl_handler) + return v4l2_g_ctrl(vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_g_ctrl(vfd->ctrl_handler, p); + if (ops->vidioc_g_ext_ctrls == NULL) + return -ENOTTY; + + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); + ctrls.count = 1; + ctrls.controls = &ctrl; + ctrl.id = p->id; + ctrl.value = p->value; + if 
(check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) { + int ret = ops->vidioc_g_ext_ctrls(file, NULL, &ctrls); + + if (ret == 0) + p->value = ctrl.value; + return ret; + } + return -EINVAL; +} + +static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_control *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + struct v4l2_ext_controls ctrls; + struct v4l2_ext_control ctrl; + int ret; + + if (vfh && vfh->ctrl_handler) + return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p); + if (vfd->ctrl_handler) + return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p); + if (ops->vidioc_s_ext_ctrls == NULL) + return -ENOTTY; + + ctrls.which = V4L2_CTRL_ID2WHICH(p->id); + ctrls.count = 1; + ctrls.controls = &ctrl; + ctrl.id = p->id; + ctrl.value = p->value; + if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL)) + return -EINVAL; + ret = ops->vidioc_s_ext_ctrls(file, NULL, &ctrls); + p->value = ctrl.value; + return ret; +} + +static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_g_ext_ctrls(vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_g_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ? 
+ ops->vidioc_g_ext_ctrls(file, NULL, p) : -EINVAL; +} + +static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_s_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ? + ops->vidioc_s_ext_ctrls(file, NULL, p) : -EINVAL; +} + +static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_ext_controls *p = arg; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + p->error_idx = p->count; + if (vfh && vfh->ctrl_handler) + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (vfd->ctrl_handler) + return v4l2_try_ext_ctrls(vfd->ctrl_handler, + vfd, vfd->v4l2_dev->mdev, p); + if (ops->vidioc_try_ext_ctrls == NULL) + return -ENOTTY; + return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ? + ops->vidioc_try_ext_ctrls(file, NULL, p) : -EINVAL; +} + +/* + * The selection API specified originally that the _MPLANE buffer types + * shouldn't be used. The reasons for this are lost in the mists of time + * (or just really crappy memories). Regardless, this is really annoying + * for userspace. So to keep things simple we map _MPLANE buffer types + * to their 'regular' counterparts before calling the driver. And we + * restore it afterwards. This way applications can use either buffer + * type and drivers don't need to check for both. 
+ */ +static int v4l_g_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_g_selection(file, NULL, p); + p->type = old_type; + return ret; +} + +static int v4l_s_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_s_selection(file, NULL, p); + p->type = old_type; + return ret; +} + +static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_crop *p = arg; + struct v4l2_selection s = { + .type = p->type, + }; + int ret; + + /* simulate capture crop using selection api */ + + /* crop means compose for output devices */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE; + else + s.target = V4L2_SEL_TGT_CROP; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE ? 
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; + + ret = v4l_g_selection(ops, file, &s); + + /* copying results to old structure on success */ + if (!ret) + p->c = s.r; + return ret; +} + +static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_crop *p = arg; + struct v4l2_selection s = { + .type = p->type, + .r = p->c, + }; + + /* simulate capture crop using selection api */ + + /* crop means compose for output devices */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE; + else + s.target = V4L2_SEL_TGT_CROP; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE ? + V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; + + return v4l_s_selection(ops, file, &s); +} + +static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_cropcap *p = arg; + struct v4l2_selection s = { .type = p->type }; + int ret = 0; + + /* setting trivial pixelaspect */ + p->pixelaspect.numerator = 1; + p->pixelaspect.denominator = 1; + + if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + /* + * The determine_valid_ioctls() call already should ensure + * that this can never happen, but just in case... + */ + if (WARN_ON(!ops->vidioc_g_selection)) + return -ENOTTY; + + if (ops->vidioc_g_pixelaspect) + ret = ops->vidioc_g_pixelaspect(file, NULL, s.type, + &p->pixelaspect); + + /* + * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the + * square pixel aspect ratio in that case. 
+ */ + if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD) + return ret; + + /* Use g_selection() to fill in the bounds and defrect rectangles */ + + /* obtaining bounds */ + if (V4L2_TYPE_IS_OUTPUT(p->type)) + s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS; + else + s.target = V4L2_SEL_TGT_CROP_BOUNDS; + + if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) + s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ? + V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS; + + ret = v4l_g_selection(ops, file, &s); + if (ret) + return ret; + p->bounds = s.r; + + /* obtaining defrect */ + if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS) + s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT; + else + s.target = V4L2_SEL_TGT_CROP_DEFAULT; + + ret = v4l_g_selection(ops, file, &s); + if (ret) + return ret; + p->defrect = s.r; + + return 0; +} + +static int v4l_log_status(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct video_device *vfd = video_devdata(file); + int ret; + + if (vfd->v4l2_dev) + pr_info("%s: ================= START STATUS =================\n", + vfd->v4l2_dev->name); + ret = ops->vidioc_log_status(file, NULL); + if (vfd->v4l2_dev) + pr_info("%s: ================== END STATUS ==================\n", + vfd->v4l2_dev->name); + return ret; +} + +static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + struct v4l2_dbg_register *p = arg; + struct video_device *vfd = video_devdata(file); + struct v4l2_subdev *sd; + int idx = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { + if (vfd->v4l2_dev == NULL) + return -EINVAL; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) + if (p->match.addr == idx++) + return v4l2_subdev_call(sd, core, g_register, p); + return -EINVAL; + } + if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && + (ops->vidioc_g_chip_info || p->match.addr == 0)) + return ops->vidioc_g_register(file, 
NULL, p); + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + const struct v4l2_dbg_register *p = arg; + struct video_device *vfd = video_devdata(file); + struct v4l2_subdev *sd; + int idx = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { + if (vfd->v4l2_dev == NULL) + return -EINVAL; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) + if (p->match.addr == idx++) + return v4l2_subdev_call(sd, core, s_register, p); + return -EINVAL; + } + if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && + (ops->vidioc_g_chip_info || p->match.addr == 0)) + return ops->vidioc_s_register(file, NULL, p); + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ +#ifdef CONFIG_VIDEO_ADV_DEBUG + struct video_device *vfd = video_devdata(file); + struct v4l2_dbg_chip_info *p = arg; + struct v4l2_subdev *sd; + int idx = 0; + + switch (p->match.type) { + case V4L2_CHIP_MATCH_BRIDGE: + if (ops->vidioc_s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (ops->vidioc_g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name)); + if (ops->vidioc_g_chip_info) + return ops->vidioc_g_chip_info(file, NULL, arg); + if (p->match.addr) + return -EINVAL; + return 0; + + case V4L2_CHIP_MATCH_SUBDEV: + if (vfd->v4l2_dev == NULL) + break; + v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) { + if (p->match.addr != idx++) + continue; + if (sd->ops->core && sd->ops->core->s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (sd->ops->core && sd->ops->core->g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, sd->name, sizeof(p->name)); + return 0; + } + break; + } + return -EINVAL; +#else + return -ENOTTY; +#endif +} + +static int 
v4l_dqevent(const struct v4l2_ioctl_ops *ops, struct file *file, + void *arg) +{ + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); +} + +static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + return ops->vidioc_subscribe_event(vfh, arg); +} + +static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + + return ops->vidioc_unsubscribe_event(vfh, arg); +} + +static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct v4l2_sliced_vbi_cap *p = arg; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + + /* Clear up to type, everything after type is zeroed already */ + memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type)); + + return ops->vidioc_g_sliced_vbi_cap(file, NULL, p); +} + +static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, + struct file *file, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct v4l2_frequency_band *p = arg; + enum v4l2_tuner_type type; + int err; + + if (vfd->vfl_type == VFL_TYPE_SDR) { + if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) + return -EINVAL; + type = p->type; + } else { + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; + if (type != p->type) + return -EINVAL; + } + if (ops->vidioc_enum_freq_bands) { + err = ops->vidioc_enum_freq_bands(file, NULL, p); + if (err != -ENOTTY) + return err; + } + if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) { + struct v4l2_tuner t = { + .index = p->tuner, + .type = type, + }; + + if (p->index) + return -EINVAL; + err = ops->vidioc_g_tuner(file, NULL, &t); + if (err) + return err; + p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS; + p->rangelow = t.rangelow; + p->rangehigh = t.rangehigh; + p->modulation = (type == V4L2_TUNER_RADIO) ? + V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB; + return 0; + } + if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) { + struct v4l2_modulator m = { + .index = p->tuner, + }; + + if (type != V4L2_TUNER_RADIO) + return -EINVAL; + if (p->index) + return -EINVAL; + err = ops->vidioc_g_modulator(file, NULL, &m); + if (err) + return err; + p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS; + p->rangelow = m.rangelow; + p->rangehigh = m.rangehigh; + p->modulation = V4L2_BAND_MODULATION_FM; + return 0; + } + return -ENOTTY; +} + +struct v4l2_ioctl_info { + unsigned int ioctl; + u32 flags; + const char * const name; + int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file, + void *p); + void (*debug)(const void *arg, bool write_only); +}; + +/* This control needs a priority check */ +#define INFO_FL_PRIO (1 << 0) +/* This control can be valid if the filehandle passes a control handler. 
*/ +#define INFO_FL_CTRL (1 << 1) +/* Queuing ioctl */ +#define INFO_FL_QUEUE (1 << 2) +/* Always copy back result, even on error */ +#define INFO_FL_ALWAYS_COPY (1 << 3) +/* Zero struct from after the field to the end */ +#define INFO_FL_CLEAR(v4l2_struct, field) \ + ((offsetof(struct v4l2_struct, field) + \ + sizeof_field(struct v4l2_struct, field)) << 16) +#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) + +#define DEFINE_V4L_STUB_FUNC(_vidioc) \ + static int v4l_stub_ ## _vidioc( \ + const struct v4l2_ioctl_ops *ops, \ + struct file *file, void *p) \ + { \ + return ops->vidioc_ ## _vidioc(file, NULL, p); \ + } + +#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \ + [_IOC_NR(_ioctl)] = { \ + .ioctl = _ioctl, \ + .flags = _flags, \ + .name = #_ioctl, \ + .func = _func, \ + .debug = _debug, \ + } + +DEFINE_V4L_STUB_FUNC(g_fbuf) +DEFINE_V4L_STUB_FUNC(expbuf) +DEFINE_V4L_STUB_FUNC(g_std) +DEFINE_V4L_STUB_FUNC(g_audio) +DEFINE_V4L_STUB_FUNC(s_audio) +DEFINE_V4L_STUB_FUNC(g_edid) +DEFINE_V4L_STUB_FUNC(s_edid) +DEFINE_V4L_STUB_FUNC(g_audout) +DEFINE_V4L_STUB_FUNC(s_audout) +DEFINE_V4L_STUB_FUNC(g_jpegcomp) +DEFINE_V4L_STUB_FUNC(s_jpegcomp) +DEFINE_V4L_STUB_FUNC(enumaudio) +DEFINE_V4L_STUB_FUNC(enumaudout) +DEFINE_V4L_STUB_FUNC(enum_framesizes) +DEFINE_V4L_STUB_FUNC(enum_frameintervals) +DEFINE_V4L_STUB_FUNC(g_enc_index) +DEFINE_V4L_STUB_FUNC(encoder_cmd) +DEFINE_V4L_STUB_FUNC(try_encoder_cmd) +DEFINE_V4L_STUB_FUNC(decoder_cmd) +DEFINE_V4L_STUB_FUNC(try_decoder_cmd) +DEFINE_V4L_STUB_FUNC(s_dv_timings) +DEFINE_V4L_STUB_FUNC(g_dv_timings) +DEFINE_V4L_STUB_FUNC(enum_dv_timings) +DEFINE_V4L_STUB_FUNC(query_dv_timings) +DEFINE_V4L_STUB_FUNC(dv_timings_cap) + +static const struct v4l2_ioctl_info v4l2_ioctls[] = { + IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0), + IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0), + IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0), + IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO), 
+ IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)), + IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0), + IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)), + IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)), + IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0), + IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)), + IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)), + IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)), + IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL), + IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)), + IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0), + IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | 
INFO_FL_CLEAR(v4l2_queryctrl, id)), + IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)), + IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)), + IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0), + IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)), + IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)), + IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)), + IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)), + IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0), + IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0), + 
IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0), + IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)), + IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)), + IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0), + IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)), + IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0), + IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), + IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), + IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0), + IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0), + IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0), + IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0), + IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, 
v4l_print_hw_freq_seek, INFO_FL_PRIO), + IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)), + IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0), + IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0), + IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0), + IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0), + IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE), + IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)), + IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)), + IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0), + IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)), + IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)), + IOCTL_INFO(VIDIOC_REMOVE_BUFS, v4l_remove_bufs, v4l_print_remove_buffers, INFO_FL_PRIO | INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_remove_buffers, type)), +}; +#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) + +static bool v4l2_is_known_ioctl(unsigned int cmd) +{ + if (_IOC_NR(cmd) >= V4L2_IOCTLS) + return false; + return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd; +} + +static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, + struct v4l2_fh *vfh, unsigned int cmd, + void *arg) +{ + if (_IOC_NR(cmd) >= V4L2_IOCTLS) + return vdev->lock; + if (vfh && vfh->m2m_ctx && + 
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) { + if (vfh->m2m_ctx->q_lock) + return vfh->m2m_ctx->q_lock; + } + if (vdev->queue && vdev->queue->lock && + (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) + return vdev->queue->lock; + return vdev->lock; +} + +/* Common ioctl debug function. This function can be used by + external ioctl messages as well as internal V4L ioctl */ +void v4l_printk_ioctl(const char *prefix, unsigned int cmd) +{ + const char *dir, *type; + + if (prefix) + printk(KERN_DEBUG "%s: ", prefix); + + switch (_IOC_TYPE(cmd)) { + case 'd': + type = "v4l2_int"; + break; + case 'V': + if (!v4l2_is_known_ioctl(cmd)) { + type = "v4l2"; + break; + } + pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name); + return; + default: + type = "unknown"; + break; + } + + switch (_IOC_DIR(cmd)) { + case _IOC_NONE: dir = "--"; break; + case _IOC_READ: dir = "r-"; break; + case _IOC_WRITE: dir = "-w"; break; + case _IOC_READ | _IOC_WRITE: dir = "rw"; break; + default: dir = "*ERR*"; break; + } + pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)", + type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd); +} +EXPORT_SYMBOL(v4l_printk_ioctl); + +static long __video_do_ioctl(struct file *file, + unsigned int cmd, void *arg) +{ + struct video_device *vfd = video_devdata(file); + struct mutex *req_queue_lock = NULL; + struct mutex *lock; /* ioctl serialization mutex */ + const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; + bool write_only = false; + struct v4l2_ioctl_info default_info; + const struct v4l2_ioctl_info *info; + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + int dev_debug = vfd->dev_debug; + long ret = -ENOTTY; + + if (ops == NULL) { + pr_warn("%s: has no ioctl_ops.\n", + video_device_node_name(vfd)); + return ret; + } + + /* + * We need to serialize streamon/off with queueing new requests. + * These ioctls may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. 
+ */ + if (v4l2_device_supports_requests(vfd->v4l2_dev) && + (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) { + req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex; + + if (mutex_lock_interruptible(req_queue_lock)) + return -ERESTARTSYS; + } + + lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg); + + if (lock && mutex_lock_interruptible(lock)) { + if (req_queue_lock) + mutex_unlock(req_queue_lock); + return -ERESTARTSYS; + } + + if (!video_is_registered(vfd)) { + ret = -ENODEV; + goto unlock; + } + + if (v4l2_is_known_ioctl(cmd)) { + info = &v4l2_ioctls[_IOC_NR(cmd)]; + + if (!is_valid_ioctl(vfd, cmd) && + !((info->flags & INFO_FL_CTRL) && vfh->ctrl_handler)) + goto done; + + if (info->flags & INFO_FL_PRIO) { + ret = v4l2_prio_check(vfd->prio, vfh->prio); + if (ret) + goto done; + } + } else { + default_info.ioctl = cmd; + default_info.flags = 0; + default_info.debug = v4l_print_default; + info = &default_info; + } + + write_only = _IOC_DIR(cmd) == _IOC_WRITE; + if (info != &default_info) { + ret = info->func(ops, file, arg); + } else if (!ops->vidioc_default) { + ret = -ENOTTY; + } else { + ret = ops->vidioc_default(file, NULL, + v4l2_prio_check(vfd->prio, vfh->prio) >= 0, + cmd, arg); + } + +done: + if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) { + if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) && + (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF)) + goto unlock; + + v4l_printk_ioctl(video_device_node_name(vfd), cmd); + if (ret < 0) + pr_cont(": error %ld", ret); + if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG)) + pr_cont("\n"); + else if (_IOC_DIR(cmd) == _IOC_NONE) + info->debug(arg, write_only); + else { + pr_cont(": "); + info->debug(arg, write_only); + } + } + +unlock: + if (lock) + mutex_unlock(lock); + if (req_queue_lock) + mutex_unlock(req_queue_lock); + return ret; +} + +static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, + void __user **user_ptr, void ***kernel_ptr) +{ + int ret = 0; + + switch (cmd) { + case 
VIDIOC_PREPARE_BUF: + case VIDIOC_QUERYBUF: + case VIDIOC_QBUF: + case VIDIOC_DQBUF: { + struct v4l2_buffer *buf = parg; + + if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) { + if (buf->length > VIDEO_MAX_PLANES) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)buf->m.planes; + *kernel_ptr = (void **)&buf->m.planes; + *array_size = sizeof(struct v4l2_plane) * buf->length; + ret = 1; + } + break; + } + + case VIDIOC_G_EDID: + case VIDIOC_S_EDID: { + struct v4l2_edid *edid = parg; + + if (edid->blocks) { + if (edid->blocks > 256) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)edid->edid; + *kernel_ptr = (void **)&edid->edid; + *array_size = edid->blocks * 128; + ret = 1; + } + break; + } + + case VIDIOC_S_EXT_CTRLS: + case VIDIOC_G_EXT_CTRLS: + case VIDIOC_TRY_EXT_CTRLS: { + struct v4l2_ext_controls *ctrls = parg; + + if (ctrls->count != 0) { + if (ctrls->count > V4L2_CID_MAX_CTRLS) { + ret = -EINVAL; + break; + } + *user_ptr = (void __user *)ctrls->controls; + *kernel_ptr = (void **)&ctrls->controls; + *array_size = sizeof(struct v4l2_ext_control) + * ctrls->count; + ret = 1; + } + break; + } + + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *routing = parg; + + if (routing->len_routes > 256) + return -E2BIG; + + *user_ptr = u64_to_user_ptr(routing->routes); + *kernel_ptr = (void **)&routing->routes; + *array_size = sizeof(struct v4l2_subdev_route) + * routing->len_routes; + ret = 1; + break; + } + } + + return ret; +} + +unsigned int v4l2_translate_cmd(unsigned int cmd) +{ +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_DQEVENT_TIME32: + return VIDIOC_DQEVENT; + case VIDIOC_QUERYBUF_TIME32: + return VIDIOC_QUERYBUF; + case VIDIOC_QBUF_TIME32: + return VIDIOC_QBUF; + case VIDIOC_DQBUF_TIME32: + return VIDIOC_DQBUF; + case VIDIOC_PREPARE_BUF_TIME32: + return VIDIOC_PREPARE_BUF; + } +#endif + if (in_compat_syscall()) + return 
v4l2_compat_translate_cmd(cmd); + + return cmd; +} +EXPORT_SYMBOL_GPL(v4l2_translate_cmd); + +static int video_get_user(void __user *arg, void *parg, + unsigned int real_cmd, unsigned int cmd, + bool *always_copy) +{ + unsigned int n = _IOC_SIZE(real_cmd); + int err = 0; + + if (!(_IOC_DIR(cmd) & _IOC_WRITE)) { + /* read-only ioctl */ + memset(parg, 0, n); + return 0; + } + + /* + * In some cases, only a few fields are used as input, + * i.e. when the app sets "index" and then the driver + * fills in the rest of the structure for the thing + * with that index. We only need to copy up the first + * non-input field. + */ + if (v4l2_is_known_ioctl(real_cmd)) { + u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags; + + if (flags & INFO_FL_CLEAR_MASK) + n = (flags & INFO_FL_CLEAR_MASK) >> 16; + *always_copy = flags & INFO_FL_ALWAYS_COPY; + } + + if (cmd == real_cmd) { + if (copy_from_user(parg, (void __user *)arg, n)) + err = -EFAULT; + } else if (in_compat_syscall()) { + memset(parg, 0, n); + err = v4l2_compat_get_user(arg, parg, cmd); + } else { + memset(parg, 0, n); +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_QUERYBUF_TIME32: + case VIDIOC_QBUF_TIME32: + case VIDIOC_DQBUF_TIME32: + case VIDIOC_PREPARE_BUF_TIME32: { + struct v4l2_buffer_time32 vb32; + struct v4l2_buffer *vb = parg; + + if (copy_from_user(&vb32, arg, sizeof(vb32))) + return -EFAULT; + + *vb = (struct v4l2_buffer) { + .index = vb32.index, + .type = vb32.type, + .bytesused = vb32.bytesused, + .flags = vb32.flags, + .field = vb32.field, + .timestamp.tv_sec = vb32.timestamp.tv_sec, + .timestamp.tv_usec = vb32.timestamp.tv_usec, + .timecode = vb32.timecode, + .sequence = vb32.sequence, + .memory = vb32.memory, + .m.userptr = vb32.m.userptr, + .length = vb32.length, + .request_fd = vb32.request_fd, + }; + break; + } + } +#endif + } + + /* zero out anything we don't copy from userspace */ + if (!err && n < _IOC_SIZE(real_cmd)) + memset((u8 *)parg + n, 0, 
_IOC_SIZE(real_cmd) - n); + return err; +} + +static int video_put_user(void __user *arg, void *parg, + unsigned int real_cmd, unsigned int cmd) +{ + if (!(_IOC_DIR(cmd) & _IOC_READ)) + return 0; + + if (cmd == real_cmd) { + /* Copy results into user buffer */ + if (copy_to_user(arg, parg, _IOC_SIZE(cmd))) + return -EFAULT; + return 0; + } + + if (in_compat_syscall()) + return v4l2_compat_put_user(arg, parg, cmd); + +#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) + switch (cmd) { + case VIDIOC_DQEVENT_TIME32: { + struct v4l2_event *ev = parg; + struct v4l2_event_time32 ev32; + + memset(&ev32, 0, sizeof(ev32)); + + ev32.type = ev->type; + ev32.pending = ev->pending; + ev32.sequence = ev->sequence; + ev32.timestamp.tv_sec = ev->timestamp.tv_sec; + ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec; + ev32.id = ev->id; + + memcpy(&ev32.u, &ev->u, sizeof(ev->u)); + memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved)); + + if (copy_to_user(arg, &ev32, sizeof(ev32))) + return -EFAULT; + break; + } + case VIDIOC_QUERYBUF_TIME32: + case VIDIOC_QBUF_TIME32: + case VIDIOC_DQBUF_TIME32: + case VIDIOC_PREPARE_BUF_TIME32: { + struct v4l2_buffer *vb = parg; + struct v4l2_buffer_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + + vb32.index = vb->index; + vb32.type = vb->type; + vb32.bytesused = vb->bytesused; + vb32.flags = vb->flags; + vb32.field = vb->field; + vb32.timestamp.tv_sec = vb->timestamp.tv_sec; + vb32.timestamp.tv_usec = vb->timestamp.tv_usec; + vb32.timecode = vb->timecode; + vb32.sequence = vb->sequence; + vb32.memory = vb->memory; + vb32.m.userptr = vb->m.userptr; + vb32.length = vb->length; + vb32.request_fd = vb->request_fd; + + if (copy_to_user(arg, &vb32, sizeof(vb32))) + return -EFAULT; + break; + } + } +#endif + + return 0; +} + +long +video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, + v4l2_kioctl func) +{ + char sbuf[128]; + void *mbuf = NULL, *array_buf = NULL; + void *parg = (void *)arg; + long err = 
-EINVAL; + bool has_array_args; + bool always_copy = false; + size_t array_size = 0; + void __user *user_ptr = NULL; + void **kernel_ptr = NULL; + unsigned int cmd = v4l2_translate_cmd(orig_cmd); + const size_t ioc_size = _IOC_SIZE(cmd); + + /* Copy arguments into temp kernel buffer */ + if (_IOC_DIR(cmd) != _IOC_NONE) { + if (ioc_size <= sizeof(sbuf)) { + parg = sbuf; + } else { + /* too big to allocate from stack */ + mbuf = kmalloc(ioc_size, GFP_KERNEL); + if (NULL == mbuf) + return -ENOMEM; + parg = mbuf; + } + + err = video_get_user((void __user *)arg, parg, cmd, + orig_cmd, &always_copy); + if (err) + goto out; + } + + err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr); + if (err < 0) + goto out; + has_array_args = err; + + if (has_array_args) { + array_buf = kvmalloc(array_size, GFP_KERNEL); + err = -ENOMEM; + if (array_buf == NULL) + goto out; + if (in_compat_syscall()) + err = v4l2_compat_get_array_args(file, array_buf, + user_ptr, array_size, + orig_cmd, parg); + else + err = copy_from_user(array_buf, user_ptr, array_size) ? + -EFAULT : 0; + if (err) + goto out; + *kernel_ptr = array_buf; + } + + /* Handles IOCTL */ + err = func(file, cmd, parg); + if (err == -ENOTTY || err == -ENOIOCTLCMD) { + err = -ENOTTY; + goto out; + } + + if (err == 0) { + if (cmd == VIDIOC_DQBUF) + trace_v4l2_dqbuf(video_devdata(file)->minor, parg); + else if (cmd == VIDIOC_QBUF) + trace_v4l2_qbuf(video_devdata(file)->minor, parg); + } + + /* + * Some ioctls can return an error, but still have valid + * results that must be returned. + * + * FIXME: subdev IOCTLS are partially handled here and partially in + * v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS + * defined here as part of the 'v4l2_ioctls' array. As + * VIDIOC_SUBDEV_[GS]_ROUTING needs to return results to applications + * even in case of failure, but it is not defined here as part of the + * 'v4l2_ioctls' array, insert an ad-hoc check to address that. 
+ */ + if (cmd == VIDIOC_SUBDEV_G_ROUTING || cmd == VIDIOC_SUBDEV_S_ROUTING) + always_copy = true; + + if (err < 0 && !always_copy) + goto out; + + if (has_array_args) { + *kernel_ptr = (void __force *)user_ptr; + if (in_compat_syscall()) { + int put_err; + + put_err = v4l2_compat_put_array_args(file, user_ptr, + array_buf, + array_size, + orig_cmd, parg); + if (put_err) + err = put_err; + } else if (copy_to_user(user_ptr, array_buf, array_size)) { + err = -EFAULT; + } + } + + if (video_put_user((void __user *)arg, parg, cmd, orig_cmd)) + err = -EFAULT; +out: + kvfree(array_buf); + kfree(mbuf); + return err; +} + +long video_ioctl2(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return video_usercopy(file, cmd, arg, __video_do_ioctl); +} +EXPORT_SYMBOL(video_ioctl2); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-jpeg.c b/6.18.0/drivers/media/v4l2-core/v4l2-jpeg.c new file mode 100644 index 0000000..36a0f1a --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-jpeg.c @@ -0,0 +1,713 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 JPEG header parser helpers. 
+ * + * Copyright (C) 2019 Pengutronix, Philipp Zabel + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1] + * + * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf + */ + +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("V4L2 JPEG header parser helpers"); +MODULE_AUTHOR("Philipp Zabel "); +MODULE_LICENSE("GPL"); + +/* Table B.1 - Marker code assignments */ +#define SOF0 0xffc0 /* start of frame */ +#define SOF1 0xffc1 +#define SOF2 0xffc2 +#define SOF3 0xffc3 +#define SOF5 0xffc5 +#define SOF7 0xffc7 +#define JPG 0xffc8 /* extensions */ +#define SOF9 0xffc9 +#define SOF11 0xffcb +#define SOF13 0xffcd +#define SOF15 0xffcf +#define DHT 0xffc4 /* huffman table */ +#define DAC 0xffcc /* arithmetic coding conditioning */ +#define RST0 0xffd0 /* restart */ +#define RST7 0xffd7 +#define SOI 0xffd8 /* start of image */ +#define EOI 0xffd9 /* end of image */ +#define SOS 0xffda /* start of stream */ +#define DQT 0xffdb /* quantization table */ +#define DNL 0xffdc /* number of lines */ +#define DRI 0xffdd /* restart interval */ +#define DHP 0xffde /* hierarchical progression */ +#define EXP 0xffdf /* expand reference */ +#define APP0 0xffe0 /* application data */ +#define APP14 0xffee /* application data for colour encoding */ +#define APP15 0xffef +#define JPG0 0xfff0 /* extensions */ +#define JPG13 0xfffd +#define COM 0xfffe /* comment */ +#define TEM 0xff01 /* temporary */ + +/* Luma and chroma qp tables to achieve 50% compression quality + * This is as per example in Annex K.1 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_qt); + +const u8 
v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_qt); + +/* Zigzag scan pattern indexes */ +const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_zigzag_scan_index); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the luminance DC + * coefficient differences. The table represents Table K.3 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = { + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_dc_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the luminance AC + * coefficients. 
The table represents Table K.5 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = { + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, + 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, + 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, + 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, + 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, + 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, + 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, + 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, + 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, + 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_ac_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an interchange format JPEG + * stream or an abbreviated format table specification data stream. + * Specifies the huffman table used for encoding the chrominance DC coefficient differences. 
+ * The table represents Table K.4 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = { + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_dc_ht); + +/* + * Contains the data that needs to be sent in the marker segment of an + * interchange format JPEG stream or an abbreviated format table specification + * data stream. Specifies the huffman table used for encoding the chrominance + * AC coefficients. The table represents Table K.6 of ITU-T.81 + */ +const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = { + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, + 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, + 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, + 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, + 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, + 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, + 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, + 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, + 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, + 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, + 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA +}; +EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_ac_ht); + +/** + * struct jpeg_stream - JPEG byte stream + * @curr: current position in stream + * @end: end position, 
after last byte + */ +struct jpeg_stream { + u8 *curr; + u8 *end; +}; + +/* returns a value that fits into u8, or negative error */ +static int jpeg_get_byte(struct jpeg_stream *stream) +{ + if (stream->curr >= stream->end) + return -EINVAL; + + return *stream->curr++; +} + +/* returns a value that fits into u16, or negative error */ +static int jpeg_get_word_be(struct jpeg_stream *stream) +{ + u16 word; + + if (stream->curr + sizeof(__be16) > stream->end) + return -EINVAL; + + word = get_unaligned_be16(stream->curr); + stream->curr += sizeof(__be16); + + return word; +} + +static int jpeg_skip(struct jpeg_stream *stream, size_t len) +{ + if (stream->curr + len > stream->end) + return -EINVAL; + + stream->curr += len; + + return 0; +} + +static int jpeg_next_marker(struct jpeg_stream *stream) +{ + int byte; + u16 marker = 0; + + while ((byte = jpeg_get_byte(stream)) >= 0) { + marker = (marker << 8) | byte; + /* skip stuffing bytes and REServed markers */ + if (marker == TEM || (marker > 0xffbf && marker < 0xffff)) + return marker; + } + + return byte; +} + +/* this does not advance the current position in the stream */ +static int jpeg_reference_segment(struct jpeg_stream *stream, + struct v4l2_jpeg_reference *segment) +{ + u16 len; + + if (stream->curr + sizeof(__be16) > stream->end) + return -EINVAL; + + len = get_unaligned_be16(stream->curr); + if (stream->curr + len > stream->end) + return -EINVAL; + + segment->start = stream->curr; + segment->length = len; + + return 0; +} + +static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v) +{ + if (nf == 1) + return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; + + /* no chroma subsampling for 4-component images */ + if (nf == 4 && h_v != 0x11) + return -EINVAL; + + switch (h_v) { + case 0x11: + return V4L2_JPEG_CHROMA_SUBSAMPLING_444; + case 0x21: + return V4L2_JPEG_CHROMA_SUBSAMPLING_422; + case 0x22: + return V4L2_JPEG_CHROMA_SUBSAMPLING_420; + case 0x41: + return V4L2_JPEG_CHROMA_SUBSAMPLING_411; + default: + return -EINVAL; + 
} +} + +static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker, + struct v4l2_jpeg_frame_header *frame_header) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Lf = 8 + 3 * Nf, Nf >= 1 */ + if (len < 8 + 3) + return -EINVAL; + + if (frame_header) { + /* Table B.2 - Frame header parameter sizes and values */ + int p, y, x, nf; + int i; + + p = jpeg_get_byte(stream); + if (p < 0) + return p; + /* + * Baseline DCT only supports 8-bit precision. + * Extended sequential DCT also supports 12-bit precision. + */ + if (p != 8 && (p != 12 || sof_marker != SOF1)) + return -EINVAL; + + y = jpeg_get_word_be(stream); + if (y < 0) + return y; + if (y == 0) + return -EINVAL; + + x = jpeg_get_word_be(stream); + if (x < 0) + return x; + if (x == 0) + return -EINVAL; + + nf = jpeg_get_byte(stream); + if (nf < 0) + return nf; + /* + * The spec allows 1 <= Nf <= 255, but we only support up to 4 + * components. + */ + if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS) + return -EINVAL; + if (len != 8 + 3 * nf) + return -EINVAL; + + frame_header->precision = p; + frame_header->height = y; + frame_header->width = x; + frame_header->num_components = nf; + + for (i = 0; i < nf; i++) { + struct v4l2_jpeg_frame_component_spec *component; + int c, h_v, tq; + + c = jpeg_get_byte(stream); + if (c < 0) + return c; + + h_v = jpeg_get_byte(stream); + if (h_v < 0) + return h_v; + if (i == 0) { + int subs; + + subs = v4l2_jpeg_decode_subsampling(nf, h_v); + if (subs < 0) + return subs; + frame_header->subsampling = subs; + } else if (h_v != 0x11) { + /* all chroma sampling factors must be 1 */ + return -EINVAL; + } + + tq = jpeg_get_byte(stream); + if (tq < 0) + return tq; + + component = &frame_header->component[i]; + component->component_identifier = c; + component->horizontal_sampling_factor = + (h_v >> 4) & 0xf; + component->vertical_sampling_factor = h_v & 0xf; + component->quantization_table_selector = tq; + } + } else { + return jpeg_skip(stream, 
len - 2); + } + + return 0; +} + +static int jpeg_parse_scan_header(struct jpeg_stream *stream, + struct v4l2_jpeg_scan_header *scan_header) +{ + size_t skip; + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Ls = 8 + 3 * Ns, Ns >= 1 */ + if (len < 6 + 2) + return -EINVAL; + + if (scan_header) { + int ns; + int i; + + ns = jpeg_get_byte(stream); + if (ns < 0) + return ns; + if (ns < 1 || ns > 4 || len != 6 + 2 * ns) + return -EINVAL; + + scan_header->num_components = ns; + + for (i = 0; i < ns; i++) { + struct v4l2_jpeg_scan_component_spec *component; + int cs, td_ta; + + cs = jpeg_get_byte(stream); + if (cs < 0) + return cs; + + td_ta = jpeg_get_byte(stream); + if (td_ta < 0) + return td_ta; + + component = &scan_header->component[i]; + component->component_selector = cs; + component->dc_entropy_coding_table_selector = + (td_ta >> 4) & 0xf; + component->ac_entropy_coding_table_selector = + td_ta & 0xf; + } + + skip = 3; /* skip Ss, Se, Ah, and Al */ + } else { + skip = len - 2; + } + + return jpeg_skip(stream, skip); +} + +/* B.2.4.1 Quantization table-specification syntax */ +static int jpeg_parse_quantization_tables(struct jpeg_stream *stream, + u8 precision, + struct v4l2_jpeg_reference *tables) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */ + if (len < 2 + 65) + return -EINVAL; + + len -= 2; + while (len >= 65) { + u8 pq, tq, *qk; + int ret; + int pq_tq = jpeg_get_byte(stream); + + if (pq_tq < 0) + return pq_tq; + + /* quantization table element precision */ + pq = (pq_tq >> 4) & 0xf; + /* + * Only 8-bit Qk values for 8-bit sample precision. Extended + * sequential DCT with 12-bit sample precision also supports + * 16-bit Qk values. 
+ */ + if (pq != 0 && (pq != 1 || precision != 12)) + return -EINVAL; + + /* quantization table destination identifier */ + tq = pq_tq & 0xf; + if (tq > 3) + return -EINVAL; + + /* quantization table element */ + qk = stream->curr; + ret = jpeg_skip(stream, pq ? 128 : 64); + if (ret < 0) + return -EINVAL; + + if (tables) { + tables[tq].start = qk; + tables[tq].length = pq ? 128 : 64; + } + + len -= pq ? 129 : 65; + } + + return 0; +} + +/* B.2.4.2 Huffman table-specification syntax */ +static int jpeg_parse_huffman_tables(struct jpeg_stream *stream, + struct v4l2_jpeg_reference *tables) +{ + int mt; + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + /* Table B.5 - Huffman table specification parameter sizes and values */ + if (len < 2 + 17) + return -EINVAL; + + for (len -= 2; len >= 17; len -= 17 + mt) { + u8 tc, th, *table; + int tc_th = jpeg_get_byte(stream); + int i, ret; + + if (tc_th < 0) + return tc_th; + + /* table class - 0 = DC, 1 = AC */ + tc = (tc_th >> 4) & 0xf; + if (tc > 1) + return -EINVAL; + + /* huffman table destination identifier */ + th = tc_th & 0xf; + /* only two Huffman tables for baseline DCT */ + if (th > 1) + return -EINVAL; + + /* BITS - number of Huffman codes with length i */ + table = stream->curr; + mt = 0; + for (i = 0; i < 16; i++) { + int li; + + li = jpeg_get_byte(stream); + if (li < 0) + return li; + + mt += li; + } + /* HUFFVAL - values associated with each Huffman code */ + ret = jpeg_skip(stream, mt); + if (ret < 0) + return ret; + + if (tables) { + tables[(tc << 1) | th].start = table; + tables[(tc << 1) | th].length = stream->curr - table; + } + } + + return jpeg_skip(stream, len - 2); +} + +/* B.2.4.4 Restart interval definition syntax */ +static int jpeg_parse_restart_interval(struct jpeg_stream *stream, + u16 *restart_interval) +{ + int len = jpeg_get_word_be(stream); + int ri; + + if (len < 0) + return len; + if (len != 4) + return -EINVAL; + + ri = jpeg_get_word_be(stream); + if (ri < 0) + return 
ri; + + *restart_interval = ri; + + return 0; +} + +static int jpeg_skip_segment(struct jpeg_stream *stream) +{ + int len = jpeg_get_word_be(stream); + + if (len < 0) + return len; + if (len < 2) + return -EINVAL; + + return jpeg_skip(stream, len - 2); +} + +/* Rec. ITU-T T.872 (06/2012) 6.5.3 */ +static int jpeg_parse_app14_data(struct jpeg_stream *stream, + enum v4l2_jpeg_app14_tf *tf) +{ + int ret; + int lp; + int skip; + + lp = jpeg_get_word_be(stream); + if (lp < 0) + return lp; + + /* Check for "Adobe\0" in Ap1..6 */ + if (stream->curr + 6 > stream->end || + strncmp(stream->curr, "Adobe\0", 6)) + return jpeg_skip(stream, lp - 2); + + /* get to Ap12 */ + ret = jpeg_skip(stream, 11); + if (ret < 0) + return ret; + + ret = jpeg_get_byte(stream); + if (ret < 0) + return ret; + + *tf = ret; + + /* skip the rest of the segment, this ensures at least it is complete */ + skip = lp - 2 - 11 - 1; + return jpeg_skip(stream, skip); +} + +/** + * v4l2_jpeg_parse_header - locate marker segments and optionally parse headers + * @buf: address of the JPEG buffer, should start with a SOI marker + * @len: length of the JPEG buffer + * @out: returns marker segment positions and optionally parsed headers + * + * The out->scan_header pointer must be initialized to NULL or point to a valid + * v4l2_jpeg_scan_header structure. The out->huffman_tables and + * out->quantization_tables pointers must be initialized to NULL or point to a + * valid array of 4 v4l2_jpeg_reference structures each. + * + * Returns 0 or negative error if parsing failed. 
+ */ +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out) +{ + struct jpeg_stream stream; + int marker; + int ret = 0; + + stream.curr = buf; + stream.end = stream.curr + len; + + out->num_dht = 0; + out->num_dqt = 0; + + /* the first bytes must be SOI, B.2.1 High-level syntax */ + if (jpeg_get_word_be(&stream) != SOI) + return -EINVAL; + + /* init value to signal if this marker is not present */ + out->app14_tf = V4L2_JPEG_APP14_TF_UNKNOWN; + + /* loop through marker segments */ + while ((marker = jpeg_next_marker(&stream)) >= 0) { + switch (marker) { + /* baseline DCT, extended sequential DCT */ + case SOF0 ... SOF1: + ret = jpeg_reference_segment(&stream, &out->sof); + if (ret < 0) + return ret; + ret = jpeg_parse_frame_header(&stream, marker, + &out->frame); + break; + /* progressive, lossless */ + case SOF2 ... SOF3: + /* differential coding */ + case SOF5 ... SOF7: + /* arithmetic coding */ + case SOF9 ... SOF11: + case SOF13 ... SOF15: + case DAC: + case TEM: + return -EINVAL; + + case DHT: + ret = jpeg_reference_segment(&stream, + &out->dht[out->num_dht++ % 4]); + if (ret < 0) + return ret; + if (!out->huffman_tables) { + ret = jpeg_skip_segment(&stream); + break; + } + ret = jpeg_parse_huffman_tables(&stream, + out->huffman_tables); + break; + case DQT: + ret = jpeg_reference_segment(&stream, + &out->dqt[out->num_dqt++ % 4]); + if (ret < 0) + return ret; + if (!out->quantization_tables) { + ret = jpeg_skip_segment(&stream); + break; + } + ret = jpeg_parse_quantization_tables(&stream, + out->frame.precision, + out->quantization_tables); + break; + case DRI: + ret = jpeg_parse_restart_interval(&stream, + &out->restart_interval); + break; + case APP14: + ret = jpeg_parse_app14_data(&stream, + &out->app14_tf); + break; + case SOS: + ret = jpeg_reference_segment(&stream, &out->sos); + if (ret < 0) + return ret; + ret = jpeg_parse_scan_header(&stream, out->scan); + /* + * stop parsing, the scan header marks the beginning of + * the 
entropy coded segment + */ + out->ecs_offset = stream.curr - (u8 *)buf; + return ret; + + /* markers without parameters */ + case RST0 ... RST7: /* restart */ + case SOI: /* start of image */ + case EOI: /* end of image */ + break; + + /* skip unknown or unsupported marker segments */ + default: + ret = jpeg_skip_segment(&stream); + break; + } + if (ret < 0) + return ret; + } + + return marker; +} +EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-mc.c b/6.18.0/drivers/media/v4l2-core/v4l2-mc.c new file mode 100644 index 0000000..937d358 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-mc.c @@ -0,0 +1,614 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * Media Controller ancillary functions + * + * Copyright (c) 2016 Mauro Carvalho Chehab + * Copyright (C) 2016 Shuah Khan + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int v4l2_mc_create_media_graph(struct media_device *mdev) + +{ + struct media_entity *entity; + struct media_entity *if_vid = NULL, *if_aud = NULL; + struct media_entity *tuner = NULL, *decoder = NULL; + struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; + bool is_webcam = false; + u32 flags; + int ret, pad_sink, pad_source; + + if (!mdev) + return 0; + + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_IF_VID_DECODER: + if_vid = entity; + break; + case MEDIA_ENT_F_IF_AUD_DECODER: + if_aud = entity; + break; + case MEDIA_ENT_F_TUNER: + tuner = entity; + break; + case MEDIA_ENT_F_ATV_DECODER: + decoder = entity; + break; + case MEDIA_ENT_F_IO_V4L: + io_v4l = entity; + break; + case MEDIA_ENT_F_IO_VBI: + io_vbi = entity; + break; + case MEDIA_ENT_F_IO_SWRADIO: + io_swradio = entity; + break; + case MEDIA_ENT_F_CAM_SENSOR: + is_webcam = true; + break; + } + } + + /* It should have at least one 
I/O entity */ + if (!io_v4l && !io_vbi && !io_swradio) { + dev_warn(mdev->dev, "Didn't find any I/O entity\n"); + return -EINVAL; + } + + /* + * Here, webcams are modelled on a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW are hidden. While such mapping is not enough + * for mc-centric hardware, it is enough for v4l2 interface centric + * PC-consumer's hardware. + */ + if (is_webcam) { + if (!io_v4l) { + dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n"); + return -EINVAL; + } + + media_device_for_each_entity(entity, mdev) { + if (entity->function != MEDIA_ENT_F_CAM_SENSOR) + continue; + ret = media_create_pad_link(entity, 0, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "Failed to create a sensor link\n"); + return ret; + } + } + if (!decoder) + return 0; + } + + /* The device isn't a webcam. So, it should have a decoder */ + if (!decoder) { + dev_warn(mdev->dev, "Decoder not found\n"); + return -EINVAL; + } + + /* Link the tuner and IF video output pads */ + if (tuner) { + if (if_vid) { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(if_vid, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + if_vid, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "Couldn't create tuner->PLL link)\n"); + return ret; + } + + pad_source = media_get_pad_index(if_vid, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "get decoder and/or PLL pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(if_vid, 
pad_source, + decoder, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link PLL to decoder\n"); + return ret; + } + } else { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_ANALOG); + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + decoder, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + if (if_aud) { + pad_source = media_get_pad_index(tuner, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_AUDIO); + pad_sink = media_get_pad_index(if_aud, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_AUDIO); + if (pad_source < 0 || pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n", + pad_source, pad_sink); + return -EINVAL; + } + ret = media_create_pad_link(tuner, pad_source, + if_aud, pad_sink, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n"); + return ret; + } + } else { + if_aud = tuner; + } + + } + + /* Create demod to V4L, VBI and SDR radio links */ + if (io_v4l) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n"); + return ret; + } + } + + if (io_swradio) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_swradio, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + 
dev_warn(mdev->dev, "couldn't link decoder output to SDR\n"); + return ret; + } + } + + if (io_vbi) { + pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DV); + if (pad_source < 0) { + dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n"); + return -EINVAL; + } + ret = media_create_pad_link(decoder, pad_source, + io_vbi, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_warn(mdev->dev, "couldn't link decoder output to VBI\n"); + return ret; + } + } + + /* Create links for the media connectors */ + flags = MEDIA_LNK_FL_ENABLED; + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_CONN_RF: + if (!tuner) + continue; + pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n"); + return -EINVAL; + } + ret = media_create_pad_link(entity, 0, tuner, + pad_sink, + flags); + break; + case MEDIA_ENT_F_CONN_SVIDEO: + case MEDIA_ENT_F_CONN_COMPOSITE: + pad_sink = media_get_pad_index(decoder, + MEDIA_PAD_FL_SINK, + PAD_SIGNAL_ANALOG); + if (pad_sink < 0) { + dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n"); + return -EINVAL; + } + ret = media_create_pad_link(entity, 0, decoder, + pad_sink, + flags); + break; + default: + continue; + } + if (ret) + return ret; + + flags = 0; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph); + +int v4l_enable_media_source(struct video_device *vdev) +{ + struct media_device *mdev = vdev->entity.graph_obj.mdev; + int ret = 0, err; + + if (!mdev) + return 0; + + mutex_lock(&mdev->graph_mutex); + if (!mdev->enable_source) + goto end; + err = mdev->enable_source(&vdev->entity, &vdev->pipe); + if (err) + ret = -EBUSY; +end: + mutex_unlock(&mdev->graph_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(v4l_enable_media_source); + +void v4l_disable_media_source(struct video_device *vdev) +{ + struct media_device *mdev = vdev->entity.graph_obj.mdev; + + 
if (mdev) { + mutex_lock(&mdev->graph_mutex); + if (mdev->disable_source) + mdev->disable_source(&vdev->entity); + mutex_unlock(&mdev->graph_mutex); + } +} +EXPORT_SYMBOL_GPL(v4l_disable_media_source); + +int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + struct v4l2_fh *fh = q->owner; + + if (fh && fh->vdev) + return v4l_enable_media_source(fh->vdev); + return 0; +} +EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source); + +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink, u32 flags) +{ + struct fwnode_handle *endpoint; + + if (!(sink->flags & MEDIA_PAD_FL_SINK)) + return -EINVAL; + + fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) { + struct fwnode_handle *remote_ep; + int src_idx, sink_idx, ret; + struct media_pad *src; + + src_idx = media_entity_get_fwnode_pad(&src_sd->entity, + endpoint, + MEDIA_PAD_FL_SOURCE); + if (src_idx < 0) { + dev_dbg(src_sd->dev, "no source pad found for %pfw\n", + endpoint); + continue; + } + + remote_ep = fwnode_graph_get_remote_endpoint(endpoint); + if (!remote_ep) { + dev_dbg(src_sd->dev, "no remote ep found for %pfw\n", + endpoint); + continue; + } + + /* + * ask the sink to verify it owns the remote endpoint, + * and translate to a sink pad. + */ + sink_idx = media_entity_get_fwnode_pad(sink->entity, + remote_ep, + MEDIA_PAD_FL_SINK); + fwnode_handle_put(remote_ep); + + if (sink_idx < 0 || sink_idx != sink->index) { + dev_dbg(src_sd->dev, + "sink pad index mismatch or error (is %d, expected %u)\n", + sink_idx, sink->index); + continue; + } + + /* + * the source endpoint corresponds to one of its source pads, + * the source endpoint connects to an endpoint at the sink + * entity, and the sink endpoint corresponds to the sink + * pad requested, so we have found an endpoint connection + * that works, create the media link for it. 
+ */ + + src = &src_sd->entity.pads[src_idx]; + + /* skip if link already exists */ + if (media_entity_find_link(src, sink)) { + dev_dbg(src_sd->dev, + "link %s:%d -> %s:%d already exists\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx); + continue; + } + + dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx); + + ret = media_create_pad_link(&src_sd->entity, src_idx, + sink->entity, sink_idx, flags); + if (ret) { + dev_err(src_sd->dev, + "link %s:%d -> %s:%d failed with %d\n", + src_sd->entity.name, src_idx, + sink->entity->name, sink_idx, ret); + + fwnode_handle_put(endpoint); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad); + +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd) +{ + unsigned int i; + + for (i = 0; i < sink_sd->entity.num_pads; i++) { + struct media_pad *pad = &sink_sd->entity.pads[i]; + int ret; + + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + continue; + + ret = v4l2_create_fwnode_links_to_pad(src_sd, pad, 0); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links); + +/* ----------------------------------------------------------------------------- + * Pipeline power management + * + * Entities must be powered up when part of a pipeline that contains at least + * one open video device node. + * + * To achieve this use the entity use_count field to track the number of users. + * For entities corresponding to video device nodes the use_count field stores + * the users count of the node. For entities corresponding to subdevs the + * use_count field stores the total number of users of all video device nodes + * in the pipeline. + * + * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and + * close() handlers of video device nodes. It increments or decrements the use + * count of all subdev entities in the pipeline. 
+ * + * To react to link management on powered pipelines, the link setup notification + * callback updates the use count of all entities in the source and sink sides + * of the link. + */ + +/* + * pipeline_pm_use_count - Count the number of users of a pipeline + * @entity: The entity + * + * Return the total number of users of all video device nodes in the pipeline. + */ +static int pipeline_pm_use_count(struct media_entity *entity, + struct media_graph *graph) +{ + int use = 0; + + media_graph_walk_start(graph, entity); + + while ((entity = media_graph_walk_next(graph))) { + if (is_media_entity_v4l2_video_device(entity)) + use += entity->use_count; + } + + return use; +} + +/* + * pipeline_pm_power_one - Apply power change to an entity + * @entity: The entity + * @change: Use count change + * + * Change the entity use count by @change. If the entity is a subdev update its + * power state by calling the core::s_power operation when the use count goes + * from 0 to != 0 or from != 0 to 0. + * + * Return 0 on success or a negative error code on failure. + */ +static int pipeline_pm_power_one(struct media_entity *entity, int change) +{ + struct v4l2_subdev *subdev; + int ret; + + subdev = is_media_entity_v4l2_subdev(entity) + ? media_entity_to_v4l2_subdev(entity) : NULL; + + if (entity->use_count == 0 && change > 0 && subdev != NULL) { + ret = v4l2_subdev_call(subdev, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + } + + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + if (entity->use_count == 0 && change < 0 && subdev != NULL) + v4l2_subdev_call(subdev, core, s_power, 0); + + return 0; +} + +/* + * pipeline_pm_power - Apply power change to all entities in a pipeline + * @entity: The entity + * @change: Use count change + * + * Walk the pipeline to update the use count and the power state of all non-node + * entities. + * + * Return 0 on success or a negative error code on failure. 
+ */ +static int pipeline_pm_power(struct media_entity *entity, int change, + struct media_graph *graph) +{ + struct media_entity *first = entity; + int ret = 0; + + if (!change) + return 0; + + media_graph_walk_start(graph, entity); + + while (!ret && (entity = media_graph_walk_next(graph))) + if (is_media_entity_v4l2_subdev(entity)) + ret = pipeline_pm_power_one(entity, change); + + if (!ret) + return ret; + + media_graph_walk_start(graph, first); + + while ((first = media_graph_walk_next(graph)) + && first != entity) + if (is_media_entity_v4l2_subdev(first)) + pipeline_pm_power_one(first, -change); + + return ret; +} + +static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use) +{ + struct media_device *mdev = entity->graph_obj.mdev; + int change = use ? 1 : -1; + int ret; + + mutex_lock(&mdev->graph_mutex); + + /* Apply use count to node. */ + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + /* Apply power change to connected non-nodes. */ + ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk); + if (ret < 0) + entity->use_count -= change; + + mutex_unlock(&mdev->graph_mutex); + + return ret; +} + +int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return v4l2_pipeline_pm_use(entity, 1); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get); + +void v4l2_pipeline_pm_put(struct media_entity *entity) +{ + /* Powering off entities shouldn't fail. 
*/ + WARN_ON(v4l2_pipeline_pm_use(entity, 0)); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put); + +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk; + struct media_entity *source = link->source->entity; + struct media_entity *sink = link->sink->entity; + int source_use; + int sink_use; + int ret = 0; + + source_use = pipeline_pm_use_count(source, graph); + sink_use = pipeline_pm_use_count(sink, graph); + + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && + !(flags & MEDIA_LNK_FL_ENABLED)) { + /* Powering off entities is assumed to never fail. */ + pipeline_pm_power(source, -sink_use, graph); + pipeline_pm_power(sink, -source_use, graph); + return 0; + } + + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && + (flags & MEDIA_LNK_FL_ENABLED)) { + + ret = pipeline_pm_power(source, sink_use, graph); + if (ret < 0) + return ret; + + ret = pipeline_pm_power(sink, source_use, graph); + if (ret < 0) + pipeline_pm_power(source, -sink_use, graph); + } + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-mem2mem.c b/6.18.0/drivers/media/v4l2-core/v4l2-mem2mem.c new file mode 100644 index 0000000..21acd9b --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -0,0 +1,1639 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Memory-to-memory device framework for Video for Linux 2 and vb2. + * + * Helper functions for devices that use vb2 buffers for both their + * source and destination. + * + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 
+ * Pawel Osciak, + * Marek Szyprowski, + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("Mem to mem device framework for vb2"); +MODULE_AUTHOR("Pawel Osciak, "); +MODULE_LICENSE("GPL"); + +static bool debug; +module_param(debug, bool, 0644); + +#define dprintk(fmt, arg...) \ + do { \ + if (debug) \ + printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ + } while (0) + + +/* Instance is already queued on the job_queue */ +#define TRANS_QUEUED (1 << 0) +/* Instance is currently running in hardware */ +#define TRANS_RUNNING (1 << 1) +/* Instance is currently aborting */ +#define TRANS_ABORT (1 << 2) + + +/* The job queue is not running new jobs */ +#define QUEUE_PAUSED (1 << 0) + + +/* Offset base for buffers on the destination queue - used to distinguish + * between source and destination buffers when mmapping - they receive the same + * offsets but for different queues */ +#define DST_QUEUE_OFF_BASE (1 << 30) + +enum v4l2_m2m_entity_type { + MEM2MEM_ENT_TYPE_SOURCE, + MEM2MEM_ENT_TYPE_SINK, + MEM2MEM_ENT_TYPE_PROC +}; + +static const char * const m2m_entity_name[] = { + "source", + "sink", + "proc" +}; + +/** + * struct v4l2_m2m_dev - per-device context + * @source: &struct media_entity pointer with the source entity + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @source_pad: &struct media_pad with the source pad. + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @sink: &struct media_entity pointer with the sink entity + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @sink_pad: &struct media_pad with the sink pad. + * Used only when the M2M device is registered via + * v4l2_m2m_register_media_controller(). + * @proc: &struct media_entity pointer with the M2M device itself. + * @proc_pads: &struct media_pad with the @proc pads. 
+ * Used only when the M2M device is registered via + * v4l2_m2m_unregister_media_controller(). + * @intf_devnode: &struct media_intf devnode pointer with the interface + * with controls the M2M device. + * @curr_ctx: currently running instance + * @job_queue: instances queued to run + * @job_spinlock: protects job_queue + * @job_work: worker to run queued jobs. + * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED. + * @m2m_ops: driver callbacks + */ +struct v4l2_m2m_dev { + struct v4l2_m2m_ctx *curr_ctx; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_entity *source; + struct media_pad source_pad; + struct media_entity sink; + struct media_pad sink_pad; + struct media_entity proc; + struct media_pad proc_pads[2]; + struct media_intf_devnode *intf_devnode; +#endif + + struct list_head job_queue; + spinlock_t job_spinlock; + struct work_struct job_work; + unsigned long job_queue_flags; + + const struct v4l2_m2m_ops *m2m_ops; +}; + +static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + if (V4L2_TYPE_IS_OUTPUT(type)) + return &m2m_ctx->out_q_ctx; + else + return &m2m_ctx->cap_q_ctx; +} + +struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct v4l2_m2m_queue_ctx *q_ctx; + + q_ctx = get_queue_ctx(m2m_ctx, type); + if (!q_ctx) + return NULL; + + return &q_ctx->q; +} +EXPORT_SYMBOL(v4l2_m2m_get_vq); + +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + + b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); + +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) +{ 
+ struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + + b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); + +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + if (list_empty(&q_ctx->rdy_queue)) { + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + return NULL; + } + b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return &b->vb; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); + +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + b = container_of(vbuf, struct v4l2_m2m_buffer, vb); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf); + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx) + +{ + struct v4l2_m2m_buffer *b, *tmp; + struct vb2_v4l2_buffer *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { + if (b->vb.vb2_buf.index == idx) { + list_del(&b->list); + q_ctx->num_rdy--; + ret = &b->vb; + break; + } + } + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx); + +/* + * Scheduling handlers + */ + +void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev 
*m2m_dev) +{ + unsigned long flags; + void *ret = NULL; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + if (m2m_dev->curr_ctx) + ret = m2m_dev->curr_ctx->priv; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); + +/** + * v4l2_m2m_try_run() - select next job to perform and run it if possible + * @m2m_dev: per-device context + * + * Get next transaction (if present) from the waiting jobs list and run it. + * + * Note that this function can run on a given v4l2_m2m_ctx context, + * but call .device_run for another context. + */ +static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + if (NULL != m2m_dev->curr_ctx) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("Another instance is running, won't run now\n"); + return; + } + + if (list_empty(&m2m_dev->job_queue)) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("No job pending\n"); + return; + } + + if (m2m_dev->job_queue_flags & QUEUE_PAUSED) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("Running new jobs is paused\n"); + return; + } + + m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, + struct v4l2_m2m_ctx, queue); + m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx); + m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); +} + +/* + * __v4l2_m2m_try_queue() - queue a job + * @m2m_dev: m2m device + * @m2m_ctx: m2m context + * + * Check if this context is ready to queue a job. + * + * This function can run in interrupt context. 
+ */ +static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned long flags_job; + struct vb2_v4l2_buffer *dst, *src; + + dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); + + if (!m2m_ctx->out_q_ctx.q.streaming || + (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) { + if (!m2m_ctx->ignore_cap_streaming) + dprintk("Streaming needs to be on for both queues\n"); + else + dprintk("Streaming needs to be on for the OUTPUT queue\n"); + return; + } + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + + /* If the context is aborted then don't schedule it */ + if (m2m_ctx->job_flags & TRANS_ABORT) { + dprintk("Aborted context\n"); + goto job_unlock; + } + + if (m2m_ctx->job_flags & TRANS_QUEUED) { + dprintk("On job queue already\n"); + goto job_unlock; + } + + src = v4l2_m2m_next_src_buf(m2m_ctx); + dst = v4l2_m2m_next_dst_buf(m2m_ctx); + if (!src && !m2m_ctx->out_q_ctx.buffered) { + dprintk("No input buffers available\n"); + goto job_unlock; + } + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { + dprintk("No output buffers available\n"); + goto job_unlock; + } + + m2m_ctx->new_frame = true; + + if (src && dst && dst->is_held && + dst->vb2_buf.copied_timestamp && + dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { + dprintk("Timestamp mismatch, returning held capture buffer\n"); + dst->is_held = false; + v4l2_m2m_dst_buf_remove(m2m_ctx); + v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); + dst = v4l2_m2m_next_dst_buf(m2m_ctx); + + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { + dprintk("No output buffers available after returning held buffer\n"); + goto job_unlock; + } + } + + if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) + m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || + dst->vb2_buf.timestamp != src->vb2_buf.timestamp; + + if (m2m_ctx->has_stopped) { + dprintk("Device has stopped\n"); + goto job_unlock; + } + + if 
(m2m_dev->m2m_ops->job_ready + && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { + dprintk("Driver not ready\n"); + goto job_unlock; + } + + list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); + m2m_ctx->job_flags |= TRANS_QUEUED; + +job_unlock: + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); +} + +/** + * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context + * @m2m_ctx: m2m context + * + * Check if this context is ready to queue a job. If suitable, + * run the next queued job on the mem2mem device. + * + * This function shouldn't run in interrupt context. + * + * Note that v4l2_m2m_try_schedule() can schedule one job for this context, + * and then run another job for another context. + */ +void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; + + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); + v4l2_m2m_try_run(m2m_dev); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule); + +/** + * v4l2_m2m_device_run_work() - run pending jobs for the context + * @work: Work structure used for scheduling the execution of this function. 
+ */ +static void v4l2_m2m_device_run_work(struct work_struct *work) +{ + struct v4l2_m2m_dev *m2m_dev = + container_of(work, struct v4l2_m2m_dev, job_work); + + v4l2_m2m_try_run(m2m_dev); +} + +/** + * v4l2_m2m_cancel_job() - cancel pending jobs for the context + * @m2m_ctx: m2m context with jobs to be canceled + * + * In case of streamoff or release called on any context, + * 1] If the context is currently running, then abort job will be called + * 2] If the context is queued, then the context will be removed from + * the job_queue + */ +static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct v4l2_m2m_dev *m2m_dev; + unsigned long flags; + + m2m_dev = m2m_ctx->m2m_dev; + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + + m2m_ctx->job_flags |= TRANS_ABORT; + if (m2m_ctx->job_flags & TRANS_RUNNING) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + if (m2m_dev->m2m_ops->job_abort) + m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); + dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); + wait_event(m2m_ctx->finished, + !(m2m_ctx->job_flags & TRANS_RUNNING)); + } else if (m2m_ctx->job_flags & TRANS_QUEUED) { + list_del(&m2m_ctx->queue); + m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + dprintk("m2m_ctx: %p had been on queue and was removed\n", + m2m_ctx); + } else { + /* Do nothing, was not on queue/running */ + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + } +} + +/* + * Schedule the next job, called from v4l2_m2m_job_finish() or + * v4l2_m2m_buf_done_and_job_finish(). + */ +static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + /* + * This instance might have more buffers ready, but since we do not + * allow more than one job on the job_queue per instance, each has + * to be scheduled separately after the previous one finishes. 
+ */ + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); + + /* + * We might be running in atomic context, + * but the job must be run in non-atomic context. + */ + schedule_work(&m2m_dev->job_work); +} + +/* + * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or + * v4l2_m2m_buf_done_and_job_finish(). + */ +static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { + dprintk("Called by an instance not currently running\n"); + return false; + } + + list_del(&m2m_dev->curr_ctx->queue); + m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); + wake_up(&m2m_dev->curr_ctx->finished); + m2m_dev->curr_ctx = NULL; + return true; +} + +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned long flags; + bool schedule_next; + + /* + * This function should not be used for drivers that support + * holding capture buffers. Those should use + * v4l2_m2m_buf_done_and_job_finish() instead. 
+ */ + WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF); + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (schedule_next) + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); +} +EXPORT_SYMBOL(v4l2_m2m_job_finish); + +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx, + enum vb2_buffer_state state) +{ + struct vb2_v4l2_buffer *src_buf, *dst_buf; + bool schedule_next = false; + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + src_buf = v4l2_m2m_src_buf_remove(m2m_ctx); + dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx); + + if (WARN_ON(!src_buf || !dst_buf)) + goto unlock; + dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + if (!dst_buf->is_held) { + v4l2_m2m_dst_buf_remove(m2m_ctx); + v4l2_m2m_buf_done(dst_buf, state); + } + /* + * If the request API is being used, returning the OUTPUT + * (src) buffer will wake-up any process waiting on the + * request file descriptor. + * + * Therefore, return the CAPTURE (dst) buffer first, + * to avoid signalling the request file descriptor + * before the CAPTURE buffer is done. 
+ */ + v4l2_m2m_buf_done(src_buf, state); + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); +unlock: + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (schedule_next) + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); +} +EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); + +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + struct v4l2_m2m_ctx *curr_ctx; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + m2m_dev->job_queue_flags |= QUEUE_PAUSED; + curr_ctx = m2m_dev->curr_ctx; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + if (curr_ctx) + wait_event(curr_ctx->finished, + !(curr_ctx->job_flags & TRANS_RUNNING)); +} +EXPORT_SYMBOL(v4l2_m2m_suspend); + +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + v4l2_m2m_try_run(m2m_dev); +} +EXPORT_SYMBOL(v4l2_m2m_resume); + +int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_requestbuffers *reqbufs) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); + ret = vb2_reqbufs(vq, reqbufs); + /* If count == 0, then the owner has released all buffers and he + is no longer owner of the queue. Otherwise we have an owner. */ + if (ret == 0) + vq->owner = reqbufs->count ? 
file->private_data : NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); + +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) +{ + /* Adjust MMAP memory offsets for the CAPTURE queue */ + if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { + if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + + for (i = 0; i < buf->length; ++i) + buf->m.planes[i].m.mem_offset + += DST_QUEUE_OFF_BASE; + } else { + buf->m.offset += DST_QUEUE_OFF_BASE; + } + } +} + +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); + +/* + * This will add the LAST flag and mark the buffer management + * state as stopped. + * This is called when the last capture buffer must be flagged as LAST + * in draining mode from the encoder/decoder driver buf_queue() callback + * or from v4l2_update_last_buf_state() when a capture buffer is available. 
+ */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + vbuf->flags |= V4L2_BUF_FLAG_LAST; + vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); + + v4l2_m2m_mark_stopped(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); + +/* When stop command is issued, update buffer management state */ +static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct vb2_v4l2_buffer *next_dst_buf; + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + return 0; + + m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); + m2m_ctx->is_draining = true; + + /* + * The processing of the last output buffer queued before + * the STOP command is expected to mark the buffer management + * state as stopped with v4l2_m2m_mark_stopped(). + */ + if (m2m_ctx->last_src_buf) + return 0; + + /* + * In case the output queue is empty, try to mark the last capture + * buffer as LAST. + */ + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) { + /* + * Wait for the next queued one in encoder/decoder driver + * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() + * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet + * streaming. 
+ */ + m2m_ctx->next_buf_last = true; + return 0; + } + + v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); + + return 0; +} + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder drivers start_streaming() + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + /* If start streaming again, untag the last output buffer */ + if (V4L2_TYPE_IS_OUTPUT(q->type)) + m2m_ctx->last_src_buf = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder driver stop_streaming() + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + if (V4L2_TYPE_IS_OUTPUT(q->type)) { + /* + * If in draining state, either mark next dst buffer as + * done or flag next one to be marked as done either + * in encoder/decoder driver buf_queue() callback using + * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() + * if encoder/decoder is not yet streaming + */ + if (m2m_ctx->is_draining) { + struct vb2_v4l2_buffer *next_dst_buf; + + m2m_ctx->last_src_buf = NULL; + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) + m2m_ctx->next_buf_last = true; + else + v4l2_m2m_last_buffer_done(m2m_ctx, + next_dst_buf); + } + } else { + v4l2_m2m_clear_state(m2m_ctx); + } +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); + +static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vbuf; + unsigned int i; + + if (WARN_ON(q->is_output)) + return; + if (list_empty(&q->queued_list)) + return; + + vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); + for (i = 0; i < vb->num_planes; i++) + vb2_set_plane_payload(vb, i, 0); + + /* + * Since the buffer hasn't been queued to the ready queue, + * mark is active and 
owned before marking it LAST and DONE + */ + vb->state = VB2_BUF_STATE_ACTIVE; + atomic_inc(&q->owned_by_drv_count); + + vbuf = to_vb2_v4l2_buffer(vb); + vbuf->field = V4L2_FIELD_NONE; + + v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); +} + +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + if (V4L2_TYPE_IS_CAPTURE(vq->type) && + (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { + dprintk("%s: requests cannot be used with capture buffers\n", + __func__); + return -EPERM; + } + + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + /* + * If the capture queue is streaming, but streaming hasn't started + * on the device, but was asked to stop, mark the previously queued + * buffer as DONE with LAST flag since it won't be queued on the + * device. 
+ */ + if (V4L2_TYPE_IS_CAPTURE(vq->type) && + vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && + (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) + v4l2_m2m_force_last_buf_done(m2m_ctx, vq); + else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) + v4l2_m2m_try_schedule(m2m_ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); + +int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); + +int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); + +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); + return vb2_create_bufs(vq, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); + +int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_exportbuffer *eb) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); + return vb2_expbuf(vq, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); + +int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, type); + ret = vb2_streamon(vq, type); + if 
(!ret) + v4l2_m2m_try_schedule(m2m_ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); + +int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type) +{ + struct v4l2_m2m_dev *m2m_dev; + struct v4l2_m2m_queue_ctx *q_ctx; + unsigned long flags_job, flags; + int ret; + + /* wait until the current context is dequeued from job_queue */ + v4l2_m2m_cancel_job(m2m_ctx); + + q_ctx = get_queue_ctx(m2m_ctx, type); + ret = vb2_streamoff(&q_ctx->q, type); + if (ret) + return ret; + + m2m_dev = m2m_ctx->m2m_dev; + spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + /* We should not be scheduled anymore, since we're dropping a queue. */ + if (m2m_ctx->job_flags & TRANS_QUEUED) + list_del(&m2m_ctx->queue); + m2m_ctx->job_flags = 0; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + /* Drop queue, since streamoff returns device to the same state as after + * calling reqbufs. */ + INIT_LIST_HEAD(&q_ctx->rdy_queue); + q_ctx->num_rdy = 0; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + if (m2m_dev->curr_ctx == m2m_ctx) { + m2m_dev->curr_ctx = NULL; + wake_up(&m2m_ctx->finished); + } + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); + +static __poll_t v4l2_m2m_poll_for_data(struct file *file, + struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait) +{ + struct vb2_queue *src_q, *dst_q; + __poll_t rc = 0; + unsigned long flags; + + src_q = v4l2_m2m_get_src_vq(m2m_ctx); + dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + + /* + * There has to be at least one buffer queued on each queued_list, which + * means either in driver already or waiting for driver to claim it + * and start processing. 
+ */ + if ((!vb2_is_streaming(src_q) || src_q->error || + list_empty(&src_q->queued_list)) && + (!vb2_is_streaming(dst_q) || dst_q->error || + (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) + return EPOLLERR; + + spin_lock_irqsave(&src_q->done_lock, flags); + if (!list_empty(&src_q->done_list)) + rc |= EPOLLOUT | EPOLLWRNORM; + spin_unlock_irqrestore(&src_q->done_lock, flags); + + spin_lock_irqsave(&dst_q->done_lock, flags); + /* + * If the last buffer was dequeued from the capture queue, signal + * userspace. DQBUF(CAPTURE) will return -EPIPE. + */ + if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued) + rc |= EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&dst_q->done_lock, flags); + + return rc; +} + +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx); + struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + __poll_t req_events = poll_requested_events(wait); + __poll_t rc = 0; + + /* + * poll_wait() MUST be called on the first invocation on all the + * potential queues of interest, even if we are not interested in their + * events during this first call. Failure to do so will result in + * queue's events to be ignored because the poll_table won't be capable + * of adding new wait queues thereafter. 
+ */ + poll_wait(file, &src_q->done_wq, wait); + poll_wait(file, &dst_q->done_wq, wait); + + if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)) + rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + rc |= EPOLLPRI; + + return rc; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_poll); + +int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct vm_area_struct *vma) +{ + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + struct vb2_queue *vq; + + if (offset < DST_QUEUE_OFF_BASE) { + vq = v4l2_m2m_get_src_vq(m2m_ctx); + } else { + vq = v4l2_m2m_get_dst_vq(m2m_ctx); + vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } + + return vb2_mmap(vq, vma); +} +EXPORT_SYMBOL(v4l2_m2m_mmap); + +#ifndef CONFIG_MMU +unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + unsigned long offset = pgoff << PAGE_SHIFT; + struct vb2_queue *vq; + + if (offset < DST_QUEUE_OFF_BASE) { + vq = v4l2_m2m_get_src_vq(fh->m2m_ctx); + } else { + vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx); + pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } + + return vb2_get_unmapped_area(vq, addr, len, pgoff, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area); +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) +void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) +{ + media_remove_intf_links(&m2m_dev->intf_devnode->intf); + media_devnode_remove(m2m_dev->intf_devnode); + + media_entity_remove_links(m2m_dev->source); + media_entity_remove_links(&m2m_dev->sink); + media_entity_remove_links(&m2m_dev->proc); + media_device_unregister_entity(m2m_dev->source); + media_device_unregister_entity(&m2m_dev->sink); + media_device_unregister_entity(&m2m_dev->proc); + kfree(m2m_dev->source->name); + kfree(m2m_dev->sink.name); + kfree(m2m_dev->proc.name); +} 
+EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller); + +static int v4l2_m2m_register_entity(struct media_device *mdev, + struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type, + struct video_device *vdev, int function) +{ + struct media_entity *entity; + struct media_pad *pads; + char *name; + unsigned int len; + int num_pads; + int ret; + + switch (type) { + case MEM2MEM_ENT_TYPE_SOURCE: + entity = m2m_dev->source; + pads = &m2m_dev->source_pad; + pads[0].flags = MEDIA_PAD_FL_SOURCE; + num_pads = 1; + break; + case MEM2MEM_ENT_TYPE_SINK: + entity = &m2m_dev->sink; + pads = &m2m_dev->sink_pad; + pads[0].flags = MEDIA_PAD_FL_SINK; + num_pads = 1; + break; + case MEM2MEM_ENT_TYPE_PROC: + entity = &m2m_dev->proc; + pads = m2m_dev->proc_pads; + pads[0].flags = MEDIA_PAD_FL_SINK; + pads[1].flags = MEDIA_PAD_FL_SOURCE; + num_pads = 2; + break; + default: + return -EINVAL; + } + + entity->obj_type = MEDIA_ENTITY_TYPE_BASE; + if (type != MEM2MEM_ENT_TYPE_PROC) { + entity->info.dev.major = VIDEO_MAJOR; + entity->info.dev.minor = vdev->minor; + } + len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]); + name = kmalloc(len, GFP_KERNEL); + if (!name) + return -ENOMEM; + snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]); + entity->name = name; + entity->function = function; + + ret = media_entity_pads_init(entity, num_pads, pads); + if (ret) { + kfree(entity->name); + entity->name = NULL; + return ret; + } + ret = media_device_register_entity(mdev, entity); + if (ret) { + kfree(entity->name); + entity->name = NULL; + return ret; + } + + return 0; +} + +int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function) +{ + struct media_device *mdev = vdev->v4l2_dev->mdev; + struct media_link *link; + int ret; + + if (!mdev) + return 0; + + /* A memory-to-memory device consists in two + * DMA engine and one video processing entities. 
+ * The DMA engine entities are linked to a V4L interface + */ + + /* Create the three entities with their pads */ + m2m_dev->source = &vdev->entity; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L); + if (ret) + return ret; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_PROC, vdev, function); + if (ret) + goto err_rel_entity0; + ret = v4l2_m2m_register_entity(mdev, m2m_dev, + MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L); + if (ret) + goto err_rel_entity1; + + /* Connect the three entities */ + ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret) + goto err_rel_entity2; + + ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret) + goto err_rm_links0; + + /* Create video interface */ + m2m_dev->intf_devnode = media_devnode_create(mdev, + MEDIA_INTF_T_V4L_VIDEO, 0, + VIDEO_MAJOR, vdev->minor); + if (!m2m_dev->intf_devnode) { + ret = -ENOMEM; + goto err_rm_links1; + } + + /* Connect the two DMA engines to the interface */ + link = media_create_intf_link(m2m_dev->source, + &m2m_dev->intf_devnode->intf, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (!link) { + ret = -ENOMEM; + goto err_rm_devnode; + } + + link = media_create_intf_link(&m2m_dev->sink, + &m2m_dev->intf_devnode->intf, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (!link) { + ret = -ENOMEM; + goto err_rm_intf_link; + } + return 0; + +err_rm_intf_link: + media_remove_intf_links(&m2m_dev->intf_devnode->intf); +err_rm_devnode: + media_devnode_remove(m2m_dev->intf_devnode); +err_rm_links1: + media_entity_remove_links(&m2m_dev->sink); +err_rm_links0: + media_entity_remove_links(&m2m_dev->proc); + media_entity_remove_links(m2m_dev->source); +err_rel_entity2: + media_device_unregister_entity(&m2m_dev->proc); + kfree(m2m_dev->proc.name); +err_rel_entity1: + 
media_device_unregister_entity(&m2m_dev->sink); + kfree(m2m_dev->sink.name); +err_rel_entity0: + media_device_unregister_entity(m2m_dev->source); + kfree(m2m_dev->source->name); + return ret; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller); +#endif + +struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) +{ + struct v4l2_m2m_dev *m2m_dev; + + if (!m2m_ops || WARN_ON(!m2m_ops->device_run)) + return ERR_PTR(-EINVAL); + + m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); + if (!m2m_dev) + return ERR_PTR(-ENOMEM); + + m2m_dev->curr_ctx = NULL; + m2m_dev->m2m_ops = m2m_ops; + INIT_LIST_HEAD(&m2m_dev->job_queue); + spin_lock_init(&m2m_dev->job_spinlock); + INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); + + return m2m_dev; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_init); + +void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) +{ + kfree(m2m_dev); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_release); + +struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, + void *drv_priv, + int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) +{ + struct v4l2_m2m_ctx *m2m_ctx; + struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; + int ret; + + m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); + if (!m2m_ctx) + return ERR_PTR(-ENOMEM); + + m2m_ctx->priv = drv_priv; + m2m_ctx->m2m_dev = m2m_dev; + init_waitqueue_head(&m2m_ctx->finished); + + out_q_ctx = &m2m_ctx->out_q_ctx; + cap_q_ctx = &m2m_ctx->cap_q_ctx; + + INIT_LIST_HEAD(&out_q_ctx->rdy_queue); + INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); + spin_lock_init(&out_q_ctx->rdy_spinlock); + spin_lock_init(&cap_q_ctx->rdy_spinlock); + + INIT_LIST_HEAD(&m2m_ctx->queue); + + ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); + + if (ret) + goto err; + /* + * Both queues should use same the mutex to lock the m2m context. + * This lock is used in some v4l2_m2m_* helpers. 
+ */ + if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { + ret = -EINVAL; + goto err; + } + m2m_ctx->q_lock = out_q_ctx->q.lock; + + return m2m_ctx; +err: + kfree(m2m_ctx); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); + +void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) +{ + /* wait until the current context is dequeued from job_queue */ + v4l2_m2m_cancel_job(m2m_ctx); + + vb2_queue_release(&m2m_ctx->cap_q_ctx.q); + vb2_queue_release(&m2m_ctx->out_q_ctx.q); + + kfree(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); + +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b = container_of(vbuf, + struct v4l2_m2m_buffer, vb); + struct v4l2_m2m_queue_ctx *q_ctx; + unsigned long flags; + + q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); + if (!q_ctx) + return; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_add_tail(&b->list, &q_ctx->rdy_queue); + q_ctx->num_rdy++; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); + +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags) +{ + u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + + if (copy_frame_flags) + mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | + V4L2_BUF_FLAG_BFRAME; + + cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; + + if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) + cap_vb->timecode = out_vb->timecode; + cap_vb->field = out_vb->field; + cap_vb->flags &= ~mask; + cap_vb->flags |= out_vb->flags & mask; + cap_vb->vb2_buf.copied_timestamp = 1; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); + +void v4l2_m2m_request_queue(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + struct v4l2_m2m_ctx *m2m_ctx = NULL; + + /* + * Queue all objects. 
Note that buffer objects are at the end of the + * objects list, after all other object types. Once buffer objects + * are queued, the driver might delete them immediately (if the driver + * processes the buffer at once), so we have to use + * list_for_each_entry_safe() to handle the case where the object we + * queue is deleted. + */ + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { + struct v4l2_m2m_ctx *m2m_ctx_obj; + struct vb2_buffer *vb; + + if (!obj->ops->queue) + continue; + + if (vb2_request_object_is_buffer(obj)) { + /* Sanity checks */ + vb = container_of(obj, struct vb2_buffer, req_obj); + WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); + m2m_ctx_obj = container_of(vb->vb2_queue, + struct v4l2_m2m_ctx, + out_q_ctx.q); + WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); + m2m_ctx = m2m_ctx_obj; + } + + /* + * The buffer we queue here can in theory be immediately + * unbound, hence the use of list_for_each_entry_safe() + * above and why we call the queue op last. + */ + obj->ops->queue(obj); + } + + WARN_ON(!m2m_ctx); + + if (m2m_ctx) + v4l2_m2m_try_schedule(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); + +/* Videobuf2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs); + +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs); + +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *remove) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type); + + if (!q) + return -EINVAL; + if (q->type != remove->type) + return -EINVAL; + + 
return vb2_core_remove_bufs(q, remove->index, remove->count); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs); + +int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf); + +int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf); + +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf); + +int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf); + +int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf); + +int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, + enum v4l2_buf_type type) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_streamon(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon); + +int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, + enum v4l2_buf_type type) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); + +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + ec->flags = 0; + 
return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd); + +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + dc->flags = 0; + + if (dc->cmd == V4L2_DEC_CMD_STOP) { + dc->stop.pts = 0; + } else if (dc->cmd == V4L2_DEC_CMD_START) { + dc->start.speed = 0; + dc->start.format = V4L2_DEC_START_FMT_NONE; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); + +/* + * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START + * Should be called from the encoder driver encoder_cmd() callback + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + if (ec->cmd == V4L2_ENC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); + +/* + * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START + * Should be called from the decoder driver decoder_cmd() callback + */ +int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + if (dc->cmd == V4L2_DEC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); + +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); + +int v4l2_m2m_ioctl_decoder_cmd(struct file 
*file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); + +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_FLUSH) + return -EINVAL; + + dc->flags = 0; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd); + +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + struct vb2_v4l2_buffer *out_vb, *cap_vb; + struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; + unsigned long flags; + int ret; + + ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc); + if (ret < 0) + return ret; + + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx); + cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx); + + /* + * If there is an out buffer pending, then clear any HOLD flag. + * + * By clearing this flag we ensure that when this output + * buffer is processed any held capture buffer will be released. + */ + if (out_vb) { + out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + } else if (cap_vb && cap_vb->is_held) { + /* + * If there were no output buffers, but there is a + * capture buffer that is held, then release that + * buffer. + */ + cap_vb->is_held = false; + v4l2_m2m_dst_buf_remove(fh->m2m_ctx); + v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE); + } + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd); + +/* + * v4l2_file_operations helpers. It is assumed here same lock is used + * for the output and the capture buffer queue. 
+ */ + +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap); + +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait) +{ + struct v4l2_fh *fh = file_to_v4l2_fh(file); + struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; + __poll_t ret; + + if (m2m_ctx->q_lock) + mutex_lock(m2m_ctx->q_lock); + + ret = v4l2_m2m_poll(file, m2m_ctx, wait); + + if (m2m_ctx->q_lock) + mutex_unlock(m2m_ctx->q_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll); + diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-spi.c b/6.18.0/drivers/media/v4l2-core/v4l2-spi.c new file mode 100644 index 0000000..1baf8e6 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-spi.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * v4l2-spi - SPI helpers for Video4Linux2 + */ + +#include +#include +#include +#include + +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{ + struct spi_device *spi = v4l2_get_subdevdata(sd); + + if (spi && !spi->dev.of_node && !spi->dev.fwnode) + spi_unregister_device(spi); +} + +void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_IS_SPI; + /* the owner is the same as the spi_device's driver owner */ + sd->owner = spi->dev.driver->owner; + sd->dev = &spi->dev; + /* spi_device and v4l2_subdev point to one another */ + v4l2_set_subdevdata(sd, spi); + spi_set_drvdata(spi, sd); + /* initialize name */ + snprintf(sd->name, sizeof(sd->name), "%s %s", + spi->dev.driver->name, dev_name(&spi->dev)); +} +EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init); + +struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, + struct spi_board_info *info) +{ + struct v4l2_subdev *sd = NULL; + struct spi_device *spi = NULL; + + if (!v4l2_dev) + return 
NULL; + if (info->modalias[0]) + request_module(info->modalias); + + spi = spi_new_device(ctlr, info); + + if (!spi || !spi->dev.driver) + goto error; + + if (!try_module_get(spi->dev.driver->owner)) + goto error; + + sd = spi_get_drvdata(spi); + + /* + * Register with the v4l2_device which increases the module's + * use count as well. + */ + if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) + sd = NULL; + + /* Decrease the module use count to match the first try_module_get. */ + module_put(spi->dev.driver->owner); + +error: + /* + * If we have a client but no subdev, then something went wrong and + * we must unregister the client. + */ + if (!sd) + spi_unregister_device(spi); + + return sd; +} +EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-subdev-priv.h b/6.18.0/drivers/media/v4l2-core/v4l2-subdev-priv.h new file mode 100644 index 0000000..52391d6 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-subdev-priv.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 sub-device pivate header. + * + * Copyright (C) 2023 Hans de Goede + */ + +#ifndef _V4L2_SUBDEV_PRIV_H_ +#define _V4L2_SUBDEV_PRIV_H_ + +int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd); +void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd); + +#endif diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c new file mode 100644 index 0000000..25e66bf --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c @@ -0,0 +1,2639 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * V4L2 sub-device + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Laurent Pinchart + * Sakari Ailus + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/** + * struct v4l2_subdev_stream_config - Used for storing stream configuration. 
+ * + * @pad: pad number + * @stream: stream number + * @enabled: has the stream been enabled with v4l2_subdev_enable_streams() + * @fmt: &struct v4l2_mbus_framefmt + * @crop: &struct v4l2_rect to be used for crop + * @compose: &struct v4l2_rect to be used for compose + * @interval: frame interval + * + * This structure stores configuration for a stream. + */ +struct v4l2_subdev_stream_config { + u32 pad; + u32 stream; + bool enabled; + + struct v4l2_mbus_framefmt fmt; + struct v4l2_rect crop; + struct v4l2_rect compose; + struct v4l2_fract interval; +}; + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) +/* + * The Streams API is an experimental feature. To use the Streams API, set + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +static bool v4l2_subdev_enable_streams_api; +#endif + +/* + * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set + * of streams. + * + * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX + * restricts the total number of streams in a pad, although the stream ID is + * not restricted. 
+ */ +#define V4L2_SUBDEV_MAX_STREAM_ID 63 + +#include "v4l2-subdev-priv.h" + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) +static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd) +{ + struct v4l2_subdev_state *state; + static struct lock_class_key key; + + state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key); + if (IS_ERR(state)) + return PTR_ERR(state); + + fh->state = state; + + return 0; +} + +static void subdev_fh_free(struct v4l2_subdev_fh *fh) +{ + __v4l2_subdev_state_free(fh->state); + fh->state = NULL; +} + +static int subdev_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_subdev_fh *subdev_fh; + int ret; + + subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL); + if (subdev_fh == NULL) + return -ENOMEM; + + ret = subdev_fh_init(subdev_fh, sd); + if (ret) { + kfree(subdev_fh); + return ret; + } + + v4l2_fh_init(&subdev_fh->vfh, vdev); + v4l2_fh_add(&subdev_fh->vfh, file); + + if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) { + struct module *owner; + + owner = sd->entity.graph_obj.mdev->dev->driver->owner; + if (!try_module_get(owner)) { + ret = -EBUSY; + goto err; + } + subdev_fh->owner = owner; + } + + if (sd->internal_ops && sd->internal_ops->open) { + ret = sd->internal_ops->open(sd, subdev_fh); + if (ret < 0) + goto err; + } + + return 0; + +err: + module_put(subdev_fh->owner); + v4l2_fh_del(&subdev_fh->vfh, file); + v4l2_fh_exit(&subdev_fh->vfh); + subdev_fh_free(subdev_fh); + kfree(subdev_fh); + + return ret; +} + +static int subdev_close(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + + if (sd->internal_ops && sd->internal_ops->close) + sd->internal_ops->close(sd, subdev_fh); + module_put(subdev_fh->owner); + v4l2_fh_del(vfh, 
file); + v4l2_fh_exit(vfh); + subdev_fh_free(subdev_fh); + kfree(subdev_fh); + + return 0; +} +#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ +static int subdev_open(struct file *file) +{ + return -ENODEV; +} + +static int subdev_close(struct file *file) +{ + return -ENODEV; +} +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) + led_set_brightness(sd->privacy_led, + sd->privacy_led->max_brightness); +#endif +} + +static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) + led_set_brightness(sd->privacy_led, 0); +#endif +} + +static inline int check_which(u32 which) +{ + if (which != V4L2_SUBDEV_FORMAT_TRY && + which != V4L2_SUBDEV_FORMAT_ACTIVE) + return -EINVAL; + + return 0; +} + +static inline int check_pad(struct v4l2_subdev *sd, u32 pad) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + if (sd->entity.num_pads) { + if (pad >= sd->entity.num_pads) + return -EINVAL; + return 0; + } +#endif + /* allow pad 0 on subdevices not registered as media entities */ + if (pad > 0) + return -EINVAL; + return 0; +} + +static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + u32 which, u32 pad, u32 stream) +{ + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + if (!v4l2_subdev_state_get_format(state, pad, stream)) + return -EINVAL; + return 0; +#else + return -EINVAL; +#endif + } + + if (stream != 0) + return -EINVAL; + + if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads)) + return -EINVAL; + + return 0; +} + +static inline int check_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + if (!format) + return -EINVAL; + + return check_which(format->which) ? : check_pad(sd, format->pad) ? 
: + check_state(sd, state, format->which, format->pad, format->stream); +} + +static int call_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return check_format(sd, state, format) ? : + sd->ops->pad->get_fmt(sd, state, format); +} + +static int call_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return check_format(sd, state, format) ? : + sd->ops->pad->set_fmt(sd, state, format); +} + +static int call_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (!code) + return -EINVAL; + + return check_which(code->which) ? : check_pad(sd, code->pad) ? : + check_state(sd, state, code->which, code->pad, code->stream) ? : + sd->ops->pad->enum_mbus_code(sd, state, code); +} + +static int call_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (!fse) + return -EINVAL; + + return check_which(fse->which) ? : check_pad(sd, fse->pad) ? : + check_state(sd, state, fse->which, fse->pad, fse->stream) ? : + sd->ops->pad->enum_frame_size(sd, state, fse); +} + +static int call_enum_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie) +{ + if (!fie) + return -EINVAL; + + return check_which(fie->which) ? : check_pad(sd, fie->pad) ? : + check_state(sd, state, fie->which, fie->pad, fie->stream) ? : + sd->ops->pad->enum_frame_interval(sd, state, fie); +} + +static inline int check_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + if (!sel) + return -EINVAL; + + return check_which(sel->which) ? : check_pad(sd, sel->pad) ? 
: + check_state(sd, state, sel->which, sel->pad, sel->stream); +} + +static int call_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + return check_selection(sd, state, sel) ? : + sd->ops->pad->get_selection(sd, state, sel); +} + +static int call_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel) +{ + return check_selection(sd, state, sel) ? : + sd->ops->pad->set_selection(sd, state, sel); +} + +static inline int check_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + if (!fi) + return -EINVAL; + + return check_which(fi->which) ? : check_pad(sd, fi->pad) ? : + check_state(sd, state, fi->which, fi->pad, fi->stream); +} + +static int call_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + return check_frame_interval(sd, state, fi) ? : + sd->ops->pad->get_frame_interval(sd, state, fi); +} + +static int call_set_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + return check_frame_interval(sd, state, fi) ? : + sd->ops->pad->set_frame_interval(sd, state, fi); +} + +static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd) +{ + unsigned int i; + int ret; + +#if defined(CONFIG_MEDIA_CONTROLLER) + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; +#endif + + memset(fd, 0, sizeof(*fd)); + + ret = sd->ops->pad->get_frame_desc(sd, pad, fd); + if (ret) + return ret; + + dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad, + fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" : + fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? 
"CSI-2" : + "unknown"); + + for (i = 0; i < fd->num_entries; i++) { + struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i]; + char buf[20] = ""; + + if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) + WARN_ON(snprintf(buf, sizeof(buf), + ", vc %u, dt 0x%02x", + entry->bus.csi2.vc, + entry->bus.csi2.dt) >= sizeof(buf)); + + dev_dbg(sd->dev, + "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n", + entry->stream, entry->pixelcode, entry->length, + entry->flags, buf); + } + + return 0; +} + +static inline int check_edid(struct v4l2_subdev *sd, + struct v4l2_subdev_edid *edid) +{ + if (!edid) + return -EINVAL; + + if (edid->blocks && edid->edid == NULL) + return -EINVAL; + + return check_pad(sd, edid->pad); +} + +static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) +{ + return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid); +} + +static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) +{ + return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid); +} + +static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->s_dv_timings(sd, pad, timings); +} + +static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->g_dv_timings(sd, pad, timings); +} + +static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings) +{ + if (!timings) + return -EINVAL; + + return check_pad(sd, pad) ? : + sd->ops->pad->query_dv_timings(sd, pad, timings); +} + +static int call_dv_timings_cap(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap) +{ + if (!cap) + return -EINVAL; + + return check_pad(sd, cap->pad) ? 
: + sd->ops->pad->dv_timings_cap(sd, cap); +} + +static int call_enum_dv_timings(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *dvt) +{ + if (!dvt) + return -EINVAL; + + return check_pad(sd, dvt->pad) ? : + sd->ops->pad->enum_dv_timings(sd, dvt); +} + +static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config) +{ + memset(config, 0, sizeof(*config)); + + return check_pad(sd, pad) ? : + sd->ops->pad->get_mbus_config(sd, pad, config); +} + +static int call_s_stream(struct v4l2_subdev *sd, int enable) +{ + int ret; + + /* + * The .s_stream() operation must never be called to start or stop an + * already started or stopped subdev. Catch offenders but don't return + * an error yet to avoid regressions. + */ + if (WARN_ON(sd->s_stream_enabled == !!enable)) + return 0; + + ret = sd->ops->video->s_stream(sd, enable); + + if (!enable && ret < 0) { + dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret); + ret = 0; + } + + if (!ret) { + sd->s_stream_enabled = enable; + + if (enable) + v4l2_subdev_enable_privacy_led(sd); + else + v4l2_subdev_disable_privacy_led(sd); + } + + return ret; +} + +#ifdef CONFIG_MEDIA_CONTROLLER +/* + * Create state-management wrapper for pad ops dealing with subdev state. The + * wrapper handles the case where the caller does not provide the called + * subdev's state. This should be removed when all the callers are fixed. 
+ */ +#define DEFINE_STATE_WRAPPER(f, arg_type) \ + static int call_##f##_state(struct v4l2_subdev *sd, \ + struct v4l2_subdev_state *_state, \ + arg_type *arg) \ + { \ + struct v4l2_subdev_state *state = _state; \ + int ret; \ + if (!_state) \ + state = v4l2_subdev_lock_and_get_active_state(sd); \ + ret = call_##f(sd, state, arg); \ + if (!_state && state) \ + v4l2_subdev_unlock_state(state); \ + return ret; \ + } + +#else /* CONFIG_MEDIA_CONTROLLER */ + +#define DEFINE_STATE_WRAPPER(f, arg_type) \ + static int call_##f##_state(struct v4l2_subdev *sd, \ + struct v4l2_subdev_state *state, \ + arg_type *arg) \ + { \ + return call_##f(sd, state, arg); \ + } + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format); +DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format); +DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum); +DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum); +DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum); +DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection); +DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection); + +static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = { + .get_fmt = call_get_fmt_state, + .set_fmt = call_set_fmt_state, + .enum_mbus_code = call_enum_mbus_code_state, + .enum_frame_size = call_enum_frame_size_state, + .enum_frame_interval = call_enum_frame_interval_state, + .get_selection = call_get_selection_state, + .set_selection = call_set_selection_state, + .get_frame_interval = call_get_frame_interval, + .set_frame_interval = call_set_frame_interval, + .get_edid = call_get_edid, + .set_edid = call_set_edid, + .s_dv_timings = call_s_dv_timings, + .g_dv_timings = call_g_dv_timings, + .query_dv_timings = call_query_dv_timings, + .dv_timings_cap = call_dv_timings_cap, + .enum_dv_timings = call_enum_dv_timings, + .get_frame_desc = call_get_frame_desc, + .get_mbus_config 
= call_get_mbus_config, +}; + +static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = { + .s_stream = call_s_stream, +}; + +const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = { + .pad = &v4l2_subdev_call_pad_wrappers, + .video = &v4l2_subdev_call_video_wrappers, +}; +EXPORT_SYMBOL(v4l2_subdev_call_wrappers); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static struct v4l2_subdev_state * +subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh, + unsigned int cmd, void *arg) +{ + u32 which; + + switch (cmd) { + default: + return NULL; + case VIDIOC_SUBDEV_G_FMT: + case VIDIOC_SUBDEV_S_FMT: + which = ((struct v4l2_subdev_format *)arg)->which; + break; + case VIDIOC_SUBDEV_G_CROP: + case VIDIOC_SUBDEV_S_CROP: + which = ((struct v4l2_subdev_crop *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_MBUS_CODE: + which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: + which = ((struct v4l2_subdev_frame_size_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: + which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which; + break; + case VIDIOC_SUBDEV_G_SELECTION: + case VIDIOC_SUBDEV_S_SELECTION: + which = ((struct v4l2_subdev_selection *)arg)->which; + break; + case VIDIOC_SUBDEV_G_FRAME_INTERVAL: + case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!(subdev_fh->client_caps & + V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH)) + fi->which = V4L2_SUBDEV_FORMAT_ACTIVE; + + which = fi->which; + break; + } + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + which = ((struct v4l2_subdev_routing *)arg)->which; + break; + } + + return which == V4L2_SUBDEV_FORMAT_TRY ? 
+ subdev_fh->state : + v4l2_subdev_get_unlocked_active_state(sd); +} + +static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, + struct v4l2_subdev_state *state) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); + bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS; + bool client_supports_streams = subdev_fh->client_caps & + V4L2_SUBDEV_CLIENT_CAP_STREAMS; + int rval; + + /* + * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS. + * Remove this when the API is no longer experimental. + */ + if (!v4l2_subdev_enable_streams_api) + streams_subdev = false; + + switch (cmd) { + case VIDIOC_SUBDEV_QUERYCAP: { + struct v4l2_subdev_capability *cap = arg; + + memset(cap->reserved, 0, sizeof(cap->reserved)); + cap->version = LINUX_VERSION_CODE; + cap->capabilities = + (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) | + (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0); + + return 0; + } + + case VIDIOC_QUERYCTRL: + /* + * TODO: this really should be folded into v4l2_queryctrl (this + * currently returns -EINVAL for NULL control handlers). + * However, v4l2_queryctrl() is still called directly by + * drivers as well and until that has been addressed I believe + * it is safer to do the check here. The same is true for the + * other control ioctls below. 
+ */ + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_queryctrl(vfh->ctrl_handler, arg); + + case VIDIOC_QUERY_EXT_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg); + + case VIDIOC_QUERYMENU: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_querymenu(vfh->ctrl_handler, arg); + + case VIDIOC_G_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_g_ctrl(vfh->ctrl_handler, arg); + + case VIDIOC_S_CTRL: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg); + + case VIDIOC_G_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_S_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_TRY_EXT_CTRLS: + if (!vfh->ctrl_handler) + return -ENOTTY; + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + vdev, sd->v4l2_dev->mdev, arg); + + case VIDIOC_DQEVENT: + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return -ENOIOCTLCMD; + + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); + + case VIDIOC_SUBSCRIBE_EVENT: + if (v4l2_subdev_has_op(sd, core, subscribe_event)) + return v4l2_subdev_call(sd, core, subscribe_event, + vfh, arg); + + if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) && + vfh->ctrl_handler) + return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg); + + return -ENOIOCTLCMD; + + case VIDIOC_UNSUBSCRIBE_EVENT: + if (v4l2_subdev_has_op(sd, core, unsubscribe_event)) + return v4l2_subdev_call(sd, core, unsubscribe_event, + vfh, arg); + + if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) + return v4l2_event_subdev_unsubscribe(sd, vfh, arg); + + return -ENOIOCTLCMD; + +#ifdef CONFIG_VIDEO_ADV_DEBUG + case VIDIOC_DBG_G_REGISTER: + { + struct v4l2_dbg_register *p = arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return v4l2_subdev_call(sd, core, 
g_register, p); + } + case VIDIOC_DBG_S_REGISTER: + { + struct v4l2_dbg_register *p = arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return v4l2_subdev_call(sd, core, s_register, p); + } + case VIDIOC_DBG_G_CHIP_INFO: + { + struct v4l2_dbg_chip_info *p = arg; + + if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr) + return -EINVAL; + if (sd->ops->core && sd->ops->core->s_register) + p->flags |= V4L2_CHIP_FL_WRITABLE; + if (sd->ops->core && sd->ops->core->g_register) + p->flags |= V4L2_CHIP_FL_READABLE; + strscpy(p->name, sd->name, sizeof(p->name)); + return 0; + } +#endif + + case VIDIOC_LOG_STATUS: { + int ret; + + pr_info("%s: ================= START STATUS =================\n", + sd->name); + ret = v4l2_subdev_call(sd, core, log_status); + pr_info("%s: ================== END STATUS ==================\n", + sd->name); + return ret; + } + + case VIDIOC_SUBDEV_G_FMT: { + struct v4l2_subdev_format *format = arg; + + if (!client_supports_streams) + format->stream = 0; + + memset(format->reserved, 0, sizeof(format->reserved)); + memset(format->format.reserved, 0, sizeof(format->format.reserved)); + return v4l2_subdev_call(sd, pad, get_fmt, state, format); + } + + case VIDIOC_SUBDEV_S_FMT: { + struct v4l2_subdev_format *format = arg; + + if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + format->stream = 0; + + memset(format->reserved, 0, sizeof(format->reserved)); + memset(format->format.reserved, 0, sizeof(format->format.reserved)); + return v4l2_subdev_call(sd, pad, set_fmt, state, format); + } + + case VIDIOC_SUBDEV_G_CROP: { + struct v4l2_subdev_crop *crop = arg; + struct v4l2_subdev_selection sel; + + if (!client_supports_streams) + crop->stream = 0; + + memset(crop->reserved, 0, sizeof(crop->reserved)); + memset(&sel, 0, sizeof(sel)); + sel.which = crop->which; + sel.pad = crop->pad; + sel.stream = crop->stream; + sel.target = V4L2_SEL_TGT_CROP; + + rval = v4l2_subdev_call( + sd, 
pad, get_selection, state, &sel); + + crop->rect = sel.r; + + return rval; + } + + case VIDIOC_SUBDEV_S_CROP: { + struct v4l2_subdev_crop *crop = arg; + struct v4l2_subdev_selection sel; + + if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + crop->stream = 0; + + memset(crop->reserved, 0, sizeof(crop->reserved)); + memset(&sel, 0, sizeof(sel)); + sel.which = crop->which; + sel.pad = crop->pad; + sel.stream = crop->stream; + sel.target = V4L2_SEL_TGT_CROP; + sel.r = crop->rect; + + rval = v4l2_subdev_call( + sd, pad, set_selection, state, &sel); + + crop->rect = sel.r; + + return rval; + } + + case VIDIOC_SUBDEV_ENUM_MBUS_CODE: { + struct v4l2_subdev_mbus_code_enum *code = arg; + + if (!client_supports_streams) + code->stream = 0; + + memset(code->reserved, 0, sizeof(code->reserved)); + return v4l2_subdev_call(sd, pad, enum_mbus_code, state, + code); + } + + case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: { + struct v4l2_subdev_frame_size_enum *fse = arg; + + if (!client_supports_streams) + fse->stream = 0; + + memset(fse->reserved, 0, sizeof(fse->reserved)); + return v4l2_subdev_call(sd, pad, enum_frame_size, state, + fse); + } + + case VIDIOC_SUBDEV_G_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!client_supports_streams) + fi->stream = 0; + + memset(fi->reserved, 0, sizeof(fi->reserved)); + return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi); + } + + case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval *fi = arg; + + if (!client_supports_streams) + fi->stream = 0; + + if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + memset(fi->reserved, 0, sizeof(fi->reserved)); + return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi); + } + + case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: { + struct v4l2_subdev_frame_interval_enum *fie = arg; + + if (!client_supports_streams) + fie->stream = 0; + + memset(fie->reserved, 0, sizeof(fie->reserved)); 
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, state, + fie); + } + + case VIDIOC_SUBDEV_G_SELECTION: { + struct v4l2_subdev_selection *sel = arg; + + if (!client_supports_streams) + sel->stream = 0; + + memset(sel->reserved, 0, sizeof(sel->reserved)); + return v4l2_subdev_call( + sd, pad, get_selection, state, sel); + } + + case VIDIOC_SUBDEV_S_SELECTION: { + struct v4l2_subdev_selection *sel = arg; + + if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (!client_supports_streams) + sel->stream = 0; + + memset(sel->reserved, 0, sizeof(sel->reserved)); + return v4l2_subdev_call( + sd, pad, set_selection, state, sel); + } + + case VIDIOC_G_EDID: { + struct v4l2_subdev_edid *edid = arg; + + return v4l2_subdev_call(sd, pad, get_edid, edid); + } + + case VIDIOC_S_EDID: { + struct v4l2_subdev_edid *edid = arg; + + return v4l2_subdev_call(sd, pad, set_edid, edid); + } + + case VIDIOC_SUBDEV_DV_TIMINGS_CAP: { + struct v4l2_dv_timings_cap *cap = arg; + + return v4l2_subdev_call(sd, pad, dv_timings_cap, cap); + } + + case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: { + struct v4l2_enum_dv_timings *dvt = arg; + + return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt); + } + + case VIDIOC_SUBDEV_QUERY_DV_TIMINGS: + return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_G_DV_TIMINGS: + return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_S_DV_TIMINGS: + if (ro_subdev) + return -EPERM; + + return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg); + + case VIDIOC_SUBDEV_G_STD: + return v4l2_subdev_call(sd, video, g_std, arg); + + case VIDIOC_SUBDEV_S_STD: { + v4l2_std_id *std = arg; + + if (ro_subdev) + return -EPERM; + + return v4l2_subdev_call(sd, video, s_std, *std); + } + + case VIDIOC_SUBDEV_ENUMSTD: { + struct v4l2_standard *p = arg; + v4l2_std_id id; + + if (v4l2_subdev_call(sd, video, g_tvnorms, &id)) + return -EINVAL; + + return v4l_video_std_enumstd(p, id); + } + + case VIDIOC_SUBDEV_QUERYSTD: + 
return v4l2_subdev_call(sd, video, querystd, arg); + + case VIDIOC_SUBDEV_G_ROUTING: { + struct v4l2_subdev_routing *routing = arg; + struct v4l2_subdev_krouting *krouting; + + if (!v4l2_subdev_enable_streams_api) + return -ENOIOCTLCMD; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return -ENOIOCTLCMD; + + memset(routing->reserved, 0, sizeof(routing->reserved)); + + krouting = &state->routing; + + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + krouting->routes, + min(krouting->num_routes, routing->len_routes) * + sizeof(*krouting->routes)); + routing->num_routes = krouting->num_routes; + + return 0; + } + + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *routing = arg; + struct v4l2_subdev_route *routes = + (struct v4l2_subdev_route *)(uintptr_t)routing->routes; + struct v4l2_subdev_krouting krouting = {}; + unsigned int num_active_routes = 0; + unsigned int i; + + if (!v4l2_subdev_enable_streams_api) + return -ENOIOCTLCMD; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return -ENOIOCTLCMD; + + if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) + return -EPERM; + + if (routing->num_routes > routing->len_routes) + return -EINVAL; + + memset(routing->reserved, 0, sizeof(routing->reserved)); + + for (i = 0; i < routing->num_routes; ++i) { + const struct v4l2_subdev_route *route = &routes[i]; + const struct media_pad *pads = sd->entity.pads; + + if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID || + route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID) + return -EINVAL; + + if (route->sink_pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(pads[route->sink_pad].flags & + MEDIA_PAD_FL_SINK)) + return -EINVAL; + + if (route->source_pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(pads[route->source_pad].flags & + MEDIA_PAD_FL_SOURCE)) + return -EINVAL; + + if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + num_active_routes++; + } + + /* + * Drivers that implement routing need to report a frame + * descriptor 
accordingly, with up to one entry per route. Until + * the frame descriptors entries get allocated dynamically, + * limit the number of active routes to + * V4L2_FRAME_DESC_ENTRY_MAX. + */ + if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX) + return -E2BIG; + + /* + * If the driver doesn't support setting routing, just return + * the routing table. + */ + if (!v4l2_subdev_has_op(sd, pad, set_routing)) { + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + state->routing.routes, + min(state->routing.num_routes, routing->len_routes) * + sizeof(*state->routing.routes)); + routing->num_routes = state->routing.num_routes; + + return 0; + } + + krouting.num_routes = routing->num_routes; + krouting.len_routes = routing->len_routes; + krouting.routes = routes; + + rval = v4l2_subdev_call(sd, pad, set_routing, state, + routing->which, &krouting); + if (rval < 0) + return rval; + + memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, + state->routing.routes, + min(state->routing.num_routes, routing->len_routes) * + sizeof(*state->routing.routes)); + routing->num_routes = state->routing.num_routes; + + return 0; + } + + case VIDIOC_SUBDEV_G_CLIENT_CAP: { + struct v4l2_subdev_client_capability *client_cap = arg; + + client_cap->capabilities = subdev_fh->client_caps; + + return 0; + } + + case VIDIOC_SUBDEV_S_CLIENT_CAP: { + struct v4l2_subdev_client_capability *client_cap = arg; + + /* + * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not + * enabled. Remove this when streams API is no longer + * experimental. 
+ */ + if (!v4l2_subdev_enable_streams_api) + client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS; + + /* Filter out unsupported capabilities */ + client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS | + V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH); + + subdev_fh->client_caps = client_cap->capabilities; + + return 0; + } + + default: + return v4l2_subdev_call(sd, core, ioctl, cmd, arg); + } + + return 0; +} + +static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct mutex *lock = vdev->lock; + long ret = -ENODEV; + + if (lock && mutex_lock_interruptible(lock)) + return -ERESTARTSYS; + + if (video_is_registered(vdev)) { + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = file_to_v4l2_fh(file); + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + struct v4l2_subdev_state *state; + + state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg); + + if (state) + v4l2_subdev_lock_state(state); + + ret = subdev_do_ioctl(file, cmd, arg, state); + + if (state) + v4l2_subdev_unlock_state(state); + } + + if (lock) + mutex_unlock(lock); + return ret; +} + +static long subdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock); +} + +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + + return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg); +} +#endif + +#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ +static long subdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return -ENODEV; +} + +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return -ENODEV; +} +#endif +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + 
+static __poll_t subdev_poll(struct file *file, poll_table *wait) +{ + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *fh = file_to_v4l2_fh(file); + + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return EPOLLERR; + + poll_wait(file, &fh->wait, wait); + + if (v4l2_event_pending(fh)) + return EPOLLPRI; + + return 0; +} + +const struct v4l2_file_operations v4l2_subdev_fops = { + .owner = THIS_MODULE, + .open = subdev_open, + .unlocked_ioctl = subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = subdev_compat_ioctl32, +#endif + .release = subdev_close, + .poll = subdev_poll, +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint) +{ + struct fwnode_handle *fwnode; + struct v4l2_subdev *sd; + + if (!is_media_entity_v4l2_subdev(entity)) + return -EINVAL; + + sd = media_entity_to_v4l2_subdev(entity); + + fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode); + fwnode_handle_put(fwnode); + + if (device_match_fwnode(sd->dev, fwnode)) + return endpoint->port; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1); + +int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + bool pass = true; + + /* The width, height and code must match. 
*/ + if (source_fmt->format.width != sink_fmt->format.width) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: width does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.width, sink_fmt->format.width); + pass = false; + } + + if (source_fmt->format.height != sink_fmt->format.height) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: height does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.height, sink_fmt->format.height); + pass = false; + } + + if (source_fmt->format.code != sink_fmt->format.code) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n", + __func__, + source_fmt->format.code, sink_fmt->format.code); + pass = false; + } + + /* The field order must match, or the sink field order must be NONE + * to support interlaced hardware connected to bridges that support + * progressive formats only. + */ + if (source_fmt->format.field != sink_fmt->format.field && + sink_fmt->format.field != V4L2_FIELD_NONE) { + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: field does not match (source %u, sink %u)\n", + __func__, + source_fmt->format.field, sink_fmt->format.field); + pass = false; + } + + if (pass) + return 0; + + dev_dbg(sd->entity.graph_obj.mdev->dev, + "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__, + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + return -EPIPE; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); + +static int +v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream, + struct v4l2_subdev_format *fmt, + bool states_locked) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev *sd; + int ret; + + sd = media_entity_to_v4l2_subdev(pad->entity); + + fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt->pad = pad->index; + fmt->stream = stream; + + if (states_locked) + state = v4l2_subdev_get_locked_active_state(sd); + else + state = 
v4l2_subdev_lock_and_get_active_state(sd); + + ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt); + + if (!states_locked && state) + v4l2_subdev_unlock_state(state); + + return ret; +} + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static void __v4l2_link_validate_get_streams(struct media_pad *pad, + u64 *streams_mask, + bool states_locked) +{ + struct v4l2_subdev_route *route; + struct v4l2_subdev_state *state; + struct v4l2_subdev *subdev; + + subdev = media_entity_to_v4l2_subdev(pad->entity); + + *streams_mask = 0; + + if (states_locked) + state = v4l2_subdev_get_locked_active_state(subdev); + else + state = v4l2_subdev_lock_and_get_active_state(subdev); + + if (WARN_ON(!state)) + return; + + for_each_active_route(&state->routing, route) { + u32 route_pad; + u32 route_stream; + + if (pad->flags & MEDIA_PAD_FL_SOURCE) { + route_pad = route->source_pad; + route_stream = route->source_stream; + } else { + route_pad = route->sink_pad; + route_stream = route->sink_stream; + } + + if (route_pad != pad->index) + continue; + + *streams_mask |= BIT_ULL(route_stream); + } + + if (!states_locked) + v4l2_subdev_unlock_state(state); +} + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +static void v4l2_link_validate_get_streams(struct media_pad *pad, + u64 *streams_mask, + bool states_locked) +{ + struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity); + + if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) { + /* Non-streams subdevs have an implicit stream 0 */ + *streams_mask = BIT_ULL(0); + return; + } + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + __v4l2_link_validate_get_streams(pad, streams_mask, states_locked); +#else + /* This shouldn't happen */ + *streams_mask = 0; +#endif +} + +static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked) +{ + struct v4l2_subdev *sink_subdev = + media_entity_to_v4l2_subdev(link->sink->entity); + struct device *dev = sink_subdev->entity.graph_obj.mdev->dev; + u64 source_streams_mask; + u64 
sink_streams_mask; + u64 dangling_sink_streams; + u32 stream; + int ret; + + dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n", + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked); + v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked); + + /* + * It is ok to have more source streams than sink streams as extra + * source streams can just be ignored by the receiver, but having extra + * sink streams is an error as streams must have a source. + */ + dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) & + sink_streams_mask; + if (dangling_sink_streams) { + dev_err(dev, "Dangling sink streams: mask %#llx\n", + dangling_sink_streams); + return -EINVAL; + } + + /* Validate source and sink stream formats */ + + for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) { + struct v4l2_subdev_format sink_fmt, source_fmt; + + if (!(sink_streams_mask & BIT_ULL(stream))) + continue; + + dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n", + link->source->entity->name, link->source->index, stream, + link->sink->entity->name, link->sink->index, stream); + + ret = v4l2_subdev_link_validate_get_format(link->source, stream, + &source_fmt, states_locked); + if (ret < 0) { + dev_dbg(dev, + "Failed to get format for \"%s\":%u:%u (but that's ok)\n", + link->source->entity->name, link->source->index, + stream); + continue; + } + + ret = v4l2_subdev_link_validate_get_format(link->sink, stream, + &sink_fmt, states_locked); + if (ret < 0) { + dev_dbg(dev, + "Failed to get format for \"%s\":%u:%u (but that's ok)\n", + link->sink->entity->name, link->sink->index, + stream); + continue; + } + + /* TODO: add stream number to link_validate() */ + ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link, + &source_fmt, &sink_fmt); + if (!ret) + continue; + + if (ret != -ENOIOCTLCMD) + return ret; 
+ + ret = v4l2_subdev_link_validate_default(sink_subdev, link, + &source_fmt, &sink_fmt); + + if (ret) + return ret; + } + + return 0; +} + +int v4l2_subdev_link_validate(struct media_link *link) +{ + struct v4l2_subdev *source_sd, *sink_sd; + struct v4l2_subdev_state *source_state, *sink_state; + bool states_locked; + int ret; + + /* + * Links are validated in the context of the sink entity. Usage of this + * helper on a sink that is not a subdev is a clear driver bug. + */ + if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) + return -EINVAL; + + /* + * If the source is a video device, delegate link validation to it. This + * allows usage of this helper for subdev connected to a video output + * device, provided that the driver implement the video output device's + * .link_validate() operation. + */ + if (is_media_entity_v4l2_video_device(link->source->entity)) { + struct media_entity *source = link->source->entity; + + if (!source->ops || !source->ops->link_validate) { + /* + * Many existing drivers do not implement the required + * .link_validate() operation for their video devices. + * Print a warning to get the drivers fixed, and return + * 0 to avoid breaking userspace. This should + * eventually be turned into a WARN_ON() when all + * drivers will have been fixed. + */ + pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n", + source->name); + return 0; + } + + /* + * Avoid infinite loops in case a video device incorrectly uses + * this helper function as its .link_validate() handler. + */ + if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) + return -EINVAL; + + return source->ops->link_validate(link); + } + + /* + * If the source is still not a subdev, usage of this helper is a clear + * driver bug. 
+ */ + if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) + return -EINVAL; + + sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); + source_sd = media_entity_to_v4l2_subdev(link->source->entity); + + sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd); + source_state = v4l2_subdev_get_unlocked_active_state(source_sd); + + states_locked = sink_state && source_state; + + if (states_locked) + v4l2_subdev_lock_states(sink_state, source_state); + + ret = v4l2_subdev_link_validate_locked(link, states_locked); + + if (states_locked) + v4l2_subdev_unlock_states(sink_state, source_state); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); + +bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct v4l2_subdev_krouting *routing; + struct v4l2_subdev_state *state; + unsigned int i; + + state = v4l2_subdev_lock_and_get_active_state(sd); + + routing = &state->routing; + + for (i = 0; i < routing->num_routes; ++i) { + struct v4l2_subdev_route *route = &routing->routes[i]; + + if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + if ((route->sink_pad == pad0 && route->source_pad == pad1) || + (route->source_pad == pad0 && route->sink_pad == pad1)) { + v4l2_subdev_unlock_state(state); + return true; + } + } + + v4l2_subdev_unlock_state(state); + + return false; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep); + +struct v4l2_subdev_state * +__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name, + struct lock_class_key *lock_key) +{ + struct v4l2_subdev_state *state; + int ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + __mutex_init(&state->_lock, lock_name, lock_key); + if (sd->state_lock) + state->lock = sd->state_lock; + else + state->lock = &state->_lock; + + state->sd = sd; + + /* Drivers that support streams do not need the 
legacy pad config */ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { + state->pads = kvcalloc(sd->entity.num_pads, + sizeof(*state->pads), GFP_KERNEL); + if (!state->pads) { + ret = -ENOMEM; + goto err; + } + } + + if (sd->internal_ops && sd->internal_ops->init_state) { + /* + * There can be no race at this point, but we lock the state + * anyway to satisfy lockdep checks. + */ + v4l2_subdev_lock_state(state); + ret = sd->internal_ops->init_state(sd, state); + v4l2_subdev_unlock_state(state); + + if (ret) + goto err; + } + + return state; + +err: + if (state && state->pads) + kvfree(state->pads); + + kfree(state); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc); + +void __v4l2_subdev_state_free(struct v4l2_subdev_state *state) +{ + if (!state) + return; + + mutex_destroy(&state->_lock); + + kfree(state->routing.routes); + kvfree(state->stream_configs.configs); + kvfree(state->pads); + kfree(state); +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free); + +int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, + struct lock_class_key *key) +{ + struct v4l2_subdev_state *state; + struct device *dev = sd->dev; + bool has_disable_streams; + bool has_enable_streams; + bool has_s_stream; + + /* Check that the subdevice implements the required features */ + + has_s_stream = v4l2_subdev_has_op(sd, video, s_stream); + has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams); + has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams); + + if (has_enable_streams != has_disable_streams) { + dev_err(dev, + "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n", + sd->name); + return -EINVAL; + } + + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { + if (has_s_stream && !has_enable_streams) { + dev_err(dev, + "subdev '%s' must implement .enable/disable_streams()\n", + sd->name); + + return -EINVAL; + } + } + + if (sd->ctrl_handler) + sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; 
+ + state = __v4l2_subdev_state_alloc(sd, name, key); + if (IS_ERR(state)) + return PTR_ERR(state); + + sd->active_state = state; + + return 0; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize); + +void v4l2_subdev_cleanup(struct v4l2_subdev *sd) +{ + struct v4l2_async_subdev_endpoint *ase, *ase_tmp; + + __v4l2_subdev_state_free(sd->active_state); + sd->active_state = NULL; + + /* Uninitialised sub-device, bail out here. */ + if (!sd->async_subdev_endpoint_list.next) + return; + + list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list, + async_subdev_endpoint_entry) { + list_del(&ase->async_subdev_endpoint_entry); + + kfree(ase); + } +} +EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup); + +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].format; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format); + +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].crop; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if 
(stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].crop; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop); + +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON_ONCE(!state)) + return NULL; + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].compose; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].compose; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose); + +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + + if (WARN_ON(!state)) + return NULL; + + lockdep_assert_held(state->lock); + + if (state->pads) { + if (stream) + return NULL; + + if (pad >= state->sd->entity.num_pads) + return NULL; + + return &state->pads[pad].interval; + } + + lockdep_assert_held(state->lock); + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) { + if (stream_configs->configs[i].pad == pad && + stream_configs->configs[i].stream == stream) + return &stream_configs->configs[i].interval; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +static int +v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs, + const struct v4l2_subdev_krouting *routing) +{ + struct v4l2_subdev_stream_configs new_configs = { 0 }; + struct 
v4l2_subdev_route *route; + u32 idx; + + /* Count number of formats needed */ + for_each_active_route(routing, route) { + /* + * Each route needs a format on both ends of the route. + */ + new_configs.num_configs += 2; + } + + if (new_configs.num_configs) { + new_configs.configs = kvcalloc(new_configs.num_configs, + sizeof(*new_configs.configs), + GFP_KERNEL); + + if (!new_configs.configs) + return -ENOMEM; + } + + /* + * Fill in the 'pad' and stream' value for each item in the array from + * the routing table + */ + idx = 0; + + for_each_active_route(routing, route) { + new_configs.configs[idx].pad = route->sink_pad; + new_configs.configs[idx].stream = route->sink_stream; + + idx++; + + new_configs.configs[idx].pad = route->source_pad; + new_configs.configs[idx].stream = route->source_stream; + + idx++; + } + + kvfree(stream_configs->configs); + *stream_configs = new_configs; + + return 0; +} + +int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); + if (!fmt) + return -EINVAL; + + format->format = *fmt; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt); + +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi) +{ + struct v4l2_fract *interval; + + interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream); + if (!interval) + return -EINVAL; + + fi->interval = *interval; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval); + +int v4l2_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing) +{ + struct v4l2_subdev_krouting *dst = &state->routing; + const struct v4l2_subdev_krouting *src = routing; + struct v4l2_subdev_krouting new_routing = { 0 }; + size_t bytes; + int r; + + if 
(unlikely(check_mul_overflow((size_t)src->num_routes, + sizeof(*src->routes), &bytes))) + return -EOVERFLOW; + + lockdep_assert_held(state->lock); + + if (src->num_routes > 0) { + new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL); + if (!new_routing.routes) + return -ENOMEM; + } + + new_routing.num_routes = src->num_routes; + + r = v4l2_subdev_init_stream_configs(&state->stream_configs, + &new_routing); + if (r) { + kfree(new_routing.routes); + return r; + } + + kfree(dst->routes); + *dst = new_routing; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing); + +struct v4l2_subdev_route * +__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, + struct v4l2_subdev_route *route) +{ + if (route) + ++route; + else + route = &routing->routes[0]; + + for (; route < routing->routes + routing->num_routes; ++route) { + if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + return route; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route); + +int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing, + const struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_stream_configs *stream_configs; + unsigned int i; + int ret; + + ret = v4l2_subdev_set_routing(sd, state, routing); + if (ret) + return ret; + + stream_configs = &state->stream_configs; + + for (i = 0; i < stream_configs->num_configs; ++i) + stream_configs->configs[i].fmt = *fmt; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt); + +int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, + u32 pad, u32 stream, u32 *other_pad, + u32 *other_stream) +{ + unsigned int i; + + for (i = 0; i < routing->num_routes; ++i) { + struct v4l2_subdev_route *route = &routing->routes[i]; + + if (route->source_pad == pad && + route->source_stream == stream) { + if (other_pad) + *other_pad = route->sink_pad; + if (other_stream) + 
*other_stream = route->sink_stream; + return 0; + } + + if (route->sink_pad == pad && route->sink_stream == stream) { + if (other_pad) + *other_pad = route->source_pad; + if (other_stream) + *other_stream = route->source_stream; + return 0; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end); + +struct v4l2_mbus_framefmt * +v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, + u32 pad, u32 stream) +{ + u32 other_pad, other_stream; + int ret; + + ret = v4l2_subdev_routing_find_opposite_end(&state->routing, + pad, stream, + &other_pad, &other_stream); + if (ret) + return NULL; + + return v4l2_subdev_state_get_format(state, other_pad, other_stream); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format); + +u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, + u32 pad0, u32 pad1, u64 *streams) +{ + const struct v4l2_subdev_krouting *routing = &state->routing; + struct v4l2_subdev_route *route; + u64 streams0 = 0; + u64 streams1 = 0; + + for_each_active_route(routing, route) { + if (route->sink_pad == pad0 && route->source_pad == pad1 && + (*streams & BIT_ULL(route->sink_stream))) { + streams0 |= BIT_ULL(route->sink_stream); + streams1 |= BIT_ULL(route->source_stream); + } + if (route->source_pad == pad0 && route->sink_pad == pad1 && + (*streams & BIT_ULL(route->source_stream))) { + streams0 |= BIT_ULL(route->source_stream); + streams1 |= BIT_ULL(route->sink_stream); + } + } + + *streams = streams0; + return streams1; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams); + +int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, + const struct v4l2_subdev_krouting *routing, + enum v4l2_subdev_routing_restriction disallow) +{ + u32 *remote_pads = NULL; + unsigned int i, j; + int ret = -EINVAL; + + if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX | + V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) { + remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), + GFP_KERNEL); 
+ if (!remote_pads) + return -ENOMEM; + + for (i = 0; i < sd->entity.num_pads; ++i) + remote_pads[i] = U32_MAX; + } + + for (i = 0; i < routing->num_routes; ++i) { + const struct v4l2_subdev_route *route = &routing->routes[i]; + + /* Validate the sink and source pad numbers. */ + if (route->sink_pad >= sd->entity.num_pads || + !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { + dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", + i, route->sink_pad); + goto out; + } + + if (route->source_pad >= sd->entity.num_pads || + !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { + dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", + i, route->source_pad); + goto out; + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a + * sink pad must be routed to a single source pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { + if (remote_pads[route->sink_pad] != U32_MAX && + remote_pads[route->sink_pad] != route->source_pad) { + dev_dbg(sd->dev, + "route %u attempts to mix %s streams\n", + i, "sink"); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a + * source pad must originate from a single sink pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) { + if (remote_pads[route->source_pad] != U32_MAX && + remote_pads[route->source_pad] != route->sink_pad) { + dev_dbg(sd->dev, + "route %u attempts to mix %s streams\n", + i, "source"); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink + * side can not do stream multiplexing, i.e. there can be only + * a single stream in a sink pad. 
+ */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) { + if (remote_pads[route->sink_pad] != U32_MAX) { + dev_dbg(sd->dev, + "route %u attempts to multiplex on %s pad %u\n", + i, "sink", route->sink_pad); + goto out; + } + } + + /* + * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the + * source side can not do stream multiplexing, i.e. there can + * be only a single stream in a source pad. + */ + if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) { + if (remote_pads[route->source_pad] != U32_MAX) { + dev_dbg(sd->dev, + "route %u attempts to multiplex on %s pad %u\n", + i, "source", route->source_pad); + goto out; + } + } + + if (remote_pads) { + remote_pads[route->sink_pad] = route->source_pad; + remote_pads[route->source_pad] = route->sink_pad; + } + + for (j = i + 1; j < routing->num_routes; ++j) { + const struct v4l2_subdev_route *r = &routing->routes[j]; + + /* + * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can + * originate from the same (sink) stream. + */ + if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) && + route->sink_pad == r->sink_pad && + route->sink_stream == r->sink_stream) { + dev_dbg(sd->dev, + "routes %u and %u originate from same sink (%u/%u)\n", + i, j, route->sink_pad, + route->sink_stream); + goto out; + } + + /* + * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end + * at the same (source) stream. 
+ */ + if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) && + route->source_pad == r->source_pad && + route->source_stream == r->source_stream) { + dev_dbg(sd->dev, + "routes %u and %u end at same source (%u/%u)\n", + i, j, route->source_pad, + route->source_stream); + goto out; + } + } + } + + ret = 0; + +out: + kfree(remote_pads); + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate); + +static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask, + u64 *found_streams, + u64 *enabled_streams) +{ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { + *found_streams = BIT_ULL(0); + *enabled_streams = + (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0; + dev_dbg(sd->dev, + "collect_streams: sub-device \"%s\" does not support streams\n", + sd->entity.name); + return; + } + + *found_streams = 0; + *enabled_streams = 0; + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + const struct v4l2_subdev_stream_config *cfg = + &state->stream_configs.configs[i]; + + if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream))) + continue; + + *found_streams |= BIT_ULL(cfg->stream); + if (cfg->enabled) + *enabled_streams |= BIT_ULL(cfg->stream); + } + + dev_dbg(sd->dev, + "collect_streams: \"%s\":%u: found %#llx enabled %#llx\n", + sd->entity.name, pad, *found_streams, *enabled_streams); +} + +static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask, + bool enabled) +{ + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { + if (enabled) + sd->enabled_pads |= BIT_ULL(pad); + else + sd->enabled_pads &= ~BIT_ULL(pad); + return; + } + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + struct v4l2_subdev_stream_config *cfg = + &state->stream_configs.configs[i]; + + if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream))) + cfg->enabled = enabled; + } +} + +int v4l2_subdev_enable_streams(struct 
v4l2_subdev *sd, u32 pad, + u64 streams_mask) +{ + struct device *dev = sd->entity.graph_obj.mdev->dev; + struct v4l2_subdev_state *state; + bool already_streaming; + u64 enabled_streams; + u64 found_streams; + bool use_s_stream; + int ret; + + dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, + streams_mask); + + /* A few basic sanity checks first. */ + if (pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + + /* + * We use a 64-bit bitmask for tracking enabled pads, so only subdevices + * with 64 pads or less can be supported. + */ + if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) + return -EOPNOTSUPP; + + if (!streams_mask) + return 0; + + /* Fallback on .s_stream() if .enable_streams() isn't available. */ + use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams); + + if (!use_s_stream) + state = v4l2_subdev_lock_and_get_active_state(sd); + else + state = NULL; + + /* + * Verify that the requested streams exist and that they are not + * already enabled. + */ + + v4l2_subdev_collect_streams(sd, state, pad, streams_mask, + &found_streams, &enabled_streams); + + if (found_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", + streams_mask & ~found_streams, sd->entity.name, pad); + ret = -EINVAL; + goto done; + } + + if (enabled_streams) { + dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n", + enabled_streams, sd->entity.name, pad); + ret = -EALREADY; + goto done; + } + + already_streaming = v4l2_subdev_is_streaming(sd); + + if (!use_s_stream) { + /* Call the .enable_streams() operation. */ + ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad, + streams_mask); + } else { + /* Start streaming when the first pad is enabled. 
*/ + if (!already_streaming) + ret = v4l2_subdev_call(sd, video, s_stream, 1); + else + ret = 0; + } + + if (ret) { + dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad, + streams_mask, ret); + goto done; + } + + /* Mark the streams as enabled. */ + v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true); + + /* + * TODO: When all the drivers have been changed to use + * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(), + * instead of calling .s_stream() operation directly, we can remove + * the privacy LED handling from call_s_stream() and do it here + * for all cases. + */ + if (!use_s_stream && !already_streaming) + v4l2_subdev_enable_privacy_led(sd); + +done: + if (!use_s_stream) + v4l2_subdev_unlock_state(state); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams); + +int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask) +{ + struct device *dev = sd->entity.graph_obj.mdev->dev; + struct v4l2_subdev_state *state; + u64 enabled_streams; + u64 found_streams; + bool use_s_stream; + int ret; + + dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, + streams_mask); + + /* A few basic sanity checks first. */ + if (pad >= sd->entity.num_pads) + return -EINVAL; + + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + + /* + * We use a 64-bit bitmask for tracking enabled pads, so only subdevices + * with 64 pads or less can be supported. + */ + if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) + return -EOPNOTSUPP; + + if (!streams_mask) + return 0; + + /* Fallback on .s_stream() if .disable_streams() isn't available. */ + use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams); + + if (!use_s_stream) + state = v4l2_subdev_lock_and_get_active_state(sd); + else + state = NULL; + + /* + * Verify that the requested streams exist and that they are not + * already disabled. 
+ */ + + v4l2_subdev_collect_streams(sd, state, pad, streams_mask, + &found_streams, &enabled_streams); + + if (found_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", + streams_mask & ~found_streams, sd->entity.name, pad); + ret = -EINVAL; + goto done; + } + + if (enabled_streams != streams_mask) { + dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n", + streams_mask & ~enabled_streams, sd->entity.name, pad); + ret = -EALREADY; + goto done; + } + + if (!use_s_stream) { + /* Call the .disable_streams() operation. */ + ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad, + streams_mask); + } else { + /* Stop streaming when the last streams are disabled. */ + + if (!(sd->enabled_pads & ~BIT_ULL(pad))) + ret = v4l2_subdev_call(sd, video, s_stream, 0); + else + ret = 0; + } + + if (ret) { + dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad, + streams_mask, ret); + goto done; + } + + v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false); + +done: + if (!use_s_stream) { + if (!v4l2_subdev_is_streaming(sd)) + v4l2_subdev_disable_privacy_led(sd); + + v4l2_subdev_unlock_state(state); + } + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams); + +int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable) +{ + struct v4l2_subdev_state *state; + struct v4l2_subdev_route *route; + struct media_pad *pad; + u64 source_mask = 0; + int pad_index = -1; + + /* + * Find the source pad. This helper is meant for subdevs that have a + * single source pad, so failures shouldn't happen, but catch them + * loudly nonetheless as they indicate a driver bug. + */ + media_entity_for_each_pad(&sd->entity, pad) { + if (pad->flags & MEDIA_PAD_FL_SOURCE) { + pad_index = pad->index; + break; + } + } + + if (WARN_ON(pad_index == -1)) + return -EINVAL; + + if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { + /* + * As there's a single source pad, just collect all the source + * streams. 
+ */ + state = v4l2_subdev_lock_and_get_active_state(sd); + + for_each_active_route(&state->routing, route) + source_mask |= BIT_ULL(route->source_stream); + + v4l2_subdev_unlock_state(state); + } else { + /* + * For non-streams subdevices, there's a single implicit stream + * per pad. + */ + source_mask = BIT_ULL(0); + } + + if (enable) + return v4l2_subdev_enable_streams(sd, pad_index, source_mask); + else + return v4l2_subdev_disable_streams(sd, pad_index, source_mask); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper); + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) +{ + INIT_LIST_HEAD(&sd->list); + BUG_ON(!ops); + sd->ops = ops; + sd->v4l2_dev = NULL; + sd->flags = 0; + sd->name[0] = '\0'; + sd->grp_id = 0; + sd->dev_priv = NULL; + sd->host_priv = NULL; + sd->privacy_led = NULL; + INIT_LIST_HEAD(&sd->async_subdev_endpoint_list); +#if defined(CONFIG_MEDIA_CONTROLLER) + sd->entity.name = sd->name; + sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; + sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; +#endif +} +EXPORT_SYMBOL(v4l2_subdev_init); + +void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev) +{ + v4l2_event_queue(sd->devnode, ev); + v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev); +} +EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event); + +bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd) +{ + struct v4l2_subdev_state *state; + + if (!v4l2_subdev_has_op(sd, pad, enable_streams)) + return sd->s_stream_enabled; + + if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) + return !!sd->enabled_pads; + + state = v4l2_subdev_get_locked_active_state(sd); + + for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { + const struct v4l2_subdev_stream_config *cfg; + + cfg = &state->stream_configs.configs[i]; + + if (cfg->enabled) + return true; + } + + return false; +} 
+EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); + +int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + sd->privacy_led = led_get(sd->dev, "privacy"); + if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) + return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), + "getting privacy LED\n"); + + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + mutex_lock(&sd->privacy_led->led_access); + led_sysfs_disable(sd->privacy_led); + led_trigger_remove(sd->privacy_led); + led_set_brightness(sd->privacy_led, 0); + mutex_unlock(&sd->privacy_led->led_access); + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led); + +void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd) +{ +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + mutex_lock(&sd->privacy_led->led_access); + led_sysfs_enable(sd->privacy_led); + mutex_unlock(&sd->privacy_led->led_access); + led_put(sd->privacy_led); + } +#endif +} +EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-trace.c b/6.18.0/drivers/media/v4l2-core/v4l2-trace.c new file mode 100644 index 0000000..95f3b02 --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf); diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-vp9.c b/6.18.0/drivers/media/v4l2-core/v4l2-vp9.c new file mode 100644 index 0000000..859589f --- /dev/null +++ b/6.18.0/drivers/media/v4l2-core/v4l2-vp9.c @@ -0,0 +1,1850 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * V4L2 VP9 helpers. + * + * Copyright (C) 2021 Collabora, Ltd. 
+ * + * Author: Andrzej Pietrasiewicz + */ + +#include + +#include + +const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = { + { + /* above = dc */ + { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */ + { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */ + { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */ + { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */ + { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/ + { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/ + { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/ + { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/ + { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */ + { 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */ + }, { /* above = v */ + { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */ + { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */ + { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */ + { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */ + { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/ + { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/ + { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/ + { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/ + { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */ + { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */ + }, { /* above = h */ + { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */ + { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */ + { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */ + { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */ + { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/ + { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/ + { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/ + { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/ + { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */ + { 38, 72, 19, 168, 203, 212, 50, 50, 
107 }, /*left = tm */ + }, { /* above = d45 */ + { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */ + { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */ + { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */ + { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */ + { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/ + { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/ + { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/ + { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/ + { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */ + { 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */ + }, { /* above = d135 */ + { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */ + { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */ + { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */ + { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */ + { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/ + { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/ + { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/ + { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/ + { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */ + { 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */ + }, { /* above = d117 */ + { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */ + { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */ + { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */ + { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */ + { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/ + { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/ + { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/ + { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/ + { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */ + { 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */ + }, { /* above = d153 */ + { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc 
*/ + { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */ + { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */ + { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */ + { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/ + { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/ + { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/ + { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/ + { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */ + { 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */ + }, { /* above = d207 */ + { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */ + { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */ + { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */ + { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */ + { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/ + { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/ + { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/ + { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/ + { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */ + { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */ + }, { /* above = d63 */ + { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */ + { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */ + { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */ + { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */ + { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/ + { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/ + { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/ + { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/ + { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */ + { 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */ + }, { /* above = tm */ + { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */ + { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */ + { 39, 38, 21, 184, 227, 206, 42, 32, 
64 }, /*left = h */ + { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */ + { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/ + { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/ + { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/ + { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/ + { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */ + { 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */ + } +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob); + +const u8 v4l2_vp9_kf_partition_probs[16][3] = { + /* 8x8 -> 4x4 */ + { 158, 97, 94 }, /* a/l both not split */ + { 93, 24, 99 }, /* a split, l not split */ + { 85, 119, 44 }, /* l split, a not split */ + { 62, 59, 67 }, /* a/l both split */ + /* 16x16 -> 8x8 */ + { 149, 53, 53 }, /* a/l both not split */ + { 94, 20, 48 }, /* a split, l not split */ + { 83, 53, 24 }, /* l split, a not split */ + { 52, 18, 18 }, /* a/l both split */ + /* 32x32 -> 16x16 */ + { 150, 40, 39 }, /* a/l both not split */ + { 78, 12, 26 }, /* a split, l not split */ + { 67, 33, 11 }, /* l split, a not split */ + { 24, 7, 5 }, /* a/l both split */ + /* 64x64 -> 32x32 */ + { 174, 35, 49 }, /* a/l both not split */ + { 68, 11, 27 }, /* a split, l not split */ + { 57, 15, 9 }, /* l split, a not split */ + { 12, 3, 3 }, /* a/l both split */ +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs); + +const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = { + { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */ + { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */ + { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */ + { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */ + { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */ + { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */ + { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */ + { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */ + { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */ + { 102, 19, 66, 162, 182, 
122, 35, 59, 128 } /* y = tm */ +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob); + +const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = { + .tx8 = { + { 100 }, + { 66 }, + }, + .tx16 = { + { 20, 152 }, + { 15, 101 }, + }, + .tx32 = { + { 3, 136, 37 }, + { 5, 52, 13 }, + }, + .coef = { + { /* tx = 4x4 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 195, 29, 183 }, + { 84, 49, 136 }, + { 8, 42, 71 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 31, 107, 169 }, + { 35, 99, 159 }, + { 17, 82, 140 }, + { 8, 66, 114 }, + { 2, 44, 76 }, + { 1, 19, 32 }, + }, + { /* Coeff Band 2 */ + { 40, 132, 201 }, + { 29, 114, 187 }, + { 13, 91, 157 }, + { 7, 75, 127 }, + { 3, 58, 95 }, + { 1, 28, 47 }, + }, + { /* Coeff Band 3 */ + { 69, 142, 221 }, + { 42, 122, 201 }, + { 15, 91, 159 }, + { 6, 67, 121 }, + { 1, 42, 77 }, + { 1, 17, 31 }, + }, + { /* Coeff Band 4 */ + { 102, 148, 228 }, + { 67, 117, 204 }, + { 17, 82, 154 }, + { 6, 59, 114 }, + { 2, 39, 75 }, + { 1, 15, 29 }, + }, + { /* Coeff Band 5 */ + { 156, 57, 233 }, + { 119, 57, 212 }, + { 58, 48, 163 }, + { 29, 40, 124 }, + { 12, 30, 81 }, + { 3, 12, 31 } + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 191, 107, 226 }, + { 124, 117, 204 }, + { 25, 99, 155 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 29, 148, 210 }, + { 37, 126, 194 }, + { 8, 93, 157 }, + { 2, 68, 118 }, + { 1, 39, 69 }, + { 1, 17, 33 }, + }, + { /* Coeff Band 2 */ + { 41, 151, 213 }, + { 27, 123, 193 }, + { 3, 82, 144 }, + { 1, 58, 105 }, + { 1, 32, 60 }, + { 1, 13, 26 }, + }, + { /* Coeff Band 3 */ + { 59, 159, 220 }, + { 23, 126, 198 }, + { 4, 88, 151 }, + { 1, 66, 114 }, + { 1, 38, 71 }, + { 1, 18, 34 }, + }, + { /* Coeff Band 4 */ + { 114, 136, 232 }, + { 51, 114, 207 }, + { 11, 83, 155 }, + { 3, 56, 105 }, + { 1, 33, 65 }, + { 1, 17, 34 }, + }, + { /* Coeff Band 5 */ + { 149, 65, 234 }, + { 121, 57, 215 }, + { 61, 49, 166 }, + { 28, 36, 114 }, + { 12, 
25, 76 }, + { 3, 16, 42 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 214, 49, 220 }, + { 132, 63, 188 }, + { 42, 65, 137 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 137, 221 }, + { 104, 131, 216 }, + { 49, 111, 192 }, + { 21, 87, 155 }, + { 2, 49, 87 }, + { 1, 16, 28 }, + }, + { /* Coeff Band 2 */ + { 89, 163, 230 }, + { 90, 137, 220 }, + { 29, 100, 183 }, + { 10, 70, 135 }, + { 2, 42, 81 }, + { 1, 17, 33 }, + }, + { /* Coeff Band 3 */ + { 108, 167, 237 }, + { 55, 133, 222 }, + { 15, 97, 179 }, + { 4, 72, 135 }, + { 1, 45, 85 }, + { 1, 19, 38 }, + }, + { /* Coeff Band 4 */ + { 124, 146, 240 }, + { 66, 124, 224 }, + { 17, 88, 175 }, + { 4, 58, 122 }, + { 1, 36, 75 }, + { 1, 18, 37 }, + }, + { /* Coeff Band 5 */ + { 141, 79, 241 }, + { 126, 70, 227 }, + { 66, 58, 182 }, + { 30, 44, 136 }, + { 12, 34, 96 }, + { 2, 20, 47 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 229, 99, 249 }, + { 143, 111, 235 }, + { 46, 109, 192 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 82, 158, 236 }, + { 94, 146, 224 }, + { 25, 117, 191 }, + { 9, 87, 149 }, + { 3, 56, 99 }, + { 1, 33, 57 }, + }, + { /* Coeff Band 2 */ + { 83, 167, 237 }, + { 68, 145, 222 }, + { 10, 103, 177 }, + { 2, 72, 131 }, + { 1, 41, 79 }, + { 1, 20, 39 }, + }, + { /* Coeff Band 3 */ + { 99, 167, 239 }, + { 47, 141, 224 }, + { 10, 104, 178 }, + { 2, 73, 133 }, + { 1, 44, 85 }, + { 1, 22, 47 }, + }, + { /* Coeff Band 4 */ + { 127, 145, 243 }, + { 71, 129, 228 }, + { 17, 93, 177 }, + { 3, 61, 124 }, + { 1, 41, 84 }, + { 1, 21, 52 }, + }, + { /* Coeff Band 5 */ + { 157, 78, 244 }, + { 140, 72, 231 }, + { 69, 58, 184 }, + { 31, 44, 137 }, + { 14, 38, 105 }, + { 8, 23, 61 }, + }, + }, + }, + }, + { /* tx = 8x8 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 125, 34, 187 }, + { 52, 41, 133 }, + { 6, 31, 56 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ 
+ { 37, 109, 153 }, + { 51, 102, 147 }, + { 23, 87, 128 }, + { 8, 67, 101 }, + { 1, 41, 63 }, + { 1, 19, 29 }, + }, + { /* Coeff Band 2 */ + { 31, 154, 185 }, + { 17, 127, 175 }, + { 6, 96, 145 }, + { 2, 73, 114 }, + { 1, 51, 82 }, + { 1, 28, 45 }, + }, + { /* Coeff Band 3 */ + { 23, 163, 200 }, + { 10, 131, 185 }, + { 2, 93, 148 }, + { 1, 67, 111 }, + { 1, 41, 69 }, + { 1, 14, 24 }, + }, + { /* Coeff Band 4 */ + { 29, 176, 217 }, + { 12, 145, 201 }, + { 3, 101, 156 }, + { 1, 69, 111 }, + { 1, 39, 63 }, + { 1, 14, 23 }, + }, + { /* Coeff Band 5 */ + { 57, 192, 233 }, + { 25, 154, 215 }, + { 6, 109, 167 }, + { 3, 78, 118 }, + { 1, 48, 69 }, + { 1, 21, 29 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 202, 105, 245 }, + { 108, 106, 216 }, + { 18, 90, 144 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 33, 172, 219 }, + { 64, 149, 206 }, + { 14, 117, 177 }, + { 5, 90, 141 }, + { 2, 61, 95 }, + { 1, 37, 57 }, + }, + { /* Coeff Band 2 */ + { 33, 179, 220 }, + { 11, 140, 198 }, + { 1, 89, 148 }, + { 1, 60, 104 }, + { 1, 33, 57 }, + { 1, 12, 21 }, + }, + { /* Coeff Band 3 */ + { 30, 181, 221 }, + { 8, 141, 198 }, + { 1, 87, 145 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 20 }, + }, + { /* Coeff Band 4 */ + { 32, 186, 224 }, + { 7, 142, 198 }, + { 1, 86, 143 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 22 }, + }, + { /* Coeff Band 5 */ + { 57, 192, 227 }, + { 20, 143, 204 }, + { 3, 96, 154 }, + { 1, 68, 112 }, + { 1, 42, 69 }, + { 1, 19, 32 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 212, 35, 215 }, + { 113, 47, 169 }, + { 29, 48, 105 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 74, 129, 203 }, + { 106, 120, 203 }, + { 49, 107, 178 }, + { 19, 84, 144 }, + { 4, 50, 84 }, + { 1, 15, 25 }, + }, + { /* Coeff Band 2 */ + { 71, 172, 217 }, + { 44, 141, 209 }, + { 15, 102, 173 }, + { 6, 76, 133 }, + { 2, 51, 89 }, + { 1, 24, 42 }, + }, + { /* Coeff Band 3 
*/ + { 64, 185, 231 }, + { 31, 148, 216 }, + { 8, 103, 175 }, + { 3, 74, 131 }, + { 1, 46, 81 }, + { 1, 18, 30 }, + }, + { /* Coeff Band 4 */ + { 65, 196, 235 }, + { 25, 157, 221 }, + { 5, 105, 174 }, + { 1, 67, 120 }, + { 1, 38, 69 }, + { 1, 15, 30 }, + }, + { /* Coeff Band 5 */ + { 65, 204, 238 }, + { 30, 156, 224 }, + { 7, 107, 177 }, + { 2, 70, 124 }, + { 1, 42, 73 }, + { 1, 18, 34 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 225, 86, 251 }, + { 144, 104, 235 }, + { 42, 99, 181 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 175, 239 }, + { 112, 165, 229 }, + { 29, 136, 200 }, + { 12, 103, 162 }, + { 6, 77, 123 }, + { 2, 53, 84 }, + }, + { /* Coeff Band 2 */ + { 75, 183, 239 }, + { 30, 155, 221 }, + { 3, 106, 171 }, + { 1, 74, 128 }, + { 1, 44, 76 }, + { 1, 17, 28 }, + }, + { /* Coeff Band 3 */ + { 73, 185, 240 }, + { 27, 159, 222 }, + { 2, 107, 172 }, + { 1, 75, 127 }, + { 1, 42, 73 }, + { 1, 17, 29 }, + }, + { /* Coeff Band 4 */ + { 62, 190, 238 }, + { 21, 159, 222 }, + { 2, 107, 172 }, + { 1, 72, 122 }, + { 1, 40, 71 }, + { 1, 18, 32 }, + }, + { /* Coeff Band 5 */ + { 61, 199, 240 }, + { 27, 161, 226 }, + { 4, 113, 180 }, + { 1, 76, 129 }, + { 1, 46, 80 }, + { 1, 23, 41 }, + }, + }, + }, + }, + { /* tx = 16x16 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 7, 27, 153 }, + { 5, 30, 95 }, + { 1, 16, 30 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 50, 75, 127 }, + { 57, 75, 124 }, + { 27, 67, 108 }, + { 10, 54, 86 }, + { 1, 33, 52 }, + { 1, 12, 18 }, + }, + { /* Coeff Band 2 */ + { 43, 125, 151 }, + { 26, 108, 148 }, + { 7, 83, 122 }, + { 2, 59, 89 }, + { 1, 38, 60 }, + { 1, 17, 27 }, + }, + { /* Coeff Band 3 */ + { 23, 144, 163 }, + { 13, 112, 154 }, + { 2, 75, 117 }, + { 1, 50, 81 }, + { 1, 31, 51 }, + { 1, 14, 23 }, + }, + { /* Coeff Band 4 */ + { 18, 162, 185 }, + { 6, 123, 171 }, + { 1, 78, 125 }, + { 1, 51, 86 }, + { 1, 31, 54 }, + { 1, 14, 23 }, + }, + 
{ /* Coeff Band 5 */ + { 15, 199, 227 }, + { 3, 150, 204 }, + { 1, 91, 146 }, + { 1, 55, 95 }, + { 1, 30, 53 }, + { 1, 11, 20 }, + } + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 19, 55, 240 }, + { 19, 59, 196 }, + { 3, 52, 105 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 41, 166, 207 }, + { 104, 153, 199 }, + { 31, 123, 181 }, + { 14, 101, 152 }, + { 5, 72, 106 }, + { 1, 36, 52 }, + }, + { /* Coeff Band 2 */ + { 35, 176, 211 }, + { 12, 131, 190 }, + { 2, 88, 144 }, + { 1, 60, 101 }, + { 1, 36, 60 }, + { 1, 16, 28 }, + }, + { /* Coeff Band 3 */ + { 28, 183, 213 }, + { 8, 134, 191 }, + { 1, 86, 142 }, + { 1, 56, 96 }, + { 1, 30, 53 }, + { 1, 12, 20 }, + }, + { /* Coeff Band 4 */ + { 20, 190, 215 }, + { 4, 135, 192 }, + { 1, 84, 139 }, + { 1, 53, 91 }, + { 1, 28, 49 }, + { 1, 11, 20 }, + }, + { /* Coeff Band 5 */ + { 13, 196, 216 }, + { 2, 137, 192 }, + { 1, 86, 143 }, + { 1, 57, 99 }, + { 1, 32, 56 }, + { 1, 13, 24 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 211, 29, 217 }, + { 96, 47, 156 }, + { 22, 43, 87 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 78, 120, 193 }, + { 111, 116, 186 }, + { 46, 102, 164 }, + { 15, 80, 128 }, + { 2, 49, 76 }, + { 1, 18, 28 }, + }, + { /* Coeff Band 2 */ + { 71, 161, 203 }, + { 42, 132, 192 }, + { 10, 98, 150 }, + { 3, 69, 109 }, + { 1, 44, 70 }, + { 1, 18, 29 }, + }, + { /* Coeff Band 3 */ + { 57, 186, 211 }, + { 30, 140, 196 }, + { 4, 93, 146 }, + { 1, 62, 102 }, + { 1, 38, 65 }, + { 1, 16, 27 }, + }, + { /* Coeff Band 4 */ + { 47, 199, 217 }, + { 14, 145, 196 }, + { 1, 88, 142 }, + { 1, 57, 98 }, + { 1, 36, 62 }, + { 1, 15, 26 }, + }, + { /* Coeff Band 5 */ + { 26, 219, 229 }, + { 5, 155, 207 }, + { 1, 94, 151 }, + { 1, 60, 104 }, + { 1, 36, 62 }, + { 1, 16, 28 }, + } + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 233, 29, 248 }, + { 146, 47, 220 }, + { 43, 52, 140 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + 
{ /* Coeff Band 1 */ + { 100, 163, 232 }, + { 179, 161, 222 }, + { 63, 142, 204 }, + { 37, 113, 174 }, + { 26, 89, 137 }, + { 18, 68, 97 }, + }, + { /* Coeff Band 2 */ + { 85, 181, 230 }, + { 32, 146, 209 }, + { 7, 100, 164 }, + { 3, 71, 121 }, + { 1, 45, 77 }, + { 1, 18, 30 }, + }, + { /* Coeff Band 3 */ + { 65, 187, 230 }, + { 20, 148, 207 }, + { 2, 97, 159 }, + { 1, 68, 116 }, + { 1, 40, 70 }, + { 1, 14, 29 }, + }, + { /* Coeff Band 4 */ + { 40, 194, 227 }, + { 8, 147, 204 }, + { 1, 94, 155 }, + { 1, 65, 112 }, + { 1, 39, 66 }, + { 1, 14, 26 }, + }, + { /* Coeff Band 5 */ + { 16, 208, 228 }, + { 3, 151, 207 }, + { 1, 98, 160 }, + { 1, 67, 117 }, + { 1, 41, 74 }, + { 1, 17, 31 }, + }, + }, + }, + }, + { /* tx = 32x32 */ + { /* block Type 0 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 17, 38, 140 }, + { 7, 34, 80 }, + { 1, 17, 29 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 37, 75, 128 }, + { 41, 76, 128 }, + { 26, 66, 116 }, + { 12, 52, 94 }, + { 2, 32, 55 }, + { 1, 10, 16 }, + }, + { /* Coeff Band 2 */ + { 50, 127, 154 }, + { 37, 109, 152 }, + { 16, 82, 121 }, + { 5, 59, 85 }, + { 1, 35, 54 }, + { 1, 13, 20 }, + }, + { /* Coeff Band 3 */ + { 40, 142, 167 }, + { 17, 110, 157 }, + { 2, 71, 112 }, + { 1, 44, 72 }, + { 1, 27, 45 }, + { 1, 11, 17 }, + }, + { /* Coeff Band 4 */ + { 30, 175, 188 }, + { 9, 124, 169 }, + { 1, 74, 116 }, + { 1, 48, 78 }, + { 1, 30, 49 }, + { 1, 11, 18 }, + }, + { /* Coeff Band 5 */ + { 10, 222, 223 }, + { 2, 150, 194 }, + { 1, 83, 128 }, + { 1, 48, 79 }, + { 1, 27, 45 }, + { 1, 11, 17 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 36, 41, 235 }, + { 29, 36, 193 }, + { 10, 27, 111 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 85, 165, 222 }, + { 177, 162, 215 }, + { 110, 135, 195 }, + { 57, 113, 168 }, + { 23, 83, 120 }, + { 10, 49, 61 }, + }, + { /* Coeff Band 2 */ + { 85, 190, 223 }, + { 36, 139, 200 }, + { 5, 90, 146 }, + { 1, 60, 103 }, + { 1, 38, 65 }, + { 
1, 18, 30 }, + }, + { /* Coeff Band 3 */ + { 72, 202, 223 }, + { 23, 141, 199 }, + { 2, 86, 140 }, + { 1, 56, 97 }, + { 1, 36, 61 }, + { 1, 16, 27 }, + }, + { /* Coeff Band 4 */ + { 55, 218, 225 }, + { 13, 145, 200 }, + { 1, 86, 141 }, + { 1, 57, 99 }, + { 1, 35, 61 }, + { 1, 13, 22 }, + }, + { /* Coeff Band 5 */ + { 15, 235, 212 }, + { 1, 132, 184 }, + { 1, 84, 139 }, + { 1, 57, 97 }, + { 1, 34, 56 }, + { 1, 14, 23 }, + }, + }, + }, + { /* block Type 1 */ + { /* Intra */ + { /* Coeff Band 0 */ + { 181, 21, 201 }, + { 61, 37, 123 }, + { 10, 38, 71 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 47, 106, 172 }, + { 95, 104, 173 }, + { 42, 93, 159 }, + { 18, 77, 131 }, + { 4, 50, 81 }, + { 1, 17, 23 }, + }, + { /* Coeff Band 2 */ + { 62, 147, 199 }, + { 44, 130, 189 }, + { 28, 102, 154 }, + { 18, 75, 115 }, + { 2, 44, 65 }, + { 1, 12, 19 }, + }, + { /* Coeff Band 3 */ + { 55, 153, 210 }, + { 24, 130, 194 }, + { 3, 93, 146 }, + { 1, 61, 97 }, + { 1, 31, 50 }, + { 1, 10, 16 }, + }, + { /* Coeff Band 4 */ + { 49, 186, 223 }, + { 17, 148, 204 }, + { 1, 96, 142 }, + { 1, 53, 83 }, + { 1, 26, 44 }, + { 1, 11, 17 }, + }, + { /* Coeff Band 5 */ + { 13, 217, 212 }, + { 2, 136, 180 }, + { 1, 78, 124 }, + { 1, 50, 83 }, + { 1, 29, 49 }, + { 1, 14, 23 }, + }, + }, + { /* Inter */ + { /* Coeff Band 0 */ + { 197, 13, 247 }, + { 82, 17, 222 }, + { 25, 17, 162 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + { 0, 0, 0 }, + }, + { /* Coeff Band 1 */ + { 126, 186, 247 }, + { 234, 191, 243 }, + { 176, 177, 234 }, + { 104, 158, 220 }, + { 66, 128, 186 }, + { 55, 90, 137 }, + }, + { /* Coeff Band 2 */ + { 111, 197, 242 }, + { 46, 158, 219 }, + { 9, 104, 171 }, + { 2, 65, 125 }, + { 1, 44, 80 }, + { 1, 17, 91 }, + }, + { /* Coeff Band 3 */ + { 104, 208, 245 }, + { 39, 168, 224 }, + { 3, 109, 162 }, + { 1, 79, 124 }, + { 1, 50, 102 }, + { 1, 43, 102 }, + }, + { /* Coeff Band 4 */ + { 84, 220, 246 }, + { 31, 177, 231 }, + { 2, 115, 180 }, + { 1, 79, 134 }, + { 1, 55, 77 }, 
+ { 1, 60, 79 }, + }, + { /* Coeff Band 5 */ + { 43, 243, 240 }, + { 8, 180, 217 }, + { 1, 115, 166 }, + { 1, 84, 121 }, + { 1, 51, 67 }, + { 1, 16, 6 }, + }, + }, + }, + }, + }, + + .skip = { 192, 128, 64 }, + .inter_mode = { + { 2, 173, 34 }, + { 7, 145, 85 }, + { 7, 166, 63 }, + { 7, 94, 66 }, + { 8, 64, 46 }, + { 17, 81, 31 }, + { 25, 29, 30 }, + }, + .interp_filter = { + { 235, 162 }, + { 36, 255 }, + { 34, 3 }, + { 149, 144 }, + }, + .is_inter = { 9, 102, 187, 225 }, + .comp_mode = { 239, 183, 119, 96, 41 }, + .single_ref = { + { 33, 16 }, + { 77, 74 }, + { 142, 142 }, + { 172, 170 }, + { 238, 247 }, + }, + .comp_ref = { 50, 126, 123, 221, 226 }, + .y_mode = { + { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, + { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, + { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, + { 221, 135, 38, 194, 248, 121, 96, 85, 29 }, + }, + .uv_mode = { + { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */, + { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */, + { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */, + { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */, + { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */, + { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */, + { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */, + { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */, + { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */, + { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */ + }, + .partition = { + /* 8x8 -> 4x4 */ + { 199, 122, 141 } /* a/l both not split */, + { 147, 63, 159 } /* a split, l not split */, + { 148, 133, 118 } /* l split, a not split */, + { 121, 104, 114 } /* a/l both split */, + /* 16x16 -> 8x8 */ + { 174, 73, 87 } /* a/l both not split */, + { 92, 41, 83 } /* a split, l not split */, + { 82, 99, 50 } /* l split, a not split */, + { 53, 39, 39 } /* a/l both split */, + /* 32x32 -> 16x16 */ + { 177, 58, 59 } /* a/l both not split */, + { 68, 26, 63 } /* a split, l not split */, + 
{ 52, 79, 25 } /* l split, a not split */, + { 17, 14, 12 } /* a/l both split */, + /* 64x64 -> 32x32 */ + { 222, 34, 30 } /* a/l both not split */, + { 72, 16, 44 } /* a split, l not split */, + { 58, 32, 12 } /* l split, a not split */, + { 10, 7, 6 } /* a/l both split */, + }, + + .mv = { + .joint = { 32, 64, 96 }, + .sign = { 128, 128 }, + .classes = { + { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, + { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 }, + }, + .class0_bit = { 216, 208 }, + .bits = { + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, + }, + .class0_fr = { + { + { 128, 128, 64 }, + { 96, 112, 64 }, + }, + { + { 128, 128, 64 }, + { 96, 112, 64 }, + }, + }, + .fr = { + { 64, 96, 64 }, + { 64, 96, 64 }, + }, + .class0_hp = { 160, 160 }, + .hp = { 128, 128 }, + }, +}; +EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs); + +static u32 fastdiv(u32 dividend, u16 divisor) +{ +#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d))) +#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \ + DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \ + DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \ + DIV_INV(d8), DIV_INV(d9) + + static const u32 inv[] = { + DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5), + DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9), + DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19), + DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29), + DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39), + DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49), + DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59), + DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69), + DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79), + DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89), + DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99), + DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109), + DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119), + DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 
129), + DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139), + DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149), + DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159), + DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169), + DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179), + DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189), + DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199), + DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209), + DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219), + DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229), + DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239), + DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249), + DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253), + DIV_INV(254), DIV_INV(255), DIV_INV(256), + }; + + if (divisor == 0) + return 0; + else if (divisor == 1) + return dividend; + + if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv))) + return dividend; + + return ((u64)dividend * inv[divisor - 2]) >> 32; +} + +/* 6.3.6 inv_recenter_nonneg(v, m) */ +static int inv_recenter_nonneg(int v, int m) +{ + if (v > 2 * m) + return v; + + if (v & 1) + return m - ((v + 1) >> 1); + + return m + (v >> 1); +} + +/* + * part of 6.3.5 inv_remap_prob(deltaProb, prob) + * delta = inv_map_table[deltaProb] done by userspace + */ +static int update_prob(int delta, int prob) +{ + if (!delta) + return prob; + + return prob <= 128 ? 
+ 1 + inv_recenter_nonneg(delta, prob - 1) : + 255 - inv_recenter_nonneg(delta, 255 - prob); +} + +/* Counterpart to 6.3.2 tx_mode_probs() */ +static void update_tx_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { + u8 *p8x8 = probs->tx8[i]; + u8 *p16x16 = probs->tx16[i]; + u8 *p32x32 = probs->tx32[i]; + const u8 *d8x8 = deltas->tx8[i]; + const u8 *d16x16 = deltas->tx16[i]; + const u8 *d32x32 = deltas->tx32[i]; + + p8x8[0] = update_prob(d8x8[0], p8x8[0]); + p16x16[0] = update_prob(d16x16[0], p16x16[0]); + p16x16[1] = update_prob(d16x16[1], p16x16[1]); + p32x32[0] = update_prob(d32x32[0], p32x32[0]); + p32x32[1] = update_prob(d32x32[1], p32x32[1]); + p32x32[2] = update_prob(d32x32[2], p32x32[2]); + } +} + +#define BAND_6(band) ((band) == 0 ? 3 : 6) + +static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3]) +{ + int l, m, n; + + for (l = 0; l < 6; l++) + for (m = 0; m < BAND_6(l); m++) { + u8 *p = probs[l][m]; + const u8 *d = deltas[l][m]; + + for (n = 0; n < 3; n++) + p[n] = update_prob(d[n], p[n]); + } +} + +/* Counterpart to 6.3.7 read_coef_probs() */ +static void update_coef_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params) +{ + int i, j, k; + + for (i = 0; i < ARRAY_SIZE(probs->coef); i++) { + for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) + for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) + update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]); + + if (deltas->tx_mode == i) + break; + } +} + +/* Counterpart to 6.3.8 read_skip_prob() */ +static void update_skip_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(probs->skip); i++) + probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]); +} + +/* Counterpart to 6.3.9 read_inter_mode_probs() */ 
static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs,
				    const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) {
		u8 *p = probs->inter_mode[i];
		const u8 *d = deltas->inter_mode[i];

		p[0] = update_prob(d[0], p[0]);
		p[1] = update_prob(d[1], p[1]);
		p[2] = update_prob(d[2], p[2]);
	}
}

/* Counterpart to 6.3.10 read_interp_filter_probs() */
static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs,
				       const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) {
		u8 *p = probs->interp_filter[i];
		const u8 *d = deltas->interp_filter[i];

		p[0] = update_prob(d[0], p[0]);
		p[1] = update_prob(d[1], p[1]);
	}
}

/* Counterpart to 6.3.11 read_is_inter_probs() */
static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs,
				  const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
		probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]);
}

/* 6.3.12 frame_reference_mode() done entirely in userspace */

/* Counterpart to 6.3.13 frame_reference_mode_probs() */
static void
update_frame_reference_mode_probs(unsigned int reference_mode,
				  struct v4l2_vp9_frame_context *probs,
				  const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
	int i;

	/* comp_mode probs only exist when the mode is chosen per-block */
	if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT)
		for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
			probs->comp_mode[i] = update_prob(deltas->comp_mode[i],
							  probs->comp_mode[i]);

	/* single-ref probs are used unless every block is compound-predicted */
	if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
		for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) {
			u8 *p = probs->single_ref[i];
			const u8 *d = deltas->single_ref[i];

			p[0] = update_prob(d[0], p[0]);
			p[1] = update_prob(d[1], p[1]);
		}

	/* comp_ref probs are used unless every block is single-predicted */
	if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE)
		for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
			probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]);
}

/* Counterpart to 6.3.14 read_y_mode_probs() */
static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs,
				const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
		for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j)
			probs->y_mode[i][j] =
				update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]);
}

/* Counterpart to 6.3.15 read_partition_probs() */
static void update_partition_probs(struct v4l2_vp9_frame_context *probs,
				   const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
	int i, j;

	/* 4 block sizes x 4 above/left split contexts, stored flat as i * 4 + j */
	for (i = 0; i < 4; i++)
		for (j = 0; j < 4; j++) {
			u8 *p = probs->partition[i * 4 + j];
			const u8 *d = deltas->partition[i * 4 + j];

			p[0] = update_prob(d[0], p[0]);
			p[1] = update_prob(d[1], p[1]);
			p[2] = update_prob(d[2], p[2]);
		}
}

/*
 * MV probs are replaced outright rather than inv-remapped: a zero delta is the
 * "not updated" sentinel, any non-zero delta is the new probability value
 * (already decoded by userspace).
 */
static inline int update_mv_prob(int delta, int prob)
{
	if (!delta)
		return prob;

	return delta;
}

/* Counterpart to 6.3.16 mv_probs() */
static void update_mv_probs(struct v4l2_vp9_frame_context *probs,
			    const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
			    const struct v4l2_ctrl_vp9_frame *dec_params)
{
	u8 *p = probs->mv.joint;
	const u8 *d = deltas->mv.joint;
	unsigned int i, j;

	p[0] = update_mv_prob(d[0], p[0]);
	p[1] = update_mv_prob(d[1], p[1]);
	p[2] = update_mv_prob(d[2], p[2]);

	/* one iteration per MV component (row/col) */
	for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
		p = probs->mv.sign;
		d = deltas->mv.sign;
		p[i] = update_mv_prob(d[i], p[i]);

		p = probs->mv.classes[i];
		d = deltas->mv.classes[i];
		for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++)
			p[j] = update_mv_prob(d[j], p[j]);

		p = probs->mv.class0_bit;
		d = deltas->mv.class0_bit;
		p[i] = update_mv_prob(d[i], p[i]);

		p = probs->mv.bits[i];
		d = deltas->mv.bits[i];
		for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
			p[j] = update_mv_prob(d[j], p[j]);

		for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) {
			p = probs->mv.class0_fr[i][j];
			d = deltas->mv.class0_fr[i][j];

			p[0] = update_mv_prob(d[0], p[0]);
			p[1] = update_mv_prob(d[1], p[1]);
			p[2] = update_mv_prob(d[2], p[2]);
		}

		p = probs->mv.fr[i];
		d = deltas->mv.fr[i];
		for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++)
			p[j] = update_mv_prob(d[j], p[j]);

		/* high-precision (1/8 pel) probs only exist when the frame allows them */
		if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) {
			p = probs->mv.class0_hp;
			d = deltas->mv.class0_hp;
			p[i] = update_mv_prob(d[i], p[i]);

			p = probs->mv.hp;
			d = deltas->mv.hp;
			p[i] = update_mv_prob(d[i], p[i]);
		}
	}
}

/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */
void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
			      const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
			      const struct v4l2_ctrl_vp9_frame *dec_params)
{
	if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT)
		update_tx_probs(probs, deltas);

	update_coef_probs(probs, deltas, dec_params);

	update_skip_probs(probs, deltas);

	/* key / intra-only frames carry no inter-specific syntax: stop here */
	if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
	    dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY)
		return;

	update_inter_mode_probs(probs, deltas);

	if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
		update_interp_filter_probs(probs, deltas);

	update_is_inter_probs(probs, deltas);

	update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas);

	update_y_mode_probs(probs, deltas);

	update_partition_probs(probs, deltas);

	update_mv_probs(probs, deltas, dec_params);
}
EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs);

/*
 * Reset the frame contexts as required for the upcoming frame and return the
 * frame context index the decoder must use (0 after any reset).
 */
u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
			    struct v4l2_vp9_frame_context *frame_context)
{
	int i;

	u8 fctx_idx = dec_params->frame_context_idx;

	if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
	    dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY ||
	    dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) {
		/*
		 * setup_past_independence()
		 * We do nothing here. Instead of storing default probs in some intermediate
		 * location and then copying from that location to appropriate contexts
		 * in save_probs() below, we skip that step and save default probs directly
		 * to appropriate contexts.
		 */
		if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
		    dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT ||
		    dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL)
			for (i = 0; i < 4; ++i)
				/* save_probs(i) */
				memcpy(&frame_context[i], &v4l2_vp9_default_probs,
				       sizeof(v4l2_vp9_default_probs));
		else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC)
			/* save_probs(fctx_idx) */
			memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs,
			       sizeof(v4l2_vp9_default_probs));
		fctx_idx = 0;
	}

	return fctx_idx;
}
EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx);

/* 8.4.1 Merge prob process */
static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor)
{
	u32 den, prob, count, factor;

	den = ct0 + ct1;
	if (!den) {
		/*
		 * prob = 128, count = 0, update_factor = 0
		 * Round2's argument: pre_prob * 256
		 * (pre_prob * 256 + 128) >> 8 == pre_prob
		 */
		return pre_prob;
	}

	/* rounded ct0/den scaled to [1, 255] */
	prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255);
	count = min_t(u32, den, count_sat);
	factor = fastdiv(max_update_factor * count, count_sat);

	/*
	 * Round2(pre_prob * (256 - factor) + prob * factor, 8)
	 * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8)
	 * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8)
	 */
	return pre_prob + (((prob - pre_prob) * factor + 128) >> 8);
}

/* merge_prob() with the fixed saturation/update-factor pair used for non-coef probs */
static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1)
{
	return merge_prob(pre_prob, ct0, ct1, 20, 128);
}

/* 8.4.2 Merge probs process */
/*
 * merge_probs() is a recursive function in the spec. We avoid recursion in the kernel.
 * That said, the "tree" parameter of merge_probs() controls how deep the recursion goes.
 * It turns out that in all cases the recursive calls boil down to a short-ish series
 * of merge_prob() invocations (note no "s").
 *
 * Variant A
 * ---------
 * merge_probs(small_token_tree, 2):
 *	merge_prob(p[1], c[0], c[1] + c[2])
 *	merge_prob(p[2], c[1], c[2])
 *
 * Variant B
 * ---------
 * merge_probs(binary_tree, 0) or
 * merge_probs(tx_size_8_tree, 0):
 *	merge_prob(p[0], c[0], c[1])
 *
 * Variant C
 * ---------
 * merge_probs(inter_mode_tree, 0):
 *	merge_prob(p[0], c[2], c[1] + c[0] + c[3])
 *	merge_prob(p[1], c[0], c[1] + c[3])
 *	merge_prob(p[2], c[1], c[3])
 *
 * Variant D
 * ---------
 * merge_probs(intra_mode_tree, 0):
 *	merge_prob(p[0], c[0], c[1] + ... + c[9])
 *	merge_prob(p[1], c[9], c[1] + ... + c[8])
 *	merge_prob(p[2], c[1], c[2] + ... + c[8])
 *	merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7])
 *	merge_prob(p[4], c[2], c[4] + c[5])
 *	merge_prob(p[5], c[4], c[5])
 *	merge_prob(p[6], c[3], c[8] + c[6] + c[7])
 *	merge_prob(p[7], c[8], c[6] + c[7])
 *	merge_prob(p[8], c[6], c[7])
 *
 * Variant E
 * ---------
 * merge_probs(partition_tree, 0) or
 * merge_probs(tx_size_32_tree, 0) or
 * merge_probs(mv_joint_tree, 0) or
 * merge_probs(mv_fr_tree, 0):
 *	merge_prob(p[0], c[0], c[1] + c[2] + c[3])
 *	merge_prob(p[1], c[1], c[2] + c[3])
 *	merge_prob(p[2], c[2], c[3])
 *
 * Variant F
 * ---------
 * merge_probs(interp_filter_tree, 0) or
 * merge_probs(tx_size_16_tree, 0):
 *	merge_prob(p[0], c[0], c[1] + c[2])
 *	merge_prob(p[1], c[1], c[2])
 *
 * Variant G
 * ---------
 * merge_probs(mv_class_tree, 0):
 *	merge_prob(p[0], c[0], c[1] + ... + c[10])
 *	merge_prob(p[1], c[1], c[2] + ... + c[10])
 *	merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10])
 *	merge_prob(p[3], c[2], c[3])
 *	merge_prob(p[4], c[4] + c[5], c[6] + ... + c[10])
 *	merge_prob(p[5], c[4], c[5])
 *	merge_prob(p[6], c[6], c[7] + ... + c[10])
 *	merge_prob(p[7], c[7] + c[8], c[9] + c[10])
 *	merge_prob(p[8], c[7], c[8])
 *	merge_prob(p[9], c[9], c[10])
 */

static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
{
	p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor);
	p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor);
}

static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
{
	p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor);
}

static inline void merge_probs_variant_c(u8 *p, const u32 *c)
{
	p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]);
	p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]);
	p[2] = noncoef_merge_prob(p[2], c[1], c[3]);
}

static void merge_probs_variant_d(u8 *p, const u32 *c)
{
	u32 sum = 0, s2;

	/* running sum shrinks as we walk down the intra_mode_tree */
	sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];

	p[0] = noncoef_merge_prob(p[0], c[0], sum);
	sum -= c[9];
	p[1] = noncoef_merge_prob(p[1], c[9], sum);
	sum -= c[1];
	p[2] = noncoef_merge_prob(p[2], c[1], sum);
	s2 = c[2] + c[4] + c[5];
	sum -= s2;
	p[3] = noncoef_merge_prob(p[3], s2, sum);
	s2 -= c[2];
	p[4] = noncoef_merge_prob(p[4], c[2], s2);
	p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
	sum -= c[3];
	p[6] = noncoef_merge_prob(p[6], c[3], sum);
	sum -= c[8];
	p[7] = noncoef_merge_prob(p[7], c[8], sum);
	p[8] = noncoef_merge_prob(p[8], c[6], c[7]);
}

static inline void merge_probs_variant_e(u8 *p, const u32 *c)
{
	p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]);
	p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]);
	p[2] = noncoef_merge_prob(p[2], c[2], c[3]);
}

static inline void merge_probs_variant_f(u8 *p, const u32 *c)
{
	p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]);
	p[1] = noncoef_merge_prob(p[1], c[1], c[2]);
}

static void merge_probs_variant_g(u8 *p, const u32 *c)
{
	u32 sum;

	/* running sum shrinks as we walk down the mv_class_tree */
	sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
	p[0] = noncoef_merge_prob(p[0], c[0], sum);
	sum -= c[1];
	p[1] = noncoef_merge_prob(p[1], c[1], sum);
	sum -= c[2] + c[3];
	p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum);
	p[3] = noncoef_merge_prob(p[3], c[2], c[3]);
	sum -= c[4] + c[5];
	p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum);
	p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
	sum -= c[6];
	p[6] = noncoef_merge_prob(p[6], c[6], sum);
	p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]);
	p[8] = noncoef_merge_prob(p[8], c[7], c[8]);
	p[9] = noncoef_merge_prob(p[9], c[9], c[10]);
}

/* 8.4.3 Coefficient probability adaptation process */
static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor)
{
	merge_probs_variant_a(p, c, 24, update_factor);
}

static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor)
{
	merge_probs_variant_b(p, c, 24, update_factor);
}

/* Adapt one (tx size, plane type, ref type) slice of the coef prob tables. */
static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k,
			 struct v4l2_vp9_frame_context *probs,
			 const struct v4l2_vp9_frame_symbol_counts *counts,
			 u32 uf)
{
	s32 l, m;

	for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) {
		for (m = 0; m < BAND_6(l); m++) {
			u8 *p = probs->coef[i][j][k][l][m];
			/* eob[...][0] is total count, eob[...][1] is "more coefs" count */
			const u32 counts_more_coefs[2] = {
				*counts->eob[i][j][k][l][m][1],
				*counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1],
			};

			adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf);
			adapt_probs_variant_b_coef(p, counts_more_coefs, uf);
		}
	}
}

static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
			      const struct v4l2_vp9_frame_symbol_counts *counts,
			      unsigned int uf)
{
	unsigned int i, j, k;

	for (i = 0; i < ARRAY_SIZE(probs->coef); i++)
		for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
			for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
				_adapt_coeff(i, j, k, probs, counts, uf);
}

+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + bool use_128, + bool frame_is_intra) +{ + if (frame_is_intra) { + _adapt_coef_probs(probs, counts, 112); + } else { + if (use_128) + _adapt_coef_probs(probs, counts, 128); + else + _adapt_coef_probs(probs, counts, 112); + } +} +EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs); + +/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */ +static inline void adapt_probs_variant_b(u8 *p, const u32 *c) +{ + merge_probs_variant_b(p, c, 20, 128); +} + +static inline void adapt_probs_variant_c(u8 *p, const u32 *c) +{ + merge_probs_variant_c(p, c); +} + +static inline void adapt_probs_variant_d(u8 *p, const u32 *c) +{ + merge_probs_variant_d(p, c); +} + +static inline void adapt_probs_variant_e(u8 *p, const u32 *c) +{ + merge_probs_variant_e(p, c); +} + +static inline void adapt_probs_variant_f(u8 *p, const u32 *c) +{ + merge_probs_variant_f(p, c); +} + +static inline void adapt_probs_variant_g(u8 *p, const u32 *c) +{ + merge_probs_variant_g(p, c); +} + +/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */ +static inline u8 adapt_prob(u8 prob, const u32 counts[2]) +{ + return noncoef_merge_prob(prob, counts[0], counts[1]); +} + +/* 8.4.4 Non coefficient probability adaptation process */ +void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + u8 reference_mode, u8 interpolation_filter, u8 tx_mode, + u32 flags) +{ + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) + probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) + probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++) + probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]); + + if (reference_mode != 
V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE) + for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) + for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++) + probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j], + (*counts->single_ref)[i][j]); + + for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) + adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) + adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++) + adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->partition); i++) + adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]); + + for (i = 0; i < ARRAY_SIZE(probs->skip); i++) + probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]); + + if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE) + for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) + adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]); + + if (tx_mode == V4L2_VP9_TX_MODE_SELECT) + for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { + adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]); + adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]); + adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]); + } + + adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint); + + for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { + probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]); + + adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]); + + probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]); + + for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) + probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j], + (*counts->bits)[i][j]); + + for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) + adapt_probs_variant_e(probs->mv.class0_fr[i][j], + (*counts->class0_fp)[i][j]); + + 
adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]); + + if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV)) + continue; + + probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i], + (*counts->class0_hp)[i]); + + probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]); + } +} +EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs); + +bool +v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled, + unsigned int feature, + unsigned int segid) +{ + u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature); + + return !!(feature_enabled[segid] & mask); +} +EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V4L2 VP9 Helpers"); +MODULE_AUTHOR("Andrzej Pietrasiewicz "); diff --git a/6.18.0/include-overrides/media/cadence/cdns-csi2rx.h b/6.18.0/include-overrides/media/cadence/cdns-csi2rx.h new file mode 100644 index 0000000..782d03f --- /dev/null +++ b/6.18.0/include-overrides/media/cadence/cdns-csi2rx.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _CDNS_CSI2RX_H +#define _CDNS_CSI2RX_H + +#include + +/** + * cdns_csi2rx_negotiate_ppc - Negotiate pixel-per-clock on output interface + * + * @subdev: point to &struct v4l2_subdev + * @pad: pad number of the source pad + * @ppc: pointer to requested pixel-per-clock value + * + * Returns 0 on success, negative error code otherwise. + */ +int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad, + u8 *ppc); + +#endif diff --git a/6.18.0/include-overrides/media/cec-notifier.h b/6.18.0/include-overrides/media/cec-notifier.h new file mode 100644 index 0000000..b1c8397 --- /dev/null +++ b/6.18.0/include-overrides/media/cec-notifier.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-notifier.h - notify CEC drivers of physical address changes + * + * Copyright 2016 Russell King. + * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#ifndef LINUX_CEC_NOTIFIER_H +#define LINUX_CEC_NOTIFIER_H + +#include +#include + +struct device; +struct edid; +struct cec_adapter; +struct cec_notifier; + +#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) + +/** + * cec_notifier_conn_register - find or create a new cec_notifier for the given + * HDMI device and connector tuple. + * @hdmi_dev: HDMI device that sends the events. + * @port_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @conn_info: the connector info from which the event occurs (may be NULL) + * + * If a notifier for device @dev and connector @port_name already exists, then + * increase the refcount and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info); + +/** + * cec_notifier_conn_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + */ +void cec_notifier_conn_unregister(struct cec_notifier *n); + +/** + * cec_notifier_cec_adap_register - find or create a new cec_notifier for the + * given device. + * @hdmi_dev: HDMI device that sends the events. + * @port_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @adap: the cec adapter that registered this notifier. + * + * If a notifier for device @dev and connector @port_name already exists, then + * increase the refcount and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. 
+ */ +struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap); + +/** + * cec_notifier_cec_adap_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + * @adap: the cec adapter that registered this notifier. + */ +void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap); + +/** + * cec_notifier_set_phys_addr - set a new physical address. + * @n: the CEC notifier + * @pa: the CEC physical address + * + * Set a new CEC physical address. + * Does nothing if @n == NULL. + */ +void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa); + +/** + * cec_notifier_set_phys_addr_from_edid - set parse the PA from the EDID. + * @n: the CEC notifier + * @edid: the struct edid pointer + * + * Parses the EDID to obtain the new CEC physical address and set it. + * Does nothing if @n == NULL. + */ +void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid); + +/** + * cec_notifier_parse_hdmi_phandle - find the hdmi device from "hdmi-phandle" + * @dev: the device with the "hdmi-phandle" device tree property + * + * Returns the device pointer referenced by the "hdmi-phandle" property. + * Note that the refcount of the returned device is not incremented. + * This device pointer is only used as a key value in the notifier + * list, but it is never accessed by the CEC driver. 
+ */ +struct device *cec_notifier_parse_hdmi_phandle(struct device *dev); + +#else + +static inline struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info) +{ + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; +} + +static inline void cec_notifier_conn_unregister(struct cec_notifier *n) +{ +} + +static inline struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap) +{ + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; +} + +static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap) +{ +} + +static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) +{ +} + +static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid) +{ +} + +static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +#endif + +/** + * cec_notifier_phys_addr_invalidate() - set the physical address to INVALID + * + * @n: the CEC notifier + * + * This is a simple helper function to invalidate the physical + * address. Does nothing if @n == NULL. + */ +static inline void cec_notifier_phys_addr_invalidate(struct cec_notifier *n) +{ + cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID); +} + +#endif diff --git a/6.18.0/include-overrides/media/cec-pin.h b/6.18.0/include-overrides/media/cec-pin.h new file mode 100644 index 0000000..483bc47 --- /dev/null +++ b/6.18.0/include-overrides/media/cec-pin.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-pin.h - low-level CEC pin control + * + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#ifndef LINUX_CEC_PIN_H +#define LINUX_CEC_PIN_H + +#include +#include + +/** + * struct cec_pin_ops - low-level CEC pin operations + * @read: read the CEC pin. Returns > 0 if high, 0 if low, or an error + * if negative. + * @low: drive the CEC pin low. + * @high: stop driving the CEC pin. The pull-up will drive the pin + * high, unless someone else is driving the pin low. + * @enable_irq: optional, enable the interrupt to detect pin voltage changes. + * @disable_irq: optional, disable the interrupt. + * @free: optional. Free any allocated resources. Called when the + * adapter is deleted. + * @status: optional, log status information. + * @read_hpd: optional. Read the HPD pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @read_5v: optional. Read the 5V pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @received: optional. High-level CEC message callback. Allows the driver + * to process CEC messages. + * + * These operations (except for the @received op) are used by the + * cec pin framework to manipulate the CEC pin. + */ +struct cec_pin_ops { + int (*read)(struct cec_adapter *adap); + void (*low)(struct cec_adapter *adap); + void (*high)(struct cec_adapter *adap); + bool (*enable_irq)(struct cec_adapter *adap); + void (*disable_irq)(struct cec_adapter *adap); + void (*free)(struct cec_adapter *adap); + void (*status)(struct cec_adapter *adap, struct seq_file *file); + int (*read_hpd)(struct cec_adapter *adap); + int (*read_5v)(struct cec_adapter *adap); + + /* High-level CEC message callback */ + int (*received)(struct cec_adapter *adap, struct cec_msg *msg); +}; + +/** + * cec_pin_changed() - update pin state from interrupt + * + * @adap: pointer to the cec adapter + * @value: when true the pin is high, otherwise it is low + * + * If changes of the CEC voltage are detected via an interrupt, then + * cec_pin_changed is called from the interrupt with the new value. 
+ */ +void cec_pin_changed(struct cec_adapter *adap, bool value); + +/** + * cec_pin_allocate_adapter() - allocate a pin-based cec adapter + * + * @pin_ops: low-level pin operations + * @priv: will be stored in adap->priv and can be used by the adapter ops. + * Use cec_get_drvdata(adap) to get the priv pointer. + * @name: the name of the CEC adapter. Note: this name will be copied. + * @caps: capabilities of the CEC adapter. This will be ORed with + * CEC_CAP_MONITOR_ALL and CEC_CAP_MONITOR_PIN. + * + * Allocate a cec adapter using the cec pin framework. + * + * Return: a pointer to the cec adapter or an error pointer + */ +struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops, + void *priv, const char *name, u32 caps); + +#endif diff --git a/6.18.0/include-overrides/media/cec.h b/6.18.0/include-overrides/media/cec.h new file mode 100644 index 0000000..0c8e861 --- /dev/null +++ b/6.18.0/include-overrides/media/cec.h @@ -0,0 +1,597 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec - HDMI Consumer Electronics Control support header + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _MEDIA_CEC_H +#define _MEDIA_CEC_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \ + CEC_CAP_PASSTHROUGH | CEC_CAP_RC) + +/** + * struct cec_devnode - cec device node + * @dev: cec device + * @cdev: cec character device + * @minor: device node minor number + * @lock: lock to serialize open/release and registration + * @registered: the device was correctly registered + * @unregistered: the device was unregistered + * @lock_fhs: lock to control access to @fhs + * @fhs: the list of open filehandles (cec_fh) + * + * This structure represents a cec-related device node. + * + * To add or remove filehandles from @fhs the @lock must be taken first, + * followed by @lock_fhs. 
It is safe to access @fhs if either lock is held. + * + * The @parent is a physical device. It must be set by core or device drivers + * before registering the node. + */ +struct cec_devnode { + /* sysfs */ + struct device dev; + struct cdev cdev; + + /* device info */ + int minor; + /* serialize open/release and registration */ + struct mutex lock; + bool registered; + bool unregistered; + /* protect access to fhs */ + struct mutex lock_fhs; + struct list_head fhs; +}; + +struct cec_adapter; +struct cec_data; +struct cec_pin; +struct cec_notifier; + +struct cec_data { + struct list_head list; + struct list_head xfer_list; + struct cec_adapter *adap; + struct cec_msg msg; + u8 match_len; + u8 match_reply[5]; + struct cec_fh *fh; + struct delayed_work work; + struct completion c; + u8 attempts; + bool blocking; + bool completed; +}; + +struct cec_msg_entry { + struct list_head list; + struct cec_msg msg; +}; + +struct cec_event_entry { + struct list_head list; + struct cec_event ev; +}; + +#define CEC_NUM_CORE_EVENTS 2 +#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH + +struct cec_fh { + struct list_head list; + struct list_head xfer_list; + struct cec_adapter *adap; + u8 mode_initiator; + u8 mode_follower; + + /* Events */ + wait_queue_head_t wait; + struct mutex lock; + struct list_head events[CEC_NUM_EVENTS]; /* queued events */ + u16 queued_events[CEC_NUM_EVENTS]; + unsigned int total_queued_events; + struct cec_event_entry core_events[CEC_NUM_CORE_EVENTS]; + struct list_head msgs; /* queued messages */ + unsigned int queued_msgs; +}; + +#define CEC_SIGNAL_FREE_TIME_RETRY 3 +#define CEC_SIGNAL_FREE_TIME_NEW_INITIATOR 5 +#define CEC_SIGNAL_FREE_TIME_NEXT_XFER 7 + +/* The nominal data bit period is 2.4 ms */ +#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400) + +struct cec_adap_ops { + /* Low-level callbacks, called with adap->lock held */ + int (*adap_enable)(struct cec_adapter *adap, bool enable); + int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool 
enable); + int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable); + int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr); + void (*adap_unconfigured)(struct cec_adapter *adap); + int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg); + void (*adap_nb_transmit_canceled)(struct cec_adapter *adap, + const struct cec_msg *msg); + void (*adap_status)(struct cec_adapter *adap, struct seq_file *file); + void (*adap_free)(struct cec_adapter *adap); + + /* Error injection callbacks, called without adap->lock held */ + int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf); + bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line); + + /* High-level CEC message callback, called without adap->lock held */ + void (*configured)(struct cec_adapter *adap); + int (*received)(struct cec_adapter *adap, struct cec_msg *msg); +}; + +/* + * The minimum message length you can receive (excepting poll messages) is 2. + * With a transfer rate of at most 36 bytes per second this makes 18 messages + * per second worst case. + * + * We queue at most 3 seconds worth of received messages. The CEC specification + * requires that messages are replied to within a second, so 3 seconds should + * give more than enough margin. Since most messages are actually more than 2 + * bytes, this is in practice a lot more than 3 seconds. + */ +#define CEC_MAX_MSG_RX_QUEUE_SZ (18 * 3) + +/* + * The transmit queue is limited to 1 second worth of messages (worst case). + * Messages can be transmitted by userspace and kernel space. But for both it + * makes no sense to have a lot of messages queued up. One second seems + * reasonable. 
+ */ +#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) + +/** + * struct cec_adapter - cec adapter structure + * @owner: module owner + * @name: name of the CEC adapter + * @devnode: device node for the /dev/cecX device + * @lock: mutex controlling access to this structure + * @rc: remote control device + * @transmit_queue: queue of pending transmits + * @transmit_queue_sz: number of pending transmits + * @wait_queue: queue of transmits waiting for a reply + * @transmitting: CEC messages currently being transmitted + * @transmit_in_progress: true if a transmit is in progress + * @transmit_in_progress_aborted: true if a transmit is in progress is to be + * aborted. This happens if the logical address is + * invalidated while the transmit is ongoing. In that + * case the transmit will finish, but will not retransmit + * and be marked as ABORTED. + * @xfer_timeout_ms: the transfer timeout in ms. + * If 0, then timeout after 2100 ms. + * @kthread_config: kthread used to configure a CEC adapter + * @config_completion: used to signal completion of the config kthread + * @kthread: main CEC processing thread + * @kthread_waitq: main CEC processing wait_queue + * @ops: cec adapter ops + * @priv: cec driver's private data + * @capabilities: cec adapter capabilities + * @available_log_addrs: maximum number of available logical addresses + * @phys_addr: the current physical address + * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high + * in order to transmit or receive CEC messages. This is usually a HW + * limitation. + * @is_enabled: the CEC adapter is enabled + * @is_claiming_log_addrs: true if cec_claim_log_addrs() is running + * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) + * @must_reconfigure: while configuring, the PA changed, so reclaim LAs + * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) + * @cec_pin_is_high: if true then the CEC pin is high. Only used with the + * CEC pin framework. 
+ * @adap_controls_phys_addr: if true, then the CEC adapter controls the + * physical address, i.e. the CEC hardware can detect HPD changes and + * read the EDID and is not dependent on an external HDMI driver. + * Drivers that need this can set this field to true after the + * cec_allocate_adapter() call. + * @last_initiator: the initiator of the last transmitted message. + * @monitor_all_cnt: number of filehandles monitoring all msgs + * @monitor_pin_cnt: number of filehandles monitoring pin changes + * @follower_cnt: number of filehandles in follower mode + * @cec_follower: filehandle of the exclusive follower + * @cec_initiator: filehandle of the exclusive initiator + * @passthrough: if true, then the exclusive follower is in + * passthrough mode. + * @log_addrs: current logical addresses + * @conn_info: current connector info + * @tx_timeout_cnt: count the number of Timed Out transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_cnt: count the number of Low Drive transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_error_cnt: count the number of Error transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_arb_lost_cnt: count the number of Arb Lost transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the + * adapter was enabled. Used to avoid flooding the kernel + * log if this happens a lot. + * @tx_error_log_cnt: number of logged Error transmits since the adapter was + * enabled. Used to avoid flooding the kernel log if this + * happens a lot. + * @notifier: CEC notifier + * @pin: CEC pin status struct + * @cec_dir: debugfs cec directory + * @sequence: transmit sequence counter + * @input_phys: remote control input_phys name + * + * This structure represents a cec adapter. 
+ */ +struct cec_adapter { + struct module *owner; + char name[32]; + struct cec_devnode devnode; + struct mutex lock; + struct rc_dev *rc; + + struct list_head transmit_queue; + unsigned int transmit_queue_sz; + struct list_head wait_queue; + struct cec_data *transmitting; + bool transmit_in_progress; + bool transmit_in_progress_aborted; + unsigned int xfer_timeout_ms; + + struct task_struct *kthread_config; + struct completion config_completion; + + struct task_struct *kthread; + wait_queue_head_t kthread_waitq; + + const struct cec_adap_ops *ops; + void *priv; + u32 capabilities; + u8 available_log_addrs; + + u16 phys_addr; + bool needs_hpd; + bool is_enabled; + bool is_claiming_log_addrs; + bool is_configuring; + bool must_reconfigure; + bool is_configured; + bool cec_pin_is_high; + bool adap_controls_phys_addr; + u8 last_initiator; + u32 monitor_all_cnt; + u32 monitor_pin_cnt; + u32 follower_cnt; + struct cec_fh *cec_follower; + struct cec_fh *cec_initiator; + bool passthrough; + struct cec_log_addrs log_addrs; + struct cec_connector_info conn_info; + + u32 tx_timeout_cnt; + u32 tx_low_drive_cnt; + u32 tx_error_cnt; + u32 tx_arb_lost_cnt; + u32 tx_low_drive_log_cnt; + u32 tx_error_log_cnt; + +#ifdef CONFIG_CEC_NOTIFIER + struct cec_notifier *notifier; +#endif +#ifdef CONFIG_CEC_PIN + struct cec_pin *pin; +#endif + + struct dentry *cec_dir; + + u32 sequence; + + char input_phys[40]; +}; + +static inline int cec_get_device(struct cec_adapter *adap) +{ + struct cec_devnode *devnode = &adap->devnode; + + /* + * Check if the cec device is available. This needs to be done with + * the devnode->lock held to prevent an open/unregister race: + * without the lock, the device could be unregistered and freed between + * the devnode->registered check and get_device() calls, leading to + * a crash. + */ + mutex_lock(&devnode->lock); + /* + * return ENODEV if the cec device has been removed + * already or if it is not registered anymore. 
+ */ + if (!devnode->registered) { + mutex_unlock(&devnode->lock); + return -ENODEV; + } + /* and increase the device refcount */ + get_device(&devnode->dev); + mutex_unlock(&devnode->lock); + return 0; +} + +static inline void cec_put_device(struct cec_adapter *adap) +{ + put_device(&adap->devnode.dev); +} + +static inline void *cec_get_drvdata(const struct cec_adapter *adap) +{ + return adap->priv; +} + +static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr) +{ + return adap->log_addrs.log_addr_mask & (1 << log_addr); +} + +static inline bool cec_is_sink(const struct cec_adapter *adap) +{ + return adap->phys_addr == 0; +} + +/** + * cec_is_registered() - is the CEC adapter registered? + * + * @adap: the CEC adapter, may be NULL. + * + * Return: true if the adapter is registered, false otherwise. + */ +static inline bool cec_is_registered(const struct cec_adapter *adap) +{ + return adap && adap->devnode.registered; +} + +#define cec_phys_addr_exp(pa) \ + ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf + +struct edid; +struct drm_connector; + +#if IS_REACHABLE(CONFIG_CEC_CORE) +struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, + void *priv, const char *name, u32 caps, u8 available_las); +int cec_register_adapter(struct cec_adapter *adap, struct device *parent); +void cec_unregister_adapter(struct cec_adapter *adap); +void cec_delete_adapter(struct cec_adapter *adap); + +int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs, + bool block); +void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, + bool block); +void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid); +void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info); +int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg, + bool block); + +/* Called by the adapter */ +void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, + u8 
arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, + u8 error_cnt, ktime_t ts); + +static inline void cec_transmit_done(struct cec_adapter *adap, u8 status, + u8 arb_lost_cnt, u8 nack_cnt, + u8 low_drive_cnt, u8 error_cnt) +{ + cec_transmit_done_ts(adap, status, arb_lost_cnt, nack_cnt, + low_drive_cnt, error_cnt, ktime_get()); +} +/* + * Simplified version of cec_transmit_done for hardware that doesn't retry + * failed transmits. So this is always just one attempt in which case + * the status is sufficient. + */ +void cec_transmit_attempt_done_ts(struct cec_adapter *adap, + u8 status, ktime_t ts); + +static inline void cec_transmit_attempt_done(struct cec_adapter *adap, + u8 status) +{ + cec_transmit_attempt_done_ts(adap, status, ktime_get()); +} + +void cec_received_msg_ts(struct cec_adapter *adap, + struct cec_msg *msg, ktime_t ts); + +static inline void cec_received_msg(struct cec_adapter *adap, + struct cec_msg *msg) +{ + cec_received_msg_ts(adap, msg, ktime_get()); +} + +/** + * cec_queue_pin_cec_event() - queue a CEC pin event with a given timestamp. + * + * @adap: pointer to the cec adapter + * @is_high: when true the CEC pin is high, otherwise it is low + * @dropped_events: when true some events were dropped + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, + bool dropped_events, ktime_t ts); + +/** + * cec_queue_pin_hpd_event() - queue a pin event with a given timestamp. + * + * @adap: pointer to the cec adapter + * @is_high: when true the HPD pin is high, otherwise it is low + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts); + +/** + * cec_queue_pin_5v_event() - queue a pin event with a given timestamp. 
+ * + * @adap: pointer to the cec adapter + * @is_high: when true the 5V pin is high, otherwise it is low + * @ts: the timestamp for this event + * + */ +void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts); + +/** + * cec_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. + */ +u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset); + +void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector); + +#else + +static inline int cec_register_adapter(struct cec_adapter *adap, + struct device *parent) +{ + return 0; +} + +static inline void cec_unregister_adapter(struct cec_adapter *adap) +{ +} + +static inline void cec_delete_adapter(struct cec_adapter *adap) +{ +} + +static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, + bool block) +{ +} + +static inline void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid) +{ +} + +static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + if (offset) + *offset = 0; + return CEC_PHYS_ADDR_INVALID; +} + +static inline void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info) +{ +} + +static inline void +cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector) +{ + memset(conn_info, 0, sizeof(*conn_info)); +} + +#endif + +/** + * cec_phys_addr_invalidate() - set the physical address to INVALID + * + * @adap: the CEC adapter + * + * This is a simple helper function to invalidate the physical + * address. 
+ */ +static inline void cec_phys_addr_invalidate(struct cec_adapter *adap) +{ + cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); +} + +/** + * cec_get_edid_spa_location() - find location of the Source Physical Address + * + * @edid: the EDID + * @size: the size of the EDID + * + * This EDID is expected to be a CEA-861 compliant, which means that there are + * at least two blocks and one or more of the extensions blocks are CEA-861 + * blocks. + * + * The returned location is guaranteed to be <= size-2. + * + * This is an inline function since it is used by both CEC and V4L2. + * Ideally this would go in a module shared by both, but it is overkill to do + * that for just a single function. + */ +static inline unsigned int cec_get_edid_spa_location(const u8 *edid, + unsigned int size) +{ + unsigned int blocks = size / 128; + unsigned int block; + u8 d; + + /* Sanity check: at least 2 blocks and a multiple of the block size */ + if (blocks < 2 || size % 128) + return 0; + + /* + * If there are fewer extension blocks than the size, then update + * 'blocks'. It is allowed to have more extension blocks than the size, + * since some hardware can only read e.g. 256 bytes of the EDID, even + * though more blocks are present. The first CEA-861 extension block + * should normally be in block 1 anyway. 
+ */ + if (edid[0x7e] + 1 < blocks) + blocks = edid[0x7e] + 1; + + for (block = 1; block < blocks; block++) { + unsigned int offset = block * 128; + + /* Skip any non-CEA-861 extension blocks */ + if (edid[offset] != 0x02 || edid[offset + 1] != 0x03) + continue; + + /* search Vendor Specific Data Block (tag 3) */ + d = edid[offset + 2] & 0x7f; + /* Check if there are Data Blocks */ + if (d <= 4) + continue; + if (d > 4) { + unsigned int i = offset + 4; + unsigned int end = offset + d; + + /* Note: 'end' is always < 'size' */ + do { + u8 tag = edid[i] >> 5; + u8 len = edid[i] & 0x1f; + + if (tag == 3 && len >= 5 && i + len <= end && + edid[i + 1] == 0x03 && + edid[i + 2] == 0x0c && + edid[i + 3] == 0x00) + return i + 4; + i += len + 1; + } while (i < end); + } + } + return 0; +} + +#endif /* _MEDIA_CEC_H */ diff --git a/6.18.0/include-overrides/media/davinci/vpfe_types.h b/6.18.0/include-overrides/media/davinci/vpfe_types.h new file mode 100644 index 0000000..e65c9a4 --- /dev/null +++ b/6.18.0/include-overrides/media/davinci/vpfe_types.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2008-2009 Texas Instruments Inc + */ +#ifndef _VPFE_TYPES_H +#define _VPFE_TYPES_H + +#ifdef __KERNEL__ + +enum vpfe_pin_pol { + VPFE_PINPOL_POSITIVE, + VPFE_PINPOL_NEGATIVE +}; + +enum vpfe_hw_if_type { + /* BT656 - 8 bit */ + VPFE_BT656, + /* BT1120 - 16 bit */ + VPFE_BT1120, + /* Raw Bayer */ + VPFE_RAW_BAYER, + /* YCbCr - 8 bit with external sync */ + VPFE_YCBCR_SYNC_8, + /* YCbCr - 16 bit with external sync */ + VPFE_YCBCR_SYNC_16, + /* BT656 - 10 bit */ + VPFE_BT656_10BIT +}; + +/* interface description */ +struct vpfe_hw_if_param { + enum vpfe_hw_if_type if_type; + enum vpfe_pin_pol hdpol; + enum vpfe_pin_pol vdpol; +}; + +#endif +#endif diff --git a/6.18.0/include-overrides/media/davinci/vpif_types.h b/6.18.0/include-overrides/media/davinci/vpif_types.h new file mode 100644 index 0000000..6cce1f0 --- /dev/null +++ 
b/6.18.0/include-overrides/media/davinci/vpif_types.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2011 Texas Instruments Inc + */ +#ifndef _VPIF_TYPES_H +#define _VPIF_TYPES_H + +#include + +#define VPIF_CAPTURE_MAX_CHANNELS 2 +#define VPIF_DISPLAY_MAX_CHANNELS 2 + +enum vpif_if_type { + VPIF_IF_BT656, + VPIF_IF_BT1120, + VPIF_IF_RAW_BAYER +}; + +struct vpif_interface { + enum vpif_if_type if_type; + unsigned hd_pol:1; + unsigned vd_pol:1; + unsigned fid_pol:1; +}; + +struct vpif_subdev_info { + const char *name; + struct i2c_board_info board_info; +}; + +struct vpif_output { + struct v4l2_output output; + const char *subdev_name; + u32 input_route; + u32 output_route; +}; + +struct vpif_display_chan_config { + const struct vpif_output *outputs; + int output_count; + bool clip_en; +}; + +struct vpif_display_config { + int (*set_clock)(int, int); + struct vpif_subdev_info *subdevinfo; + int subdev_count; + int i2c_adapter_id; + struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS]; + const char *card_name; +}; + +struct vpif_input { + struct v4l2_input input; + char *subdev_name; + u32 input_route; + u32 output_route; +}; + +struct vpif_capture_chan_config { + struct vpif_interface vpif_if; + struct vpif_input *inputs; + int input_count; +}; + +struct vpif_capture_config { + int (*setup_input_channel_mode)(int); + int (*setup_input_path)(int, const char *); + struct vpif_capture_chan_config chan_config[VPIF_CAPTURE_MAX_CHANNELS]; + struct vpif_subdev_info *subdev_info; + int subdev_count; + int i2c_adapter_id; + const char *card_name; + + struct v4l2_async_connection *asd[VPIF_CAPTURE_MAX_CHANNELS]; + int asd_sizes[VPIF_CAPTURE_MAX_CHANNELS]; +}; +#endif /* _VPIF_TYPES_H */ diff --git a/6.18.0/include-overrides/media/demux.h b/6.18.0/include-overrides/media/demux.h new file mode 100644 index 0000000..bf00a5a --- /dev/null +++ b/6.18.0/include-overrides/media/demux.h @@ -0,0 +1,600 @@ +/* + * demux.h + * + * 
The Kernel Digital TV Demux kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * demux layer. + * + * Copyright (c) 2002 Convergence GmbH + * + * based on code: + * Copyright (c) 2000 Nokia Research Center + * Tampere, FINLAND + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __DEMUX_H +#define __DEMUX_H + +#include +#include +#include +#include +#include + +/* + * Common definitions + */ + +/* + * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter. + */ + +#ifndef DMX_MAX_FILTER_SIZE +#define DMX_MAX_FILTER_SIZE 18 +#endif + +/* + * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed + * filter. + */ + +#ifndef DMX_MAX_SECTION_SIZE +#define DMX_MAX_SECTION_SIZE 4096 +#endif +#ifndef DMX_MAX_SECFEED_SIZE +#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188) +#endif + +/* + * TS packet reception + */ + +/** + * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\) + * + * @TS_PACKET: Send TS packets (188 bytes) to callback (default). + * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload + * (<=184 bytes per packet) to callback + * @TS_DECODER: Send stream to built-in decoder (if present). 
+ * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux + * device, not to the dvr device + */ +enum ts_filter_type { + TS_PACKET = 1, + TS_PAYLOAD_ONLY = 2, + TS_DECODER = 4, + TS_DEMUX = 8, +}; + +/** + * struct dmx_ts_feed - Structure that contains a TS feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @set: sets the TS filter + * @start_filtering: starts TS filtering + * @stop_filtering: stops TS filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. + */ +struct dmx_ts_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + int (*set)(struct dmx_ts_feed *feed, + u16 pid, + int type, + enum dmx_ts_pes pes_type, + ktime_t timeout); + int (*start_filtering)(struct dmx_ts_feed *feed); + int (*stop_filtering)(struct dmx_ts_feed *feed); +}; + +/* + * Section reception + */ + +/** + * struct dmx_section_filter - Structure that describes a section filter + * + * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header + * that will be matched by the section filter + * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits + * specified by @filter_value that will be used on the filter + * match logic. + * @filter_mode: Contains a 16 bytes (128 bits) filter mode. + * @parent: Back-pointer to struct dmx_section_feed. + * @priv: Pointer to private data of the API client. + * + * + * The @filter_mask controls which bits of @filter_value are compared with + * the section headers/payload. On a binary value of 1 in filter_mask, the + * corresponding bits are compared. The filter only accepts sections that are + * equal to filter_value in all the tested bit positions. 
+ */ +struct dmx_section_filter { + u8 filter_value[DMX_MAX_FILTER_SIZE]; + u8 filter_mask[DMX_MAX_FILTER_SIZE]; + u8 filter_mode[DMX_MAX_FILTER_SIZE]; + struct dmx_section_feed *parent; + + void *priv; +}; + +/** + * struct dmx_section_feed - Structure that contains a section feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @check_crc: If non-zero, check the CRC values of filtered sections. + * @set: sets the section filter + * @allocate_filter: This function is used to allocate a section filter on + * the demux. It should only be called when no filtering + * is in progress on this section feed. If a filter cannot + * be allocated, the function fails with -ENOSPC. + * @release_filter: This function releases all the resources of a + * previously allocated section filter. The function + * should not be called while filtering is in progress + * on this section feed. After calling this function, + * the caller should not try to dereference the filter + * pointer. + * @start_filtering: starts section filtering + * @stop_filtering: stops section filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. 
+ */ +struct dmx_section_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + + int check_crc; + + /* private: Used internally at dvb_demux.c */ + u32 crc_val; + + u8 *secbuf; + u8 secbuf_base[DMX_MAX_SECFEED_SIZE]; + u16 secbufp, seclen, tsfeedp; + + /* public: */ + int (*set)(struct dmx_section_feed *feed, + u16 pid, + int check_crc); + int (*allocate_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter **filter); + int (*release_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter *filter); + int (*start_filtering)(struct dmx_section_feed *feed); + int (*stop_filtering)(struct dmx_section_feed *feed); +}; + +/** + * typedef dmx_ts_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered TS packets. + * @buffer1_length: Length of the TS data in buffer1. + * @buffer2: Pointer to the tail of the filtered TS packets, or NULL. + * @buffer2_length: Length of the TS data in buffer2. + * @source: Indicates which TS feed is the source of the callback. + * @buffer_flags: Address where buffer flags are stored. Those are + * used to report discontinuity users via DVB + * memory mapped API, as defined by + * &enum dmx_buffer_flags. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. The function is only called when filtering + * on a TS feed has been enabled using the start_filtering\(\) function at + * the &dmx_demux. + * Any TS packets that match the filter settings are copied to a circular + * buffer. The filtered TS packets are delivered to the client using this + * callback function. + * It is expected that the @buffer1 and @buffer2 callback parameters point to + * addresses within the circular buffer, but other implementations are also + * possible. Note that the called party should not try to free the memory + * the @buffer1 and @buffer2 parameters point to. 
+ * + * When this function is called, the @buffer1 parameter typically points to + * the start of the first undelivered TS packet within a circular buffer. + * The @buffer2 buffer parameter is normally NULL, except when the received + * TS packets have crossed the last address of the circular buffer and + * "wrapped" to the beginning of the buffer. In the latter case the @buffer1 + * parameter would contain an address within the circular buffer, while the + * @buffer2 parameter would contain the first address of the circular buffer. + * The number of bytes delivered with this function (i.e. @buffer1_length + + * @buffer2_length) is usually equal to the value of callback_length parameter + * given in the set() function, with one exception: if a timeout occurs before + * receiving callback_length bytes of TS data, any undelivered packets are + * immediately delivered to the client by calling this function. The timeout + * duration is controlled by the set() function in the TS Feed API. + * + * If a TS packet is received with errors that could not be fixed by the + * TS-level forward error correction (FEC), the Transport_error_indicator + * flag of the TS packet header should be set. The TS packet should not be + * discarded, as the error can possibly be corrected by a higher layer + * protocol. If the called party is slow in processing the callback, it + * is possible that the circular buffer eventually fills up. If this happens, + * the demux driver should discard any TS packets received while the buffer + * is full and return -EOVERFLOW. + * + * The type of data returned to the callback can be selected by the + * &dmx_ts_feed.@set function. The type parameter decides if the raw + * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY) + * should be returned. If additionally the TS_DECODER bit is set the stream + * will also be sent to the hardware MPEG decoder. + * + * Return: + * + * - 0, on success; + * + * - -EOVERFLOW, on buffer overflow. 
+ */ +typedef int (*dmx_ts_cb)(const u8 *buffer1, + size_t buffer1_length, + const u8 *buffer2, + size_t buffer2_length, + struct dmx_ts_feed *source, + u32 *buffer_flags); + +/** + * typedef dmx_section_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered section, e.g. + * within the circular buffer of the demux driver. + * @buffer1_len: Length of the filtered section data in @buffer1, + * including headers and CRC. + * @buffer2: Pointer to the tail of the filtered section data, + * or NULL. Useful to handle the wrapping of a + * circular buffer. + * @buffer2_len: Length of the filtered section data in @buffer2, + * including headers and CRC. + * @source: Indicates which section feed is the source of the + * callback. + * @buffer_flags: Address where buffer flags are stored. Those are + * used to report discontinuity users via DVB + * memory mapped API, as defined by + * &enum dmx_buffer_flags. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. The function is only called when + * filtering of sections has been enabled using the function + * &dmx_ts_feed.@start_filtering. When the demux driver has received a + * complete section that matches at least one section filter, the client + * is notified via this callback function. Normally this function is called + * for each received section; however, it is also possible to deliver + * multiple sections with one callback, for example when the system load + * is high. If an error occurs while receiving a section, this + * function should be called with the corresponding error type set in the + * success field, whether or not there is data to deliver. The Section Feed + * implementation should maintain a circular buffer for received sections. 
+ * However, this is not necessary if the Section Feed API is implemented as + * a client of the TS Feed API, because the TS Feed implementation then + * buffers the received data. The size of the circular buffer can be + * configured using the &dmx_ts_feed.@set function in the Section Feed API. + * If there is no room in the circular buffer when a new section is received, + * the section must be discarded. If this happens, the value of the success + * parameter should be DMX_OVERRUN_ERROR on the next callback. + */ +typedef int (*dmx_section_cb)(const u8 *buffer1, + size_t buffer1_len, + const u8 *buffer2, + size_t buffer2_len, + struct dmx_section_filter *source, + u32 *buffer_flags); + +/* + * DVB Front-End + */ + +/** + * enum dmx_frontend_source - Used to identify the type of frontend + * + * @DMX_MEMORY_FE: The source of the demux is memory. It means that + * the MPEG-TS to be filtered comes from userspace, + * via write() syscall. + * + * @DMX_FRONTEND_0: The source of the demux is a frontend connected + * to the demux. + */ +enum dmx_frontend_source { + DMX_MEMORY_FE, + DMX_FRONTEND_0, +}; + +/** + * struct dmx_frontend - Structure that lists the frontends associated with + * a demux + * + * @connectivity_list: List of front-ends that can be connected to a + * particular demux; + * @source: Type of the frontend. + * + * FIXME: this structure should likely be replaced soon by some + * media-controller based logic. + */ +struct dmx_frontend { + struct list_head connectivity_list; + enum dmx_frontend_source source; +}; + +/* + * MPEG-2 TS Demux + */ + +/** + * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap + * + * @DMX_TS_FILTERING: set if TS filtering is supported; + * @DMX_SECTION_FILTERING: set if section filtering is supported; + * @DMX_MEMORY_BASED_FILTERING: set if write() available. 
+ * + * Those flags are OR'ed in the &dmx_demux.capabilities field + */ +enum dmx_demux_caps { + DMX_TS_FILTERING = 1, + DMX_SECTION_FILTERING = 4, + DMX_MEMORY_BASED_FILTERING = 8, +}; + +/* + * Demux resource type identifier. + */ + +/** + * DMX_FE_ENTRY - Casts elements in the list of registered + * front-ends from the generic type struct list_head + * to the type * struct dmx_frontend + * + * @list: list of struct dmx_frontend + */ +#define DMX_FE_ENTRY(list) \ + list_entry(list, struct dmx_frontend, connectivity_list) + +/** + * struct dmx_demux - Structure that contains the demux capabilities and + * callbacks. + * + * @capabilities: Bitfield of capability flags. + * + * @frontend: Front-end connected to the demux + * + * @priv: Pointer to private data of the API client + * + * @open: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EUSERS, if maximum usage count was reached; + * -EINVAL, on bad parameter. + * + * @close: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. 
+ * It returns: + * 0 on success; + * -ENODEV, if demux was not in use (e. g. no users); + * -EINVAL, on bad parameter. + * + * @write: This function provides the demux driver with a memory buffer + * containing TS packets. Instead of receiving TS packets from the DVB + * front-end, the demux driver software will read packets from memory. + * Any clients of this demux with active TS, PES or Section filters will + * receive filtered data via the Demux callback API (see 0). The function + * returns when all the data in the buffer has been consumed by the demux. + * Demux hardware typically cannot read TS from memory. If this is the + * case, memory-based filtering has to be implemented entirely in software. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @buf function parameter contains a pointer to the TS data in + * kernel-space memory. + * The @count function parameter contains the length of the TS data. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EINTR, if a signal handling is pending; + * -ENODEV, if demux was removed; + * -EINVAL, on bad parameter. + * + * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS + * packets carrying a certain PID. The TS feed normally corresponds to a + * hardware PID filter on the demux chip. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed. 
+ * Any filtering in progress on the TS feed should be stopped before + * calling this function. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource + * for filtering and receiving sections. On platforms with hardware + * support for section filtering, a section feed is directly mapped to + * the demux HW. On other platforms, TS packets are first PID filtered in + * hardware and a hardware section filter then emulated in software. The + * caller obtains an API pointer of type dmx_section_feed_t as an out + * parameter. Using this API the caller can set filtering parameters and + * start receiving sections. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_section_feed: Releases the resources allocated with + * @allocate_section_feed, including allocated filters. Any filtering in + * progress on the section feed should be stopped before calling this + * function. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. 
+ * + * @add_frontend: Registers a connectivity between a demux and a front-end, + * i.e., indicates that the demux can be connected via a call to + * @connect_frontend to use the given front-end as a TS source. The + * client of this function has to allocate dynamic or static memory for + * the frontend structure and initialize its fields before calling this + * function. This function is normally called during the driver + * initialization. The caller must not free the memory of the frontend + * struct before successfully calling @remove_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @remove_frontend: Indicates that the given front-end, registered by a call + * to @add_frontend, can no longer be connected as a TS source by this + * demux. The function should be called when a front-end driver or a demux + * driver is removed from the system. If the front-end is in use, the + * function fails with the return value of -EBUSY. After successfully + * calling this function, the caller can free the memory of the frontend + * struct if it was dynamically allocated before the @add_frontend + * operation. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if the front-end was not found, + * -EINVAL, on bad parameter. + * + * @get_frontends: Provides the APIs of the front-ends that have been + * registered for this demux. Any of the front-ends obtained with this + * call can be used as a parameter for @connect_frontend. 
The include + * file demux.h contains the macro DMX_FE_ENTRY() for converting an + * element of the generic type struct &list_head * to the type + * struct &dmx_frontend *. The caller must not free the memory of any of + * the elements obtained via this function call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns a struct list_head pointer to the list of front-end + * interfaces, or NULL in the case of an empty list. + * + * @connect_frontend: Connects the TS output of the front-end to the input of + * the demux. A demux can only be connected to a front-end registered to + * the demux with the function @add_frontend. It may or may not be + * possible to connect multiple demuxes to the same front-end, depending + * on the capabilities of the HW platform. When not used, the front-end + * should be released by calling @disconnect_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @disconnect_frontend: Disconnects the demux and a front-end previously + * connected by a @connect_frontend call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0, + * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @pids function parameter contains an array with five u16 elements + * where the PIDs will be stored. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. 
+ */ +struct dmx_demux { + enum dmx_demux_caps capabilities; + struct dmx_frontend *frontend; + void *priv; + int (*open)(struct dmx_demux *demux); + int (*close)(struct dmx_demux *demux); + int (*write)(struct dmx_demux *demux, const char __user *buf, + size_t count); + int (*allocate_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed **feed, + dmx_ts_cb callback); + int (*release_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed *feed); + int (*allocate_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed **feed, + dmx_section_cb callback); + int (*release_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed *feed); + int (*add_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*remove_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + struct list_head *(*get_frontends)(struct dmx_demux *demux); + int (*connect_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*disconnect_frontend)(struct dmx_demux *demux); + + int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids); + + /* private: */ + + /* + * Only used at av7110, to read some data from firmware. + * As this was never documented, we have no clue about what's + * there, and its usage on other drivers aren't encouraged. 
+ */ + int (*get_stc)(struct dmx_demux *demux, unsigned int num, + u64 *stc, unsigned int *base); +}; + +#endif /* #ifndef __DEMUX_H */ diff --git a/6.18.0/include-overrides/media/dmxdev.h b/6.18.0/include-overrides/media/dmxdev.h new file mode 100644 index 0000000..63219a6 --- /dev/null +++ b/6.18.0/include-overrides/media/dmxdev.h @@ -0,0 +1,213 @@ +/* + * dmxdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DMXDEV_H_ +#define _DMXDEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/** + * enum dmxdev_type - type of demux filter type. + * + * @DMXDEV_TYPE_NONE: no filter set. + * @DMXDEV_TYPE_SEC: section filter. + * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter. + */ +enum dmxdev_type { + DMXDEV_TYPE_NONE, + DMXDEV_TYPE_SEC, + DMXDEV_TYPE_PES, +}; + +/** + * enum dmxdev_state - state machine for the dmxdev. + * + * @DMXDEV_STATE_FREE: indicates that the filter is freed. + * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMXDEV_STATE_SET: indicates that the filter parameters are set. + * @DMXDEV_STATE_GO: indicates that the filter is running. + * @DMXDEV_STATE_DONE: indicates that a packet was already filtered + * and the filter is now disabled. + * Set only if %DMX_ONESHOT. See + * &dmx_sct_filter_params. 
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition. + */ +enum dmxdev_state { + DMXDEV_STATE_FREE, + DMXDEV_STATE_ALLOCATED, + DMXDEV_STATE_SET, + DMXDEV_STATE_GO, + DMXDEV_STATE_DONE, + DMXDEV_STATE_TIMEDOUT +}; + +/** + * struct dmxdev_feed - digital TV dmxdev feed + * + * @pid: Program ID to be filtered + * @ts: pointer to &struct dmx_ts_feed + * @next: &struct list_head pointing to the next feed. + */ + +struct dmxdev_feed { + u16 pid; + struct dmx_ts_feed *ts; + struct list_head next; +}; + +/** + * struct dmxdev_filter - digital TV dmxdev filter + * + * @filter: a union describing a dmxdev filter. + * Currently used only for section filters. + * @filter.sec: a &struct dmx_section_filter pointer. + * For section filter only. + * @feed: a union describing a dmxdev feed. + * Depending on the filter type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct list_head list. + * For TS and PES feeds. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @params: a union describing dmxdev filter parameters. + * Depending on the filter type, it can be either + * @params.sec or @params.pes. + * @params.sec: a &struct dmx_sct_filter_params embedded struct. + * For section filter only. + * @params.pes: a &struct dmx_pes_filter_params embedded struct. + * For PES filter only. + * @type: type of the dmxdev filter, as defined by &enum dmxdev_type. + * @state: state of the dmxdev filter, as defined by &enum dmxdev_state. + * @dev: pointer to &struct dmxdev. + * @buffer: an embedded &struct dvb_ringbuffer buffer. + * @vb2_ctx: control struct for VB2 handler + * @mutex: protects the access to &struct dmxdev_filter. + * @timer: &struct timer_list embedded timer, used to check for + * feed timeouts. + * Only for section filter. + * @todo: index for the @secheader. + * Only for section filter. + * @secheader: buffer cache to parse the section header. + * Only for section filter. 
+ */ +struct dmxdev_filter { + union { + struct dmx_section_filter *sec; + } filter; + + union { + /* list of TS and PES feeds (struct dmxdev_feed) */ + struct list_head ts; + struct dmx_section_feed *sec; + } feed; + + union { + struct dmx_sct_filter_params sec; + struct dmx_pes_filter_params pes; + } params; + + enum dmxdev_type type; + enum dmxdev_state state; + struct dmxdev *dev; + struct dvb_ringbuffer buffer; + struct dvb_vb2_ctx vb2_ctx; + + struct mutex mutex; + + /* only for sections */ + struct timer_list timer; + int todo; + u8 secheader[3]; +}; + +/** + * struct dmxdev - Describes a digital TV demux device. + * + * @dvbdev: pointer to &struct dvb_device associated with + * the demux device node. + * @dvr_dvbdev: pointer to &struct dvb_device associated with + * the dvr device node. + * @filter: pointer to &struct dmxdev_filter. + * @demux: pointer to &struct dmx_demux. + * @filternum: number of filters. + * @capabilities: demux capabilities as defined by &enum dmx_demux_caps. + * @may_do_mmap: flag used to indicate if the device may do mmap. + * @exit: flag to indicate that the demux is being released. + * @dvr_orig_fe: pointer to &struct dmx_frontend. + * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output. + * @dvr_vb2_ctx: control struct for VB2 handler + * @mutex: protects the usage of this structure. + * @lock: protects access to &dmxdev->filter->data. + */ +struct dmxdev { + struct dvb_device *dvbdev; + struct dvb_device *dvr_dvbdev; + + struct dmxdev_filter *filter; + struct dmx_demux *demux; + + int filternum; + int capabilities; + + unsigned int may_do_mmap:1; + unsigned int exit:1; +#define DMXDEV_CAP_DUPLEX 1 + struct dmx_frontend *dvr_orig_fe; + + struct dvb_ringbuffer dvr_buffer; +#define DVR_BUFFER_SIZE (10*188*1024) + + struct dvb_vb2_ctx dvr_vb2_ctx; + + struct mutex mutex; + spinlock_t lock; +}; + +/** + * dvb_dmxdev_init - initializes a digital TV demux and registers both demux + * and DVR devices. 
+ * + * @dmxdev: pointer to &struct dmxdev. + * @adap: pointer to &struct dvb_adapter. + */ +int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap); + +/** + * dvb_dmxdev_release - releases a digital TV demux and unregisters it. + * + * @dmxdev: pointer to &struct dmxdev. + */ +void dvb_dmxdev_release(struct dmxdev *dmxdev); + +#endif /* _DMXDEV_H_ */ diff --git a/6.18.0/include-overrides/media/drv-intf/cx2341x.h b/6.18.0/include-overrides/media/drv-intf/cx2341x.h new file mode 100644 index 0000000..722f590 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/cx2341x.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + cx23415/6/8 header containing common defines. + + */ + +#ifndef CX2341X_H +#define CX2341X_H + +#include + +enum cx2341x_port { + CX2341X_PORT_MEMORY = 0, + CX2341X_PORT_STREAMING = 1, + CX2341X_PORT_SERIAL = 2 +}; + +enum cx2341x_cap { + CX2341X_CAP_HAS_SLICED_VBI = 1 << 0, + CX2341X_CAP_HAS_TS = 1 << 1, + CX2341X_CAP_HAS_AC3 = 1 << 2, +}; + +struct cx2341x_mpeg_params { + /* misc */ + u32 capabilities; + enum cx2341x_port port; + u16 width; + u16 height; + u16 is_50hz; + + /* stream */ + enum v4l2_mpeg_stream_type stream_type; + enum v4l2_mpeg_stream_vbi_fmt stream_vbi_fmt; + u16 stream_insert_nav_packets; + + /* audio */ + enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq; + enum v4l2_mpeg_audio_encoding audio_encoding; + enum v4l2_mpeg_audio_l2_bitrate audio_l2_bitrate; + enum v4l2_mpeg_audio_ac3_bitrate audio_ac3_bitrate; + enum v4l2_mpeg_audio_mode audio_mode; + enum v4l2_mpeg_audio_mode_extension audio_mode_extension; + enum v4l2_mpeg_audio_emphasis audio_emphasis; + enum v4l2_mpeg_audio_crc audio_crc; + u32 audio_properties; + u16 audio_mute; + + /* video */ + enum v4l2_mpeg_video_encoding video_encoding; + enum v4l2_mpeg_video_aspect video_aspect; + u16 video_b_frames; + u16 video_gop_size; + u16 video_gop_closure; + enum v4l2_mpeg_video_bitrate_mode video_bitrate_mode; + u32 video_bitrate; + 
u32 video_bitrate_peak; + u16 video_temporal_decimation; + u16 video_mute; + u32 video_mute_yuv; + + /* encoding filters */ + enum v4l2_mpeg_cx2341x_video_spatial_filter_mode video_spatial_filter_mode; + u16 video_spatial_filter; + enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type video_luma_spatial_filter_type; + enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type video_chroma_spatial_filter_type; + enum v4l2_mpeg_cx2341x_video_temporal_filter_mode video_temporal_filter_mode; + u16 video_temporal_filter; + enum v4l2_mpeg_cx2341x_video_median_filter_type video_median_filter_type; + u16 video_luma_median_filter_top; + u16 video_luma_median_filter_bottom; + u16 video_chroma_median_filter_top; + u16 video_chroma_median_filter_bottom; +}; + +#define CX2341X_MBOX_MAX_DATA 16 + +extern const u32 cx2341x_mpeg_ctrls[]; +typedef int (*cx2341x_mbox_func)(void *priv, u32 cmd, int in, int out, + u32 data[CX2341X_MBOX_MAX_DATA]); +int cx2341x_update(void *priv, cx2341x_mbox_func func, + const struct cx2341x_mpeg_params *old, + const struct cx2341x_mpeg_params *new); +int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params, + struct v4l2_queryctrl *qctrl); +const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id); +int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy, + struct v4l2_ext_controls *ctrls, unsigned int cmd); +void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p); +void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix); + +struct cx2341x_handler; + +struct cx2341x_handler_ops { + /* needed for the video clock freq */ + int (*s_audio_sampling_freq)(struct cx2341x_handler *hdl, u32 val); + /* needed for dualwatch */ + int (*s_audio_mode)(struct cx2341x_handler *hdl, u32 val); + /* needed for setting up the video resolution */ + int (*s_video_encoding)(struct cx2341x_handler *hdl, u32 val); + /* needed for setting up the sliced vbi insertion data structures */ + int 
(*s_stream_vbi_fmt)(struct cx2341x_handler *hdl, u32 val); +}; + +struct cx2341x_handler { + u32 capabilities; + enum cx2341x_port port; + u16 width; + u16 height; + u16 is_50hz; + u32 audio_properties; + + struct v4l2_ctrl_handler hdl; + void *priv; + cx2341x_mbox_func func; + const struct cx2341x_handler_ops *ops; + + struct v4l2_ctrl *stream_vbi_fmt; + + struct { + /* audio cluster */ + struct v4l2_ctrl *audio_sampling_freq; + struct v4l2_ctrl *audio_encoding; + struct v4l2_ctrl *audio_l2_bitrate; + struct v4l2_ctrl *audio_mode; + struct v4l2_ctrl *audio_mode_extension; + struct v4l2_ctrl *audio_emphasis; + struct v4l2_ctrl *audio_crc; + struct v4l2_ctrl *audio_ac3_bitrate; + }; + + struct { + /* video gop cluster */ + struct v4l2_ctrl *video_b_frames; + struct v4l2_ctrl *video_gop_size; + }; + + struct { + /* stream type cluster */ + struct v4l2_ctrl *stream_type; + struct v4l2_ctrl *video_encoding; + struct v4l2_ctrl *video_bitrate_mode; + struct v4l2_ctrl *video_bitrate; + struct v4l2_ctrl *video_bitrate_peak; + }; + + struct { + /* video mute cluster */ + struct v4l2_ctrl *video_mute; + struct v4l2_ctrl *video_mute_yuv; + }; + + struct { + /* video filter mode cluster */ + struct v4l2_ctrl *video_spatial_filter_mode; + struct v4l2_ctrl *video_temporal_filter_mode; + struct v4l2_ctrl *video_median_filter_type; + }; + + struct { + /* video filter type cluster */ + struct v4l2_ctrl *video_luma_spatial_filter_type; + struct v4l2_ctrl *video_chroma_spatial_filter_type; + }; + + struct { + /* video filter cluster */ + struct v4l2_ctrl *video_spatial_filter; + struct v4l2_ctrl *video_temporal_filter; + }; + + struct { + /* video median cluster */ + struct v4l2_ctrl *video_luma_median_filter_top; + struct v4l2_ctrl *video_luma_median_filter_bottom; + struct v4l2_ctrl *video_chroma_median_filter_top; + struct v4l2_ctrl *video_chroma_median_filter_bottom; + }; +}; + +int cx2341x_handler_init(struct cx2341x_handler *cxhdl, + unsigned nr_of_controls_hint); +void 
cx2341x_handler_set_50hz(struct cx2341x_handler *cxhdl, int is_50hz); +int cx2341x_handler_setup(struct cx2341x_handler *cxhdl); +void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy); + +/* Firmware names */ +#define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw" +/* Decoder firmware for the cx23415 only */ +#define CX2341X_FIRM_DEC_FILENAME "v4l-cx2341x-dec.fw" + +/* Firmware API commands */ + +/* MPEG decoder API, specific to the cx23415 */ +#define CX2341X_DEC_PING_FW 0x00 +#define CX2341X_DEC_START_PLAYBACK 0x01 +#define CX2341X_DEC_STOP_PLAYBACK 0x02 +#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03 +#define CX2341X_DEC_STEP_VIDEO 0x05 +#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08 +#define CX2341X_DEC_GET_XFER_INFO 0x09 +#define CX2341X_DEC_GET_DMA_STATUS 0x0a +#define CX2341X_DEC_SCHED_DMA_FROM_HOST 0x0b +#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d +#define CX2341X_DEC_HALT_FW 0x0e +#define CX2341X_DEC_SET_STANDARD 0x10 +#define CX2341X_DEC_GET_VERSION 0x11 +#define CX2341X_DEC_SET_STREAM_INPUT 0x14 +#define CX2341X_DEC_GET_TIMING_INFO 0x15 +#define CX2341X_DEC_SET_AUDIO_MODE 0x16 +#define CX2341X_DEC_SET_EVENT_NOTIFICATION 0x17 +#define CX2341X_DEC_SET_DISPLAY_BUFFERS 0x18 +#define CX2341X_DEC_EXTRACT_VBI 0x19 +#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a +#define CX2341X_DEC_SET_PREBUFFERING 0x1e + +/* MPEG encoder API */ +#define CX2341X_ENC_PING_FW 0x80 +#define CX2341X_ENC_START_CAPTURE 0x81 +#define CX2341X_ENC_STOP_CAPTURE 0x82 +#define CX2341X_ENC_SET_AUDIO_ID 0x89 +#define CX2341X_ENC_SET_VIDEO_ID 0x8b +#define CX2341X_ENC_SET_PCR_ID 0x8d +#define CX2341X_ENC_SET_FRAME_RATE 0x8f +#define CX2341X_ENC_SET_FRAME_SIZE 0x91 +#define CX2341X_ENC_SET_BIT_RATE 0x95 +#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97 +#define CX2341X_ENC_SET_ASPECT_RATIO 0x99 +#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b +#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d +#define CX2341X_ENC_SET_CORING_LEVELS 0x9f +#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1 +#define 
CX2341X_ENC_SET_VBI_LINE 0xb7 +#define CX2341X_ENC_SET_STREAM_TYPE 0xb9 +#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb +#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd +#define CX2341X_ENC_HALT_FW 0xc3 +#define CX2341X_ENC_GET_VERSION 0xc4 +#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5 +#define CX2341X_ENC_GET_SEQ_END 0xc6 +#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7 +#define CX2341X_ENC_SET_VBI_CONFIG 0xc8 +#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9 +#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_10 0xca +#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_9 0xcb +#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc +#define CX2341X_ENC_INITIALIZE_INPUT 0xcd +#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0 +#define CX2341X_ENC_PAUSE_ENCODER 0xd2 +#define CX2341X_ENC_REFRESH_INPUT 0xd3 +#define CX2341X_ENC_SET_COPYRIGHT 0xd4 +#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5 +#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6 +#define CX2341X_ENC_SET_PLACEHOLDER 0xd7 +#define CX2341X_ENC_MUTE_VIDEO 0xd9 +#define CX2341X_ENC_MUTE_AUDIO 0xda +#define CX2341X_ENC_SET_VERT_CROP_LINE 0xdb +#define CX2341X_ENC_MISC 0xdc + +/* OSD API, specific to the cx23415 */ +#define CX2341X_OSD_GET_FRAMEBUFFER 0x41 +#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42 +#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43 +#define CX2341X_OSD_GET_STATE 0x44 +#define CX2341X_OSD_SET_STATE 0x45 +#define CX2341X_OSD_GET_OSD_COORDS 0x46 +#define CX2341X_OSD_SET_OSD_COORDS 0x47 +#define CX2341X_OSD_GET_SCREEN_COORDS 0x48 +#define CX2341X_OSD_SET_SCREEN_COORDS 0x49 +#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a +#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b +#define CX2341X_OSD_SET_BLEND_COORDS 0x4c +#define CX2341X_OSD_GET_FLICKER_STATE 0x4f +#define CX2341X_OSD_SET_FLICKER_STATE 0x50 +#define CX2341X_OSD_BLT_COPY 0x52 +#define CX2341X_OSD_BLT_FILL 0x53 +#define CX2341X_OSD_BLT_TEXT 0x54 +#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56 +#define CX2341X_OSD_SET_CHROMA_KEY 0x60 +#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61 +#define 
CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62 + +#endif /* CX2341X_H */ diff --git a/6.18.0/include-overrides/media/drv-intf/cx25840.h b/6.18.0/include-overrides/media/drv-intf/cx25840.h new file mode 100644 index 0000000..8b455d9 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/cx25840.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * cx25840.h - definition for cx25840/1/2/3 inputs + * + * Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + */ + +#ifndef _CX25840_H_ +#define _CX25840_H_ + +/* + * Note that the cx25840 driver requires that the bridge driver calls the + * v4l2_subdev's load_fw operation in order to load the driver's firmware. + * This will load the firmware on the first invocation (further ones are NOP). + * Without this the audio standard detection will fail and you will + * only get mono. + * Alternatively, you can call the reset operation (this can be done + * multiple times if needed, each invocation will fully reinitialize + * the device). + * + * Since loading the firmware is often problematic when the driver is + * compiled into the kernel I recommend postponing calling this function + * until the first open of the video device. Another reason for + * postponing it is that loading this firmware takes a long time (seconds) + * due to the slow i2c bus speed. So it will speed up the boot process if + * you can avoid loading the fw as long as the video device isn't used. 
+ */ + +enum cx25840_video_input { + /* Composite video inputs In1-In8 */ + CX25840_COMPOSITE1 = 1, + CX25840_COMPOSITE2, + CX25840_COMPOSITE3, + CX25840_COMPOSITE4, + CX25840_COMPOSITE5, + CX25840_COMPOSITE6, + CX25840_COMPOSITE7, + CX25840_COMPOSITE8, + + /* + * S-Video inputs consist of one luma input (In1-In8) ORed with one + * chroma input (In5-In8) + */ + CX25840_SVIDEO_LUMA1 = 0x10, + CX25840_SVIDEO_LUMA2 = 0x20, + CX25840_SVIDEO_LUMA3 = 0x30, + CX25840_SVIDEO_LUMA4 = 0x40, + CX25840_SVIDEO_LUMA5 = 0x50, + CX25840_SVIDEO_LUMA6 = 0x60, + CX25840_SVIDEO_LUMA7 = 0x70, + CX25840_SVIDEO_LUMA8 = 0x80, + CX25840_SVIDEO_CHROMA4 = 0x400, + CX25840_SVIDEO_CHROMA5 = 0x500, + CX25840_SVIDEO_CHROMA6 = 0x600, + CX25840_SVIDEO_CHROMA7 = 0x700, + CX25840_SVIDEO_CHROMA8 = 0x800, + + /* S-Video aliases for common luma/chroma combinations */ + CX25840_SVIDEO1 = 0x510, + CX25840_SVIDEO2 = 0x620, + CX25840_SVIDEO3 = 0x730, + CX25840_SVIDEO4 = 0x840, + + /* Allow frames to specify specific input configurations */ + CX25840_VIN1_CH1 = 0x80000000, + CX25840_VIN2_CH1 = 0x80000001, + CX25840_VIN3_CH1 = 0x80000002, + CX25840_VIN4_CH1 = 0x80000003, + CX25840_VIN5_CH1 = 0x80000004, + CX25840_VIN6_CH1 = 0x80000005, + CX25840_VIN7_CH1 = 0x80000006, + CX25840_VIN8_CH1 = 0x80000007, + CX25840_VIN4_CH2 = 0x80000000, + CX25840_VIN5_CH2 = 0x80000010, + CX25840_VIN6_CH2 = 0x80000020, + CX25840_NONE_CH2 = 0x80000030, + CX25840_VIN7_CH3 = 0x80000000, + CX25840_VIN8_CH3 = 0x80000040, + CX25840_NONE0_CH3 = 0x80000080, + CX25840_NONE1_CH3 = 0x800000c0, + CX25840_SVIDEO_ON = 0x80000100, + CX25840_COMPONENT_ON = 0x80000200, + CX25840_DIF_ON = 0x80000400, +}; + +/* + * The defines below are used to set the chip video output settings + * in the generic mode that can be enabled by calling the subdevice + * init core op. + * + * The requested settings can be passed to the init core op as + * @val parameter and to the s_routing video op as @config parameter. 
+ * + * For details please refer to the section 3.7 Video Output Formatting and + * to Video Out Control 1 to 4 registers in the section 5.6 Video Decoder Core + * of the chip datasheet. + */ +#define CX25840_VCONFIG_FMT_SHIFT 0 +#define CX25840_VCONFIG_FMT_MASK GENMASK(2, 0) +#define CX25840_VCONFIG_FMT_BT601 BIT(0) +#define CX25840_VCONFIG_FMT_BT656 BIT(1) +#define CX25840_VCONFIG_FMT_VIP11 GENMASK(1, 0) +#define CX25840_VCONFIG_FMT_VIP2 BIT(2) + +#define CX25840_VCONFIG_RES_SHIFT 3 +#define CX25840_VCONFIG_RES_MASK GENMASK(4, 3) +#define CX25840_VCONFIG_RES_8BIT BIT(3) +#define CX25840_VCONFIG_RES_10BIT BIT(4) + +#define CX25840_VCONFIG_VBIRAW_SHIFT 5 +#define CX25840_VCONFIG_VBIRAW_MASK GENMASK(6, 5) +#define CX25840_VCONFIG_VBIRAW_DISABLED BIT(5) +#define CX25840_VCONFIG_VBIRAW_ENABLED BIT(6) + +#define CX25840_VCONFIG_ANCDATA_SHIFT 7 +#define CX25840_VCONFIG_ANCDATA_MASK GENMASK(8, 7) +#define CX25840_VCONFIG_ANCDATA_DISABLED BIT(7) +#define CX25840_VCONFIG_ANCDATA_ENABLED BIT(8) + +#define CX25840_VCONFIG_TASKBIT_SHIFT 9 +#define CX25840_VCONFIG_TASKBIT_MASK GENMASK(10, 9) +#define CX25840_VCONFIG_TASKBIT_ZERO BIT(9) +#define CX25840_VCONFIG_TASKBIT_ONE BIT(10) + +#define CX25840_VCONFIG_ACTIVE_SHIFT 11 +#define CX25840_VCONFIG_ACTIVE_MASK GENMASK(12, 11) +#define CX25840_VCONFIG_ACTIVE_COMPOSITE BIT(11) +#define CX25840_VCONFIG_ACTIVE_HORIZONTAL BIT(12) + +#define CX25840_VCONFIG_VALID_SHIFT 13 +#define CX25840_VCONFIG_VALID_MASK GENMASK(14, 13) +#define CX25840_VCONFIG_VALID_NORMAL BIT(13) +#define CX25840_VCONFIG_VALID_ANDACTIVE BIT(14) + +#define CX25840_VCONFIG_HRESETW_SHIFT 15 +#define CX25840_VCONFIG_HRESETW_MASK GENMASK(16, 15) +#define CX25840_VCONFIG_HRESETW_NORMAL BIT(15) +#define CX25840_VCONFIG_HRESETW_PIXCLK BIT(16) + +#define CX25840_VCONFIG_CLKGATE_SHIFT 17 +#define CX25840_VCONFIG_CLKGATE_MASK GENMASK(18, 17) +#define CX25840_VCONFIG_CLKGATE_NONE BIT(17) +#define CX25840_VCONFIG_CLKGATE_VALID BIT(18) +#define 
CX25840_VCONFIG_CLKGATE_VALIDACTIVE GENMASK(18, 17) + +#define CX25840_VCONFIG_DCMODE_SHIFT 19 +#define CX25840_VCONFIG_DCMODE_MASK GENMASK(20, 19) +#define CX25840_VCONFIG_DCMODE_DWORDS BIT(19) +#define CX25840_VCONFIG_DCMODE_BYTES BIT(20) + +#define CX25840_VCONFIG_IDID0S_SHIFT 21 +#define CX25840_VCONFIG_IDID0S_MASK GENMASK(22, 21) +#define CX25840_VCONFIG_IDID0S_NORMAL BIT(21) +#define CX25840_VCONFIG_IDID0S_LINECNT BIT(22) + +#define CX25840_VCONFIG_VIPCLAMP_SHIFT 23 +#define CX25840_VCONFIG_VIPCLAMP_MASK GENMASK(24, 23) +#define CX25840_VCONFIG_VIPCLAMP_ENABLED BIT(23) +#define CX25840_VCONFIG_VIPCLAMP_DISABLED BIT(24) + +enum cx25840_audio_input { + /* Audio inputs: serial or In4-In8 */ + CX25840_AUDIO_SERIAL, + CX25840_AUDIO4 = 4, + CX25840_AUDIO5, + CX25840_AUDIO6, + CX25840_AUDIO7, + CX25840_AUDIO8, +}; + +enum cx25840_io_pin { + CX25840_PIN_DVALID_PRGM0 = 0, + CX25840_PIN_FIELD_PRGM1, + CX25840_PIN_HRESET_PRGM2, + CX25840_PIN_VRESET_HCTL_PRGM3, + CX25840_PIN_IRQ_N_PRGM4, + CX25840_PIN_IR_TX_PRGM6, + CX25840_PIN_IR_RX_PRGM5, + CX25840_PIN_GPIO0_PRGM8, + CX25840_PIN_GPIO1_PRGM9, + CX25840_PIN_SA_SDIN, /* Alternate GP Input only */ + CX25840_PIN_SA_SDOUT, /* Alternate GP Input only */ + CX25840_PIN_PLL_CLK_PRGM7, + CX25840_PIN_CHIP_SEL_VIPCLK, /* Output only */ +}; + +enum cx25840_io_pad { + /* Output pads, these must match the actual chip register values */ + CX25840_PAD_DEFAULT = 0, + CX25840_PAD_ACTIVE, + CX25840_PAD_VACTIVE, + CX25840_PAD_CBFLAG, + CX25840_PAD_VID_DATA_EXT0, + CX25840_PAD_VID_DATA_EXT1, + CX25840_PAD_GPO0, + CX25840_PAD_GPO1, + CX25840_PAD_GPO2, + CX25840_PAD_GPO3, + CX25840_PAD_IRQ_N, + CX25840_PAD_AC_SYNC, + CX25840_PAD_AC_SDOUT, + CX25840_PAD_PLL_CLK, + CX25840_PAD_VRESET, + CX25840_PAD_RESERVED, + /* Pads for PLL_CLK output only */ + CX25840_PAD_XTI_X5_DLL, + CX25840_PAD_AUX_PLL, + CX25840_PAD_VID_PLL, + CX25840_PAD_XTI, + /* Input Pads */ + CX25840_PAD_GPI0, + CX25840_PAD_GPI1, + CX25840_PAD_GPI2, + CX25840_PAD_GPI3, +}; + +enum 
cx25840_io_pin_strength { + CX25840_PIN_DRIVE_MEDIUM = 0, + CX25840_PIN_DRIVE_SLOW, + CX25840_PIN_DRIVE_FAST, +}; + +enum cx23885_io_pin { + CX23885_PIN_IR_RX_GPIO19, + CX23885_PIN_IR_TX_GPIO20, + CX23885_PIN_I2S_SDAT_GPIO21, + CX23885_PIN_I2S_WCLK_GPIO22, + CX23885_PIN_I2S_BCLK_GPIO23, + CX23885_PIN_IRQ_N_GPIO16, +}; + +enum cx23885_io_pad { + CX23885_PAD_IR_RX, + CX23885_PAD_GPIO19, + CX23885_PAD_IR_TX, + CX23885_PAD_GPIO20, + CX23885_PAD_I2S_SDAT, + CX23885_PAD_GPIO21, + CX23885_PAD_I2S_WCLK, + CX23885_PAD_GPIO22, + CX23885_PAD_I2S_BCLK, + CX23885_PAD_GPIO23, + CX23885_PAD_IRQ_N, + CX23885_PAD_GPIO16, +}; + +/* + * pvr150_workaround activates a workaround for a hardware bug that is + * present in Hauppauge PVR-150 (and possibly PVR-500) cards that have + * certain NTSC tuners (tveeprom tuner model numbers 85, 99 and 112). The + * audio autodetect fails on some channels for these models and the workaround + * is to select the audio standard explicitly. Many thanks to Hauppauge for + * providing this information. + * + * This platform data only needs to be supplied by the ivtv driver. + */ +struct cx25840_platform_data { + int pvr150_workaround; +}; + +#endif diff --git a/6.18.0/include-overrides/media/drv-intf/exynos-fimc.h b/6.18.0/include-overrides/media/drv-intf/exynos-fimc.h new file mode 100644 index 0000000..6b9ef63 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/exynos-fimc.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Samsung S5P/Exynos4 SoC series camera interface driver header + * + * Copyright (C) 2010 - 2013 Samsung Electronics Co., Ltd. + * Sylwester Nawrocki + */ + +#ifndef S5P_FIMC_H_ +#define S5P_FIMC_H_ + +#include +#include +#include + +/* + * Enumeration of data inputs to the camera subsystem. 
+ */ +enum fimc_input { + FIMC_INPUT_PARALLEL_0 = 1, + FIMC_INPUT_PARALLEL_1, + FIMC_INPUT_MIPI_CSI2_0 = 3, + FIMC_INPUT_MIPI_CSI2_1, + FIMC_INPUT_WRITEBACK_A = 5, + FIMC_INPUT_WRITEBACK_B, + FIMC_INPUT_WRITEBACK_ISP = 5, +}; + +/* + * Enumeration of the FIMC data bus types. + */ +enum fimc_bus_type { + /* Camera parallel bus */ + FIMC_BUS_TYPE_ITU_601 = 1, + /* Camera parallel bus with embedded synchronization */ + FIMC_BUS_TYPE_ITU_656, + /* Camera MIPI-CSI2 serial bus */ + FIMC_BUS_TYPE_MIPI_CSI2, + /* FIFO link from LCD controller (WriteBack A) */ + FIMC_BUS_TYPE_LCD_WRITEBACK_A, + /* FIFO link from LCD controller (WriteBack B) */ + FIMC_BUS_TYPE_LCD_WRITEBACK_B, + /* FIFO link from FIMC-IS */ + FIMC_BUS_TYPE_ISP_WRITEBACK = FIMC_BUS_TYPE_LCD_WRITEBACK_B, +}; + +#define fimc_input_is_parallel(x) ((x) == 1 || (x) == 2) +#define fimc_input_is_mipi_csi(x) ((x) == 3 || (x) == 4) + +/* + * The subdevices' group IDs. + */ +#define GRP_ID_SENSOR (1 << 8) +#define GRP_ID_FIMC_IS_SENSOR (1 << 9) +#define GRP_ID_WRITEBACK (1 << 10) +#define GRP_ID_CSIS (1 << 11) +#define GRP_ID_FIMC (1 << 12) +#define GRP_ID_FLITE (1 << 13) +#define GRP_ID_FIMC_IS (1 << 14) + +/** + * struct fimc_source_info - video source description required for the host + * interface configuration + * + * @fimc_bus_type: FIMC camera input type + * @sensor_bus_type: image sensor bus type, MIPI, ITU-R BT.601 etc. + * @flags: the parallel sensor bus flags defining signals polarity (V4L2_MBUS_*) + * @mux_id: FIMC camera interface multiplexer index (separate for MIPI and ITU) + */ +struct fimc_source_info { + enum fimc_bus_type fimc_bus_type; + enum fimc_bus_type sensor_bus_type; + u16 flags; + u16 mux_id; +}; + +/* + * v4l2_device notification id. This is only for internal use in the kernel. + * Sensor subdevs should issue S5P_FIMC_TX_END_NOTIFY notification in single + * frame capture mode when there is only one VSYNC pulse issued by the sensor + * at beginning of the frame transmission. 
+ */ +#define S5P_FIMC_TX_END_NOTIFY _IO('e', 0) + +#define FIMC_MAX_PLANES 3 + +/** + * struct fimc_fmt - color format data structure + * @mbus_code: media bus pixel code, -1 if not applicable + * @fourcc: fourcc code for this format, 0 if not applicable + * @color: the driver's private color format id + * @memplanes: number of physically non-contiguous data planes + * @colplanes: number of physically contiguous data planes + * @colorspace: v4l2 colorspace (V4L2_COLORSPACE_*) + * @depth: per plane driver's private 'number of bits per pixel' + * @mdataplanes: bitmask indicating meta data plane(s), (1 << plane_no) + * @flags: flags indicating which operation mode format applies to + */ +struct fimc_fmt { + u32 mbus_code; + u32 fourcc; + u32 color; + u16 memplanes; + u16 colplanes; + u8 colorspace; + u8 depth[FIMC_MAX_PLANES]; + u16 mdataplanes; + u16 flags; +#define FMT_FLAGS_CAM (1 << 0) +#define FMT_FLAGS_M2M_IN (1 << 1) +#define FMT_FLAGS_M2M_OUT (1 << 2) +#define FMT_FLAGS_M2M (1 << 1 | 1 << 2) +#define FMT_HAS_ALPHA (1 << 3) +#define FMT_FLAGS_COMPRESSED (1 << 4) +#define FMT_FLAGS_WRITEBACK (1 << 5) +#define FMT_FLAGS_RAW_BAYER (1 << 6) +#define FMT_FLAGS_YUV (1 << 7) +}; + +struct exynos_media_pipeline; + +/* + * Media pipeline operations to be called from within a video node, i.e. the + * last entity within the pipeline. Implemented by related media device driver. 
+ */ +struct exynos_media_pipeline_ops { + int (*prepare)(struct exynos_media_pipeline *p, + struct media_entity *me); + int (*unprepare)(struct exynos_media_pipeline *p); + int (*open)(struct exynos_media_pipeline *p, struct media_entity *me, + bool resume); + int (*close)(struct exynos_media_pipeline *p); + int (*set_stream)(struct exynos_media_pipeline *p, bool state); +}; + +struct exynos_video_entity { + struct video_device vdev; + struct exynos_media_pipeline *pipe; +}; + +struct exynos_media_pipeline { + struct media_pipeline mp; + const struct exynos_media_pipeline_ops *ops; +}; + +static inline struct exynos_video_entity *vdev_to_exynos_video_entity( + struct video_device *vdev) +{ + return container_of(vdev, struct exynos_video_entity, vdev); +} + +#define fimc_pipeline_call(ent, op, args...) \ + ((!(ent) || !(ent)->pipe) ? -ENOENT : \ + (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \ + (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \ + +#endif /* S5P_FIMC_H_ */ diff --git a/6.18.0/include-overrides/media/drv-intf/msp3400.h b/6.18.0/include-overrides/media/drv-intf/msp3400.h new file mode 100644 index 0000000..853258e --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/msp3400.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + msp3400.h - definition for msp3400 inputs and outputs + + Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _MSP3400_H_ +#define _MSP3400_H_ + +/* msp3400 routing + =============== + + The msp3400 has a complicated routing scheme with many possible + combinations. The details are all in the datasheets but I will try + to give a short description here. + + Inputs + ====== + + There are 1) tuner inputs, 2) I2S inputs, 3) SCART inputs. You will have + to select which tuner input to use and which SCART input to use. The + selected tuner input, the selected SCART input and all I2S inputs go to + the DSP (the tuner input first goes through the demodulator). 
+ + The DSP handles things like volume, bass/treble, balance, and some chips + have support for surround sound. It has several outputs: MAIN, AUX, I2S + and SCART1/2. Each output can select which DSP input to use. So the MAIN + output can select the tuner input while at the same time the SCART1 output + uses the I2S input. + + Outputs + ======= + + Most DSP outputs are also the outputs of the msp3400. However, the SCART + outputs of the msp3400 can select which input to use: either the SCART1 or + SCART2 output from the DSP, or the msp3400 SCART inputs, thus completely + bypassing the DSP. + + Summary + ======= + + So to specify a complete routing scheme for the msp3400 you will have to + specify in the 'input' arg of the s_routing function: + + 1) which tuner input to use + 2) which SCART input to use + 3) which DSP input to use for each DSP output + + And in the 'output' arg of the s_routing function you specify: + + 1) which SCART input to use for each SCART output + + Depending on how the msp is wired to the other components you can + ignore or mute certain inputs or outputs. + + Also, depending on the msp version only a subset of the inputs or + outputs may be present. At the end of this header some tables are + added containing a list of what is available for each msp version. + */ + +/* Inputs to the DSP unit: two independent selections have to be made: + 1) the tuner (SIF) input + 2) the SCART input + Bits 0-2 are used for the SCART input select, bit 3 is used for the tuner + input, bits 4-7 are reserved. 
+ */ + +/* SCART input to DSP selection */ +#define MSP_IN_SCART1 0 /* Pin SC1_IN */ +#define MSP_IN_SCART2 1 /* Pin SC2_IN */ +#define MSP_IN_SCART3 2 /* Pin SC3_IN */ +#define MSP_IN_SCART4 3 /* Pin SC4_IN */ +#define MSP_IN_MONO 6 /* Pin MONO_IN */ +#define MSP_IN_MUTE 7 /* Mute DSP input */ +#define MSP_SCART_TO_DSP(in) (in) +/* Tuner input to demodulator and DSP selection */ +#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */ +#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */ +#define MSP_TUNER_TO_DSP(in) ((in) << 3) + +/* The msp has up to 5 DSP outputs, each output can independently select + a DSP input. + + The DSP outputs are: loudspeaker output (aka MAIN), headphones output + (aka AUX), SCART1 DA output, SCART2 DA output and an I2S output. + There also is a quasi-peak detector output, but that is not used by + this driver and is set to the same input as the loudspeaker output. + Not all outputs are supported by all msp models. Setting the input + of an unsupported output will be ignored by the driver. + + There are up to 16 DSP inputs to choose from, so each output is + assigned 4 bits. + + Note: the 44x8G can mix two inputs and feed the result back to the + DSP. This is currently not implemented. Also not implemented is the + multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate + a need for one of those features then additional support can be added. 
*/ +#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */ +#define MSP_DSP_IN_SCART 2 /* SCART DSP input */ +#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */ +#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */ +#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */ +#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */ +#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */ +#define MSP_DSP_IN_AUX 13 /* AUX DSP input */ +#define MSP_DSP_TO_MAIN(in) ((in) << 4) +#define MSP_DSP_TO_AUX(in) ((in) << 8) +#define MSP_DSP_TO_SCART1(in) ((in) << 12) +#define MSP_DSP_TO_SCART2(in) ((in) << 16) +#define MSP_DSP_TO_I2S(in) ((in) << 20) + +/* Output SCART select: the SCART outputs can select which input + to use. */ +#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */ +#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */ +#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */ +#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */ +#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */ +#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */ +#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */ +#define MSP_SC_IN_MUTE 7 /* MUTE output */ +#define MSP_SC_TO_SCART1(in) (in) +#define MSP_SC_TO_SCART2(in) ((in) << 4) + +/* Shortcut macros */ +#define MSP_INPUT(sc, t, main_aux_src, sc_i2s_src) \ + (MSP_SCART_TO_DSP(sc) | \ + MSP_TUNER_TO_DSP(t) | \ + MSP_DSP_TO_MAIN(main_aux_src) | \ + MSP_DSP_TO_AUX(main_aux_src) | \ + MSP_DSP_TO_SCART1(sc_i2s_src) | \ + MSP_DSP_TO_SCART2(sc_i2s_src) | \ + MSP_DSP_TO_I2S(sc_i2s_src)) +#define MSP_INPUT_DEFAULT MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \ + MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER) +#define MSP_OUTPUT(sc) \ + (MSP_SC_TO_SCART1(sc) | \ + MSP_SC_TO_SCART2(sc)) +/* This equals the RESET position of the msp3400 ACB register */ +#define MSP_OUTPUT_DEFAULT (MSP_SC_TO_SCART1(MSP_SC_IN_SCART3) | \ + MSP_SC_TO_SCART2(MSP_SC_IN_DSP_SCART1)) + +/* Tuner inputs vs. 
msp version */ +/* Chip TUNER_1 TUNER_2 + ------------------------- + msp34x0b y y + msp34x0c y y + msp34x0d y y + msp34x5d y n + msp34x7d y n + msp34x0g y y + msp34x1g y y + msp34x2g y y + msp34x5g y n + msp34x7g y n + msp44x0g y y + msp44x8g y y + */ + +/* SCART inputs vs. msp version */ +/* Chip SC1 SC2 SC3 SC4 + ------------------------- + msp34x0b y y y n + msp34x0c y y y n + msp34x0d y y y y + msp34x5d y y n n + msp34x7d y n n n + msp34x0g y y y y + msp34x1g y y y y + msp34x2g y y y y + msp34x5g y y n n + msp34x7g y n n n + msp44x0g y y y y + msp44x8g y y y y + */ + +/* DSP inputs vs. msp version (tuner and SCART inputs are always available) */ +/* Chip I2S1 I2S2 I2S3 MAIN_AVC MAIN AUX + ------------------------------------------ + msp34x0b y n n n n n + msp34x0c y y n n n n + msp34x0d y y n n n n + msp34x5d y y n n n n + msp34x7d n n n n n n + msp34x0g y y n n n n + msp34x1g y y n n n n + msp34x2g y y n y y y + msp34x5g y y n n n n + msp34x7g n n n n n n + msp44x0g y y y y y y + msp44x8g y y y n n n + */ + +/* DSP outputs vs. 
msp version */ +/* Chip MAIN AUX SCART1 SCART2 I2S + ------------------------------------ + msp34x0b y y y n y + msp34x0c y y y n y + msp34x0d y y y y y + msp34x5d y n y n y + msp34x7d y n y n n + msp34x0g y y y y y + msp34x1g y y y y y + msp34x2g y y y y y + msp34x5g y n y n y + msp34x7g y n y n n + msp44x0g y y y y y + msp44x8g y y y y y + */ + +#endif /* MSP3400_H */ diff --git a/6.18.0/include-overrides/media/drv-intf/renesas-ceu.h b/6.18.0/include-overrides/media/drv-intf/renesas-ceu.h new file mode 100644 index 0000000..52841d1 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/renesas-ceu.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * renesas-ceu.h - Renesas CEU driver interface + * + * Copyright 2017-2018 Jacopo Mondi + */ + +#ifndef __MEDIA_DRV_INTF_RENESAS_CEU_H__ +#define __MEDIA_DRV_INTF_RENESAS_CEU_H__ + +#define CEU_MAX_SUBDEVS 2 + +struct ceu_async_subdev { + unsigned long flags; + unsigned char bus_width; + unsigned char bus_shift; + unsigned int i2c_adapter_id; + unsigned int i2c_address; +}; + +struct ceu_platform_data { + unsigned int num_subdevs; + struct ceu_async_subdev subdevs[CEU_MAX_SUBDEVS]; +}; + +#endif /* ___MEDIA_DRV_INTF_RENESAS_CEU_H__ */ diff --git a/6.18.0/include-overrides/media/drv-intf/s3c_camif.h b/6.18.0/include-overrides/media/drv-intf/s3c_camif.h new file mode 100644 index 0000000..f746851 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/s3c_camif.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver + * + * Copyright (C) 2012 Sylwester Nawrocki +*/ + +#ifndef MEDIA_S3C_CAMIF_ +#define MEDIA_S3C_CAMIF_ + +#include +#include + +/** + * struct s3c_camif_sensor_info - an image sensor description + * @i2c_board_info: pointer to an I2C sensor subdevice board info + * @clock_frequency: frequency of the clock the host provides to a sensor + * @mbus_type: media bus type + * @i2c_bus_num: i2c control bus id the sensor is 
attached to + * @flags: the parallel bus flags defining signals polarity (V4L2_MBUS_*) + * @use_field: 1 if parallel bus FIELD signal is used (only s3c64xx) + */ +struct s3c_camif_sensor_info { + struct i2c_board_info i2c_board_info; + unsigned long clock_frequency; + enum v4l2_mbus_type mbus_type; + u16 i2c_bus_num; + u16 flags; + u8 use_field; +}; + +struct s3c_camif_plat_data { + struct s3c_camif_sensor_info sensor; + int (*gpio_get)(void); + int (*gpio_put)(void); +}; + +#endif /* MEDIA_S3C_CAMIF_ */ diff --git a/6.18.0/include-overrides/media/drv-intf/saa7146.h b/6.18.0/include-overrides/media/drv-intf/saa7146.h new file mode 100644 index 0000000..71ce63c --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/saa7146.h @@ -0,0 +1,472 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SAA7146__ +#define __SAA7146__ + +#include /* for delay-stuff */ +#include /* for kmalloc/kfree */ +#include /* for pci-config-stuff, vendor ids etc. */ +#include /* for "__init" */ +#include /* for IMMEDIATE_BH */ +#include /* for kernel module loader */ +#include /* for i2c subsystem */ +#include /* for accessing devices */ +#include +#include +#include +#include +#include + +#include /* for vmalloc() */ +#include /* for vmalloc_to_page() */ + +#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr))) +#define saa7146_read(sxy,adr) readl(sxy->mem+(adr)) + +extern unsigned int saa7146_debug; + +#ifndef DEBUG_VARIABLE + #define DEBUG_VARIABLE saa7146_debug +#endif + +#define ERR(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__) + +#define _DBG(mask, fmt, ...) \ +do { \ + if (DEBUG_VARIABLE & mask) \ + pr_debug("%s(): " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +/* simple debug messages */ +#define DEB_S(fmt, ...) _DBG(0x01, fmt, ##__VA_ARGS__) +/* more detailed debug messages */ +#define DEB_D(fmt, ...) _DBG(0x02, fmt, ##__VA_ARGS__) +/* print enter and exit of functions */ +#define DEB_EE(fmt, ...) 
_DBG(0x04, fmt, ##__VA_ARGS__) +/* i2c debug messages */ +#define DEB_I2C(fmt, ...) _DBG(0x08, fmt, ##__VA_ARGS__) +/* vbi debug messages */ +#define DEB_VBI(fmt, ...) _DBG(0x10, fmt, ##__VA_ARGS__) +/* interrupt debug messages */ +#define DEB_INT(fmt, ...) _DBG(0x20, fmt, ##__VA_ARGS__) +/* capture debug messages */ +#define DEB_CAP(fmt, ...) _DBG(0x40, fmt, ##__VA_ARGS__) + +#define SAA7146_ISR_CLEAR(x,y) \ + saa7146_write(x, ISR, (y)); + +struct module; + +struct saa7146_dev; +struct saa7146_extension; +struct saa7146_vv; + +/* saa7146 page table */ +struct saa7146_pgtable { + unsigned int size; + __le32 *cpu; + dma_addr_t dma; + /* used for offsets for u,v planes for planar capture modes */ + unsigned long offset; + /* used for custom pagetables (used for example by budget dvb cards) */ + struct scatterlist *slist; + int nents; +}; + +struct saa7146_pci_extension_data { + struct saa7146_extension *ext; + void *ext_priv; /* most likely a name string */ +}; + +#define MAKE_EXTENSION_PCI(x_var, x_vendor, x_device) \ + { \ + .vendor = PCI_VENDOR_ID_PHILIPS, \ + .device = PCI_DEVICE_ID_PHILIPS_SAA7146, \ + .subvendor = x_vendor, \ + .subdevice = x_device, \ + .driver_data = (unsigned long)& x_var, \ + } + +struct saa7146_extension +{ + char name[32]; /* name of the device */ +#define SAA7146_USE_I2C_IRQ 0x1 +#define SAA7146_I2C_SHORT_DELAY 0x2 + int flags; + + /* pairs of subvendor and subdevice ids for + supported devices, last entry 0xffff, 0xfff */ + struct module *module; + struct pci_driver driver; + const struct pci_device_id *pci_tbl; + + /* extension functions */ + int (*probe)(struct saa7146_dev *); + int (*attach)(struct saa7146_dev *, struct saa7146_pci_extension_data *); + int (*detach)(struct saa7146_dev*); + + u32 irq_mask; /* mask to indicate, which irq-events are handled by the extension */ + void (*irq_func)(struct saa7146_dev*, u32* irq_mask); +}; + +struct saa7146_dma +{ + dma_addr_t dma_handle; + __le32 *cpu_addr; +}; + +struct saa7146_dev +{ + 
struct module *module; + + struct v4l2_device v4l2_dev; + struct v4l2_ctrl_handler ctrl_handler; + + /* different device locks */ + spinlock_t slock; + struct mutex v4l2_lock; + + unsigned char __iomem *mem; /* pointer to mapped IO memory */ + u32 revision; /* chip revision; needed for bug-workarounds*/ + + /* pci-device & irq stuff*/ + char name[32]; + struct pci_dev *pci; + u32 int_todo; + spinlock_t int_slock; + + /* extension handling */ + struct saa7146_extension *ext; /* indicates if handled by extension */ + void *ext_priv; /* pointer for extension private use (most likely some private data) */ + struct saa7146_ext_vv *ext_vv_data; + + /* per device video/vbi information (if available) */ + struct saa7146_vv *vv_data; + void (*vv_callback)(struct saa7146_dev *dev, unsigned long status); + + /* i2c-stuff */ + struct mutex i2c_lock; + + u32 i2c_bitrate; + struct saa7146_dma d_i2c; /* pointer to i2c memory */ + wait_queue_head_t i2c_wq; + int i2c_op; + + /* memories */ + struct saa7146_dma d_rps0; + struct saa7146_dma d_rps1; +}; + +static inline struct saa7146_dev *to_saa7146_dev(struct v4l2_device *v4l2_dev) +{ + return container_of(v4l2_dev, struct saa7146_dev, v4l2_dev); +} + +/* from saa7146_i2c.c */ +int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate); + +/* from saa7146_core.c */ +int saa7146_register_extension(struct saa7146_extension*); +int saa7146_unregister_extension(struct saa7146_extension*); +struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc); +int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt); +void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt); +int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length ); +void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt); +void saa7146_vfree_destroy_pgtable(struct pci_dev 
*pci, void *mem, struct saa7146_pgtable *pt); +void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data); +int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop); + +/* some memory sizes */ +#define SAA7146_I2C_MEM ( 1*PAGE_SIZE) +#define SAA7146_RPS_MEM ( 1*PAGE_SIZE) + +/* some i2c constants */ +#define SAA7146_I2C_TIMEOUT 100 /* i2c-timeout-value in ms */ +#define SAA7146_I2C_RETRIES 3 /* how many times shall we retry an i2c-operation? */ +#define SAA7146_I2C_DELAY 5 /* time we wait after certain i2c-operations */ + +/* unsorted defines */ +#define ME1 0x0000000800 +#define PV1 0x0000000008 + +/* gpio defines */ +#define SAA7146_GPIO_INPUT 0x00 +#define SAA7146_GPIO_IRQHI 0x10 +#define SAA7146_GPIO_IRQLO 0x20 +#define SAA7146_GPIO_IRQHL 0x30 +#define SAA7146_GPIO_OUTLO 0x40 +#define SAA7146_GPIO_OUTHI 0x50 + +/* debi defines */ +#define DEBINOSWAP 0x000e0000 + +/* define for the register programming sequencer (rps) */ +#define CMD_NOP 0x00000000 /* No operation */ +#define CMD_CLR_EVENT 0x00000000 /* Clear event */ +#define CMD_SET_EVENT 0x10000000 /* Set signal event */ +#define CMD_PAUSE 0x20000000 /* Pause */ +#define CMD_CHECK_LATE 0x30000000 /* Check late */ +#define CMD_UPLOAD 0x40000000 /* Upload */ +#define CMD_STOP 0x50000000 /* Stop */ +#define CMD_INTERRUPT 0x60000000 /* Interrupt */ +#define CMD_JUMP 0x80000000 /* Jump */ +#define CMD_WR_REG 0x90000000 /* Write (load) register */ +#define CMD_RD_REG 0xa0000000 /* Read (store) register */ +#define CMD_WR_REG_MASK 0xc0000000 /* Write register with mask */ + +#define CMD_OAN MASK_27 +#define CMD_INV MASK_26 +#define CMD_SIG4 MASK_25 +#define CMD_SIG3 MASK_24 +#define CMD_SIG2 MASK_23 +#define CMD_SIG1 MASK_22 +#define CMD_SIG0 MASK_21 +#define CMD_O_FID_B MASK_14 +#define CMD_E_FID_B MASK_13 +#define CMD_O_FID_A MASK_12 +#define CMD_E_FID_A MASK_11 + +/* some events and command modifiers for rps1 squarewave generator */ +#define EVT_HS (1<<15) // Source Line Threshold reached 
+#define EVT_VBI_B (1<<9) // VSYNC Event +#define RPS_OAN (1<<27) // 1: OR events, 0: AND events +#define RPS_INV (1<<26) // Invert (compound) event +#define GPIO3_MSK 0xFF000000 // GPIO #3 control bits + +/* Bit mask constants */ +#define MASK_00 0x00000001 /* Mask value for bit 0 */ +#define MASK_01 0x00000002 /* Mask value for bit 1 */ +#define MASK_02 0x00000004 /* Mask value for bit 2 */ +#define MASK_03 0x00000008 /* Mask value for bit 3 */ +#define MASK_04 0x00000010 /* Mask value for bit 4 */ +#define MASK_05 0x00000020 /* Mask value for bit 5 */ +#define MASK_06 0x00000040 /* Mask value for bit 6 */ +#define MASK_07 0x00000080 /* Mask value for bit 7 */ +#define MASK_08 0x00000100 /* Mask value for bit 8 */ +#define MASK_09 0x00000200 /* Mask value for bit 9 */ +#define MASK_10 0x00000400 /* Mask value for bit 10 */ +#define MASK_11 0x00000800 /* Mask value for bit 11 */ +#define MASK_12 0x00001000 /* Mask value for bit 12 */ +#define MASK_13 0x00002000 /* Mask value for bit 13 */ +#define MASK_14 0x00004000 /* Mask value for bit 14 */ +#define MASK_15 0x00008000 /* Mask value for bit 15 */ +#define MASK_16 0x00010000 /* Mask value for bit 16 */ +#define MASK_17 0x00020000 /* Mask value for bit 17 */ +#define MASK_18 0x00040000 /* Mask value for bit 18 */ +#define MASK_19 0x00080000 /* Mask value for bit 19 */ +#define MASK_20 0x00100000 /* Mask value for bit 20 */ +#define MASK_21 0x00200000 /* Mask value for bit 21 */ +#define MASK_22 0x00400000 /* Mask value for bit 22 */ +#define MASK_23 0x00800000 /* Mask value for bit 23 */ +#define MASK_24 0x01000000 /* Mask value for bit 24 */ +#define MASK_25 0x02000000 /* Mask value for bit 25 */ +#define MASK_26 0x04000000 /* Mask value for bit 26 */ +#define MASK_27 0x08000000 /* Mask value for bit 27 */ +#define MASK_28 0x10000000 /* Mask value for bit 28 */ +#define MASK_29 0x20000000 /* Mask value for bit 29 */ +#define MASK_30 0x40000000 /* Mask value for bit 30 */ +#define MASK_31 0x80000000 /* Mask value 
for bit 31 */ + +#define MASK_B0 0x000000ff /* Mask value for byte 0 */ +#define MASK_B1 0x0000ff00 /* Mask value for byte 1 */ +#define MASK_B2 0x00ff0000 /* Mask value for byte 2 */ +#define MASK_B3 0xff000000 /* Mask value for byte 3 */ + +#define MASK_W0 0x0000ffff /* Mask value for word 0 */ +#define MASK_W1 0xffff0000 /* Mask value for word 1 */ + +#define MASK_PA 0xfffffffc /* Mask value for physical address */ +#define MASK_PR 0xfffffffe /* Mask value for protection register */ +#define MASK_ER 0xffffffff /* Mask value for the entire register */ + +#define MASK_NONE 0x00000000 /* No mask */ + +/* register aliases */ +#define BASE_ODD1 0x00 /* Video DMA 1 registers */ +#define BASE_EVEN1 0x04 +#define PROT_ADDR1 0x08 +#define PITCH1 0x0C +#define BASE_PAGE1 0x10 /* Video DMA 1 base page */ +#define NUM_LINE_BYTE1 0x14 + +#define BASE_ODD2 0x18 /* Video DMA 2 registers */ +#define BASE_EVEN2 0x1C +#define PROT_ADDR2 0x20 +#define PITCH2 0x24 +#define BASE_PAGE2 0x28 /* Video DMA 2 base page */ +#define NUM_LINE_BYTE2 0x2C + +#define BASE_ODD3 0x30 /* Video DMA 3 registers */ +#define BASE_EVEN3 0x34 +#define PROT_ADDR3 0x38 +#define PITCH3 0x3C +#define BASE_PAGE3 0x40 /* Video DMA 3 base page */ +#define NUM_LINE_BYTE3 0x44 + +#define PCI_BT_V1 0x48 /* Video/FIFO 1 */ +#define PCI_BT_V2 0x49 /* Video/FIFO 2 */ +#define PCI_BT_V3 0x4A /* Video/FIFO 3 */ +#define PCI_BT_DEBI 0x4B /* DEBI */ +#define PCI_BT_A 0x4C /* Audio */ + +#define DD1_INIT 0x50 /* Init setting of DD1 interface */ + +#define DD1_STREAM_B 0x54 /* DD1 B video data stream handling */ +#define DD1_STREAM_A 0x56 /* DD1 A video data stream handling */ + +#define BRS_CTRL 0x58 /* BRS control register */ +#define HPS_CTRL 0x5C /* HPS control register */ +#define HPS_V_SCALE 0x60 /* HPS vertical scale */ +#define HPS_V_GAIN 0x64 /* HPS vertical ACL and gain */ +#define HPS_H_PRESCALE 0x68 /* HPS horizontal prescale */ +#define HPS_H_SCALE 0x6C /* HPS horizontal scale */ +#define BCS_CTRL 0x70 /* 
BCS control */ +#define CHROMA_KEY_RANGE 0x74 +#define CLIP_FORMAT_CTRL 0x78 /* HPS outputs formats & clipping */ + +#define DEBI_CONFIG 0x7C +#define DEBI_COMMAND 0x80 +#define DEBI_PAGE 0x84 +#define DEBI_AD 0x88 + +#define I2C_TRANSFER 0x8C +#define I2C_STATUS 0x90 + +#define BASE_A1_IN 0x94 /* Audio 1 input DMA */ +#define PROT_A1_IN 0x98 +#define PAGE_A1_IN 0x9C + +#define BASE_A1_OUT 0xA0 /* Audio 1 output DMA */ +#define PROT_A1_OUT 0xA4 +#define PAGE_A1_OUT 0xA8 + +#define BASE_A2_IN 0xAC /* Audio 2 input DMA */ +#define PROT_A2_IN 0xB0 +#define PAGE_A2_IN 0xB4 + +#define BASE_A2_OUT 0xB8 /* Audio 2 output DMA */ +#define PROT_A2_OUT 0xBC +#define PAGE_A2_OUT 0xC0 + +#define RPS_PAGE0 0xC4 /* RPS task 0 page register */ +#define RPS_PAGE1 0xC8 /* RPS task 1 page register */ + +#define RPS_THRESH0 0xCC /* HBI threshold for task 0 */ +#define RPS_THRESH1 0xD0 /* HBI threshold for task 1 */ + +#define RPS_TOV0 0xD4 /* RPS timeout for task 0 */ +#define RPS_TOV1 0xD8 /* RPS timeout for task 1 */ + +#define IER 0xDC /* Interrupt enable register */ + +#define GPIO_CTRL 0xE0 /* GPIO 0-3 register */ + +#define EC1SSR 0xE4 /* Event cnt set 1 source select */ +#define EC2SSR 0xE8 /* Event cnt set 2 source select */ +#define ECT1R 0xEC /* Event cnt set 1 thresholds */ +#define ECT2R 0xF0 /* Event cnt set 2 thresholds */ + +#define ACON1 0xF4 +#define ACON2 0xF8 + +#define MC1 0xFC /* Main control register 1 */ +#define MC2 0x100 /* Main control register 2 */ + +#define RPS_ADDR0 0x104 /* RPS task 0 address register */ +#define RPS_ADDR1 0x108 /* RPS task 1 address register */ + +#define ISR 0x10C /* Interrupt status register */ +#define PSR 0x110 /* Primary status register */ +#define SSR 0x114 /* Secondary status register */ + +#define EC1R 0x118 /* Event counter set 1 register */ +#define EC2R 0x11C /* Event counter set 2 register */ + +#define PCI_VDP1 0x120 /* Video DMA pointer of FIFO 1 */ +#define PCI_VDP2 0x124 /* Video DMA pointer of FIFO 2 */ +#define 
PCI_VDP3 0x128 /* Video DMA pointer of FIFO 3 */ +#define PCI_ADP1 0x12C /* Audio DMA pointer of audio out 1 */ +#define PCI_ADP2 0x130 /* Audio DMA pointer of audio in 1 */ +#define PCI_ADP3 0x134 /* Audio DMA pointer of audio out 2 */ +#define PCI_ADP4 0x138 /* Audio DMA pointer of audio in 2 */ +#define PCI_DMA_DDP 0x13C /* DEBI DMA pointer */ + +#define LEVEL_REP 0x140, +#define A_TIME_SLOT1 0x180, /* from 180 - 1BC */ +#define A_TIME_SLOT2 0x1C0, /* from 1C0 - 1FC */ + +/* isr masks */ +#define SPCI_PPEF 0x80000000 /* PCI parity error */ +#define SPCI_PABO 0x40000000 /* PCI access error (target or master abort) */ +#define SPCI_PPED 0x20000000 /* PCI parity error on 'real time data' */ +#define SPCI_RPS_I1 0x10000000 /* Interrupt issued by RPS1 */ +#define SPCI_RPS_I0 0x08000000 /* Interrupt issued by RPS0 */ +#define SPCI_RPS_LATE1 0x04000000 /* RPS task 1 is late */ +#define SPCI_RPS_LATE0 0x02000000 /* RPS task 0 is late */ +#define SPCI_RPS_E1 0x01000000 /* RPS error from task 1 */ +#define SPCI_RPS_E0 0x00800000 /* RPS error from task 0 */ +#define SPCI_RPS_TO1 0x00400000 /* RPS timeout task 1 */ +#define SPCI_RPS_TO0 0x00200000 /* RPS timeout task 0 */ +#define SPCI_UPLD 0x00100000 /* RPS in upload */ +#define SPCI_DEBI_S 0x00080000 /* DEBI status */ +#define SPCI_DEBI_E 0x00040000 /* DEBI error */ +#define SPCI_IIC_S 0x00020000 /* I2C status */ +#define SPCI_IIC_E 0x00010000 /* I2C error */ +#define SPCI_A2_IN 0x00008000 /* Audio 2 input DMA protection / limit */ +#define SPCI_A2_OUT 0x00004000 /* Audio 2 output DMA protection / limit */ +#define SPCI_A1_IN 0x00002000 /* Audio 1 input DMA protection / limit */ +#define SPCI_A1_OUT 0x00001000 /* Audio 1 output DMA protection / limit */ +#define SPCI_AFOU 0x00000800 /* Audio FIFO over- / underflow */ +#define SPCI_V_PE 0x00000400 /* Video protection address */ +#define SPCI_VFOU 0x00000200 /* Video FIFO over- / underflow */ +#define SPCI_FIDA 0x00000100 /* Field ID video port A */ +#define SPCI_FIDB 
0x00000080 /* Field ID video port B */ +#define SPCI_PIN3 0x00000040 /* GPIO pin 3 */ +#define SPCI_PIN2 0x00000020 /* GPIO pin 2 */ +#define SPCI_PIN1 0x00000010 /* GPIO pin 1 */ +#define SPCI_PIN0 0x00000008 /* GPIO pin 0 */ +#define SPCI_ECS 0x00000004 /* Event counter 1, 2, 4, 5 */ +#define SPCI_EC3S 0x00000002 /* Event counter 3 */ +#define SPCI_EC0S 0x00000001 /* Event counter 0 */ + +/* i2c */ +#define SAA7146_I2C_ABORT (1<<7) +#define SAA7146_I2C_SPERR (1<<6) +#define SAA7146_I2C_APERR (1<<5) +#define SAA7146_I2C_DTERR (1<<4) +#define SAA7146_I2C_DRERR (1<<3) +#define SAA7146_I2C_AL (1<<2) +#define SAA7146_I2C_ERR (1<<1) +#define SAA7146_I2C_BUSY (1<<0) + +#define SAA7146_I2C_START (0x3) +#define SAA7146_I2C_CONT (0x2) +#define SAA7146_I2C_STOP (0x1) +#define SAA7146_I2C_NOP (0x0) + +#define SAA7146_I2C_BUS_BIT_RATE_6400 (0x500) +#define SAA7146_I2C_BUS_BIT_RATE_3200 (0x100) +#define SAA7146_I2C_BUS_BIT_RATE_480 (0x400) +#define SAA7146_I2C_BUS_BIT_RATE_320 (0x600) +#define SAA7146_I2C_BUS_BIT_RATE_240 (0x700) +#define SAA7146_I2C_BUS_BIT_RATE_120 (0x000) +#define SAA7146_I2C_BUS_BIT_RATE_80 (0x200) +#define SAA7146_I2C_BUS_BIT_RATE_60 (0x300) + +static inline void SAA7146_IER_DISABLE(struct saa7146_dev *x, unsigned y) +{ + unsigned long flags; + spin_lock_irqsave(&x->int_slock, flags); + saa7146_write(x, IER, saa7146_read(x, IER) & ~y); + spin_unlock_irqrestore(&x->int_slock, flags); +} + +static inline void SAA7146_IER_ENABLE(struct saa7146_dev *x, unsigned y) +{ + unsigned long flags; + spin_lock_irqsave(&x->int_slock, flags); + saa7146_write(x, IER, saa7146_read(x, IER) | y); + spin_unlock_irqrestore(&x->int_slock, flags); +} + +#endif diff --git a/6.18.0/include-overrides/media/drv-intf/saa7146_vv.h b/6.18.0/include-overrides/media/drv-intf/saa7146_vv.h new file mode 100644 index 0000000..55c7d70 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/saa7146_vv.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SAA7146_VV__ 
+#define __SAA7146_VV__ + +#include +#include +#include +#include +#include + +#define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */ +#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */ + +#define WRITE_RPS0(x) do { \ + dev->d_rps0.cpu_addr[ count++ ] = cpu_to_le32(x); \ + } while (0); + +#define WRITE_RPS1(x) do { \ + dev->d_rps1.cpu_addr[ count++ ] = cpu_to_le32(x); \ + } while (0); + +struct saa7146_video_dma { + u32 base_odd; + u32 base_even; + u32 prot_addr; + u32 pitch; + u32 base_page; + u32 num_line_byte; +}; + +#define FORMAT_BYTE_SWAP 0x1 +#define FORMAT_IS_PLANAR 0x2 + +struct saa7146_format { + u32 pixelformat; + u32 trans; + u8 depth; + u8 flags; + u8 swap; +}; + +struct saa7146_standard +{ + char *name; + v4l2_std_id id; + + int v_offset; /* number of lines of vertical offset before processing */ + int v_field; /* number of lines in a field for HPS to process */ + + int h_offset; /* horizontal offset of processing window */ + int h_pixels; /* number of horizontal pixels to process */ + + int v_max_out; + int h_max_out; +}; + +/* buffer for one video/vbi frame */ +struct saa7146_buf { + /* common v4l buffer stuff -- must be first */ + struct vb2_v4l2_buffer vb; + struct list_head list; + + /* saa7146 specific */ + int (*activate)(struct saa7146_dev *dev, + struct saa7146_buf *buf, + struct saa7146_buf *next); + + /* page tables */ + struct saa7146_pgtable pt[3]; +}; + +struct saa7146_dmaqueue { + struct saa7146_dev *dev; + struct saa7146_buf *curr; + struct list_head queue; + struct timer_list timeout; + struct vb2_queue q; +}; + +struct saa7146_vv +{ + /* vbi capture */ + struct saa7146_dmaqueue vbi_dmaq; + struct v4l2_vbi_format vbi_fmt; + struct timer_list vbi_read_timeout; + /* vbi workaround interrupt queue */ + wait_queue_head_t vbi_wq; + + /* video capture */ + struct saa7146_dmaqueue video_dmaq; + struct v4l2_pix_format video_fmt; + enum v4l2_field last_field; + u32 seqnr; + + /* common: fixme? shouldn't this be in saa7146_fh? 
+ (this leads to a more complicated question: shall the driver + store the different settings (for example S_INPUT) for every open + and restore it appropriately, or should all settings be common for + all opens? currently, we do the latter, like all other + drivers do... */ + struct saa7146_standard *standard; + + int vflip; + int hflip; + int current_hps_source; + int current_hps_sync; + + unsigned int resources; /* resource management for device */ +}; + +/* flags */ +#define SAA7146_USE_PORT_B_FOR_VBI 0x2 /* use input port b for vbi hardware bug workaround */ + +struct saa7146_ext_vv +{ + /* information about the video capabilities of the device */ + int inputs; + int audios; + u32 capabilities; + int flags; + + /* additionally supported transmission standards */ + struct saa7146_standard *stds; + int num_stds; + int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *); + + /* the extension can override this */ + struct v4l2_ioctl_ops vid_ops; + struct v4l2_ioctl_ops vbi_ops; + /* pointer to the saa7146 core ops */ + const struct v4l2_ioctl_ops *core_ops; + + struct v4l2_file_operations vbi_fops; +}; + +struct saa7146_use_ops { + void (*init)(struct saa7146_dev *, struct saa7146_vv *); + void (*irq_done)(struct saa7146_dev *, unsigned long status); +}; + +/* from saa7146_fops.c */ +int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type); +int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev); +void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state); +void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi); +int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf); +void saa7146_buffer_timeout(struct timer_list *t); + +int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); +int saa7146_vv_release(struct saa7146_dev* dev); + +/* from saa7146_hlp.c */ 
+void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next); +void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) ; +void saa7146_set_hps_source_and_sync(struct saa7146_dev *saa, int source, int sync); +void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data); + +/* from saa7146_video.c */ +extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops; +extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops; +extern const struct saa7146_use_ops saa7146_video_uops; +extern const struct vb2_ops video_qops; +long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg); +int saa7146_s_ctrl(struct v4l2_ctrl *ctrl); + +/* from saa7146_vbi.c */ +extern const struct saa7146_use_ops saa7146_vbi_uops; +extern const struct vb2_ops vbi_qops; + +/* resource management functions */ +int saa7146_res_get(struct saa7146_dev *dev, unsigned int bit); +void saa7146_res_free(struct saa7146_dev *dev, unsigned int bits); + +#define RESOURCE_DMA1_HPS 0x1 +#define RESOURCE_DMA2_CLP 0x2 +#define RESOURCE_DMA3_BRS 0x4 + +/* saa7146 source inputs */ +#define SAA7146_HPS_SOURCE_PORT_A 0x00 +#define SAA7146_HPS_SOURCE_PORT_B 0x01 +#define SAA7146_HPS_SOURCE_YPB_CPA 0x02 +#define SAA7146_HPS_SOURCE_YPA_CPB 0x03 + +/* sync inputs */ +#define SAA7146_HPS_SYNC_PORT_A 0x00 +#define SAA7146_HPS_SYNC_PORT_B 0x01 + +/* some memory sizes */ +/* max. 
16 clipping rectangles */ +#define SAA7146_CLIPPING_MEM (16 * 4 * sizeof(u32)) + +/* some defines for the various clipping-modes */ +#define SAA7146_CLIPPING_RECT 0x4 +#define SAA7146_CLIPPING_RECT_INVERTED 0x5 +#define SAA7146_CLIPPING_MASK 0x6 +#define SAA7146_CLIPPING_MASK_INVERTED 0x7 + +/* output formats: each entry holds four information */ +#define RGB08_COMPOSED 0x0217 /* composed is used in the sense of "not-planar" */ +/* this means: planar?=0, yuv2rgb-conversation-mode=2, dither=yes(=1), format-mode = 7 */ +#define RGB15_COMPOSED 0x0213 +#define RGB16_COMPOSED 0x0210 +#define RGB24_COMPOSED 0x0201 +#define RGB32_COMPOSED 0x0202 + +#define Y8 0x0006 +#define YUV411_COMPOSED 0x0003 +#define YUV422_COMPOSED 0x0000 +/* this means: planar?=1, yuv2rgb-conversion-mode=0, dither=no(=0), format-mode = b */ +#define YUV411_DECOMPOSED 0x100b +#define YUV422_DECOMPOSED 0x1009 +#define YUV420_DECOMPOSED 0x100a + +#define IS_PLANAR(x) (x & 0xf000) + +/* misc defines */ +#define SAA7146_NO_SWAP (0x0) +#define SAA7146_TWO_BYTE_SWAP (0x1) +#define SAA7146_FOUR_BYTE_SWAP (0x2) + +#endif diff --git a/6.18.0/include-overrides/media/drv-intf/sh_vou.h b/6.18.0/include-overrides/media/drv-intf/sh_vou.h new file mode 100644 index 0000000..8d23181 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/sh_vou.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SuperH Video Output Unit (VOU) driver header + * + * Copyright (C) 2010, Guennadi Liakhovetski + */ +#ifndef SH_VOU_H +#define SH_VOU_H + +#include + +/* Bus flags */ +#define SH_VOU_PCLK_FALLING (1 << 0) +#define SH_VOU_HSYNC_LOW (1 << 1) +#define SH_VOU_VSYNC_LOW (1 << 2) + +enum sh_vou_bus_fmt { + SH_VOU_BUS_8BIT, + SH_VOU_BUS_16BIT, + SH_VOU_BUS_BT656, +}; + +struct sh_vou_pdata { + enum sh_vou_bus_fmt bus_fmt; + int i2c_adap; + struct i2c_board_info *board_info; + unsigned long flags; +}; + +#endif diff --git a/6.18.0/include-overrides/media/drv-intf/si476x.h 
b/6.18.0/include-overrides/media/drv-intf/si476x.h new file mode 100644 index 0000000..07f291d --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/si476x.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/media/drv-intf/si476x.h -- Common definitions for si476x driver + * + * Copyright (C) 2012 Innovative Converged Devices(ICD) + * Copyright (C) 2013 Andrey Smirnov + * + * Author: Andrey Smirnov + */ + +#ifndef SI476X_H +#define SI476X_H + +#include +#include + +#include + +enum si476x_ctrl_id { + V4L2_CID_SI476X_RSSI_THRESHOLD = (V4L2_CID_USER_SI476X_BASE + 1), + V4L2_CID_SI476X_SNR_THRESHOLD = (V4L2_CID_USER_SI476X_BASE + 2), + V4L2_CID_SI476X_MAX_TUNE_ERROR = (V4L2_CID_USER_SI476X_BASE + 3), + V4L2_CID_SI476X_HARMONICS_COUNT = (V4L2_CID_USER_SI476X_BASE + 4), + V4L2_CID_SI476X_DIVERSITY_MODE = (V4L2_CID_USER_SI476X_BASE + 5), + V4L2_CID_SI476X_INTERCHIP_LINK = (V4L2_CID_USER_SI476X_BASE + 6), +}; + +#endif /* SI476X_H*/ diff --git a/6.18.0/include-overrides/media/drv-intf/tea575x.h b/6.18.0/include-overrides/media/drv-intf/tea575x.h new file mode 100644 index 0000000..50c9f89 --- /dev/null +++ b/6.18.0/include-overrides/media/drv-intf/tea575x.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __SOUND_TEA575X_TUNER_H +#define __SOUND_TEA575X_TUNER_H + +/* + * ALSA driver for TEA5757/5759 Philips AM/FM tuner chips + * + * Copyright (c) 2004 Jaroslav Kysela + */ + +#include +#include +#include +#include + +#define TEA575X_FMIF 10700 +#define TEA575X_AMIF 450 + +#define TEA575X_DATA (1 << 0) +#define TEA575X_CLK (1 << 1) +#define TEA575X_WREN (1 << 2) +#define TEA575X_MOST (1 << 3) + +struct snd_tea575x; + +struct snd_tea575x_ops { + /* Drivers using snd_tea575x must either define read_ and write_val */ + void (*write_val)(struct snd_tea575x *tea, u32 val); + u32 (*read_val)(struct snd_tea575x *tea); + /* Or define the 3 pin functions */ + void (*set_pins)(struct snd_tea575x *tea, u8 pins); + u8 
(*get_pins)(struct snd_tea575x *tea); + void (*set_direction)(struct snd_tea575x *tea, bool output); +}; + +struct snd_tea575x { + struct v4l2_device *v4l2_dev; + struct v4l2_file_operations fops; + struct video_device vd; /* video device */ + int radio_nr; /* radio_nr */ + bool tea5759; /* 5759 chip is present */ + bool has_am; /* Device can tune to AM freqs */ + bool cannot_read_data; /* Device cannot read the data pin */ + bool cannot_mute; /* Device cannot mute */ + bool mute; /* Device is muted? */ + bool stereo; /* receiving stereo */ + bool tuned; /* tuned to a station */ + unsigned int val; /* hw value */ + u32 band; /* 0: FM, 1: FM-Japan, 2: AM */ + u32 freq; /* frequency */ + struct mutex mutex; + const struct snd_tea575x_ops *ops; + void *private_data; + u8 card[32]; + u8 bus_info[32]; + struct v4l2_ctrl_handler ctrl_handler; + int (*ext_init)(struct snd_tea575x *tea); +}; + +int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea, + struct v4l2_frequency_band *band); +int snd_tea575x_g_tuner(struct snd_tea575x *tea, struct v4l2_tuner *v); +int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea, + const struct v4l2_hw_freq_seek *a); +int snd_tea575x_hw_init(struct snd_tea575x *tea); +int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner); +void snd_tea575x_exit(struct snd_tea575x *tea); +void snd_tea575x_set_freq(struct snd_tea575x *tea); + +#endif /* __SOUND_TEA575X_TUNER_H */ diff --git a/6.18.0/include-overrides/media/dvb-usb-ids.h b/6.18.0/include-overrides/media/dvb-usb-ids.h new file mode 100644 index 0000000..1b7d10f --- /dev/null +++ b/6.18.0/include-overrides/media/dvb-usb-ids.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* dvb-usb-ids.h is part of the DVB USB library. + * + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see + * dvb-usb-init.c for copyright information. + * + * a header file containing define's for the USB device supported by the + * various drivers. 
+ */ +#ifndef _DVB_USB_IDS_H_ +#define _DVB_USB_IDS_H_ + +#include + +#define DVB_USB_DEV(pid, vid) \ + [vid] = { USB_DEVICE(USB_VID_ ## pid, USB_PID_ ## vid) } + +#define DVB_USB_DEV_VER(pid, vid, lo, hi) \ + [vid] = { USB_DEVICE_VER(USB_VID_ ## pid, USB_PID_ ## vid, lo, hi) } + +/* Vendor IDs */ + +#define USB_VID_774 0x7a69 +#define USB_VID_ADSTECH 0x06e1 +#define USB_VID_AFATECH 0x15a4 +#define USB_VID_ALCOR_MICRO 0x058f +#define USB_VID_ALINK 0x05e3 +#define USB_VID_AME 0x06be +#define USB_VID_AMT 0x1c73 +#define USB_VID_ANCHOR 0x0547 +#define USB_VID_ANSONIC 0x10b9 +#define USB_VID_ANUBIS_ELECTRONIC 0x10fd +#define USB_VID_ASUS 0x0b05 +#define USB_VID_AVERMEDIA 0x07ca +#define USB_VID_AZUREWAVE 0x13d3 +#define USB_VID_COMPRO 0x185b +#define USB_VID_COMPRO_UNK 0x145f +#define USB_VID_CONEXANT 0x0572 +#define USB_VID_CYPRESS 0x04b4 +#define USB_VID_DEXATEK 0x1d19 +#define USB_VID_DIBCOM 0x10b8 +#define USB_VID_DPOSH 0x1498 +#define USB_VID_DVICO 0x0fe9 +#define USB_VID_E3C 0x18b4 +#define USB_VID_ELGATO 0x0fd9 +#define USB_VID_EMPIA 0xeb1a +#define USB_VID_EVOLUTEPC 0x1e59 +#define USB_VID_GENPIX 0x09c0 +#define USB_VID_GIGABYTE 0x1044 +#define USB_VID_GOTVIEW 0x1fe1 +#define USB_VID_GRANDTEC 0x5032 +#define USB_VID_GTEK 0x1f4d +#define USB_VID_HAMA 0x147f +#define USB_VID_HANFTEK 0x15f4 +#define USB_VID_HAUPPAUGE 0x2040 +#define USB_VID_HUMAX_COEX 0x10b9 +#define USB_VID_HYPER_PALTEK 0x1025 +#define USB_VID_INTEL 0x8086 +#define USB_VID_ITETECH 0x048d +#define USB_VID_KWORLD 0xeb2a +#define USB_VID_KWORLD_2 0x1b80 +#define USB_VID_KYE 0x0458 +#define USB_VID_LEADTEK 0x0413 +#define USB_VID_LITEON 0x04ca +#define USB_VID_MEDION 0x1660 +#define USB_VID_MICROSOFT 0x045e +#define USB_VID_MIGLIA 0x18f3 +#define USB_VID_MSI 0x0db0 +#define USB_VID_MSI_2 0x1462 +#define USB_VID_OPERA1 0x695c +#define USB_VID_PCTV 0x2013 +#define USB_VID_PINNACLE 0x2304 +#define USB_VID_PIXELVIEW 0x1554 +#define USB_VID_PROF_1 0x3011 +#define USB_VID_PROF_2 0x3034 +#define 
USB_VID_REALTEK 0x0bda +#define USB_VID_SONY 0x1415 +#define USB_VID_TECHNISAT 0x14f7 +#define USB_VID_TECHNOTREND 0x0b48 +#define USB_VID_TELESTAR 0x10b9 +#define USB_VID_TERRATEC 0x0ccd +#define USB_VID_TERRATEC_2 0x153b +#define USB_VID_TEVII 0x9022 +#define USB_VID_TWINHAN 0x1822 +#define USB_VID_ULTIMA_ELECTRONIC 0x05d8 +#define USB_VID_UNIWILL 0x1584 +#define USB_VID_VISIONPLUS 0x13d3 +#define USB_VID_WIDEVIEW 0x14aa +#define USB_VID_XTENSIONS 0x1ae7 +#define USB_VID_YUAN 0x1164 +#define USB_VID_ZYDAS 0x0ace + +/* Product IDs */ + +#define USB_PID_ADSTECH_USB2_COLD 0xa333 +#define USB_PID_ADSTECH_USB2_WARM 0xa334 +#define USB_PID_AFATECH_AF9005 0x9020 +#define USB_PID_AFATECH_AF9015_9015 0x9015 +#define USB_PID_AFATECH_AF9015_9016 0x9016 +#define USB_PID_AFATECH_AF9035_1000 0x1000 +#define USB_PID_AFATECH_AF9035_1001 0x1001 +#define USB_PID_AFATECH_AF9035_1002 0x1002 +#define USB_PID_AFATECH_AF9035_1003 0x1003 +#define USB_PID_AFATECH_AF9035_9035 0x9035 +#define USB_PID_ALINK_DTU 0xf170 +#define USB_PID_AME_DTV5100 0xa232 +#define USB_PID_ANCHOR_NEBULA_DIGITV 0x0201 +#define USB_PID_ANSONIC_DVBT_USB 0x6000 +#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514 +#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513 +#define USB_PID_ANUBIS_MSI_DIGI_VOX_MINI_II 0x1513 +#define USB_PID_ANYSEE 0x861f +#define USB_PID_ASUS_U3000 0x171f +#define USB_PID_ASUS_U3000H 0x1736 +#define USB_PID_ASUS_U3100 0x173f +#define USB_PID_ASUS_U3100MINI_PLUS 0x1779 +#define USB_PID_AVERMEDIA_1867 0x1867 +#define USB_PID_AVERMEDIA_A309 0xa309 +#define USB_PID_AVERMEDIA_A310 0xa310 +#define USB_PID_AVERMEDIA_A805 0xa805 +#define USB_PID_AVERMEDIA_A815M 0x815a +#define USB_PID_AVERMEDIA_A835 0xa835 +#define USB_PID_AVERMEDIA_A835B_1835 0x1835 +#define USB_PID_AVERMEDIA_A835B_2835 0x2835 +#define USB_PID_AVERMEDIA_A835B_3835 0x3835 +#define USB_PID_AVERMEDIA_A835B_4835 0x4835 +#define USB_PID_AVERMEDIA_A850 0x850a +#define USB_PID_AVERMEDIA_A850T 0x850b +#define 
USB_PID_AVERMEDIA_A867 0xa867 +#define USB_PID_AVERMEDIA_B835 0xb835 +#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800 +#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801 +#define USB_PID_AVERMEDIA_EXPRESS 0xb568 +#define USB_PID_AVERMEDIA_H335 0x0335 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039 +#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228 +#define USB_PID_AVERMEDIA_TD110 0xa110 +#define USB_PID_AVERMEDIA_TD310 0x1871 +#define USB_PID_AVERMEDIA_TWINSTAR 0x0825 +#define USB_PID_AVERMEDIA_VOLAR 0xa807 +#define USB_PID_AVERMEDIA_VOLAR_2 0xb808 +#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868 +#define USB_PID_AVERMEDIA_VOLAR_X 0xa815 +#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150 +#define USB_PID_AZUREWAVE_6007 0x0ccd +#define USB_PID_AZUREWAVE_AD_TU700 0x3237 +#define USB_PID_AZUREWAVE_AZ6027 0x3275 +#define USB_PID_AZUREWAVE_TWINHAN_VP7049 0x3219 +#define USB_PID_COMPRO_DVBU2000_COLD 0xd000 +#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c +#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d +#define USB_PID_COMPRO_DVBU2000_WARM 0xd001 +#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78 +#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80 +#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397 +#define USB_PID_CONEXANT_D680_DMB 0x86d6 +#define USB_PID_CPYTO_REDI_PC50A 0xa803 +#define USB_PID_CTVDIGDUAL_V2 0xe410 +#define USB_PID_CYPRESS_DW2101 0x2101 +#define USB_PID_CYPRESS_DW2102 0x2102 +#define USB_PID_CYPRESS_DW2104 0x2104 +#define USB_PID_CYPRESS_DW3101 0x3101 +#define USB_PID_CYPRESS_OPERA1_COLD 0x2830 +#define USB_PID_DELOCK_USB2_DVBT 0xb803 +#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 +#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064 +#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065 +#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8 +#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9 +#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6 +#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7 
+#define USB_PID_DIBCOM_NIM7090 0x1bb2 +#define USB_PID_DIBCOM_NIM8096MD 0x1fa8 +#define USB_PID_DIBCOM_NIM9090M 0x2383 +#define USB_PID_DIBCOM_NIM9090MD 0x2384 +#define USB_PID_DIBCOM_STK7070P 0x1ebc +#define USB_PID_DIBCOM_STK7070PD 0x1ebe +#define USB_PID_DIBCOM_STK7700D 0x1ef0 +#define USB_PID_DIBCOM_STK7700P 0x1e14 +#define USB_PID_DIBCOM_STK7700P_PC 0x1e78 +#define USB_PID_DIBCOM_STK7700_U7000 0x7001 +#define USB_PID_DIBCOM_STK7770P 0x1e80 +#define USB_PID_DIBCOM_STK807XP 0x1f90 +#define USB_PID_DIBCOM_STK807XPVR 0x1f98 +#define USB_PID_DIBCOM_STK8096GP 0x1fa0 +#define USB_PID_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4 +#define USB_PID_DIBCOM_TFE7790P 0x1e6e +#define USB_PID_DIBCOM_TFE8096P 0x1f9C +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54 +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55 +#define USB_PID_DPOSH_M9206_COLD 0x9206 +#define USB_PID_DPOSH_M9206_WARM 0xa090 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51 +#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58 +#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71 +#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500 +#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501 +#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01 +#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10 +#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11 +#define USB_PID_E3C_EC168 0x1689 +#define USB_PID_E3C_EC168_2 0xfffa +#define USB_PID_E3C_EC168_3 0xfffb +#define USB_PID_E3C_EC168_4 0x1001 +#define USB_PID_E3C_EC168_5 0x1002 +#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 +#define USB_PID_ELGATO_EYETV_DTT 0x0021 +#define USB_PID_ELGATO_EYETV_DTT_2 
0x003f +#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020 +#define USB_PID_ELGATO_EYETV_SAT 0x002a +#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025 +#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036 +#define USB_PID_EMPIA_DIGIVOX_MINI_SL_COLD 0xe360 +#define USB_PID_EMPIA_DIGIVOX_MINI_SL_WARM 0xe361 +#define USB_PID_EMPIA_VSTREAM_COLD 0x17de +#define USB_PID_EMPIA_VSTREAM_WARM 0x17df +#define USB_PID_EVOLUTEPC_TVWAY_PLUS 0x0002 +#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115 +#define USB_PID_FREECOM_DVBT 0x0160 +#define USB_PID_FREECOM_DVBT_2 0x0161 +#define USB_PID_FRIIO_WHITE 0x0001 +#define USB_PID_GENIATECH_SU3000 0x3000 +#define USB_PID_GENIATECH_T220 0xd220 +#define USB_PID_GENIATECH_X3M_SPC1400HD 0x3100 +#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012 +#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200 +#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201 +#define USB_PID_GENPIX_8PSK_REV_2 0x0202 +#define USB_PID_GENPIX_SKYWALKER_1 0x0203 +#define USB_PID_GENPIX_SKYWALKER_2 0x0206 +#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204 +#define USB_PID_GIGABYTE_U7000 0x7001 +#define USB_PID_GIGABYTE_U8000 0x7002 +#define USB_PID_GOTVIEW_SAT_HD 0x5456 +#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6 +#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7 +#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 +#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 +#define USB_PID_GRANDTEC_MOD3000_COLD 0x0bb8 +#define USB_PID_GRANDTEC_MOD3000_WARM 0x0bb9 +#define USB_PID_HAMA_DVBT_HYBRID 0x2758 +#define USB_PID_HANFTEK_UMT_010_COLD 0x0001 +#define USB_PID_HANFTEK_UMT_010_WARM 0x0015 +#define USB_PID_HAUPPAUGE_MAX_S2 0xd900 +#define USB_PID_HAUPPAUGE_MYTV_T 0x7080 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200 +#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941 +#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950 +#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060 +#define 
USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070 +#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200 +#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210 +#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_COLD 0x9300 +#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_WARM 0x9301 +#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_COLD 0x5000 +#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_WARM 0x5001 +#define USB_PID_INTEL_CE9500 0x9500 +#define USB_PID_ITETECH_IT9135 0x9135 +#define USB_PID_ITETECH_IT9135_9005 0x9005 +#define USB_PID_ITETECH_IT9135_9006 0x9006 +#define USB_PID_ITETECH_IT9303 0x9306 +#define USB_PID_KWORLD_395U 0xe396 +#define USB_PID_KWORLD_395U_2 0xe39b +#define USB_PID_KWORLD_395U_3 0xe395 +#define USB_PID_KWORLD_395U_4 0xe39a +#define USB_PID_KWORLD_399U 0xe399 +#define USB_PID_KWORLD_399U_2 0xe400 +#define USB_PID_KWORLD_MC810 0xc810 +#define USB_PID_KWORLD_PC160_2T 0xc160 +#define USB_PID_KWORLD_PC160_T 0xc161 +#define USB_PID_KWORLD_UB383_T 0xe383 +#define USB_PID_KWORLD_UB499_2T_T09 0xe409 +#define USB_PID_KWORLD_VSTREAM_COLD 0x17de +#define USB_PID_KYE_DVB_T_COLD 0x701e +#define USB_PID_KYE_DVB_T_WARM 0x701f +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_COLD 0x6025 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_H 0x60f6 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P 0x6f00 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01 +#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_WARM 0x6026 +#define USB_PID_LITEON_DVB_T_COLD 0xf000 +#define USB_PID_LITEON_DVB_T_WARM 0xf001 +#define USB_PID_MEDION_CREATIX_CTX1921 0x1921 +#define USB_PID_MEDION_MD95700 0x0932 +#define USB_PID_MICROSOFT_XBOX_ONE_TUNER 0x02d5 +#define USB_PID_MIGLIA_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_MSI_DIGIVOX_DUO 0x8801 +#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807 +#define USB_PID_MSI_MEGASKY580 0x5580 +#define USB_PID_MSI_MEGASKY580_55801 0x5581 +#define USB_PID_MYGICA_D689 0xd811 +#define USB_PID_MYGICA_T230 0xc688 +#define USB_PID_MYGICA_T230A 0x689a +#define USB_PID_MYGICA_T230C 0xc689 +#define 
USB_PID_MYGICA_T230C2 0xc68a +#define USB_PID_MYGICA_T230C2_LITE 0xc69a +#define USB_PID_MYGICA_T230C_LITE 0xc699 +#define USB_PID_NOXON_DAB_STICK 0x00b3 +#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0 +#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4 +#define USB_PID_OPERA1_WARM 0x3829 +#define USB_PID_PCTV_2002E 0x025c +#define USB_PID_PCTV_2002E_SE 0x025d +#define USB_PID_PCTV_200E 0x020e +#define USB_PID_PCTV_78E 0x025a +#define USB_PID_PCTV_79E 0x0262 +#define USB_PID_PCTV_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_PCTV_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PCTV_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e +#define USB_PID_PINNACLE_PCTV2000E 0x022c +#define USB_PID_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PINNACLE_PCTV340E 0x023d +#define USB_PID_PINNACLE_PCTV340E_SE 0x023e +#define USB_PID_PINNACLE_PCTV71E 0x022b +#define USB_PID_PINNACLE_PCTV72E 0x0236 +#define USB_PID_PINNACLE_PCTV73A 0x0243 +#define USB_PID_PINNACLE_PCTV73E 0x0237 +#define USB_PID_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_PCTV74E 0x0246 +#define USB_PID_PINNACLE_PCTV801E 0x023a +#define USB_PID_PINNACLE_PCTV801E_SE 0x023b +#define USB_PID_PINNACLE_PCTV_400E 0x020f +#define USB_PID_PINNACLE_PCTV_450E 0x0222 +#define USB_PID_PINNACLE_PCTV_452E 0x021f +#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229 +#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228 +#define USB_PID_PIXELVIEW_SBTVD 0x5010 +#define USB_PID_PROF_1100 0xb012 +#define USB_PID_PROF_7500 0x7500 +#define USB_PID_PROLECTRIX_DV107669 0xd803 +#define USB_PID_REALTEK_RTL2831U 0x2831 +#define USB_PID_REALTEK_RTL2832U 0x2832 +#define USB_PID_SIGMATEK_DVB_110 0x6610 +#define USB_PID_SONY_PLAYTV 0x0003 +#define USB_PID_SVEON_STV20 0xe39d +#define USB_PID_SVEON_STV20_RTL2832U 0xd39d +#define USB_PID_SVEON_STV21 0xd3b0 +#define USB_PID_SVEON_STV22 0xe401 +#define USB_PID_SVEON_STV22_IT9137 0xe411 +#define USB_PID_SVEON_STV27 0xd3af +#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004 +#define 
USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003 +#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500 +#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001 +#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015 +#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d +#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006 +#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009 +#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007 +#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a +#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011 +#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017 +#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014 +#define USB_PID_TELESTAR_STARSTICK_2 0x8000 +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081 +#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060 +#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058 +#define USB_PID_TERRATEC_CINERGY_S 0x0064 +#define USB_PID_TERRATEC_CINERGY_S2_1 0x1181 +#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182 +#define USB_PID_TERRATEC_CINERGY_S2_BOX 0x0105 +#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8 +#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0 +#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102 +#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105 +#define USB_PID_TERRATEC_CINERGY_T2 0x0038 +#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2 +#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062 +#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093 +#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9 +#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099 +#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069 +#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078 +#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab +#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4 +#define 
USB_PID_TERRATEC_DVBS2CI_V2 0x10ac +#define USB_PID_TERRATEC_H7 0x10b4 +#define USB_PID_TERRATEC_H7_2 0x10a3 +#define USB_PID_TERRATEC_H7_3 0x10a5 +#define USB_PID_TERRATEC_T1 0x10ae +#define USB_PID_TERRATEC_T3 0x10a0 +#define USB_PID_TERRATEC_T5 0x10a1 +#define USB_PID_TEVII_S421 0xd421 +#define USB_PID_TEVII_S480_1 0xd481 +#define USB_PID_TEVII_S480_2 0xd482 +#define USB_PID_TEVII_S482_1 0xd483 +#define USB_PID_TEVII_S482_2 0xd484 +#define USB_PID_TEVII_S630 0xd630 +#define USB_PID_TEVII_S632 0xd632 +#define USB_PID_TEVII_S650 0xd650 +#define USB_PID_TEVII_S660 0xd660 +#define USB_PID_TEVII_S662 0xd662 +#define USB_PID_TINYTWIN 0x3226 +#define USB_PID_TINYTWIN_2 0xe402 +#define USB_PID_TINYTWIN_3 0x9016 +#define USB_PID_TREKSTOR_DVBT 0x901b +#define USB_PID_TREKSTOR_TERRES_2_0 0xC803 +#define USB_PID_TURBOX_DTT_2000 0xd3a4 +#define USB_PID_TWINHAN_VP7021_WARM 0x3208 +#define USB_PID_TWINHAN_VP7041_COLD 0x3201 +#define USB_PID_TWINHAN_VP7041_WARM 0x3202 +#define USB_PID_ULTIMA_ARTEC_T14BR 0x810f +#define USB_PID_ULTIMA_ARTEC_T14_COLD 0x810b +#define USB_PID_ULTIMA_ARTEC_T14_WARM 0x810c +#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107 +#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108 +#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235 +#define USB_PID_ULTIMA_TVBOX_COLD 0x8105 +#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002 +#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a +#define USB_PID_ULTIMA_TVBOX_WARM 0x8106 +#define USB_PID_UNIWILL_STK7700P 0x6003 +#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e +#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f +#define USB_PID_VISIONPLUS_PINNACLE_PCTV310E 0x3211 +#define USB_PID_VISIONPLUS_TINYUSB2_COLD 0x3223 +#define USB_PID_VISIONPLUS_TINYUSB2_WARM 0x3224 +#define USB_PID_VISIONPLUS_VP7020_COLD 0x3203 +#define USB_PID_VISIONPLUS_VP7020_WARM 0x3204 +#define USB_PID_VISIONPLUS_VP7021_COLD 0x3207 +#define USB_PID_VISIONPLUS_VP7041_COLD 
0x3201 +#define USB_PID_VISIONPLUS_VP7041_WARM 0x3202 +#define USB_PID_VISIONPLUS_VP7045_COLD 0x3205 +#define USB_PID_VISIONPLUS_VP7045_WARM 0x3206 +#define USB_PID_WIDEVIEW_DTT200U_COLD 0x0201 +#define USB_PID_WIDEVIEW_DTT200U_WARM 0x0301 +#define USB_PID_WIDEVIEW_DVBT_USB_COLD 0x0001 +#define USB_PID_WIDEVIEW_DVBT_USB_WARM 0x0002 +#define USB_PID_WIDEVIEW_WT220U_COLD 0x0222 +#define USB_PID_WIDEVIEW_WT220U_FC_COLD 0x0225 +#define USB_PID_WIDEVIEW_WT220U_FC_WARM 0x0226 +#define USB_PID_WIDEVIEW_WT220U_WARM 0x0221 +#define USB_PID_WIDEVIEW_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_WIDEVIEW_WT220U_ZL0353_COLD 0x022a +#define USB_PID_WIDEVIEW_WT220U_ZL0353_WARM 0x022b +#define USB_PID_WINFAST_DTV2000DS 0x6a04 +#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12 +#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029 +#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f +#define USB_PID_WINTV_SOLOHD 0x0264 +#define USB_PID_WINTV_SOLOHD_2 0x8268 +#define USB_PID_XTENSIONS_XD_380 0x0381 +#define USB_PID_YUAN_EC372S 0x1edc +#define USB_PID_YUAN_MC770 0x0871 +#define USB_PID_YUAN_PD378S 0x2edc +#define USB_PID_YUAN_STK7700D 0x1efc +#define USB_PID_YUAN_STK7700D_2 0x1e8c +#define USB_PID_YUAN_STK7700PH 0x1f08 + +#endif diff --git a/6.18.0/include-overrides/media/dvb_ca_en50221.h b/6.18.0/include-overrides/media/dvb_ca_en50221.h new file mode 100644 index 0000000..a1c014b --- /dev/null +++ b/6.18.0/include-overrides/media/dvb_ca_en50221.h @@ -0,0 +1,142 @@ +/* + * dvb_ca.h: generic DVB functions for EN50221 CA interfaces + * + * Copyright (C) 2004 Andrew de Quincey + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DVB_CA_EN50221_H_ +#define _DVB_CA_EN50221_H_ + +#include +#include + +#include + +#define DVB_CA_EN50221_POLL_CAM_PRESENT 1 +#define DVB_CA_EN50221_POLL_CAM_CHANGED 2 +#define DVB_CA_EN50221_POLL_CAM_READY 4 + +#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1 +#define DVB_CA_EN50221_FLAG_IRQ_FR 2 +#define DVB_CA_EN50221_FLAG_IRQ_DA 4 + +#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0 +#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1 + +/** + * struct dvb_ca_en50221- Structure describing a CA interface + * + * @owner: the module owning this structure + * @read_attribute_mem: function for reading attribute memory on the CAM + * @write_attribute_mem: function for writing attribute memory on the CAM + * @read_cam_control: function for reading the control interface on the CAM + * @write_cam_control: function for reading the control interface on the CAM + * @read_data: function for reading data (block mode) + * @write_data: function for writing data (block mode) + * @slot_reset: function to reset the CAM slot + * @slot_shutdown: function to shutdown a CAM slot + * @slot_ts_enable: function to enable the Transport Stream on a CAM slot + * @poll_slot_status: function to poll slot status. Only necessary if + * DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set. + * @data: private data, used by caller. + * @private: Opaque data used by the dvb_ca core. Do not modify! + * + * NOTE: the read_*, write_* and poll_slot_status functions will be + * called for different slots concurrently and need to use locks where + * and if appropriate. There will be no concurrent access to one slot. 
+ */ +struct dvb_ca_en50221 { + struct module *owner; + + int (*read_attribute_mem)(struct dvb_ca_en50221 *ca, + int slot, int address); + int (*write_attribute_mem)(struct dvb_ca_en50221 *ca, + int slot, int address, u8 value); + + int (*read_cam_control)(struct dvb_ca_en50221 *ca, + int slot, u8 address); + int (*write_cam_control)(struct dvb_ca_en50221 *ca, + int slot, u8 address, u8 value); + + int (*read_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*write_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + + int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); + int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); + int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); + + int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open); + + void *data; + + void *private; +}; + +/* + * Functions for reporting IRQ events + */ + +/** + * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + * @change_type: One of the DVB_CA_CAMCHANGE_* values + */ +void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, + int change_type); + +/** + * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot); + +/** + * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot); + +/* + * Initialisation/shutdown functions + */ + +/** + * dvb_ca_en50221_init - Initialise a new DVB CA device. + * + * @dvb_adapter: DVB adapter to attach the new CA device to. + * @ca: The dvb_ca instance. + * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*). + * @slot_count: Number of slots supported. 
+ * + * @return 0 on success, nonzero on failure + */ +int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221 *ca, int flags, + int slot_count); + +/** + * dvb_ca_en50221_release - Release a DVB CA device. + * + * @ca: The associated dvb_ca instance. + */ +void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca); + +#endif diff --git a/6.18.0/include-overrides/media/dvb_demux.h b/6.18.0/include-overrides/media/dvb_demux.h new file mode 100644 index 0000000..3b6aeca --- /dev/null +++ b/6.18.0/include-overrides/media/dvb_demux.h @@ -0,0 +1,354 @@ +/* + * dvb_demux.h: DVB kernel demux API + * + * Copyright (C) 2000-2001 Marcus Metzler & Ralph Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_DEMUX_H_ +#define _DVB_DEMUX_H_ + +#include +#include +#include +#include + +#include + +/** + * enum dvb_dmx_filter_type - type of demux feed. + * + * @DMX_TYPE_TS: feed is in TS mode. + * @DMX_TYPE_SEC: feed is in Section mode. + */ +enum dvb_dmx_filter_type { + DMX_TYPE_TS, + DMX_TYPE_SEC, +}; + +/** + * enum dvb_dmx_state - state machine for a demux filter. + * + * @DMX_STATE_FREE: indicates that the filter is freed. + * @DMX_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMX_STATE_READY: indicates that the filter is ready + * to be used. + * @DMX_STATE_GO: indicates that the filter is running. 
+ */ +enum dvb_dmx_state { + DMX_STATE_FREE, + DMX_STATE_ALLOCATED, + DMX_STATE_READY, + DMX_STATE_GO, +}; + +#define DVB_DEMUX_MASK_MAX 18 + +#define MAX_PID 0x1fff + +#define SPEED_PKTS_INTERVAL 50000 + +/** + * struct dvb_demux_filter - Describes a DVB demux section filter. + * + * @filter: Section filter as defined by &struct dmx_section_filter. + * @maskandmode: logical ``and`` bit mask. + * @maskandnotmode: logical ``and not`` bit mask. + * @doneq: flag that indicates when a filter is ready. + * @next: pointer to the next section filter. + * @feed: &struct dvb_demux_feed pointer. + * @index: index of the used demux filter. + * @state: state of the filter as described by &enum dvb_dmx_state. + * @type: type of the filter as described + * by &enum dvb_dmx_filter_type. + */ + +struct dvb_demux_filter { + struct dmx_section_filter filter; + u8 maskandmode[DMX_MAX_FILTER_SIZE]; + u8 maskandnotmode[DMX_MAX_FILTER_SIZE]; + bool doneq; + + struct dvb_demux_filter *next; + struct dvb_demux_feed *feed; + int index; + enum dvb_dmx_state state; + enum dvb_dmx_filter_type type; + + /* private: used only by av7110 */ + u16 hw_handle; +}; + +/** + * struct dvb_demux_feed - describes a DVB field + * + * @feed: a union describing a digital TV feed. + * Depending on the feed type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct dmx_ts_feed pointer. + * For TS feed only. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @cb: a union describing digital TV callbacks. + * Depending on the feed type, it can be either + * @cb.ts or @cb.sec. + * @cb.ts: a dmx_ts_cb() calback function pointer. + * For TS feed only. + * @cb.sec: a dmx_section_cb() callback function pointer. + * For section feed only. + * @demux: pointer to &struct dvb_demux. + * @priv: private data that can optionally be used by a DVB driver. + * @type: type of the filter, as defined by &enum dvb_dmx_filter_type. 
+ * @state: state of the filter as defined by &enum dvb_dmx_state. + * @pid: PID to be filtered. + * @timeout: feed timeout. + * @filter: pointer to &struct dvb_demux_filter. + * @buffer_flags: Buffer flags used to report discontinuity users via DVB + * memory mapped API, as defined by &enum dmx_buffer_flags. + * @ts_type: type of TS, as defined by &enum ts_filter_type. + * @pes_type: type of PES, as defined by &enum dmx_ts_pes. + * @cc: MPEG-TS packet continuity counter + * @pusi_seen: if true, indicates that a discontinuity was detected. + * it is used to prevent feeding of garbage from previous section. + * @peslen: length of the PES (Packet Elementary Stream). + * @list_head: head for the list of digital TV demux feeds. + * @index: a unique index for each feed. Can be used as hardware + * pid filter index. + */ +struct dvb_demux_feed { + union { + struct dmx_ts_feed ts; + struct dmx_section_feed sec; + } feed; + + union { + dmx_ts_cb ts; + dmx_section_cb sec; + } cb; + + struct dvb_demux *demux; + void *priv; + enum dvb_dmx_filter_type type; + enum dvb_dmx_state state; + u16 pid; + + ktime_t timeout; + struct dvb_demux_filter *filter; + + u32 buffer_flags; + + enum ts_filter_type ts_type; + enum dmx_ts_pes pes_type; + + int cc; + bool pusi_seen; + + u16 peslen; + + struct list_head list_head; + unsigned int index; +}; + +/** + * struct dvb_demux - represents a digital TV demux + * @dmx: embedded &struct dmx_demux with demux capabilities + * and callbacks. + * @priv: private data that can optionally be used by + * a DVB driver. + * @filternum: maximum amount of DVB filters. + * @feednum: maximum amount of DVB feeds. + * @start_feed: callback routine to be called in order to start + * a DVB feed. + * @stop_feed: callback routine to be called in order to stop + * a DVB feed. + * @write_to_decoder: callback routine to be called if the feed is TS and + * it is routed to an A/V decoder, when a new TS packet + * is received. + * Used only on av7110-av.c. 
+ * @check_crc32: callback routine to check CRC. If not initialized, + * dvb_demux will use an internal one. + * @memcopy: callback routine to memcopy received data. + * If not initialized, dvb_demux will default to memcpy(). + * @users: counter for the number of demux opened file descriptors. + * Currently, it is limited to 10 users. + * @filter: pointer to &struct dvb_demux_filter. + * @feed: pointer to &struct dvb_demux_feed. + * @frontend_list: &struct list_head with frontends used by the demux. + * @pesfilter: array of &struct dvb_demux_feed with the PES types + * that will be filtered. + * @pids: list of filtered program IDs. + * @feed_list: &struct list_head with feeds. + * @tsbuf: temporary buffer used internally to store TS packets. + * @tsbufp: temporary buffer index used internally. + * @mutex: pointer to &struct mutex used to protect feed set + * logic. + * @lock: pointer to &spinlock_t, used to protect buffer handling. + * @cnt_storage: buffer used for TS/TEI continuity check. + * @speed_last_time: &ktime_t used for TS speed check. + * @speed_pkts_cnt: packets count used for TS speed check. 
+ */ +struct dvb_demux { + struct dmx_demux dmx; + void *priv; + int filternum; + int feednum; + int (*start_feed)(struct dvb_demux_feed *feed); + int (*stop_feed)(struct dvb_demux_feed *feed); + int (*write_to_decoder)(struct dvb_demux_feed *feed, + const u8 *buf, size_t len); + u32 (*check_crc32)(struct dvb_demux_feed *feed, + const u8 *buf, size_t len); + void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst, + const u8 *src, size_t len); + + int users; +#define MAX_DVB_DEMUX_USERS 10 + struct dvb_demux_filter *filter; + struct dvb_demux_feed *feed; + + struct list_head frontend_list; + + struct dvb_demux_feed *pesfilter[DMX_PES_OTHER]; + u16 pids[DMX_PES_OTHER]; + +#define DMX_MAX_PID 0x2000 + struct list_head feed_list; + u8 tsbuf[204]; + int tsbufp; + + struct mutex mutex; + spinlock_t lock; + + uint8_t *cnt_storage; /* for TS continuity check */ + + ktime_t speed_last_time; /* for TS speed check */ + uint32_t speed_pkts_cnt; /* for TS speed check */ + + /* private: used only on av7110 */ + int playing; + int recording; +}; + +/** + * dvb_dmx_init - initialize a digital TV demux struct. + * + * @demux: &struct dvb_demux to be initialized. + * + * Before being able to register a digital TV demux struct, drivers + * should call this routine. On its typical usage, some fields should + * be initialized at the driver before calling it. + * + * A typical usecase is:: + * + * dvb->demux.dmx.capabilities = + * DMX_TS_FILTERING | DMX_SECTION_FILTERING | + * DMX_MEMORY_BASED_FILTERING; + * dvb->demux.priv = dvb; + * dvb->demux.filternum = 256; + * dvb->demux.feednum = 256; + * dvb->demux.start_feed = driver_start_feed; + * dvb->demux.stop_feed = driver_stop_feed; + * ret = dvb_dmx_init(&dvb->demux); + * if (ret < 0) + * return ret; + */ +int dvb_dmx_init(struct dvb_demux *demux); + +/** + * dvb_dmx_release - releases a digital TV demux internal buffers. + * + * @demux: &struct dvb_demux to be released. + * + * The DVB core internally allocates data at @demux. 
This routine + * releases those data. Please notice that the struct itelf is not + * released, as it can be embedded on other structs. + */ +void dvb_dmx_release(struct dvb_demux *demux); + +/** + * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 188 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 188. + * + * The routine will discard a DVB packet that don't start with 0x47. + * + * Use this routine if the DVB demux fills MPEG-TS buffers that are + * already aligned. + * + * NOTE: The @buf size should have size equal to ``count * 188``. + */ +void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, + size_t count); + +/** + * dvb_dmx_swfilter - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 188 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 188. + * + * If a DVB packet doesn't start with 0x47, it will seek for the first + * byte that starts with 0x47. + * + * Use this routine if the DVB demux fill buffers that may not start with + * a packet start mark (0x47). + * + * NOTE: The @buf size should have size equal to ``count * 188``. + */ +void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count); + +/** + * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with + * multiple MPEG-TS packets with 204 bytes each. + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data to be filtered + * @count: number of MPEG-TS packets with size of 204. + * + * If a DVB packet doesn't start with 0x47, it will seek for the first + * byte that starts with 0x47. + * + * Use this routine if the DVB demux fill buffers that may not start with + * a packet start mark (0x47). + * + * NOTE: The @buf size should have size equal to ``count * 204``. 
+ */ +void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, + size_t count); + +/** + * dvb_dmx_swfilter_raw - make the raw data available to userspace without + * filtering + * + * @demux: pointer to &struct dvb_demux + * @buf: buffer with data + * @count: number of packets to be passed. The actual size of each packet + * depends on the &dvb_demux->feed->cb.ts logic. + * + * Use it if the driver needs to deliver the raw payload to userspace without + * passing through the kernel demux. That is meant to support some + * delivery systems that aren't based on MPEG-TS. + * + * This function relies on &dvb_demux->feed->cb.ts to actually handle the + * buffer. + */ +void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, + size_t count); + +#endif /* _DVB_DEMUX_H_ */ diff --git a/6.18.0/include-overrides/media/dvb_frontend.h b/6.18.0/include-overrides/media/dvb_frontend.h new file mode 100644 index 0000000..e7c4487 --- /dev/null +++ b/6.18.0/include-overrides/media/dvb_frontend.h @@ -0,0 +1,834 @@ +/* + * dvb_frontend.h + * + * The Digital TV Frontend kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * frontend layer. + * + * Copyright (C) 2001 convergence integrated media GmbH + * Copyright (C) 2004 convergence GmbH + * + * Written by Ralph Metzler + * Overhauled by Holger Waechtler + * Kernel I2C stuff by Michael Hunold + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVB_FRONTEND_H_ +#define _DVB_FRONTEND_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +/* + * Maximum number of Delivery systems per frontend. It + * should be smaller or equal to 32 + */ +#define MAX_DELSYS 8 + +/* Helper definitions to be used at frontend drivers */ +#define kHz 1000UL +#define MHz 1000000UL + +/** + * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning + * + * @min_delay_ms: minimum delay for tuning, in ms + * @step_size: step size between two consecutive frequencies + * @max_drift: maximum drift + * + * NOTE: step_size is in Hz, for terrestrial/cable or kHz for satellite + */ +struct dvb_frontend_tune_settings { + int min_delay_ms; + int step_size; + int max_drift; +}; + +struct dvb_frontend; + +/** + * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths + * + * @name: name of the Frontend + * @frequency_min_hz: minimal frequency supported in Hz + * @frequency_max_hz: maximum frequency supported in Hz + * @frequency_step_hz: frequency step in Hz + * @bandwidth_min: minimal frontend bandwidth supported + * @bandwidth_max: maximum frontend bandwidth supported + * @bandwidth_step: frontend bandwidth step + */ +struct dvb_tuner_info { + char name[128]; + + u32 frequency_min_hz; + u32 frequency_max_hz; + u32 frequency_step_hz; + + u32 bandwidth_min; + u32 bandwidth_max; + u32 bandwidth_step; +}; + +/** + * struct analog_parameters - Parameters to tune into an analog/radio channel + * + * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz step, + * for TV, or 62.5 Hz for radio) + * @mode: Tuner mode, as defined on enum v4l2_tuner_type + * @audmode: Audio mode as defined for 
the rxsubchans field at videodev2.h, + * e. g. V4L2_TUNER_MODE_* + * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_* + * + * Hybrid tuners should be supported by both V4L2 and DVB APIs. This + * struct contains the data that are used by the V4L2 side. To avoid + * dependencies from V4L2 headers, all enums here are declared as integers. + */ +struct analog_parameters { + unsigned int frequency; + unsigned int mode; + unsigned int audmode; + u64 std; +}; + +/** + * enum dvbfe_algo - defines the algorithm used to tune into a channel + * + * @DVBFE_ALGO_HW: Hardware Algorithm - + * Devices that support this algorithm do everything in hardware + * and no software support is needed to handle them. + * Requesting these devices to LOCK is the only thing required, + * device is supposed to do everything in the hardware. + * + * @DVBFE_ALGO_SW: Software Algorithm - + * These are dumb devices, that require software to do everything + * + * @DVBFE_ALGO_CUSTOM: Customizable Agorithm - + * Devices having this algorithm can be customized to have specific + * algorithms in the frontend driver, rather than simply doing a + * software zig-zag. In this case the zigzag maybe hardware assisted + * or it maybe completely done in hardware. In all cases, usage of + * this algorithm, in conjunction with the search and track + * callbacks, utilizes the driver specific algorithm. 
+ * + * @DVBFE_ALGO_RECOVERY: Recovery Algorithm - + * These devices have AUTO recovery capabilities from LOCK failure + */ +enum dvbfe_algo { + DVBFE_ALGO_HW = BIT(0), + DVBFE_ALGO_SW = BIT(1), + DVBFE_ALGO_CUSTOM = BIT(2), + DVBFE_ALGO_RECOVERY = BIT(31), +}; + +/** + * enum dvbfe_search - search callback possible return status + * + * @DVBFE_ALGO_SEARCH_SUCCESS: + * The frontend search algorithm completed and returned successfully + * + * @DVBFE_ALGO_SEARCH_ASLEEP: + * The frontend search algorithm is sleeping + * + * @DVBFE_ALGO_SEARCH_FAILED: + * The frontend search for a signal failed + * + * @DVBFE_ALGO_SEARCH_INVALID: + * The frontend search algorithm was probably supplied with invalid + * parameters and the search is an invalid one + * + * @DVBFE_ALGO_SEARCH_ERROR: + * The frontend search algorithm failed due to some error + * + * @DVBFE_ALGO_SEARCH_AGAIN: + * The frontend search algorithm was requested to search again + */ +enum dvbfe_search { + DVBFE_ALGO_SEARCH_SUCCESS = BIT(0), + DVBFE_ALGO_SEARCH_ASLEEP = BIT(1), + DVBFE_ALGO_SEARCH_FAILED = BIT(2), + DVBFE_ALGO_SEARCH_INVALID = BIT(3), + DVBFE_ALGO_SEARCH_AGAIN = BIT(4), + DVBFE_ALGO_SEARCH_ERROR = BIT(31), +}; + +/** + * struct dvb_tuner_ops - Tuner information and callbacks + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @set_params: callback function used to inform the tuner to tune + * into a digital TV channel. The properties to be used + * are stored at &struct dvb_frontend.dtv_property_cache. 
+ * The tuner demod can change the parameters to reflect + * the changes needed for the channel to be tuned, and + * update statistics. This is the recommended way to set + * the tuner parameters and should be used on newer + * drivers. + * @set_analog_params: callback function used to tune into an analog TV + * channel on hybrid tuners. It passes @analog_parameters + * to the driver. + * @set_config: callback function used to send some tuner-specific + * parameters. + * @get_frequency: get the actual tuned frequency + * @get_bandwidth: get the bandwidth used by the low pass filters + * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband, + * should return 0. + * @get_status: returns the frontend lock status + * @get_rf_strength: returns the RF signal strength. Used mostly to support + * analog TV and radio. Digital TV should report, instead, + * via DVBv5 API (&struct dvb_frontend.dtv_property_cache). + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @calc_regs: callback function used to pass register data settings + * for simple tuners. Shouldn't be used on newer drivers. + * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers. + * @set_bandwidth: Set a new bandwidth. Shouldn't be used on newer drivers. + * + * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for + * terrestrial/cable or kHz for satellite. 
+ * + */ +struct dvb_tuner_ops { + + struct dvb_tuner_info info; + + void (*release)(struct dvb_frontend *fe); + int (*init)(struct dvb_frontend *fe); + int (*sleep)(struct dvb_frontend *fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + /* This is the recommended way to set the tuner */ + int (*set_params)(struct dvb_frontend *fe); + int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p); + + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); + + int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency); + int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth); + int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency); + +#define TUNER_STATUS_LOCKED 1 +#define TUNER_STATUS_STEREO 2 + int (*get_status)(struct dvb_frontend *fe, u32 *status); + int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + + /* + * This is support for demods like the mt352 - fills out the supplied + * buffer with what to write. + * + * Don't use on newer drivers. + */ + int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len); + + /* + * These are provided separately from set_params in order to + * facilitate silicon tuners which require sophisticated tuning loops, + * controlling each parameter separately. + * + * Don't use on newer drivers. 
+ */ + int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); + int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); +}; + +/** + * struct analog_demod_info - Information struct for analog TV part of the demod + * + * @name: Name of the analog TV demodulator + */ +struct analog_demod_info { + char *name; +}; + +/** + * struct analog_demod_ops - Demodulation information and callbacks for + * analog TV and radio + * + * @info: pointer to struct analog_demod_info + * @set_params: callback function used to inform the demod to set the + * demodulator parameters needed to decode an analog or + * radio channel. The properties are passed via + * &struct analog_params. + * @has_signal: returns 0xffff if has signal, or 0 if it doesn't. + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @tuner_status: callback function that returns tuner status bits, e. g. + * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO. + * @standby: set the tuner to standby mode. + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @set_config: callback function used to send some tuner-specific + * parameters. 
+ */ +struct analog_demod_ops { + + struct analog_demod_info info; + + void (*set_params)(struct dvb_frontend *fe, + struct analog_parameters *params); + int (*has_signal)(struct dvb_frontend *fe, u16 *signal); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + void (*tuner_status)(struct dvb_frontend *fe); + void (*standby)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend *fe); + int (*i2c_gate_ctrl)(struct dvb_frontend *fe, int enable); + + /** This is to allow setting tuner-specific configuration */ + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); +}; + +struct dtv_frontend_properties; + +/** + * struct dvb_frontend_internal_info - Frontend properties and capabilities + * + * @name: Name of the frontend + * @frequency_min_hz: Minimal frequency supported by the frontend. + * @frequency_max_hz: Maximal frequency supported by the frontend. + * @frequency_stepsize_hz: All frequencies are multiple of this value. + * @frequency_tolerance_hz: Frequency tolerance. + * @symbol_rate_min: Minimal symbol rate, in bauds + * (for Cable/Satellite systems). + * @symbol_rate_max: Maximal symbol rate, in bauds + * (for Cable/Satellite systems). + * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm + * (for Cable/Satellite systems). + * @caps: Capabilities supported by the frontend, + * as specified in &enum fe_caps. + */ +struct dvb_frontend_internal_info { + char name[128]; + u32 frequency_min_hz; + u32 frequency_max_hz; + u32 frequency_stepsize_hz; + u32 frequency_tolerance_hz; + u32 symbol_rate_min; + u32 symbol_rate_max; + u32 symbol_rate_tolerance; + enum fe_caps caps; +}; + +/** + * struct dvb_frontend_ops - Demodulation information and callbacks for + * digital TV + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @delsys: Delivery systems supported by the frontend + * @detach: callback function called when frontend is detached. 
+ * drivers should clean up, but not yet free the &struct + * dvb_frontend allocation. + * @release: callback function called when frontend is ready to be + * freed. + * drivers should free any allocated memory. + * @release_sec: callback function requesting that the Satellite Equipment + * Control (SEC) driver to release and free any memory + * allocated by the driver. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @write: callback function used by some demod legacy drivers to + * allow other drivers to write data into their registers. + * Should not be used on new drivers. + * @tune: callback function used by demod drivers that use + * @DVBFE_ALGO_HW to tune into a frequency. + * @get_frontend_algo: returns the desired hardware algorithm. + * @set_frontend: callback function used to inform the demod to set the + * parameters for demodulating a digital TV channel. + * The properties to be used are stored at &struct + * dvb_frontend.dtv_property_cache. The demod can change + * the parameters to reflect the changes needed for the + * channel to be decoded, and update statistics. + * @get_tune_settings: callback function + * @get_frontend: callback function used to inform the parameters + * actually in use. The properties to be used are stored at + * &struct dvb_frontend.dtv_property_cache and update + * statistics. Please notice that it should not return + * an error code if the statistics are not available + * because the demod is not locked. + * @read_status: returns the locking status of the frontend. + * @read_ber: legacy callback function to return the bit error rate. + * Newer drivers should provide such info via DVBv5 API, + * e. g. 
@set_frontend;/@get_frontend, implementing this + * callback only if DVBv3 API compatibility is wanted. + * @read_signal_strength: legacy callback function to return the signal + * strength. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @read_snr: legacy callback function to return the Signal/Noise + * rate. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @read_ucblocks: legacy callback function to return the Uncorrected Error + * Blocks. Newer drivers should provide such info via + * DVBv5 API, e. g. @set_frontend/@get_frontend, + * implementing this callback only if DVBv3 API + * compatibility is wanted. + * @diseqc_reset_overload: callback function to implement the + * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite) + * @diseqc_send_master_cmd: callback function to implement the + * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite). + * @diseqc_recv_slave_reply: callback function to implement the + * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite) + * @diseqc_send_burst: callback function to implement the + * FE_DISEQC_SEND_BURST() ioctl (only Satellite). + * @set_tone: callback function to implement the + * FE_SET_TONE() ioctl (only Satellite). + * @set_voltage: callback function to implement the + * FE_SET_VOLTAGE() ioctl (only Satellite). + * @enable_high_lnb_voltage: callback function to implement the + * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite). + * @dishnetwork_send_legacy_command: callback function to implement the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite). + * Drivers should not use this, except when the DVB + * core emulation fails to provide proper support (e.g. 
+ * if @set_voltage takes more than 8ms to work), and + * when backward compatibility with this legacy API is + * required. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @ts_bus_ctrl: callback function used to take control of the TS bus. + * @set_lna: callback function to power on/off/auto the LNA. + * @search: callback function used on some custom algo search algos. + * @tuner_ops: pointer to &struct dvb_tuner_ops + * @analog_ops: pointer to &struct analog_demod_ops + */ +struct dvb_frontend_ops { + struct dvb_frontend_internal_info info; + + u8 delsys[MAX_DELSYS]; + + void (*detach)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend* fe); + void (*release_sec)(struct dvb_frontend* fe); + + int (*init)(struct dvb_frontend* fe); + int (*sleep)(struct dvb_frontend* fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + int (*write)(struct dvb_frontend* fe, const u8 buf[], int len); + + /* if this is set, it overrides the default swzigzag */ + int (*tune)(struct dvb_frontend* fe, + bool re_tune, + unsigned int mode_flags, + unsigned int *delay, + enum fe_status *status); + + /* get frontend tuning algorithm from the module */ + enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe); + + /* these two are only used for the swzigzag code */ + int (*set_frontend)(struct dvb_frontend *fe); + int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings); + + int (*get_frontend)(struct dvb_frontend *fe, + struct dtv_frontend_properties *props); + + int (*read_status)(struct dvb_frontend *fe, enum fe_status *status); + int (*read_ber)(struct dvb_frontend* fe, u32* ber); + int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength); + int (*read_snr)(struct dvb_frontend* fe, u16* snr); + int (*read_ucblocks)(struct dvb_frontend* fe, u32* ucblocks); + + int (*diseqc_reset_overload)(struct dvb_frontend* fe); + int 
(*diseqc_send_master_cmd)(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd); + int (*diseqc_recv_slave_reply)(struct dvb_frontend* fe, struct dvb_diseqc_slave_reply* reply); + int (*diseqc_send_burst)(struct dvb_frontend *fe, + enum fe_sec_mini_cmd minicmd); + int (*set_tone)(struct dvb_frontend *fe, enum fe_sec_tone_mode tone); + int (*set_voltage)(struct dvb_frontend *fe, + enum fe_sec_voltage voltage); + int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg); + int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd); + int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable); + int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); + int (*set_lna)(struct dvb_frontend *); + + /* + * These callbacks are for devices that implement their own + * tuning algorithms, rather than a simple swzigzag + */ + enum dvbfe_search (*search)(struct dvb_frontend *fe); + + struct dvb_tuner_ops tuner_ops; + struct analog_demod_ops analog_ops; +}; + +#ifdef __DVB_CORE__ +#define MAX_EVENT 8 + +/* Used only internally at dvb_frontend.c */ +struct dvb_fe_events { + struct dvb_frontend_event events[MAX_EVENT]; + int eventw; + int eventr; + int overflow; + wait_queue_head_t wait_queue; + struct mutex mtx; +}; +#endif + +/** + * struct dtv_frontend_properties - contains a list of properties that are + * specific to a digital TV standard. + * + * @frequency: frequency in Hz for terrestrial/cable or in kHz for + * Satellite + * @modulation: Frontend modulation type + * @voltage: SEC voltage (only Satellite) + * @sectone: SEC tone mode (only Satellite) + * @inversion: Spectral inversion + * @fec_inner: Forward error correction inner Code Rate + * @transmission_mode: Transmission Mode + * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace + * wants to autodetect. 
+ * @guard_interval: Guard Interval + * @hierarchy: Hierarchy + * @symbol_rate: Symbol Rate + * @code_rate_HP: high priority stream code rate + * @code_rate_LP: low priority stream code rate + * @pilot: Enable/disable/autodetect pilot tones + * @rolloff: Rolloff factor (alpha) + * @delivery_system: FE delivery system (e. g. digital TV standard) + * @interleaving: interleaving + * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard) + * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard) + * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard) + * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard) + * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard) + * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard) + * @layer: ISDB per-layer data (only ISDB standard) + * @layer.segment_count: Segment Count; + * @layer.fec: per layer code rate; + * @layer.modulation: per layer modulation; + * @layer.interleaving: per layer interleaving. + * @stream_id: If different than zero, enable substream filtering, if + * hardware supports (DVB-S2 and DVB-T2). + * @scrambling_sequence_index: Carries the index of the DVB-S2 physical layer + * scrambling sequence. 
+ * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel) + * signaling data (only ATSC-M/H) + * @atscmh_parade_id: Parade identification number (only ATSC-M/H) + * @atscmh_nog: Number of MH groups per MH subframe for a designated + * parade (only ATSC-M/H) + * @atscmh_tnog: Total number of MH groups including all MH groups + * belonging to all MH parades in one MH subframe + * (only ATSC-M/H) + * @atscmh_sgn: Start group number (only ATSC-M/H) + * @atscmh_prc: Parade repetition cycle (only ATSC-M/H) + * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H) + * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H) + * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H) + * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H) + * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC) + * Block Mode (only ATSC-M/H) + * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H) + * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H) + * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H) + * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H) + * @lna: Power ON/OFF/AUTO the Linear Low-noise Amplifier (LNA) + * @strength: DVBv5 API statistics: Signal Strength + * @cnr: DVBv5 API statistics: Signal to Noise ratio of the + * (main) carrier + * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count + * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count + * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count + * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count + * @block_error: DVBv5 API statistics: block error count + * @block_count: DVBv5 API statistics: block count + * + * NOTE: derived statistics like Uncorrected Error blocks (UCE) are + * calculated on userspace. + * + * Only a subset of the properties are needed for a given delivery system. 
+ * For more info, consult the media_api.html with the documentation of the + * Userspace API. + */ +struct dtv_frontend_properties { + u32 frequency; + enum fe_modulation modulation; + + enum fe_sec_voltage voltage; + enum fe_sec_tone_mode sectone; + enum fe_spectral_inversion inversion; + enum fe_code_rate fec_inner; + enum fe_transmit_mode transmission_mode; + u32 bandwidth_hz; /* 0 = AUTO */ + enum fe_guard_interval guard_interval; + enum fe_hierarchy hierarchy; + u32 symbol_rate; + enum fe_code_rate code_rate_HP; + enum fe_code_rate code_rate_LP; + + enum fe_pilot pilot; + enum fe_rolloff rolloff; + + enum fe_delivery_system delivery_system; + + enum fe_interleaving interleaving; + + /* ISDB-T specifics */ + u8 isdbt_partial_reception; + u8 isdbt_sb_mode; + u8 isdbt_sb_subchannel; + u32 isdbt_sb_segment_idx; + u32 isdbt_sb_segment_count; + u8 isdbt_layer_enabled; + struct { + u8 segment_count; + enum fe_code_rate fec; + enum fe_modulation modulation; + u8 interleaving; + } layer[3]; + + /* Multistream specifics */ + u32 stream_id; + + /* Physical Layer Scrambling specifics */ + u32 scrambling_sequence_index; + + /* ATSC-MH specifics */ + u8 atscmh_fic_ver; + u8 atscmh_parade_id; + u8 atscmh_nog; + u8 atscmh_tnog; + u8 atscmh_sgn; + u8 atscmh_prc; + + u8 atscmh_rs_frame_mode; + u8 atscmh_rs_frame_ensemble; + u8 atscmh_rs_code_mode_pri; + u8 atscmh_rs_code_mode_sec; + u8 atscmh_sccc_block_mode; + u8 atscmh_sccc_code_mode_a; + u8 atscmh_sccc_code_mode_b; + u8 atscmh_sccc_code_mode_c; + u8 atscmh_sccc_code_mode_d; + + u32 lna; + + /* statistics data */ + struct dtv_fe_stats strength; + struct dtv_fe_stats cnr; + struct dtv_fe_stats pre_bit_error; + struct dtv_fe_stats pre_bit_count; + struct dtv_fe_stats post_bit_error; + struct dtv_fe_stats post_bit_count; + struct dtv_fe_stats block_error; + struct dtv_fe_stats block_count; +}; + +#define DVB_FE_NO_EXIT 0 +#define DVB_FE_NORMAL_EXIT 1 +#define DVB_FE_DEVICE_REMOVED 2 +#define DVB_FE_DEVICE_RESUME 3 + +/** + * 
struct dvb_frontend - Frontend structure to be used on drivers. + * + * @refcount: refcount to keep track of &struct dvb_frontend + * references + * @ops: embedded &struct dvb_frontend_ops + * @dvb: pointer to &struct dvb_adapter + * @demodulator_priv: demod private data + * @tuner_priv: tuner private data + * @frontend_priv: frontend private data + * @sec_priv: SEC private data + * @analog_demod_priv: Analog demod private data + * @dtv_property_cache: embedded &struct dtv_frontend_properties + * @callback: callback function used on some drivers to call + * either the tuner or the demodulator. + * @id: Frontend ID + * @exit: Used to inform the DVB core that the frontend + * thread should exit (usually, means that the hardware + * got disconnected. + */ + +struct dvb_frontend { + struct kref refcount; + struct dvb_frontend_ops ops; + struct dvb_adapter *dvb; + void *demodulator_priv; + void *tuner_priv; + void *frontend_priv; + void *sec_priv; + void *analog_demod_priv; + struct dtv_frontend_properties dtv_property_cache; +#define DVB_FRONTEND_COMPONENT_TUNER 0 +#define DVB_FRONTEND_COMPONENT_DEMOD 1 + int (*callback)(void *adapter_priv, int component, int cmd, int arg); + int id; + unsigned int exit; +}; + +/** + * dvb_register_frontend() - Registers a DVB frontend at the adapter + * + * @dvb: pointer to &struct dvb_adapter + * @fe: pointer to &struct dvb_frontend + * + * Allocate and initialize the private data needed by the frontend core to + * manage the frontend and calls dvb_register_device() to register a new + * frontend. It also cleans the property cache that stores the frontend + * parameters and selects the first available delivery system. 
+ */ +int dvb_register_frontend(struct dvb_adapter *dvb, + struct dvb_frontend *fe); + +/** + * dvb_unregister_frontend() - Unregisters a DVB frontend + * + * @fe: pointer to &struct dvb_frontend + * + * Stops the frontend kthread, calls dvb_unregister_device() and frees the + * private frontend data allocated by dvb_register_frontend(). + * + * NOTE: This function doesn't frees the memory allocated by the demod, + * by the SEC driver and by the tuner. In order to free it, an explicit call to + * dvb_frontend_detach() is needed, after calling this function. + */ +int dvb_unregister_frontend(struct dvb_frontend *fe); + +/** + * dvb_frontend_detach() - Detaches and frees frontend specific data + * + * @fe: pointer to &struct dvb_frontend + * + * This function should be called after dvb_unregister_frontend(). It + * calls the SEC, tuner and demod release functions: + * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release, + * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release. + * + * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases + * the module reference count, needed to allow userspace to remove the + * previously used DVB frontend modules. + */ +void dvb_frontend_detach(struct dvb_frontend *fe); + +/** + * dvb_frontend_suspend() - Suspends a Digital TV frontend + * + * @fe: pointer to &struct dvb_frontend + * + * This function prepares a Digital TV frontend to suspend. + * + * In order to prepare the tuner to suspend, if + * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise, + * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available. + * + * It will also call &dvb_frontend_ops.suspend\(\) to put the demod to suspend, + * if available. Otherwise it will call &dvb_frontend_ops.sleep\(\). + * + * The drivers should also call dvb_frontend_suspend\(\) as part of their + * handler for the &device_driver.suspend\(\). 
+ */ +int dvb_frontend_suspend(struct dvb_frontend *fe); + +/** + * dvb_frontend_resume() - Resumes a Digital TV frontend + * + * @fe: pointer to &struct dvb_frontend + * + * This function resumes the usual operation of the tuner after resume. + * + * In order to resume the frontend, it calls the demod + * &dvb_frontend_ops.resume\(\) if available. Otherwise it calls demod + * &dvb_frontend_ops.init\(\). + * + * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it. + * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available. + * + * Once tuner and demods are resumed, it will enforce that the SEC voltage and + * tone are restored to their previous values and wake up the frontend's + * kthread in order to retune the frontend. + * + * The drivers should also call dvb_frontend_resume() as part of their + * handler for the &device_driver.resume\(\). + */ +int dvb_frontend_resume(struct dvb_frontend *fe); + +/** + * dvb_frontend_reinitialise() - forces a reinitialisation at the frontend + * + * @fe: pointer to &struct dvb_frontend + * + * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\), + * and resets SEC tone and voltage (for Satellite systems). + * + * NOTE: Currently, this function is used only by one driver (budget-av). + * It seems to be needed to address some special issue with that specific + * frontend. + */ +void dvb_frontend_reinitialise(struct dvb_frontend *fe); + +/** + * dvb_frontend_sleep_until() - Sleep for the amount of time given by + * add_usec parameter + * + * @waketime: pointer to &struct ktime_t + * @add_usec: time to sleep, in microseconds + * + * This function is used to measure the time required for the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise + * as possible, as it affects the detection of the dish tone command at the + * satellite subsystem. 
+ * + * Its used internally by the DVB frontend core, in order to emulate + * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\) + * callback. + * + * NOTE: it should not be used at the drivers, as the emulation for the + * legacy callback is provided by the Kernel. The only situation where this + * should be at the drivers is when there are some bugs at the hardware that + * would prevent the core emulation to work. On such cases, the driver would + * be writing a &dvb_frontend_ops.dishnetwork_send_legacy_command\(\) and + * calling this function directly. + */ +void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec); + +#endif diff --git a/6.18.0/include-overrides/media/dvb_net.h b/6.18.0/include-overrides/media/dvb_net.h new file mode 100644 index 0000000..4a921ea --- /dev/null +++ b/6.18.0/include-overrides/media/dvb_net.h @@ -0,0 +1,95 @@ +/* + * dvb_net.h + * + * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_NET_H_ +#define _DVB_NET_H_ + +#include + +#include + +struct net_device; + +#define DVB_NET_DEVICES_MAX 10 + +#ifdef CONFIG_DVB_NET + +/** + * struct dvb_net - describes a DVB network interface + * + * @dvbdev: pointer to &struct dvb_device. + * @device: array of pointers to &struct net_device. + * @state: array of integers to each net device. A value + * different than zero means that the interface is + * in usage. 
+ * @exit: flag to indicate when the device is being removed. + * @demux: pointer to &struct dmx_demux. + * @ioctl_mutex: protect access to this struct. + * @remove_mutex: mutex that avoids a race condition between a callback + * called when the hardware is disconnected and the + * file_operations of dvb_net. + * + * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network + * devices. + */ + +struct dvb_net { + struct dvb_device *dvbdev; + struct net_device *device[DVB_NET_DEVICES_MAX]; + int state[DVB_NET_DEVICES_MAX]; + unsigned int exit:1; + struct dmx_demux *demux; + struct mutex ioctl_mutex; + struct mutex remove_mutex; +}; + +/** + * dvb_net_init - initializes a digital TV network device and registers it. + * + * @adap: pointer to &struct dvb_adapter. + * @dvbnet: pointer to &struct dvb_net. + * @dmxdemux: pointer to &struct dmx_demux. + */ +int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet, + struct dmx_demux *dmxdemux); + +/** + * dvb_net_release - releases a digital TV network device and unregisters it. + * + * @dvbnet: pointer to &struct dvb_net. 
+ */ +void dvb_net_release(struct dvb_net *dvbnet); + +#else + +struct dvb_net { + struct dvb_device *dvbdev; +}; + +static inline void dvb_net_release(struct dvb_net *dvbnet) +{ +} + +static inline int dvb_net_init(struct dvb_adapter *adap, + struct dvb_net *dvbnet, struct dmx_demux *dmx) +{ + return 0; +} + +#endif /* ifdef CONFIG_DVB_NET */ + +#endif diff --git a/6.18.0/include-overrides/media/dvb_ringbuffer.h b/6.18.0/include-overrides/media/dvb_ringbuffer.h new file mode 100644 index 0000000..029c8b6 --- /dev/null +++ b/6.18.0/include-overrides/media/dvb_ringbuffer.h @@ -0,0 +1,280 @@ +/* + * + * dvb_ringbuffer.h: ring buffer implementation for the dvb driver + * + * Copyright (C) 2003 Oliver Endriss + * Copyright (C) 2004 Andrew de Quincey + * + * based on code originally found in av7110.c & dvb_ci.c: + * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + */ + +#ifndef _DVB_RINGBUFFER_H_ +#define _DVB_RINGBUFFER_H_ + +#include +#include + +/** + * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework + * + * @data: Area were the ringbuffer data is written + * @size: size of the ringbuffer + * @pread: next position to read + * @pwrite: next position to write + * @error: used by ringbuffer clients to indicate that an error happened. 
+ * @queue: Wait queue used by ringbuffer clients to indicate when buffer + * was filled + * @lock: Spinlock used to protect the ringbuffer + */ +struct dvb_ringbuffer { + u8 *data; + ssize_t size; + ssize_t pread; + ssize_t pwrite; + int error; + + wait_queue_head_t queue; + spinlock_t lock; +}; + +#define DVB_RINGBUFFER_PKTHDRSIZE 3 + +/** + * dvb_ringbuffer_init - initialize ring buffer, lock and queue + * + * @rbuf: pointer to struct dvb_ringbuffer + * @data: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + */ +extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, + size_t len); + +/** + * dvb_ringbuffer_empty - test whether buffer is empty + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_free - returns the number of free bytes in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of free bytes in the buffer + */ +extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of bytes waiting in the buffer + */ +extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_reset - resets the ringbuffer to initial state + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Resets the read and write pointers to zero and flush the buffer. 
+ * + * This counts as a read and write operation + */ +extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf); + +/* + * read routines & macros + */ + +/** + * dvb_ringbuffer_flush - flush buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock + * and wake-up waiting task(s) + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf); + +/** + * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @offs: offset inside the ringbuffer + */ +#define DVB_RINGBUFFER_PEEK(rbuf, offs) \ + ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size]) + +/** + * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes + * + * @rbuf: pointer to struct dvb_ringbuffer + * @num: number of bytes to advance + */ +#define DVB_RINGBUFFER_SKIP(rbuf, num) {\ + (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\ +} + +/** + * dvb_ringbuffer_read_user - Reads a buffer into a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_to_user(). 
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
+ u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_read - Reads a buffer into a pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory at the Kernel space
+ *
+ * Note: this variant returns void; it always consumes @len bytes.
+ */
+extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
+ u8 *buf, size_t len);
+
+/*
+ * write routines & macros
+ */
+
+/**
+ * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @byte: byte to write
+ */
+#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \
+ { (rbuf)->data[(rbuf)->pwrite] = (byte); \
+ (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; }
+
+/**
+ * dvb_ringbuffer_write - Writes a buffer into the ringbuffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be read
+ * @len: bytes from @buf to be written into the ring buffer
+ *
+ * This variant assumes that the buffer is a memory at the Kernel space
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
+ size_t len);
+
+/**
+ * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be read
+ * @len: bytes from @buf to be written into the ring buffer
+ *
+ * This variant assumes that the buffer is a memory at the userspace. So,
+ * it will internally call copy_from_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ const u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
+ * + * @rbuf: Ringbuffer to write to. + * @buf: Buffer to write. + * @len: Length of buffer (currently limited to 65535 bytes max). + * + * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL. + */ +extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + * + * .. note:: + * + * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer + * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a + * packet as no longer required. + */ +extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, + size_t idx, + int offset, u8 __user *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer. + * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer + * in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + */ +extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, + int offset, u8 *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer. + * + * @rbuf: Ring buffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + */ +extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx); + +/** + * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer. + * + * @rbuf: Ringbuffer concerned. 
+ * @idx: Previous packet index, or -1 to return the first packet index. + * @pktlen: On success, will be updated to contain the length of the packet + * in bytes. + * returns Packet index (if >=0), or -1 if no packets available. + */ +extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, + size_t idx, size_t *pktlen); + +#endif /* _DVB_RINGBUFFER_H_ */ diff --git a/6.18.0/include-overrides/media/dvb_vb2.h b/6.18.0/include-overrides/media/dvb_vb2.h new file mode 100644 index 0000000..8cb8845 --- /dev/null +++ b/6.18.0/include-overrides/media/dvb_vb2.h @@ -0,0 +1,280 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * dvb-vb2.h - DVB driver helper framework for streaming I/O + * + * Copyright (C) 2015 Samsung Electronics + * + * Author: jh1009.sung@samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _DVB_VB2_H +#define _DVB_VB2_H + +#include +#include +#include +#include +#include +#include + +/** + * enum dvb_buf_type - types of Digital TV memory-mapped buffers + * + * @DVB_BUF_TYPE_CAPTURE: buffer is filled by the Kernel, + * with a received Digital TV stream + */ +enum dvb_buf_type { + DVB_BUF_TYPE_CAPTURE = 1, +}; + +/** + * enum dvb_vb2_states - states to control VB2 state machine + * @DVB_VB2_STATE_NONE: + * VB2 engine not initialized yet, init failed or VB2 was released. + * @DVB_VB2_STATE_INIT: + * VB2 engine initialized. + * @DVB_VB2_STATE_REQBUFS: + * Buffers were requested + * @DVB_VB2_STATE_STREAMON: + * VB2 is streaming. Callers should not check it directly. Instead, + * they should use dvb_vb2_is_streaming(). + * + * Note: + * + * Callers should not touch at the state machine directly. This + * is handled inside dvb_vb2.c. 
+ */ +enum dvb_vb2_states { + DVB_VB2_STATE_NONE = 0x0, + DVB_VB2_STATE_INIT = 0x1, + DVB_VB2_STATE_REQBUFS = 0x2, + DVB_VB2_STATE_STREAMON = 0x4, +}; + +#define DVB_VB2_NAME_MAX (20) + +/** + * struct dvb_buffer - video buffer information for v4l2. + * + * @vb: embedded struct &vb2_buffer. + * @list: list of &struct dvb_buffer. + */ +struct dvb_buffer { + struct vb2_buffer vb; + struct list_head list; +}; + +/** + * struct dvb_vb2_ctx - control struct for VB2 handler + * @vb_q: pointer to &struct vb2_queue with videobuf2 queue. + * @mutex: mutex to serialize vb2 operations. Used by + * vb2 core %wait_prepare and %wait_finish operations. + * @slock: spin lock used to protect buffer filling at dvb_vb2.c. + * @dvb_q: List of buffers that are not filled yet. + * @buf: Pointer to the buffer that are currently being filled. + * @offset: index to the next position at the @buf to be filled. + * @remain: How many bytes are left to be filled at @buf. + * @state: bitmask of buffer states as defined by &enum dvb_vb2_states. + * @buf_siz: size of each VB2 buffer. + * @buf_cnt: number of VB2 buffers. + * @nonblocking: + * If different than zero, device is operating on non-blocking + * mode. + * @flags: buffer flags as defined by &enum dmx_buffer_flags. + * Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field. + * @count: monotonic counter for filled buffers. Helps to identify + * data stream loses. Filled only at &DMX_DQBUF. &DMX_QBUF should + * zero this field. + * + * @name: name of the device type. Currently, it can either be + * "dvr" or "demux_filter". 
+ */ +struct dvb_vb2_ctx { + struct vb2_queue vb_q; + struct mutex mutex; + spinlock_t slock; + struct list_head dvb_q; + struct dvb_buffer *buf; + int offset; + int remain; + int state; + int buf_siz; + int buf_cnt; + int nonblocking; + + enum dmx_buffer_flags flags; + u32 count; + + char name[DVB_VB2_NAME_MAX + 1]; +}; + +#ifndef CONFIG_DVB_MMAP +static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, + const char *name, int non_blocking) +{ + return 0; +}; +static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx) +{ + return 0; +}; +#define dvb_vb2_is_streaming(ctx) (0) +#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0) + +static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, + struct file *file, + poll_table *wait) +{ + return 0; +} +#else +/** + * dvb_vb2_init - initializes VB2 handler + * + * @ctx: control struct for VB2 handler + * @name: name for the VB2 handler + * @non_blocking: + * if not zero, it means that the device is at non-blocking mode + */ +int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking); + +/** + * dvb_vb2_release - Releases the VB2 handler allocated resources and + * put @ctx at DVB_VB2_STATE_NONE state. + * @ctx: control struct for VB2 handler + */ +int dvb_vb2_release(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_is_streaming - checks if the VB2 handler is streaming + * @ctx: control struct for VB2 handler + * + * Return: 0 if not streaming, 1 otherwise. + */ +int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_fill_buffer - fills a VB2 buffer + * @ctx: control struct for VB2 handler + * @src: place where the data is stored + * @len: number of bytes to be copied from @src + * @buffer_flags: + * pointer to buffer flags as defined by &enum dmx_buffer_flags. + * can be NULL. 
+ */
+int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
+ const unsigned char *src, int len,
+ enum dmx_buffer_flags *buffer_flags);
+
+/**
+ * dvb_vb2_poll - Wrapper to vb2_core_poll() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @file: &struct file argument passed to the poll
+ * file operation handler.
+ * @wait: &poll_table wait argument passed to the poll
+ * file operation handler.
+ *
+ * Implements poll syscall() logic.
+ */
+__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file,
+ poll_table *wait);
+#endif
+
+/**
+ * dvb_vb2_stream_on() - Wrapper to vb2_core_streamon() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ *
+ * Starts dvb streaming
+ */
+int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx);
+/**
+ * dvb_vb2_stream_off() - Wrapper to vb2_core_streamoff() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ *
+ * Stops dvb streaming
+ */
+int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx);
+
+/**
+ * dvb_vb2_reqbufs() - Wrapper to vb2_core_reqbufs() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @req: &struct dmx_requestbuffers passed from userspace in
+ * order to handle &DMX_REQBUFS.
+ *
+ * Initiate streaming by requesting a number of buffers. Also used to
+ * free previously requested buffers, if ``req->count`` is zero.
+ */
+int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req);
+
+/**
+ * dvb_vb2_querybuf() - Wrapper to vb2_core_querybuf() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @b: &struct dmx_buffer passed from userspace in
+ * order to handle &DMX_QUERYBUF.
+ *
+ *
+ */
+int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
+
+/**
+ * dvb_vb2_expbuf() - Wrapper to vb2_core_expbuf() for Digital TV
+ * buffer handling.
+ * + * @ctx: control struct for VB2 handler + * @exp: &struct dmx_exportbuffer passed from userspace in + * order to handle &DMX_EXPBUF. + * + * Export a buffer as a file descriptor. + */ +int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp); + +/** + * dvb_vb2_qbuf() - Wrapper to vb2_core_qbuf() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QBUF. + * + * Queue a Digital TV buffer as requested by userspace + */ +int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_dqbuf() - Wrapper to vb2_core_dqbuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_DQBUF. + * + * Dequeue a Digital TV buffer to the userspace + */ +int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_mmap() - Wrapper to vb2_mmap() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * map Digital TV video buffers into application address space. + */ +int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma); + +#endif /* _DVB_VB2_H */ diff --git a/6.18.0/include-overrides/media/dvbdev.h b/6.18.0/include-overrides/media/dvbdev.h new file mode 100644 index 0000000..e5a00d1 --- /dev/null +++ b/6.18.0/include-overrides/media/dvbdev.h @@ -0,0 +1,493 @@ +/* + * dvbdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVBDEV_H_ +#define _DVBDEV_H_ + +#include +#include +#include +#include +#include + +#define DVB_MAJOR 212 + +#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0 + #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS +#else + #define DVB_MAX_ADAPTERS 16 +#endif + +#define DVB_UNSET (-1) + +/* List of DVB device types */ + +/** + * enum dvb_device_type - type of the Digital TV device + * + * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI) + * @DVB_DEVICE_FRONTEND: Digital TV frontend. + * @DVB_DEVICE_DEMUX: Digital TV demux. + * @DVB_DEVICE_DVR: Digital TV digital video record (DVR). + * @DVB_DEVICE_CA: Digital TV Conditional Access (CA). + * @DVB_DEVICE_NET: Digital TV network. + * + * @DVB_DEVICE_VIDEO: Digital TV video decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_AUDIO: Digital TV audio decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD). + * Deprecated. Used only on av7110. + */ +enum dvb_device_type { + DVB_DEVICE_SEC, + DVB_DEVICE_FRONTEND, + DVB_DEVICE_DEMUX, + DVB_DEVICE_DVR, + DVB_DEVICE_CA, + DVB_DEVICE_NET, + + DVB_DEVICE_VIDEO, + DVB_DEVICE_AUDIO, + DVB_DEVICE_OSD, +}; + +#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \ + static short adapter_nr[] = \ + {[0 ... 
(DVB_MAX_ADAPTERS - 1)] = DVB_UNSET }; \ + module_param_array(adapter_nr, short, NULL, 0444); \ + MODULE_PARM_DESC(adapter_nr, "DVB adapter numbers") + +struct dvb_frontend; + +/** + * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API + * + * @num: Number of the adapter + * @list_head: List with the DVB adapters + * @device_list: List with the DVB devices + * @name: Name of the adapter + * @proposed_mac: proposed MAC address for the adapter + * @priv: private data + * @device: pointer to struct device + * @module: pointer to struct module + * @mfe_shared: indicates mutually exclusive frontends. + * 1 = legacy exclusion behavior: blocking any open() call + * 2 = enhanced exclusion behavior, emulating the standard + * behavior of busy frontends: allowing read-only sharing + * and otherwise returning immediately with -EBUSY when any + * of the frontends is already opened with write access. + * @mfe_dvbdev: Frontend device in use, in the case of MFE + * @mfe_lock: Lock to prevent using the other frontends when MFE is + * used. + * @mdev_lock: Protect access to the mdev pointer. + * @mdev: pointer to struct media_device, used when the media + * controller is used. + * @conn: RF connector. Used only if the device has no separate + * tuner. 
+ * @conn_pads: pointer to struct media_pad associated with @conn; + */ +struct dvb_adapter { + int num; + struct list_head list_head; + struct list_head device_list; + const char *name; + u8 proposed_mac [6]; + void* priv; + + struct device *device; + + struct module *module; + + int mfe_shared; /* indicates mutually exclusive frontends */ + struct dvb_device *mfe_dvbdev; /* frontend device in use */ + struct mutex mfe_lock; /* access lock for thread creation */ + +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + struct mutex mdev_lock; + struct media_device *mdev; + struct media_entity *conn; + struct media_pad *conn_pads; +#endif +}; + +/** + * struct dvb_device - represents a DVB device node + * + * @list_head: List head with all DVB devices + * @ref: reference count for this device + * @fops: pointer to struct file_operations + * @adapter: pointer to the adapter that holds this device node + * @type: type of the device, as defined by &enum dvb_device_type. + * @minor: devnode minor number. Major number is always DVB_MAJOR. + * @id: device ID number, inside the adapter + * @readers: Initialized by the caller. Each call to open() in Read Only mode + * decreases this counter by one. + * @writers: Initialized by the caller. Each call to open() in Read/Write + * mode decreases this counter by one. + * @users: Initialized by the caller. Each call to open() in any mode + * decreases this counter by one. + * @wait_queue: wait queue, used to wait for certain events inside one of + * the DVB API callers + * @kernel_ioctl: callback function used to handle ioctl calls from userspace. + * @name: Name to be used for the device at the Media Controller + * @entity: pointer to struct media_entity associated with the device node + * @pads: pointer to struct media_pad associated with @entity; + * @priv: private data + * @intf_devnode: Pointer to media_intf_devnode. 
Used by the dvbdev core to
+ * store the MC device node interface
+ * @tsout_num_entities: Number of Transport Stream output entities
+ * @tsout_entity: array with MC entities associated to each TS output node
+ * @tsout_pads: array with the source pads for each @tsout_entity
+ *
+ * This structure is used by the DVB core (frontend, CA, net, demux) in
+ * order to create the device nodes. Usually, driver should not initialize
+ * this struct directly.
+ */
+struct dvb_device {
+ struct list_head list_head;
+ struct kref ref;
+ const struct file_operations *fops;
+ struct dvb_adapter *adapter;
+ enum dvb_device_type type;
+ int minor;
+ u32 id;
+
+ /* in theory, 'users' can vanish now,
+ but I don't want to change too much now... */
+ int readers;
+ int writers;
+ int users;
+
+ wait_queue_head_t wait_queue;
+ /* don't really need those !? -- FIXME: use video_usercopy */
+ int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
+
+ /* Needed for media controller register/unregister */
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ const char *name;
+
+ /* Allocated and filled inside dvbdev.c */
+ struct media_intf_devnode *intf_devnode;
+
+ unsigned tsout_num_entities;
+ struct media_entity *entity, *tsout_entity;
+ struct media_pad *pads, *tsout_pads;
+#endif
+
+ void *priv;
+};
+
+/**
+ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
+ *
+ * @fops: Dynamically allocated fops for ->owner registration
+ * @type: type of dvb_device
+ * @template: dvb_device used for registration
+ * @list_head: list_head for dvbdevfops_list
+ */
+struct dvbdevfops_node {
+ struct file_operations *fops;
+ enum dvb_device_type type;
+ const struct dvb_device *template;
+ struct list_head list_head;
+};
+
+/**
+ * dvb_device_get - Increase dvb_device reference
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+struct dvb_device *dvb_device_get(struct dvb_device *dvbdev);
+
+/**
+ * dvb_device_put - Decrease dvb_device reference
+ *
+ * @dvbdev: pointer to 
struct dvb_device + */ +void dvb_device_put(struct dvb_device *dvbdev); + +/** + * dvb_register_adapter - Registers a new DVB adapter + * + * @adap: pointer to struct dvb_adapter + * @name: Adapter's name + * @module: initialized with THIS_MODULE at the caller + * @device: pointer to struct device that corresponds to the device driver + * @adapter_nums: Array with a list of the numbers for @dvb_register_adapter; + * to select among them. Typically, initialized with: + * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums) + */ +int dvb_register_adapter(struct dvb_adapter *adap, const char *name, + struct module *module, struct device *device, + short *adapter_nums); + +/** + * dvb_unregister_adapter - Unregisters a DVB adapter + * + * @adap: pointer to struct dvb_adapter + */ +int dvb_unregister_adapter(struct dvb_adapter *adap); + +/** + * dvb_register_device - Registers a new DVB device + * + * @adap: pointer to struct dvb_adapter + * @pdvbdev: pointer to the place where the new struct dvb_device will be + * stored + * @template: Template used to create &pdvbdev; + * @priv: private data + * @type: type of the device, as defined by &enum dvb_device_type. + * @demux_sink_pads: Number of demux outputs, to be used to create the TS + * outputs via the Media Controller. + */ +int dvb_register_device(struct dvb_adapter *adap, + struct dvb_device **pdvbdev, + const struct dvb_device *template, + void *priv, + enum dvb_device_type type, + int demux_sink_pads); + +/** + * dvb_remove_device - Remove a registered DVB device + * + * @dvbdev: pointer to struct dvb_device + * + * This does not free memory. 
dvb_free_device() will do that when + * reference counter is empty + */ +void dvb_remove_device(struct dvb_device *dvbdev); + + +/** + * dvb_unregister_device - Unregisters a DVB device + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_unregister_device(struct dvb_device *dvbdev); + +#ifdef CONFIG_MEDIA_CONTROLLER_DVB +/** + * dvb_create_media_graph - Creates media graph for the Digital TV part of the + * device. + * + * @adap: pointer to &struct dvb_adapter + * @create_rf_connector: if true, it creates the RF connector too + * + * This function checks all DVB-related functions at the media controller + * entities and creates the needed links for the media graph. It is + * capable of working with multiple tuners or multiple frontends, but it + * won't create links if the device has multiple tuners and multiple frontends + * or if the device has multiple muxes. In such case, the caller driver should + * manually create the remaining links. + */ +__must_check int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector); + +/** + * dvb_register_media_controller - registers a media controller at DVB adapter + * + * @adap: pointer to &struct dvb_adapter + * @mdev: pointer to &struct media_device + */ +static inline void dvb_register_media_controller(struct dvb_adapter *adap, + struct media_device *mdev) +{ + adap->mdev = mdev; +} + +/** + * dvb_get_media_controller - gets the associated media controller + * + * @adap: pointer to &struct dvb_adapter + */ +static inline struct media_device * +dvb_get_media_controller(struct dvb_adapter *adap) +{ + return adap->mdev; +} +#else +static inline +int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector) +{ + return 0; +}; +#define dvb_register_media_controller(a, b) {} +#define dvb_get_media_controller(a) NULL +#endif + +/** + * dvb_generic_open - Digital TV open function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. 
+ * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and increment negative use count. + */ +int dvb_generic_open(struct inode *inode, struct file *file); + +/** + * dvb_generic_release - Digital TV close function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. + * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and decrement negative use count. + */ +int dvb_generic_release(struct inode *inode, struct file *file); + +/** + * dvb_generic_ioctl - Digital TV close function, used by DVB devices + * + * @file: pointer to &struct file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Checks if a DVB devnode and struct dvbdev.kernel_ioctl is still valid. + * If so, calls dvb_usercopy(). + */ +long dvb_generic_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +/** + * dvb_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will actually handle the ioctl + * + * Ancillary function that uses ioctl direction and size to copy from + * userspace. Then, it calls @func, and, if needed, data is copied back + * to userspace. + */ +int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, unsigned int cmd, void *arg)); + +#if IS_ENABLED(CONFIG_I2C) + +struct i2c_adapter; +struct i2c_client; +/** + * dvb_module_probe - helper routine to probe an I2C module + * + * @module_name: + * Name of the I2C module to be probed + * @name: + * Optional name for the I2C module. Used for debug purposes. + * If %NULL, defaults to @module_name. + * @adap: + * pointer to &struct i2c_adapter that describes the I2C adapter where + * the module will be bound. + * @addr: + * I2C address of the adapter, in 7-bit notation. 
+ * @platform_data: + * Platform data to be passed to the I2C module probed. + * + * This function binds an I2C device into the DVB core. Should be used by + * all drivers that use I2C bus to control the hardware. A module bound + * with dvb_module_probe() should use dvb_module_release() to unbind. + * + * Return: + * On success, return an &struct i2c_client, pointing to the bound + * I2C device. %NULL otherwise. + * + * .. note:: + * + * In the past, DVB modules (mainly, frontends) were bound via dvb_attach() + * macro, with does an ugly hack, using I2C low level functions. Such + * usage is deprecated and will be removed soon. Instead, use this routine. + */ +struct i2c_client *dvb_module_probe(const char *module_name, + const char *name, + struct i2c_adapter *adap, + unsigned char addr, + void *platform_data); + +/** + * dvb_module_release - releases an I2C device allocated with + * dvb_module_probe(). + * + * @client: pointer to &struct i2c_client with the I2C client to be released. + * can be %NULL. + * + * This function should be used to free all resources reserved by + * dvb_module_probe() and unbinding the I2C hardware. + */ +void dvb_module_release(struct i2c_client *client); + +#endif /* CONFIG_I2C */ + +/* Legacy generic DVB attach function. */ +#ifdef CONFIG_MEDIA_ATTACH + +/** + * dvb_attach - attaches a DVB frontend into the DVB core. + * + * @FUNCTION: function on a frontend module to be called. + * @ARGS: @FUNCTION arguments. + * + * This ancillary function loads a frontend module in runtime and runs + * the @FUNCTION function there, with @ARGS. + * As it increments symbol usage cont, at unregister, dvb_detach() + * should be called. + * + * .. note:: + * + * In the past, DVB modules (mainly, frontends) were bound via dvb_attach() + * macro, with does an ugly hack, using I2C low level functions. Such + * usage is deprecated and will be removed soon. Instead, you should use + * dvb_module_probe(). + */ +#define dvb_attach(FUNCTION, ARGS...) 
({ \ + void *__r = NULL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (void *) __a(ARGS); \ + if (__r == NULL) \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "DVB: Unable to find symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +/** + * dvb_detach - detaches a DVB frontend loaded via dvb_attach() + * + * @FUNC: attach function + * + * Decrements usage count for a function previously called via dvb_attach(). + */ + +#define dvb_detach(FUNC) symbol_put_addr(FUNC) + +#else +#define dvb_attach(FUNCTION, ARGS...) ({ \ + FUNCTION(ARGS); \ +}) + +#define dvb_detach(FUNC) {} + +#endif /* CONFIG_MEDIA_ATTACH */ + +#endif /* #ifndef _DVBDEV_H_ */ diff --git a/6.18.0/include-overrides/media/frame_vector.h b/6.18.0/include-overrides/media/frame_vector.h new file mode 100644 index 0000000..541c71a --- /dev/null +++ b/6.18.0/include-overrides/media/frame_vector.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _MEDIA_FRAME_VECTOR_H +#define _MEDIA_FRAME_VECTOR_H + +/* Container for pinned pfns / pages in frame_vector.c */ +struct frame_vector { + unsigned int nr_allocated; /* Number of frames we have space for */ + unsigned int nr_frames; /* Number of frames stored in ptrs array */ + bool got_ref; /* Did we pin pages by getting page ref? */ + bool is_pfns; /* Does array contain pages or pfns? */ + void *ptrs[]; /* Array of pinned pfns / pages. 
Use + * pfns_vector_pages() or pfns_vector_pfns() + * for access */ +}; + +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + bool write, struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} + +#endif /* _MEDIA_FRAME_VECTOR_H */ diff --git a/6.18.0/include-overrides/media/i2c/adp1653.h b/6.18.0/include-overrides/media/i2c/adp1653.h new file mode 100644 index 0000000..096de91 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adp1653.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/media/i2c/adp1653.h + * + * Copyright (C) 2008--2011 Nokia Corporation + * + * Contact: Sakari Ailus + * + * Contributors: + * Sakari Ailus + * Tuukka Toivonen + */ + +#ifndef ADP1653_H +#define ADP1653_H + +#include +#include +#include +#include +#include + +#define ADP1653_NAME "adp1653" +#define ADP1653_I2C_ADDR (0x60 >> 1) + +/* Register definitions */ +#define ADP1653_REG_OUT_SEL 0x00 +#define ADP1653_REG_OUT_SEL_HPLED_TORCH_MIN 0x01 +#define ADP1653_REG_OUT_SEL_HPLED_TORCH_MAX 0x0b +#define ADP1653_REG_OUT_SEL_HPLED_FLASH_MIN 0x0c +#define ADP1653_REG_OUT_SEL_HPLED_FLASH_MAX 0x1f +#define ADP1653_REG_OUT_SEL_HPLED_SHIFT 3 +#define ADP1653_REG_OUT_SEL_ILED_MAX 0x07 +#define 
ADP1653_REG_OUT_SEL_ILED_SHIFT 0 + +#define ADP1653_REG_CONFIG 0x01 +#define ADP1653_REG_CONFIG_TMR_CFG (1 << 4) +#define ADP1653_REG_CONFIG_TMR_SET_MAX 0x0f +#define ADP1653_REG_CONFIG_TMR_SET_SHIFT 0 + +#define ADP1653_REG_SW_STROBE 0x02 +#define ADP1653_REG_SW_STROBE_SW_STROBE (1 << 0) + +#define ADP1653_REG_FAULT 0x03 +#define ADP1653_REG_FAULT_FLT_SCP (1 << 3) +#define ADP1653_REG_FAULT_FLT_OT (1 << 2) +#define ADP1653_REG_FAULT_FLT_TMR (1 << 1) +#define ADP1653_REG_FAULT_FLT_OV (1 << 0) + +#define ADP1653_INDICATOR_INTENSITY_MIN 0 +#define ADP1653_INDICATOR_INTENSITY_STEP 2500 +#define ADP1653_INDICATOR_INTENSITY_MAX \ + (ADP1653_REG_OUT_SEL_ILED_MAX * ADP1653_INDICATOR_INTENSITY_STEP) +#define ADP1653_INDICATOR_INTENSITY_uA_TO_REG(a) \ + ((a) / ADP1653_INDICATOR_INTENSITY_STEP) +#define ADP1653_INDICATOR_INTENSITY_REG_TO_uA(a) \ + ((a) * ADP1653_INDICATOR_INTENSITY_STEP) + +#define ADP1653_FLASH_INTENSITY_BASE 35 +#define ADP1653_FLASH_INTENSITY_STEP 15 +#define ADP1653_FLASH_INTENSITY_MIN \ + (ADP1653_FLASH_INTENSITY_BASE \ + + ADP1653_REG_OUT_SEL_HPLED_FLASH_MIN * ADP1653_FLASH_INTENSITY_STEP) +#define ADP1653_FLASH_INTENSITY_MAX \ + (ADP1653_FLASH_INTENSITY_MIN + \ + (ADP1653_REG_OUT_SEL_HPLED_FLASH_MAX - \ + ADP1653_REG_OUT_SEL_HPLED_FLASH_MIN + 1) * \ + ADP1653_FLASH_INTENSITY_STEP) + +#define ADP1653_FLASH_INTENSITY_mA_TO_REG(a) \ + ((a) < ADP1653_FLASH_INTENSITY_BASE ? 
0 : \ + (((a) - ADP1653_FLASH_INTENSITY_BASE) / ADP1653_FLASH_INTENSITY_STEP)) +#define ADP1653_FLASH_INTENSITY_REG_TO_mA(a) \ + ((a) * ADP1653_FLASH_INTENSITY_STEP + ADP1653_FLASH_INTENSITY_BASE) + +#define ADP1653_TORCH_INTENSITY_MIN \ + (ADP1653_FLASH_INTENSITY_BASE \ + + ADP1653_REG_OUT_SEL_HPLED_TORCH_MIN * ADP1653_FLASH_INTENSITY_STEP) +#define ADP1653_TORCH_INTENSITY_MAX \ + (ADP1653_TORCH_INTENSITY_MIN + \ + (ADP1653_REG_OUT_SEL_HPLED_TORCH_MAX - \ + ADP1653_REG_OUT_SEL_HPLED_TORCH_MIN + 1) * \ + ADP1653_FLASH_INTENSITY_STEP) + +struct adp1653_platform_data { + int (*power)(struct v4l2_subdev *sd, int on); + + u32 max_flash_timeout; /* flash light timeout in us */ + u32 max_flash_intensity; /* led intensity, flash mode, mA */ + u32 max_torch_intensity; /* led intensity, torch mode, mA */ + u32 max_indicator_intensity; /* indicator led intensity, uA */ + + struct gpio_desc *enable_gpio; /* for device-tree based boot */ +}; + +#define to_adp1653_flash(sd) container_of(sd, struct adp1653_flash, subdev) + +struct adp1653_flash { + struct v4l2_subdev subdev; + struct adp1653_platform_data *platform_data; + + struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *led_mode; + struct v4l2_ctrl *flash_timeout; + struct v4l2_ctrl *flash_intensity; + struct v4l2_ctrl *torch_intensity; + struct v4l2_ctrl *indicator_intensity; + + struct mutex power_lock; + int power_count; + int fault; +}; + +#endif /* ADP1653_H */ diff --git a/6.18.0/include-overrides/media/i2c/adv7183.h b/6.18.0/include-overrides/media/i2c/adv7183.h new file mode 100644 index 0000000..49faece --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adv7183.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * adv7183.h - definition for adv7183 inputs and outputs + * + * Copyright (c) 2011 Analog Devices Inc. 
+ */ + +#ifndef _ADV7183_H_ +#define _ADV7183_H_ + +/* ADV7183 HW inputs */ +#define ADV7183_COMPOSITE0 0 /* CVBS in on AIN1 */ +#define ADV7183_COMPOSITE1 1 /* CVBS in on AIN2 */ +#define ADV7183_COMPOSITE2 2 /* CVBS in on AIN3 */ +#define ADV7183_COMPOSITE3 3 /* CVBS in on AIN4 */ +#define ADV7183_COMPOSITE4 4 /* CVBS in on AIN5 */ +#define ADV7183_COMPOSITE5 5 /* CVBS in on AIN6 */ +#define ADV7183_COMPOSITE6 6 /* CVBS in on AIN7 */ +#define ADV7183_COMPOSITE7 7 /* CVBS in on AIN8 */ +#define ADV7183_COMPOSITE8 8 /* CVBS in on AIN9 */ +#define ADV7183_COMPOSITE9 9 /* CVBS in on AIN10 */ +#define ADV7183_COMPOSITE10 10 /* CVBS in on AIN11 */ + +#define ADV7183_SVIDEO0 11 /* Y on AIN1, C on AIN4 */ +#define ADV7183_SVIDEO1 12 /* Y on AIN2, C on AIN5 */ +#define ADV7183_SVIDEO2 13 /* Y on AIN3, C on AIN6 */ + +#define ADV7183_COMPONENT0 14 /* Y on AIN1, Pr on AIN4, Pb on AIN5 */ +#define ADV7183_COMPONENT1 15 /* Y on AIN2, Pr on AIN3, Pb on AIN6 */ + +/* ADV7183 HW outputs */ +#define ADV7183_8BIT_OUT 0 +#define ADV7183_16BIT_OUT 1 + +#endif diff --git a/6.18.0/include-overrides/media/i2c/adv7343.h b/6.18.0/include-overrides/media/i2c/adv7343.h new file mode 100644 index 0000000..d35d3e9 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adv7343.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ADV7343 header file + * + * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef ADV7343_H +#define ADV7343_H + +#define ADV7343_COMPOSITE_ID (0) +#define ADV7343_COMPONENT_ID (1) +#define ADV7343_SVIDEO_ID (2) + +/** + * struct adv7343_power_mode - power mode configuration. + * @sleep_mode: on enable the current consumption is reduced to micro ampere + * level. All DACs and the internal PLL circuit are disabled. + * Registers can be read from and written in sleep mode. + * @pll_control: PLL and oversampling control. 
This control allows internal + * PLL 1 circuit to be powered down and the oversampling to be + * switched off. + * @dac: array to configure power on/off DAC's 1..6 + * + * Power mode register (Register 0x0), for more info refer REGISTER MAP ACCESS + * section of datasheet[1], table 17 page no 30. + * + * [1] http://www.analog.com/static/imported-files/data_sheets/ADV7342_7343.pdf + */ +struct adv7343_power_mode { + bool sleep_mode; + bool pll_control; + u32 dac[6]; +}; + +/** + * struct adv7343_sd_config - SD Only Output Configuration. + * @sd_dac_out: array configuring SD DAC Outputs 1 and 2 + */ +struct adv7343_sd_config { + /* SD only Output Configuration */ + u32 sd_dac_out[2]; +}; + +/** + * struct adv7343_platform_data - Platform data values and access functions. + * @mode_config: Configuration for power mode. + * @sd_config: SD Only Configuration. + */ +struct adv7343_platform_data { + struct adv7343_power_mode mode_config; + struct adv7343_sd_config sd_config; +}; + +#endif /* End of #ifndef ADV7343_H */ diff --git a/6.18.0/include-overrides/media/i2c/adv7393.h b/6.18.0/include-overrides/media/i2c/adv7393.h new file mode 100644 index 0000000..c73b363 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adv7393.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ADV7393 header file + * + * Copyright (C) 2010-2012 ADVANSEE - http://www.advansee.com/ + * Benoît Thébaudeau + * + * Based on ADV7343 driver, + * + * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef ADV7393_H +#define ADV7393_H + +#define ADV7393_COMPOSITE_ID (0) +#define ADV7393_COMPONENT_ID (1) +#define ADV7393_SVIDEO_ID (2) + +#endif /* End of #ifndef ADV7393_H */ diff --git a/6.18.0/include-overrides/media/i2c/adv7511.h b/6.18.0/include-overrides/media/i2c/adv7511.h new file mode 100644 index 0000000..1874c05 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adv7511.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ 
+/* + * Analog Devices ADV7511 HDMI Transmitter Device Driver + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef ADV7511_H +#define ADV7511_H + +/* notify events */ +#define ADV7511_MONITOR_DETECT 0 +#define ADV7511_EDID_DETECT 1 + + +struct adv7511_monitor_detect { + int present; +}; + +struct adv7511_edid_detect { + int present; + int segment; + uint16_t phys_addr; +}; + +struct adv7511_platform_data { + u8 i2c_edid; + u8 i2c_cec; + u8 i2c_pktmem; + u32 cec_clk; +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/adv7604.h b/6.18.0/include-overrides/media/i2c/adv7604.h new file mode 100644 index 0000000..77a9799 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adv7604.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * adv7604 - Analog Devices ADV7604 video decoder driver + * + * Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _ADV7604_ +#define _ADV7604_ + +#include + +/* Analog input muxing modes (AFE register 0x02, [2:0]) */ +enum adv7604_ain_sel { + ADV7604_AIN1_2_3_NC_SYNC_1_2 = 0, + ADV7604_AIN4_5_6_NC_SYNC_2_1 = 1, + ADV7604_AIN7_8_9_NC_SYNC_3_1 = 2, + ADV7604_AIN10_11_12_NC_SYNC_4_1 = 3, + ADV7604_AIN9_4_5_6_SYNC_2_1 = 4, +}; + +/* + * Bus rotation and reordering. This is used to specify component reordering on + * the board and describes the components order on the bus when the ADV7604 + * outputs RGB. 
+ */ +enum adv7604_bus_order { + ADV7604_BUS_ORDER_RGB, /* No operation */ + ADV7604_BUS_ORDER_GRB, /* Swap 1-2 */ + ADV7604_BUS_ORDER_RBG, /* Swap 2-3 */ + ADV7604_BUS_ORDER_BGR, /* Swap 1-3 */ + ADV7604_BUS_ORDER_BRG, /* Rotate right */ + ADV7604_BUS_ORDER_GBR, /* Rotate left */ +}; + +/* Input Color Space (IO register 0x02, [7:4]) */ +enum adv76xx_inp_color_space { + ADV76XX_INP_COLOR_SPACE_LIM_RGB = 0, + ADV76XX_INP_COLOR_SPACE_FULL_RGB = 1, + ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_601 = 2, + ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_709 = 3, + ADV76XX_INP_COLOR_SPACE_XVYCC_601 = 4, + ADV76XX_INP_COLOR_SPACE_XVYCC_709 = 5, + ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_601 = 6, + ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_709 = 7, + ADV76XX_INP_COLOR_SPACE_AUTO = 0xf, +}; + +/* Select output format (IO register 0x03, [4:2]) */ +enum adv7604_op_format_mode_sel { + ADV7604_OP_FORMAT_MODE0 = 0x00, + ADV7604_OP_FORMAT_MODE1 = 0x04, + ADV7604_OP_FORMAT_MODE2 = 0x08, +}; + +enum adv76xx_drive_strength { + ADV76XX_DR_STR_MEDIUM_LOW = 1, + ADV76XX_DR_STR_MEDIUM_HIGH = 2, + ADV76XX_DR_STR_HIGH = 3, +}; + +/* INT1 Configuration (IO register 0x40, [1:0]) */ +enum adv76xx_int1_config { + ADV76XX_INT1_CONFIG_OPEN_DRAIN, + ADV76XX_INT1_CONFIG_ACTIVE_LOW, + ADV76XX_INT1_CONFIG_ACTIVE_HIGH, + ADV76XX_INT1_CONFIG_DISABLED, +}; + +enum adv76xx_page { + ADV76XX_PAGE_IO, + ADV7604_PAGE_AVLINK, + ADV76XX_PAGE_CEC, + ADV76XX_PAGE_INFOFRAME, + ADV7604_PAGE_ESDP, + ADV7604_PAGE_DPP, + ADV76XX_PAGE_AFE, + ADV76XX_PAGE_REP, + ADV76XX_PAGE_EDID, + ADV76XX_PAGE_HDMI, + ADV76XX_PAGE_TEST, + ADV76XX_PAGE_CP, + ADV7604_PAGE_VDP, + ADV76XX_PAGE_MAX, +}; + +/* Platform dependent definition */ +struct adv76xx_platform_data { + /* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */ + unsigned disable_pwrdnb:1; + + /* DIS_CABLE_DET_RST: 1 if the 5V pins are unused and unconnected */ + unsigned disable_cable_det_rst:1; + + int default_input; + + /* Analog input muxing mode */ + enum adv7604_ain_sel ain_sel; + + /* Bus 
rotation and reordering */ + enum adv7604_bus_order bus_order; + + /* Select output format mode */ + enum adv7604_op_format_mode_sel op_format_mode_sel; + + /* Configuration of the INT1 pin */ + enum adv76xx_int1_config int1_config; + + /* IO register 0x02 */ + unsigned alt_gamma:1; + + /* IO register 0x05 */ + unsigned blank_data:1; + unsigned insert_av_codes:1; + unsigned replicate_av_codes:1; + + /* IO register 0x06 */ + unsigned inv_vs_pol:1; + unsigned inv_hs_pol:1; + unsigned inv_llc_pol:1; + + /* IO register 0x14 */ + enum adv76xx_drive_strength dr_str_data; + enum adv76xx_drive_strength dr_str_clk; + enum adv76xx_drive_strength dr_str_sync; + + /* IO register 0x30 */ + unsigned output_bus_lsb_to_msb:1; + + /* Free run */ + unsigned hdmi_free_run_mode; + + /* i2c addresses: 0 == use default */ + u8 i2c_addresses[ADV76XX_PAGE_MAX]; +}; + +enum adv76xx_pad { + ADV76XX_PAD_HDMI_PORT_A = 0, + ADV7604_PAD_HDMI_PORT_B = 1, + ADV7604_PAD_HDMI_PORT_C = 2, + ADV7604_PAD_HDMI_PORT_D = 3, + ADV7604_PAD_VGA_RGB = 4, + ADV7604_PAD_VGA_COMP = 5, + /* The source pad is either 1 (ADV7611) or 6 (ADV7604) */ + ADV7604_PAD_SOURCE = 6, + ADV7611_PAD_SOURCE = 1, + ADV76XX_PAD_MAX = 7, +}; + +#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000) +#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001) +#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002) + +/* notify events */ +#define ADV76XX_HOTPLUG 1 + +#endif diff --git a/6.18.0/include-overrides/media/i2c/adv7842.h b/6.18.0/include-overrides/media/i2c/adv7842.h new file mode 100644 index 0000000..05e01f0 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/adv7842.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * adv7842 - Analog Devices ADV7842 video decoder driver + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#ifndef _ADV7842_ +#define _ADV7842_ + +/* Analog input muxing modes (AFE register 0x02, [2:0]) */ +enum adv7842_ain_sel { + ADV7842_AIN1_2_3_NC_SYNC_1_2 = 0, + ADV7842_AIN4_5_6_NC_SYNC_2_1 = 1, + ADV7842_AIN7_8_9_NC_SYNC_3_1 = 2, + ADV7842_AIN10_11_12_NC_SYNC_4_1 = 3, + ADV7842_AIN9_4_5_6_SYNC_2_1 = 4, +}; + +/* + * Bus rotation and reordering. This is used to specify component reordering on + * the board and describes the components order on the bus when the ADV7842 + * outputs RGB. + */ +enum adv7842_bus_order { + ADV7842_BUS_ORDER_RGB, /* No operation */ + ADV7842_BUS_ORDER_GRB, /* Swap 1-2 */ + ADV7842_BUS_ORDER_RBG, /* Swap 2-3 */ + ADV7842_BUS_ORDER_BGR, /* Swap 1-3 */ + ADV7842_BUS_ORDER_BRG, /* Rotate right */ + ADV7842_BUS_ORDER_GBR, /* Rotate left */ +}; + +/* Input Color Space (IO register 0x02, [7:4]) */ +enum adv7842_inp_color_space { + ADV7842_INP_COLOR_SPACE_LIM_RGB = 0, + ADV7842_INP_COLOR_SPACE_FULL_RGB = 1, + ADV7842_INP_COLOR_SPACE_LIM_YCbCr_601 = 2, + ADV7842_INP_COLOR_SPACE_LIM_YCbCr_709 = 3, + ADV7842_INP_COLOR_SPACE_XVYCC_601 = 4, + ADV7842_INP_COLOR_SPACE_XVYCC_709 = 5, + ADV7842_INP_COLOR_SPACE_FULL_YCbCr_601 = 6, + ADV7842_INP_COLOR_SPACE_FULL_YCbCr_709 = 7, + ADV7842_INP_COLOR_SPACE_AUTO = 0xf, +}; + +/* Select output format (IO register 0x03, [4:2]) */ +enum adv7842_op_format_mode_sel { + ADV7842_OP_FORMAT_MODE0 = 0x00, + ADV7842_OP_FORMAT_MODE1 = 0x04, + ADV7842_OP_FORMAT_MODE2 = 0x08, +}; + +/* Mode of operation */ +enum adv7842_mode { + ADV7842_MODE_SDP, + ADV7842_MODE_COMP, + ADV7842_MODE_RGB, + ADV7842_MODE_HDMI +}; + +/* Video standard select (IO register 0x00, [5:0]) */ +enum adv7842_vid_std_select { + /* SDP */ + ADV7842_SDP_VID_STD_CVBS_SD_4x1 = 0x01, + ADV7842_SDP_VID_STD_YC_SD4_x1 = 0x09, + /* RGB */ + ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE = 0x07, + /* HDMI GR */ + ADV7842_HDMI_GR_VID_STD_AUTO_GRAPH_MODE = 0x02, + /* HDMI COMP */ + ADV7842_HDMI_COMP_VID_STD_HD_1250P = 0x1e, +}; + +enum adv7842_select_input { + 
ADV7842_SELECT_HDMI_PORT_A, + ADV7842_SELECT_HDMI_PORT_B, + ADV7842_SELECT_VGA_RGB, + ADV7842_SELECT_VGA_COMP, + ADV7842_SELECT_SDP_CVBS, + ADV7842_SELECT_SDP_YC, +}; + +enum adv7842_drive_strength { + ADV7842_DR_STR_LOW = 0, + ADV7842_DR_STR_MEDIUM_LOW = 1, + ADV7842_DR_STR_MEDIUM_HIGH = 2, + ADV7842_DR_STR_HIGH = 3, +}; + +struct adv7842_sdp_csc_coeff { + bool manual; + u16 scaling; + u16 A1; + u16 A2; + u16 A3; + u16 A4; + u16 B1; + u16 B2; + u16 B3; + u16 B4; + u16 C1; + u16 C2; + u16 C3; + u16 C4; +}; + +struct adv7842_sdp_io_sync_adjustment { + bool adjust; + u16 hs_beg; + u16 hs_width; + u16 de_beg; + u16 de_end; + u8 vs_beg_o; + u8 vs_beg_e; + u8 vs_end_o; + u8 vs_end_e; + u8 de_v_beg_o; + u8 de_v_beg_e; + u8 de_v_end_o; + u8 de_v_end_e; +}; + +/* Platform dependent definition */ +struct adv7842_platform_data { + /* chip reset during probe */ + unsigned chip_reset:1; + + /* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */ + unsigned disable_pwrdnb:1; + + /* DIS_CABLE_DET_RST: 1 if the 5V pins are unused and unconnected */ + unsigned disable_cable_det_rst:1; + + /* Analog input muxing mode */ + enum adv7842_ain_sel ain_sel; + + /* Bus rotation and reordering */ + enum adv7842_bus_order bus_order; + + /* Select output format mode */ + enum adv7842_op_format_mode_sel op_format_mode_sel; + + /* Default mode */ + enum adv7842_mode mode; + + /* Default input */ + unsigned input; + + /* Video standard */ + enum adv7842_vid_std_select vid_std_select; + + /* IO register 0x02 */ + unsigned alt_gamma:1; + + /* IO register 0x05 */ + unsigned blank_data:1; + unsigned insert_av_codes:1; + unsigned replicate_av_codes:1; + + /* IO register 0x30 */ + unsigned output_bus_lsb_to_msb:1; + + /* IO register 0x14 */ + enum adv7842_drive_strength dr_str_data; + enum adv7842_drive_strength dr_str_clk; + enum adv7842_drive_strength dr_str_sync; + + /* + * IO register 0x19: Adjustment to the LLC DLL phase in + * increments of 1/32 of a clock period. 
+ */ + unsigned llc_dll_phase:5; + + /* External RAM for 3-D comb or frame synchronizer */ + unsigned sd_ram_size; /* ram size in MB */ + unsigned sd_ram_ddr:1; /* ddr or sdr sdram */ + + /* HDMI free run, CP-reg 0xBA */ + unsigned hdmi_free_run_enable:1; + /* 0 = Mode 0: run when there is no TMDS clock + 1 = Mode 1: run when there is no TMDS clock or the + video resolution does not match programmed one. */ + unsigned hdmi_free_run_mode:1; + + /* SDP free run, CP-reg 0xDD */ + unsigned sdp_free_run_auto:1; + unsigned sdp_free_run_man_col_en:1; + unsigned sdp_free_run_cbar_en:1; + unsigned sdp_free_run_force:1; + + /* HPA manual (0) or auto (1), affects HDMI register 0x69 */ + unsigned hpa_auto:1; + + struct adv7842_sdp_csc_coeff sdp_csc_coeff; + + struct adv7842_sdp_io_sync_adjustment sdp_io_sync_625; + struct adv7842_sdp_io_sync_adjustment sdp_io_sync_525; + + /* i2c addresses */ + u8 i2c_sdp_io; + u8 i2c_sdp; + u8 i2c_cp; + u8 i2c_vdp; + u8 i2c_afe; + u8 i2c_hdmi; + u8 i2c_repeater; + u8 i2c_edid; + u8 i2c_infoframe; + u8 i2c_cec; + u8 i2c_avlink; +}; + +#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000) +#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001) +#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002) + +/* custom ioctl, used to test the external RAM that's used by the + * deinterlacer. */ +#define ADV7842_CMD_RAM_TEST _IO('V', BASE_VIDIOC_PRIVATE) + +#define ADV7842_EDID_PORT_A 0 +#define ADV7842_EDID_PORT_B 1 +#define ADV7842_EDID_PORT_VGA 2 +#define ADV7842_PAD_SOURCE 3 + +#endif diff --git a/6.18.0/include-overrides/media/i2c/ak881x.h b/6.18.0/include-overrides/media/i2c/ak881x.h new file mode 100644 index 0000000..ff05971 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ak881x.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Header for AK8813 / AK8814 TV-ecoders from Asahi Kasei Microsystems Co., Ltd. 
(AKM) + * + * Copyright (C) 2010, Guennadi Liakhovetski + */ + +#ifndef AK881X_H +#define AK881X_H + +#define AK881X_IF_MODE_MASK (3 << 0) +#define AK881X_IF_MODE_BT656 (0 << 0) +#define AK881X_IF_MODE_MASTER (1 << 0) +#define AK881X_IF_MODE_SLAVE (2 << 0) +#define AK881X_FIELD (1 << 2) +#define AK881X_COMPONENT (1 << 3) + +struct ak881x_pdata { + unsigned long flags; +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/bt819.h b/6.18.0/include-overrides/media/i2c/bt819.h new file mode 100644 index 0000000..2277a7e --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/bt819.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + bt819.h - bt819 notifications + + Copyright (C) 2009 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _BT819_H_ +#define _BT819_H_ + +#include + +/* v4l2_device notifications. */ + +/* Needed to reset the FIFO buffer when changing the input + or the video standard. + + Note: these ioctls that internal to the kernel and are never called + from userspace. 
*/ +#define BT819_FIFO_RESET_LOW _IO('b', 0) +#define BT819_FIFO_RESET_HIGH _IO('b', 1) + +#endif diff --git a/6.18.0/include-overrides/media/i2c/cs5345.h b/6.18.0/include-overrides/media/i2c/cs5345.h new file mode 100644 index 0000000..39e1cf6 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/cs5345.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + cs5345.h - definition for cs5345 inputs and outputs + + Copyright (C) 2007 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _CS5345_H_ +#define _CS5345_H_ + +/* CS5345 HW inputs */ +#define CS5345_IN_MIC 0 +#define CS5345_IN_1 1 +#define CS5345_IN_2 2 +#define CS5345_IN_3 3 +#define CS5345_IN_4 4 +#define CS5345_IN_5 5 +#define CS5345_IN_6 6 + +#define CS5345_MCLK_1 0x00 +#define CS5345_MCLK_1_5 0x10 +#define CS5345_MCLK_2 0x20 +#define CS5345_MCLK_3 0x30 +#define CS5345_MCLK_4 0x40 + +#endif diff --git a/6.18.0/include-overrides/media/i2c/cs53l32a.h b/6.18.0/include-overrides/media/i2c/cs53l32a.h new file mode 100644 index 0000000..777f667 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/cs53l32a.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + cs53l32a.h - definition for cs53l32a inputs and outputs + + Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _CS53L32A_H_ +#define _CS53L32A_H_ + +/* There are 2 physical inputs, but the second input can be + placed in two modes, the first mode bypasses the PGA (gain), + the second goes through the PGA. Hence there are three + possible inputs to choose from. 
*/ + +/* CS53L32A HW inputs */ +#define CS53L32A_IN0 0 +#define CS53L32A_IN1 1 +#define CS53L32A_IN2 2 + +#endif diff --git a/6.18.0/include-overrides/media/i2c/ds90ub9xx.h b/6.18.0/include-overrides/media/i2c/ds90ub9xx.h new file mode 100644 index 0000000..0245198 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ds90ub9xx.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __MEDIA_I2C_DS90UB9XX_H__ +#define __MEDIA_I2C_DS90UB9XX_H__ + +#include + +struct i2c_atr; + +/** + * struct ds90ub9xx_platform_data - platform data for FPD-Link Serializers. + * @port: Deserializer RX port for this Serializer + * @atr: I2C ATR + * @bc_rate: back-channel clock rate + */ +struct ds90ub9xx_platform_data { + u32 port; + struct i2c_atr *atr; + unsigned long bc_rate; +}; + +#endif /* __MEDIA_I2C_DS90UB9XX_H__ */ diff --git a/6.18.0/include-overrides/media/i2c/ir-kbd-i2c.h b/6.18.0/include-overrides/media/i2c/ir-kbd-i2c.h new file mode 100644 index 0000000..0b58f8b --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ir-kbd-i2c.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IR_I2C +#define _IR_I2C + +#include + +#define DEFAULT_POLLING_INTERVAL 100 /* ms */ + +struct IR_i2c; + +struct IR_i2c { + char *ir_codes; + struct i2c_client *c; + struct rc_dev *rc; + + /* Used to avoid fast repeating */ + unsigned char old; + + u32 polling_interval; /* in ms */ + + struct delayed_work work; + char phys[32]; + int (*get_key)(struct IR_i2c *ir, + enum rc_proto *protocol, + u32 *scancode, u8 *toggle); + /* tx */ + struct i2c_client *tx_c; + struct mutex lock; /* do not poll Rx during Tx */ + unsigned int carrier; + unsigned int duty_cycle; +}; + +enum ir_kbd_get_key_fn { + IR_KBD_GET_KEY_CUSTOM = 0, + IR_KBD_GET_KEY_PIXELVIEW, + IR_KBD_GET_KEY_HAUP, + IR_KBD_GET_KEY_KNC1, + IR_KBD_GET_KEY_GENIATECH, + IR_KBD_GET_KEY_FUSIONHDTV, + IR_KBD_GET_KEY_HAUP_XVR, + IR_KBD_GET_KEY_AVERMEDIA_CARDBUS, +}; + +/* Can be passed when instantiating an ir_video i2c 
device */ +struct IR_i2c_init_data { + char *ir_codes; + const char *name; + u64 type; /* RC_PROTO_BIT_RC5, etc */ + u32 polling_interval; /* 0 means DEFAULT_POLLING_INTERVAL */ + + /* + * Specify either a function pointer or a value indicating one of + * ir_kbd_i2c's internal get_key functions + */ + int (*get_key)(struct IR_i2c *ir, + enum rc_proto *protocol, + u32 *scancode, u8 *toggle); + enum ir_kbd_get_key_fn internal_get_key_func; + + struct rc_dev *rc_dev; +}; +#endif diff --git a/6.18.0/include-overrides/media/i2c/lm3560.h b/6.18.0/include-overrides/media/i2c/lm3560.h new file mode 100644 index 0000000..770d8c7 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/lm3560.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/media/i2c/lm3560.h + * + * Copyright (C) 2013 Texas Instruments + * + * Contact: Daniel Jeong + * Ldd-Mlp + */ + +#ifndef __LM3560_H__ +#define __LM3560_H__ + +#include + +#define LM3559_NAME "lm3559" +#define LM3560_NAME "lm3560" +#define LM3560_I2C_ADDR (0x53) + +/* FLASH Brightness + * min 62500uA, step 62500uA, max 1000000uA + */ +#define LM3560_FLASH_BRT_MIN 62500 +#define LM3560_FLASH_BRT_STEP 62500 +#define LM3560_FLASH_BRT_MAX 1000000 +#define LM3560_FLASH_BRT_uA_TO_REG(a) \ + ((a) < LM3560_FLASH_BRT_MIN ? 0 : \ + (((a) - LM3560_FLASH_BRT_MIN) / LM3560_FLASH_BRT_STEP)) +#define LM3560_FLASH_BRT_REG_TO_uA(a) \ + ((a) * LM3560_FLASH_BRT_STEP + LM3560_FLASH_BRT_MIN) + +/* FLASH TIMEOUT DURATION + * min 32ms, step 32ms, max 1024ms + */ +#define LM3560_FLASH_TOUT_MIN 32 +#define LM3560_FLASH_TOUT_STEP 32 +#define LM3560_FLASH_TOUT_MAX 1024 +#define LM3560_FLASH_TOUT_ms_TO_REG(a) \ + ((a) < LM3560_FLASH_TOUT_MIN ? 
0 : \ + (((a) - LM3560_FLASH_TOUT_MIN) / LM3560_FLASH_TOUT_STEP)) +#define LM3560_FLASH_TOUT_REG_TO_ms(a) \ + ((a) * LM3560_FLASH_TOUT_STEP + LM3560_FLASH_TOUT_MIN) + +/* TORCH BRT + * min 31250uA, step 31250uA, max 250000uA + */ +#define LM3560_TORCH_BRT_MIN 31250 +#define LM3560_TORCH_BRT_STEP 31250 +#define LM3560_TORCH_BRT_MAX 250000 +#define LM3560_TORCH_BRT_uA_TO_REG(a) \ + ((a) < LM3560_TORCH_BRT_MIN ? 0 : \ + (((a) - LM3560_TORCH_BRT_MIN) / LM3560_TORCH_BRT_STEP)) +#define LM3560_TORCH_BRT_REG_TO_uA(a) \ + ((a) * LM3560_TORCH_BRT_STEP + LM3560_TORCH_BRT_MIN) + +enum lm3560_led_id { + LM3560_LED0 = 0, + LM3560_LED1, + LM3560_LED_MAX +}; + +enum lm3560_peak_current { + LM3560_PEAK_1600mA = 0x00, + LM3560_PEAK_2300mA = 0x20, + LM3560_PEAK_3000mA = 0x40, + LM3560_PEAK_3600mA = 0x60 +}; + +/* struct lm3560_platform_data + * + * @peak : peak current + * @max_flash_timeout: flash timeout + * @max_flash_brt: flash mode led brightness + * @max_torch_brt: torch mode led brightness + */ +struct lm3560_platform_data { + enum lm3560_peak_current peak; + + u32 max_flash_timeout; + u32 max_flash_brt[LM3560_LED_MAX]; + u32 max_torch_brt[LM3560_LED_MAX]; +}; + +#endif /* __LM3560_H__ */ diff --git a/6.18.0/include-overrides/media/i2c/lm3646.h b/6.18.0/include-overrides/media/i2c/lm3646.h new file mode 100644 index 0000000..845f07b --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/lm3646.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/media/i2c/lm3646.h + * + * Copyright (C) 2014 Texas Instruments + * + * Contact: Daniel Jeong + * Ldd-Mlp + */ + +#ifndef __LM3646_H__ +#define __LM3646_H__ + +#include + +#define LM3646_NAME "lm3646" +#define LM3646_I2C_ADDR_REV1 (0x67) +#define LM3646_I2C_ADDR_REV0 (0x63) + +/* TOTAL FLASH Brightness Max + * min 93350uA, step 93750uA, max 1499600uA + */ +#define LM3646_TOTAL_FLASH_BRT_MIN 93350 +#define LM3646_TOTAL_FLASH_BRT_STEP 93750 +#define LM3646_TOTAL_FLASH_BRT_MAX 1499600 +#define 
LM3646_TOTAL_FLASH_BRT_uA_TO_REG(a) \ + ((a) < LM3646_TOTAL_FLASH_BRT_MIN ? 0 : \ + ((((a) - LM3646_TOTAL_FLASH_BRT_MIN) / LM3646_TOTAL_FLASH_BRT_STEP))) + +/* TOTAL TORCH Brightness Max + * min 23040uA, step 23430uA, max 187100uA + */ +#define LM3646_TOTAL_TORCH_BRT_MIN 23040 +#define LM3646_TOTAL_TORCH_BRT_STEP 23430 +#define LM3646_TOTAL_TORCH_BRT_MAX 187100 +#define LM3646_TOTAL_TORCH_BRT_uA_TO_REG(a) \ + ((a) < LM3646_TOTAL_TORCH_BRT_MIN ? 0 : \ + ((((a) - LM3646_TOTAL_TORCH_BRT_MIN) / LM3646_TOTAL_TORCH_BRT_STEP))) + +/* LED1 FLASH Brightness + * min 23040uA, step 11718uA, max 1499600uA + */ +#define LM3646_LED1_FLASH_BRT_MIN 23040 +#define LM3646_LED1_FLASH_BRT_STEP 11718 +#define LM3646_LED1_FLASH_BRT_MAX 1499600 +#define LM3646_LED1_FLASH_BRT_uA_TO_REG(a) \ + ((a) <= LM3646_LED1_FLASH_BRT_MIN ? 0 : \ + ((((a) - LM3646_LED1_FLASH_BRT_MIN) / LM3646_LED1_FLASH_BRT_STEP))+1) + +/* LED1 TORCH Brightness + * min 2530uA, step 1460uA, max 187100uA + */ +#define LM3646_LED1_TORCH_BRT_MIN 2530 +#define LM3646_LED1_TORCH_BRT_STEP 1460 +#define LM3646_LED1_TORCH_BRT_MAX 187100 +#define LM3646_LED1_TORCH_BRT_uA_TO_REG(a) \ + ((a) <= LM3646_LED1_TORCH_BRT_MIN ? 0 : \ + ((((a) - LM3646_LED1_TORCH_BRT_MIN) / LM3646_LED1_TORCH_BRT_STEP))+1) + +/* FLASH TIMEOUT DURATION + * min 50ms, step 50ms, max 400ms + */ +#define LM3646_FLASH_TOUT_MIN 50 +#define LM3646_FLASH_TOUT_STEP 50 +#define LM3646_FLASH_TOUT_MAX 400 +#define LM3646_FLASH_TOUT_ms_TO_REG(a) \ + ((a) <= LM3646_FLASH_TOUT_MIN ? 
0 : \ + (((a) - LM3646_FLASH_TOUT_MIN) / LM3646_FLASH_TOUT_STEP)) + +/* struct lm3646_platform_data + * + * @flash_timeout: flash timeout + * @led1_flash_brt: led1 flash mode brightness, uA + * @led1_torch_brt: led1 torch mode brightness, uA + */ +struct lm3646_platform_data { + + u32 flash_timeout; + + u32 led1_flash_brt; + u32 led1_torch_brt; +}; + +#endif /* __LM3646_H__ */ diff --git a/6.18.0/include-overrides/media/i2c/m52790.h b/6.18.0/include-overrides/media/i2c/m52790.h new file mode 100644 index 0000000..cedaaf2 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/m52790.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + m52790.h - definition for m52790 inputs and outputs + + Copyright (C) 2007 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _M52790_H_ +#define _M52790_H_ + +/* Input routing switch 1 */ + +#define M52790_SW1_IN_MASK 0x0003 +#define M52790_SW1_IN_TUNER 0x0000 +#define M52790_SW1_IN_V2 0x0001 +#define M52790_SW1_IN_V3 0x0002 +#define M52790_SW1_IN_V4 0x0003 + +/* Selects component input instead of composite */ +#define M52790_SW1_YCMIX 0x0004 + + +/* Input routing switch 2 */ + +#define M52790_SW2_IN_MASK 0x0300 +#define M52790_SW2_IN_TUNER 0x0000 +#define M52790_SW2_IN_V2 0x0100 +#define M52790_SW2_IN_V3 0x0200 +#define M52790_SW2_IN_V4 0x0300 + +/* Selects component input instead of composite */ +#define M52790_SW2_YCMIX 0x0400 + + +/* Output routing switch 1 */ + +/* Enable 6dB amplifier for composite out */ +#define M52790_SW1_V_AMP 0x0008 + +/* Enable 6dB amplifier for component out */ +#define M52790_SW1_YC_AMP 0x0010 + +/* Audio output mode */ +#define M52790_SW1_AUDIO_MASK 0x00c0 +#define M52790_SW1_AUDIO_MUTE 0x0000 +#define M52790_SW1_AUDIO_R 0x0040 +#define M52790_SW1_AUDIO_L 0x0080 +#define M52790_SW1_AUDIO_STEREO 0x00c0 + + +/* Output routing switch 2 */ + +/* Enable 6dB amplifier for composite out */ +#define M52790_SW2_V_AMP 0x0800 + +/* Enable 6dB amplifier for component out */ +#define 
M52790_SW2_YC_AMP 0x1000 + +/* Audio output mode */ +#define M52790_SW2_AUDIO_MASK 0xc000 +#define M52790_SW2_AUDIO_MUTE 0x0000 +#define M52790_SW2_AUDIO_R 0x4000 +#define M52790_SW2_AUDIO_L 0x8000 +#define M52790_SW2_AUDIO_STEREO 0xc000 + + +/* Common values */ +#define M52790_IN_TUNER (M52790_SW1_IN_TUNER | M52790_SW2_IN_TUNER) +#define M52790_IN_V2 (M52790_SW1_IN_V2 | M52790_SW2_IN_V2) +#define M52790_IN_V3 (M52790_SW1_IN_V3 | M52790_SW2_IN_V3) +#define M52790_IN_V4 (M52790_SW1_IN_V4 | M52790_SW2_IN_V4) + +#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \ + M52790_SW2_AUDIO_STEREO) +#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \ + M52790_SW1_V_AMP | \ + M52790_SW2_AUDIO_STEREO | \ + M52790_SW2_V_AMP) + +#endif diff --git a/6.18.0/include-overrides/media/i2c/mt9t112.h b/6.18.0/include-overrides/media/i2c/mt9t112.h new file mode 100644 index 0000000..825b4a1 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/mt9t112.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* mt9t112 Camera + * + * Copyright (C) 2009 Renesas Solutions Corp. + * Kuninori Morimoto + */ + +#ifndef __MT9T112_H__ +#define __MT9T112_H__ + +struct mt9t112_pll_divider { + u8 m, n; + u8 p1, p2, p3, p4, p5, p6, p7; +}; + +/** + * struct mt9t112_platform_data - mt9t112 driver interface + * @flags: Sensor media bus configuration. 
+ * @divider: Sensor PLL configuration + */ +struct mt9t112_platform_data { +#define MT9T112_FLAG_PCLK_RISING_EDGE BIT(0) + u32 flags; + struct mt9t112_pll_divider divider; +}; + +#endif /* __MT9T112_H__ */ diff --git a/6.18.0/include-overrides/media/i2c/mt9v011.h b/6.18.0/include-overrides/media/i2c/mt9v011.h new file mode 100644 index 0000000..5528397 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/mt9v011.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* mt9v011 sensor + * + * Copyright (C) 2011 Hans Verkuil + */ + +#ifndef __MT9V011_H__ +#define __MT9V011_H__ + +struct mt9v011_platform_data { + unsigned xtal; /* Hz */ +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/ov2659.h b/6.18.0/include-overrides/media/i2c/ov2659.h new file mode 100644 index 0000000..c9ea318 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ov2659.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Omnivision OV2659 CMOS Image Sensor driver + * + * Copyright (C) 2015 Texas Instruments, Inc. + * + * Benoit Parrot + * Lad, Prabhakar + */ + +#ifndef OV2659_H +#define OV2659_H + +/** + * struct ov2659_platform_data - ov2659 driver platform data + * @link_frequency: target pixel clock frequency + */ +struct ov2659_platform_data { + s64 link_frequency; +}; + +#endif /* OV2659_H */ diff --git a/6.18.0/include-overrides/media/i2c/ov7670.h b/6.18.0/include-overrides/media/i2c/ov7670.h new file mode 100644 index 0000000..8686fc2 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ov7670.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * A V4L2 driver for OmniVision OV7670 cameras. 
+ * + * Copyright 2010 One Laptop Per Child + */ + +#ifndef __OV7670_H +#define __OV7670_H + +struct ov7670_config { + int min_width; /* Filter out smaller sizes */ + int min_height; /* Filter out smaller sizes */ + int clock_speed; /* External clock speed (MHz) */ + bool use_smbus; /* Use smbus I/O instead of I2C */ + bool pll_bypass; /* Choose whether to bypass the PLL */ + bool pclk_hb_disable; /* Disable toggling pixclk during horizontal blanking */ +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/ov772x.h b/6.18.0/include-overrides/media/i2c/ov772x.h new file mode 100644 index 0000000..26f363e --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ov772x.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ov772x Camera + * + * Copyright (C) 2008 Renesas Solutions Corp. + * Kuninori Morimoto + */ + +#ifndef __OV772X_H__ +#define __OV772X_H__ + +/* for flags */ +#define OV772X_FLAG_VFLIP (1 << 0) /* Vertical flip image */ +#define OV772X_FLAG_HFLIP (1 << 1) /* Horizontal flip image */ + +/* + * for Edge ctrl + * + * strength also control Auto or Manual Edge Control Mode + * see also OV772X_MANUAL_EDGE_CTRL + */ +struct ov772x_edge_ctrl { + unsigned char strength; + unsigned char threshold; + unsigned char upper; + unsigned char lower; +}; + +#define OV772X_MANUAL_EDGE_CTRL 0x80 /* un-used bit of strength */ +#define OV772X_EDGE_STRENGTH_MASK 0x1F +#define OV772X_EDGE_THRESHOLD_MASK 0x0F +#define OV772X_EDGE_UPPER_MASK 0xFF +#define OV772X_EDGE_LOWER_MASK 0xFF + +#define OV772X_AUTO_EDGECTRL(u, l) \ +{ \ + .upper = (u & OV772X_EDGE_UPPER_MASK), \ + .lower = (l & OV772X_EDGE_LOWER_MASK), \ +} + +#define OV772X_MANUAL_EDGECTRL(s, t) \ +{ \ + .strength = (s & OV772X_EDGE_STRENGTH_MASK) | \ + OV772X_MANUAL_EDGE_CTRL, \ + .threshold = (t & OV772X_EDGE_THRESHOLD_MASK), \ +} + +/** + * struct ov772x_camera_info - ov772x driver interface structure + * @flags: Sensor configuration flags + * @edgectrl: Sensor edge control + */ +struct 
ov772x_camera_info { + unsigned long flags; + struct ov772x_edge_ctrl edgectrl; +}; + +#endif /* __OV772X_H__ */ diff --git a/6.18.0/include-overrides/media/i2c/rj54n1cb0c.h b/6.18.0/include-overrides/media/i2c/rj54n1cb0c.h new file mode 100644 index 0000000..5689c09 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/rj54n1cb0c.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * RJ54N1CB0C Private data + * + * Copyright (C) 2009, Guennadi Liakhovetski + */ + +#ifndef __RJ54N1CB0C_H__ +#define __RJ54N1CB0C_H__ + +struct rj54n1_pdata { + unsigned int mclk_freq; + bool ioctl_high; +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/saa6588.h b/6.18.0/include-overrides/media/i2c/saa6588.h new file mode 100644 index 0000000..bbec05a --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/saa6588.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + + Types and defines needed for RDS. This is included by + saa6588.c and every driver (e.g. bttv-driver.c) that wants + to use the saa6588 module. + + (c) 2005 by Hans J. 
Koch + + +*/ + +#ifndef _SAA6588_H +#define _SAA6588_H + +struct saa6588_command { + unsigned int block_count; + bool nonblocking; + int result; + unsigned char __user *buffer; + struct file *instance; + poll_table *event_list; + __poll_t poll_mask; +}; + +/* These ioctls are internal to the kernel */ +#define SAA6588_CMD_CLOSE _IOW('R', 2, int) +#define SAA6588_CMD_READ _IOR('R', 3, int) +#define SAA6588_CMD_POLL _IOR('R', 4, int) + +#endif diff --git a/6.18.0/include-overrides/media/i2c/saa7115.h b/6.18.0/include-overrides/media/i2c/saa7115.h new file mode 100644 index 0000000..a607c91 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/saa7115.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + saa7115.h - definition for saa7111/3/4/5 inputs and frequency flags + + Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _SAA7115_H_ +#define _SAA7115_H_ + +/* s_routing inputs, outputs, and config */ + +/* SAA7111/3/4/5 HW inputs */ +#define SAA7115_COMPOSITE0 0 +#define SAA7115_COMPOSITE1 1 +#define SAA7115_COMPOSITE2 2 +#define SAA7115_COMPOSITE3 3 +#define SAA7115_COMPOSITE4 4 /* not available for the saa7111/3 */ +#define SAA7115_COMPOSITE5 5 /* not available for the saa7111/3 */ +#define SAA7115_SVIDEO0 6 +#define SAA7115_SVIDEO1 7 +#define SAA7115_SVIDEO2 8 +#define SAA7115_SVIDEO3 9 + +/* outputs */ +#define SAA7115_IPORT_ON 1 +#define SAA7115_IPORT_OFF 0 + +/* SAA7111 specific outputs. */ +#define SAA7111_VBI_BYPASS 2 +#define SAA7111_FMT_YUV422 0x00 +#define SAA7111_FMT_RGB 0x40 +#define SAA7111_FMT_CCIR 0x80 +#define SAA7111_FMT_YUV411 0xc0 + +/* config flags */ +/* + * Register 0x85 should set bit 0 to 0 (it's 1 by default). This bit + * controls the IDQ signal polarity which is set to 'inverted' if the bit + * it 1 and to 'default' if it is 0. 
+ */ +#define SAA7115_IDQ_IS_DEFAULT (1 << 0) + +/* s_crystal_freq values and flags */ + +/* SAA7115 v4l2_crystal_freq frequency values */ +#define SAA7115_FREQ_32_11_MHZ 32110000 /* 32.11 MHz crystal, SAA7114/5 only */ +#define SAA7115_FREQ_24_576_MHZ 24576000 /* 24.576 MHz crystal */ + +/* SAA7115 v4l2_crystal_freq audio clock control flags */ +#define SAA7115_FREQ_FL_UCGC (1 << 0) /* SA 3A[7], UCGC, SAA7115 only */ +#define SAA7115_FREQ_FL_CGCDIV (1 << 1) /* SA 3A[6], CGCDIV, SAA7115 only */ +#define SAA7115_FREQ_FL_APLL (1 << 2) /* SA 3A[3], APLL, SAA7114/5 only */ +#define SAA7115_FREQ_FL_DOUBLE_ASCLK (1 << 3) /* SA 39, LRDIV, SAA7114/5 only */ + +/* ===== SAA7113 Config enums ===== */ + +/* Register 0x08 "Horizontal time constant" [Bit 3..4]: + * Should be set to "Fast Locking Mode" according to the datasheet, + * and that is the default setting in the gm7113c_init table. + * saa7113_init sets this value to "VTR Mode". */ +enum saa7113_r08_htc { + SAA7113_HTC_TV_MODE = 0x00, + SAA7113_HTC_VTR_MODE, /* Default for saa7113_init */ + SAA7113_HTC_FAST_LOCKING_MODE = 0x03 /* Default for gm7113c_init */ +}; + +/* Register 0x10 "Output format selection" [Bit 6..7]: + * Defaults to ITU_656 as specified in datasheet. */ +enum saa7113_r10_ofts { + SAA7113_OFTS_ITU_656 = 0x0, /* Default */ + SAA7113_OFTS_VFLAG_BY_VREF, + SAA7113_OFTS_VFLAG_BY_DATA_TYPE +}; + +/* + * Register 0x12 "Output control" [Bit 0..3 Or Bit 4..7]: + * This is used to select what data is output on the RTS0 and RTS1 pins. + * RTS1 [Bit 4..7] Defaults to DOT_IN. (This value can not be set for RTS0) + * RTS0 [Bit 0..3] Defaults to VIPB in gm7113c_init as specified + * in the datasheet, but is set to HREF_HS in the saa7113_init table. 
+ */ +enum saa7113_r12_rts { + SAA7113_RTS_DOT_IN = 0, /* OBS: Only for RTS1 (Default RTS1) */ + SAA7113_RTS_VIPB, /* Default RTS0 For gm7113c_init */ + SAA7113_RTS_GPSW, + SAA7115_RTS_HL, + SAA7113_RTS_VL, + SAA7113_RTS_DL, + SAA7113_RTS_PLIN, + SAA7113_RTS_HREF_HS, /* Default RTS0 For saa7113_init */ + SAA7113_RTS_HS, + SAA7113_RTS_HQ, + SAA7113_RTS_ODD, + SAA7113_RTS_VS, + SAA7113_RTS_V123, + SAA7113_RTS_VGATE, + SAA7113_RTS_VREF, + SAA7113_RTS_FID +}; + +/** + * struct saa7115_platform_data - Allow overriding default initialization + * + * @saa7113_force_gm7113c_init: Force the use of the gm7113c_init table + * instead of saa7113_init table + * (saa7113 only) + * @saa7113_r08_htc: [R_08 - Bit 3..4] + * @saa7113_r10_vrln: [R_10 - Bit 3] + * default: Disabled for gm7113c_init + * Enabled for saa7113c_init + * @saa7113_r10_ofts: [R_10 - Bit 6..7] + * @saa7113_r12_rts0: [R_12 - Bit 0..3] + * @saa7113_r12_rts1: [R_12 - Bit 4..7] + * @saa7113_r13_adlsb: [R_13 - Bit 7] - default: disabled + */ +struct saa7115_platform_data { + bool saa7113_force_gm7113c_init; + enum saa7113_r08_htc *saa7113_r08_htc; + bool *saa7113_r10_vrln; + enum saa7113_r10_ofts *saa7113_r10_ofts; + enum saa7113_r12_rts *saa7113_r12_rts0; + enum saa7113_r12_rts *saa7113_r12_rts1; + bool *saa7113_r13_adlsb; +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/saa7127.h b/6.18.0/include-overrides/media/i2c/saa7127.h new file mode 100644 index 0000000..c81ee17 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/saa7127.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + saa7127.h - definition for saa7126/7/8/9 inputs/outputs + + Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _SAA7127_H_ +#define _SAA7127_H_ + +/* Enumeration for the supported input types */ +enum saa7127_input_type { + SAA7127_INPUT_TYPE_NORMAL, + SAA7127_INPUT_TYPE_TEST_IMAGE +}; + +/* Enumeration for the supported output signal types */ +enum saa7127_output_type { + 
SAA7127_OUTPUT_TYPE_BOTH, + SAA7127_OUTPUT_TYPE_COMPOSITE, + SAA7127_OUTPUT_TYPE_SVIDEO, + SAA7127_OUTPUT_TYPE_RGB, + SAA7127_OUTPUT_TYPE_YUV_C, + SAA7127_OUTPUT_TYPE_YUV_V +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/tc358743.h b/6.18.0/include-overrides/media/i2c/tc358743.h new file mode 100644 index 0000000..b343650 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/tc358743.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tc358743 - Toshiba HDMI to CSI-2 bridge + * + * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +/* + * References (c = chapter, p = page): + * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60 + * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls + */ + +#ifndef _TC358743_ +#define _TC358743_ + +enum tc358743_ddc5v_delays { + DDC5V_DELAY_0_MS, + DDC5V_DELAY_50_MS, + DDC5V_DELAY_100_MS, + DDC5V_DELAY_200_MS, +}; + +enum tc358743_hdmi_detection_delay { + HDMI_MODE_DELAY_0_MS, + HDMI_MODE_DELAY_25_MS, + HDMI_MODE_DELAY_50_MS, + HDMI_MODE_DELAY_100_MS, +}; + +struct tc358743_platform_data { + /* System clock connected to REFCLK (pin H5) */ + u32 refclk_hz; /* 26 MHz, 27 MHz or 42 MHz */ + + /* DDC +5V debounce delay to avoid spurious interrupts when the cable + * is connected. + * Sets DDC5V_MODE in register DDC_CTL. + * Default: DDC5V_DELAY_0_MS + */ + enum tc358743_ddc5v_delays ddc5v_delay; + + bool enable_hdcp; + + /* + * The FIFO size is 512x32, so Toshiba recommend to set the default FIFO + * level to somewhere in the middle (e.g. 300), so it can cover speed + * mismatches in input and output ports. + */ + u16 fifo_level; + + /* Bps pr lane is (refclk_hz / pll_prd) * pll_fbd */ + u16 pll_prd; + u16 pll_fbd; + + /* CSI + * Calculate CSI parameters with REF_02 for the highest resolution your + * CSI interface can handle. The driver will adjust the number of CSI + * lanes in use according to the pixel clock. 
+ * + * The values in brackets are calculated with REF_02 when the number of + * bps pr lane is 823.5 MHz, and can serve as a starting point. + */ + u32 lineinitcnt; /* (0x00001770) */ + u32 lptxtimecnt; /* (0x00000005) */ + u32 tclk_headercnt; /* (0x00001d04) */ + u32 tclk_trailcnt; /* (0x00000000) */ + u32 ths_headercnt; /* (0x00000505) */ + u32 twakeup; /* (0x00004650) */ + u32 tclk_postcnt; /* (0x00000000) */ + u32 ths_trailcnt; /* (0x00000004) */ + u32 hstxvregcnt; /* (0x00000005) */ + + /* DVI->HDMI detection delay to avoid unnecessary switching between DVI + * and HDMI mode. + * Sets HDMI_DET_V in register HDMI_DET. + * Default: HDMI_MODE_DELAY_0_MS + */ + enum tc358743_hdmi_detection_delay hdmi_detection_delay; + + /* Reset PHY automatically when TMDS clock goes from DC to AC. + * Sets PHY_AUTO_RST2 in register PHY_CTL2. + * Default: false + */ + bool hdmi_phy_auto_reset_tmds_detected; + + /* Reset PHY automatically when TMDS clock passes 21 MHz. + * Sets PHY_AUTO_RST3 in register PHY_CTL2. + * Default: false + */ + bool hdmi_phy_auto_reset_tmds_in_range; + + /* Reset PHY automatically when TMDS clock is detected. + * Sets PHY_AUTO_RST4 in register PHY_CTL2. + * Default: false + */ + bool hdmi_phy_auto_reset_tmds_valid; + + /* Reset HDMI PHY automatically when hsync period is out of range. + * Sets H_PI_RST in register HV_RST. + * Default: false + */ + bool hdmi_phy_auto_reset_hsync_out_of_range; + + /* Reset HDMI PHY automatically when vsync period is out of range. + * Sets V_PI_RST in register HV_RST. 
+ * Default: false + */ + bool hdmi_phy_auto_reset_vsync_out_of_range; +}; + +/* custom controls */ +/* Audio sample rate in Hz */ +#define TC358743_CID_AUDIO_SAMPLING_RATE (V4L2_CID_USER_TC358743_BASE + 0) +/* Audio present status */ +#define TC358743_CID_AUDIO_PRESENT (V4L2_CID_USER_TC358743_BASE + 1) + +#endif diff --git a/6.18.0/include-overrides/media/i2c/tda1997x.h b/6.18.0/include-overrides/media/i2c/tda1997x.h new file mode 100644 index 0000000..c6c2a8a --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/tda1997x.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * tda1997x - NXP HDMI receiver + * + * Copyright 2017 Tim Harvey + * + */ + +#ifndef _TDA1997X_ +#define _TDA1997X_ + +/* Platform Data */ +struct tda1997x_platform_data { + enum v4l2_mbus_type vidout_bus_type; + u32 vidout_bus_width; + u8 vidout_port_cfg[9]; + /* pin polarity (1=invert) */ + bool vidout_inv_de; + bool vidout_inv_hs; + bool vidout_inv_vs; + bool vidout_inv_pclk; + /* clock delays (0=-8, 1=-7 ... 
15=+7 pixels) */ + u8 vidout_delay_hs; + u8 vidout_delay_vs; + u8 vidout_delay_de; + u8 vidout_delay_pclk; + /* sync selections (controls how sync pins are derived) */ + u8 vidout_sel_hs; + u8 vidout_sel_vs; + u8 vidout_sel_de; + + /* Audio Port Output */ + int audout_format; + u32 audout_mclk_fs; /* clock multiplier */ + u32 audout_width; /* 13 or 32 bit */ + u32 audout_layout; /* layout0=AP0 layout1=AP0,AP1,AP2,AP3 */ + bool audout_layoutauto; /* audio layout dictated by pkt header */ + bool audout_invert_clk; /* data valid on rising edge of BCLK */ + bool audio_auto_mute; /* enable hardware audio auto-mute */ +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/ths7303.h b/6.18.0/include-overrides/media/i2c/ths7303.h new file mode 100644 index 0000000..7eda467 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/ths7303.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Texas Instruments Inc + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. + * + * Contributors: + * Hans Verkuil + * Lad, Prabhakar + * Martin Bugge + */ + +#ifndef THS7303_H +#define THS7303_H + +/** + * struct ths7303_platform_data - Platform dependent data + * @ch_1: Bias value for channel one. + * @ch_2: Bias value for channel two. + * @ch_3: Bias value for channel three. 
+ */ +struct ths7303_platform_data { + u8 ch_1; + u8 ch_2; + u8 ch_3; +}; + +#endif diff --git a/6.18.0/include-overrides/media/i2c/tvaudio.h b/6.18.0/include-overrides/media/i2c/tvaudio.h new file mode 100644 index 0000000..206f42e --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/tvaudio.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + tvaudio.h - definition for tvaudio inputs + + Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _TVAUDIO_H +#define _TVAUDIO_H + +/* + * i2c bus addresses for the chips supported by tvaudio.c + */ + +#define I2C_ADDR_TDA8425 0x82 +#define I2C_ADDR_TDA9840 0x84 +#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */ +#define I2C_ADDR_TDA9875 0xb0 +#define I2C_ADDR_TDA8425 0x82 +#define I2C_ADDR_TDA9840 0x84 /* also used by TA8874Z */ +#define I2C_ADDR_TDA985x_L 0xb4 /* also used by 9873 */ +#define I2C_ADDR_TDA985x_H 0xb6 +#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */ +#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */ +#define I2C_ADDR_TEA6420 0x98 +#define I2C_ADDR_PIC16C54 0x96 /* PV951 */ + +/* The tvaudio module accepts the following inputs: */ +#define TVAUDIO_INPUT_TUNER 0 +#define TVAUDIO_INPUT_RADIO 1 +#define TVAUDIO_INPUT_EXTERN 2 +#define TVAUDIO_INPUT_INTERN 3 + +static inline const unsigned short *tvaudio_addrs(void) +{ + static const unsigned short addrs[] = { + I2C_ADDR_TDA8425 >> 1, + I2C_ADDR_TEA6300 >> 1, + I2C_ADDR_TEA6420 >> 1, + I2C_ADDR_TDA9840 >> 1, + I2C_ADDR_TDA985x_L >> 1, + I2C_ADDR_TDA985x_H >> 1, + I2C_ADDR_TDA9874 >> 1, + I2C_ADDR_PIC16C54 >> 1, + I2C_CLIENT_END + }; + + return addrs; +} + +#endif diff --git a/6.18.0/include-overrides/media/i2c/tvp514x.h b/6.18.0/include-overrides/media/i2c/tvp514x.h new file mode 100644 index 0000000..837efff --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/tvp514x.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * drivers/media/video/tvp514x.h + * + * Copyright (C) 2008 Texas 
Instruments Inc + * Author: Vaibhav Hiremath + * + * Contributors: + * Sivaraj R + * Brijesh R Jadav + * Hardik Shah + * Manjunath Hadli + * Karicheri Muralidharan + */ + +#ifndef _TVP514X_H +#define _TVP514X_H + +/* + * Other macros + */ +#define TVP514X_MODULE_NAME "tvp514x" + +#define TVP514X_XCLK_BT656 (27000000) + +/* Number of pixels and number of lines per frame for different standards */ +#define NTSC_NUM_ACTIVE_PIXELS (720) +#define NTSC_NUM_ACTIVE_LINES (480) +#define PAL_NUM_ACTIVE_PIXELS (720) +#define PAL_NUM_ACTIVE_LINES (576) + +/* enum for different decoder input pin configuration */ +enum tvp514x_input { + /* + * CVBS input selection + */ + INPUT_CVBS_VI1A = 0x0, + INPUT_CVBS_VI1B, + INPUT_CVBS_VI1C, + INPUT_CVBS_VI2A = 0x04, + INPUT_CVBS_VI2B, + INPUT_CVBS_VI2C, + INPUT_CVBS_VI3A = 0x08, + INPUT_CVBS_VI3B, + INPUT_CVBS_VI3C, + INPUT_CVBS_VI4A = 0x0C, + /* + * S-Video input selection + */ + INPUT_SVIDEO_VI2A_VI1A = 0x44, + INPUT_SVIDEO_VI2B_VI1B, + INPUT_SVIDEO_VI2C_VI1C, + INPUT_SVIDEO_VI2A_VI3A = 0x54, + INPUT_SVIDEO_VI2B_VI3B, + INPUT_SVIDEO_VI2C_VI3C, + INPUT_SVIDEO_VI4A_VI1A = 0x4C, + INPUT_SVIDEO_VI4A_VI1B, + INPUT_SVIDEO_VI4A_VI1C, + INPUT_SVIDEO_VI4A_VI3A = 0x5C, + INPUT_SVIDEO_VI4A_VI3B, + INPUT_SVIDEO_VI4A_VI3C, + + /* Need to add entries for + * RGB, YPbPr and SCART. + */ + INPUT_INVALID +}; + +/* enum for output format supported. */ +enum tvp514x_output { + OUTPUT_10BIT_422_EMBEDDED_SYNC = 0, + OUTPUT_20BIT_422_SEPERATE_SYNC, + OUTPUT_10BIT_422_SEPERATE_SYNC = 3, + OUTPUT_INVALID +}; + +/** + * struct tvp514x_platform_data - Platform data values and access functions. + * @clk_polarity: Clock polarity of the current interface. + * @hs_polarity: HSYNC Polarity configuration for current interface. + * @vs_polarity: VSYNC Polarity configuration for current interface. 
+ */ +struct tvp514x_platform_data { + /* Interface control params */ + bool clk_polarity; + bool hs_polarity; + bool vs_polarity; +}; + + +#endif /* ifndef _TVP514X_H */ diff --git a/6.18.0/include-overrides/media/i2c/tvp7002.h b/6.18.0/include-overrides/media/i2c/tvp7002.h new file mode 100644 index 0000000..c31eb00 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/tvp7002.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Texas Instruments Triple 8-/10-BIT 165-/110-MSPS Video and Graphics + * Digitizer with Horizontal PLL registers + * + * Copyright (C) 2009 Texas Instruments Inc + * Author: Santiago Nunez-Corrales + * + * This code is partially based upon the TVP5150 driver + * written by Mauro Carvalho Chehab , + * the TVP514x driver written by Vaibhav Hiremath + * and the TVP7002 driver in the TI LSP 2.10.00.14 + */ +#ifndef _TVP7002_H_ +#define _TVP7002_H_ + +#define TVP7002_MODULE_NAME "tvp7002" + +/** + * struct tvp7002_config - Platform dependent data + *@clk_polarity: Clock polarity + * 0 - Data clocked out on rising edge of DATACLK signal + * 1 - Data clocked out on falling edge of DATACLK signal + *@hs_polarity: HSYNC polarity + * 0 - Active low HSYNC output, 1 - Active high HSYNC output + *@vs_polarity: VSYNC Polarity + * 0 - Active low VSYNC output, 1 - Active high VSYNC output + *@fid_polarity: Active-high Field ID polarity. + * 0 - The field ID output is set to logic 1 for an odd field + * (field 1) and set to logic 0 for an even field (field 0). + * 1 - Operation with polarity inverted. + *@sog_polarity: Active high Sync on Green output polarity. 
+ * 0 - Normal operation, 1 - Operation with polarity inverted + */ +struct tvp7002_config { + bool clk_polarity; + bool hs_polarity; + bool vs_polarity; + bool fid_polarity; + bool sog_polarity; +}; +#endif diff --git a/6.18.0/include-overrides/media/i2c/tw9910.h b/6.18.0/include-overrides/media/i2c/tw9910.h new file mode 100644 index 0000000..77da94f --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/tw9910.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tw9910 Driver header + * + * Copyright (C) 2008 Renesas Solutions Corp. + * Kuninori Morimoto + * + * Based on ov772x.h + * + * Copyright (C) Kuninori Morimoto + */ + +#ifndef __TW9910_H__ +#define __TW9910_H__ + +/* MPOUT (multi-purpose output) pin functions */ +enum tw9910_mpout_pin { + TW9910_MPO_VLOSS, + TW9910_MPO_HLOCK, + TW9910_MPO_SLOCK, + TW9910_MPO_VLOCK, + TW9910_MPO_MONO, + TW9910_MPO_DET50, + TW9910_MPO_FIELD, + TW9910_MPO_RTCO, +}; + +/** + * struct tw9910_video_info - tw9910 driver interface structure + * @buswidth: Parallel data bus width (8 or 16). + * @mpout: Selected function of MPOUT (multi-purpose output) pin. + * See enum tw9910_mpout_pin + */ +struct tw9910_video_info { + unsigned long buswidth; + enum tw9910_mpout_pin mpout; +}; + + +#endif /* __TW9910_H__ */ diff --git a/6.18.0/include-overrides/media/i2c/uda1342.h b/6.18.0/include-overrides/media/i2c/uda1342.h new file mode 100644 index 0000000..cb412d4 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/uda1342.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * uda1342.h - definition for uda1342 inputs + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#ifndef _UDA1342_H_ +#define _UDA1342_H_ + +/* The UDA1342 has 2 inputs */ + +#define UDA1342_IN1 1 +#define UDA1342_IN2 2 + +#endif diff --git a/6.18.0/include-overrides/media/i2c/upd64031a.h b/6.18.0/include-overrides/media/i2c/upd64031a.h new file mode 100644 index 0000000..d39b2b7 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/upd64031a.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * upd64031a - NEC Electronics Ghost Reduction input defines + * + * 2006 by Hans Verkuil (hverkuil@kernel.org) + */ + +#ifndef _UPD64031A_H_ +#define _UPD64031A_H_ + +/* Ghost reduction modes */ +#define UPD64031A_GR_ON 0 +#define UPD64031A_GR_OFF 1 +#define UPD64031A_GR_THROUGH 3 + +/* Direct 3D/YCS Connection */ +#define UPD64031A_3DYCS_DISABLE (0 << 2) +#define UPD64031A_3DYCS_COMPOSITE (2 << 2) +#define UPD64031A_3DYCS_SVIDEO (3 << 2) + +/* Composite sync digital separation circuit */ +#define UPD64031A_COMPOSITE_EXTERNAL (1 << 4) + +/* Vertical sync digital separation circuit */ +#define UPD64031A_VERTICAL_EXTERNAL (1 << 5) + +#endif diff --git a/6.18.0/include-overrides/media/i2c/upd64083.h b/6.18.0/include-overrides/media/i2c/upd64083.h new file mode 100644 index 0000000..72cf547 --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/upd64083.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * upd6408x - NEC Electronics 3-Dimensional Y/C separation input defines + * + * 2006 by Hans Verkuil (hverkuil@kernel.org) + */ + +#ifndef _UPD64083_H_ +#define _UPD64083_H_ + +/* There are two bits of information that the driver needs in order + to select the correct routing: the operating mode and the selection + of the Y input (external or internal). + + The first two operating modes expect a composite signal on the Y input, + the second two operating modes use both the Y and C inputs. + + Normally YCS_MODE is used for tuner and composite inputs, and the + YCNR mode is used for S-Video inputs. 
+ + The external Y-ADC is selected when the composite input comes from a + upd64031a ghost reduction device. If this device is not present, or + the input is a S-Video signal, then the internal Y-ADC input should + be used. */ + +/* Operating modes: */ + +/* YCS mode: Y/C separation (burst locked clocking) */ +#define UPD64083_YCS_MODE 0 +/* YCS+ mode: 2D Y/C separation and YCNR (burst locked clocking) */ +#define UPD64083_YCS_PLUS_MODE 1 + +/* Note: the following two modes cannot be used in combination with the + external Y-ADC. */ +/* MNNR mode: frame comb type YNR+C delay (line locked clocking) */ +#define UPD64083_MNNR_MODE 2 +/* YCNR mode: frame recursive YCNR (burst locked clocking) */ +#define UPD64083_YCNR_MODE 3 + +/* Select external Y-ADC: this should be set if this device is used in + combination with the upd64031a ghost reduction device. + Otherwise leave at 0 (use internal Y-ADC). */ +#define UPD64083_EXT_Y_ADC (1 << 2) + +#endif diff --git a/6.18.0/include-overrides/media/i2c/wm8775.h b/6.18.0/include-overrides/media/i2c/wm8775.h new file mode 100644 index 0000000..a02695e --- /dev/null +++ b/6.18.0/include-overrides/media/i2c/wm8775.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + wm8775.h - definition for wm8775 inputs and outputs + + Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org) + +*/ + +#ifndef _WM8775_H_ +#define _WM8775_H_ + +/* The WM8775 has 4 inputs and one output. Zero or more inputs + are multiplexed together to the output. Hence there are + 16 combinations. + If only one input is active (the normal case) then the + input values 1, 2, 4 or 8 should be used. 
*/ + +#define WM8775_AIN1 1 +#define WM8775_AIN2 2 +#define WM8775_AIN3 4 +#define WM8775_AIN4 8 + + +struct wm8775_platform_data { + /* + * FIXME: Instead, we should parameterize the params + * that need different settings between ivtv, pvrusb2, and Nova-S + */ + bool is_nova_s; +}; + +#endif diff --git a/6.18.0/include-overrides/media/imx.h b/6.18.0/include-overrides/media/imx.h new file mode 100644 index 0000000..5f14acd --- /dev/null +++ b/6.18.0/include-overrides/media/imx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014-2017 Mentor Graphics Inc. + */ + +#ifndef __MEDIA_IMX_H__ +#define __MEDIA_IMX_H__ + +#include + +#endif diff --git a/6.18.0/include-overrides/media/ipu-bridge.h b/6.18.0/include-overrides/media/ipu-bridge.h new file mode 100644 index 0000000..16fac76 --- /dev/null +++ b/6.18.0/include-overrides/media/ipu-bridge.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Author: Dan Scally */ +#ifndef __IPU_BRIDGE_H +#define __IPU_BRIDGE_H + +#include +#include +#include +#include + +#define IPU_HID "INT343E" +#define IPU_MAX_LANES 4 +#define IPU_MAX_PORTS 4 +#define MAX_NUM_LINK_FREQS 3 + +/* Values are educated guesses as we don't have a spec */ +#define IPU_SENSOR_ROTATION_NORMAL 0 +#define IPU_SENSOR_ROTATION_INVERTED 1 + +#define IPU_SENSOR_CONFIG(_HID, _NR, ...) 
\ + (const struct ipu_sensor_config) { \ + .hid = _HID, \ + .nr_link_freqs = _NR, \ + .link_freqs = { __VA_ARGS__ } \ + } + +#define NODE_SENSOR(_HID, _PROPS) \ + (const struct software_node) { \ + .name = _HID, \ + .properties = _PROPS, \ + } + +#define NODE_PORT(_PORT, _SENSOR_NODE) \ + (const struct software_node) { \ + .name = _PORT, \ + .parent = _SENSOR_NODE, \ + } + +#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \ + (const struct software_node) { \ + .name = _EP, \ + .parent = _PORT, \ + .properties = _PROPS, \ + } + +#define NODE_VCM(_TYPE) \ + (const struct software_node) { \ + .name = _TYPE, \ + } + +enum ipu_sensor_swnodes { + SWNODE_SENSOR_HID, + SWNODE_SENSOR_PORT, + SWNODE_SENSOR_ENDPOINT, + SWNODE_IPU_PORT, + SWNODE_IPU_ENDPOINT, + /* below are optional / maybe empty */ + SWNODE_IVSC_HID, + SWNODE_IVSC_SENSOR_PORT, + SWNODE_IVSC_SENSOR_ENDPOINT, + SWNODE_IVSC_IPU_PORT, + SWNODE_IVSC_IPU_ENDPOINT, + SWNODE_VCM, + SWNODE_COUNT +}; + +/* Data representation as it is in ACPI SSDB buffer */ +struct ipu_sensor_ssdb { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 reserved1[3]; + u8 mclkport; + u8 reserved2[13]; +} __packed; + +struct ipu_property_names { + char clock_frequency[16]; + char rotation[9]; + char orientation[12]; + char bus_type[9]; + char data_lanes[11]; + char remote_endpoint[16]; + char link_frequencies[17]; +}; + +struct ipu_node_names { + char port[7]; + char ivsc_sensor_port[7]; + char ivsc_ipu_port[7]; + char endpoint[11]; + char remote_port[9]; + char vcm[16]; +}; + +struct ipu_sensor_config { + const char *hid; + const u8 nr_link_freqs; + const u64 link_freqs[MAX_NUM_LINK_FREQS]; +}; 
+ +struct ipu_sensor { + /* append ssdb.link(u8) in "-%u" format as suffix of HID */ + char name[ACPI_ID_LEN + 4]; + struct acpi_device *adev; + + struct device *csi_dev; + struct acpi_device *ivsc_adev; + char ivsc_name[ACPI_ID_LEN + 4]; + + /* SWNODE_COUNT + 1 for terminating NULL */ + const struct software_node *group[SWNODE_COUNT + 1]; + struct software_node swnodes[SWNODE_COUNT]; + struct ipu_node_names node_names; + + u8 link; + u8 lanes; + u32 mclkspeed; + u32 rotation; + enum v4l2_fwnode_orientation orientation; + const char *vcm_type; + + struct ipu_property_names prop_names; + struct property_entry ep_properties[5]; + struct property_entry dev_properties[5]; + struct property_entry ipu_properties[3]; + struct property_entry ivsc_properties[1]; + struct property_entry ivsc_sensor_ep_properties[4]; + struct property_entry ivsc_ipu_ep_properties[4]; + + struct software_node_ref_args local_ref[1]; + struct software_node_ref_args remote_ref[1]; + struct software_node_ref_args vcm_ref[1]; + struct software_node_ref_args ivsc_sensor_ref[1]; + struct software_node_ref_args ivsc_ipu_ref[1]; +}; + +typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev, + struct ipu_sensor *sensor); + +struct ipu_bridge { + struct device *dev; + ipu_parse_sensor_fwnode_t parse_sensor_fwnode; + char ipu_node_name[ACPI_ID_LEN]; + struct software_node ipu_hid_node; + u32 data_lanes[4]; + unsigned int n_sensors; + struct ipu_sensor sensors[IPU_MAX_PORTS]; +}; + +#if IS_ENABLED(CONFIG_IPU_BRIDGE) +int ipu_bridge_init(struct device *dev, + ipu_parse_sensor_fwnode_t parse_sensor_fwnode); +int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor); +int ipu_bridge_instantiate_vcm(struct device *sensor); +#else +/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */ +#define ipu_bridge_init(dev, parse_sensor_fwnode) (0) +static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; } +#endif + +#endif diff --git 
a/6.18.0/include-overrides/media/ipu6-pci-table.h b/6.18.0/include-overrides/media/ipu6-pci-table.h new file mode 100644 index 0000000..0899d9d --- /dev/null +++ b/6.18.0/include-overrides/media/ipu6-pci-table.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Intel Corporation + */ + +#ifndef __IPU6_PCI_TBL_H__ +#define __IPU6_PCI_TBL_H__ + +#include + +#define PCI_DEVICE_ID_INTEL_IPU6 0x9a19 +#define PCI_DEVICE_ID_INTEL_IPU6SE 0x4e19 +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLP 0x465d +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLN 0x462e +#define PCI_DEVICE_ID_INTEL_IPU6EP_RPLP 0xa75d +#define PCI_DEVICE_ID_INTEL_IPU6EP_MTL 0x7d19 + +static const struct pci_device_id ipu6_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6SE) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLN) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_RPLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_MTL) }, + { } +}; + +#endif /* __IPU6_PCI_TBL_H__ */ diff --git a/6.18.0/include-overrides/media/jpeg.h b/6.18.0/include-overrides/media/jpeg.h new file mode 100644 index 0000000..a01e142 --- /dev/null +++ b/6.18.0/include-overrides/media/jpeg.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _MEDIA_JPEG_H_ +#define _MEDIA_JPEG_H_ + +/* JPEG markers */ +#define JPEG_MARKER_TEM 0x01 +#define JPEG_MARKER_SOF0 0xc0 +#define JPEG_MARKER_DHT 0xc4 +#define JPEG_MARKER_RST 0xd0 +#define JPEG_MARKER_SOI 0xd8 +#define JPEG_MARKER_EOI 0xd9 +#define JPEG_MARKER_SOS 0xda +#define JPEG_MARKER_DQT 0xdb +#define JPEG_MARKER_DRI 0xdd +#define JPEG_MARKER_DHP 0xde +#define JPEG_MARKER_APP0 0xe0 +#define JPEG_MARKER_COM 0xfe + +#endif /* _MEDIA_JPEG_H_ */ diff --git a/6.18.0/include-overrides/media/media-dev-allocator.h b/6.18.0/include-overrides/media/media-dev-allocator.h new file mode 100644 index 0000000..2ab54d4 
--- /dev/null +++ b/6.18.0/include-overrides/media/media-dev-allocator.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * media-dev-allocator.h - Media Controller Device Allocator API + * + * Copyright (c) 2019 Shuah Khan + * + * Credits: Suggested by Laurent Pinchart + */ + +/* + * This file adds a global ref-counted Media Controller Device Instance API. + * A system wide global media device list is managed and each media device + * includes a kref count. The last put on the media device releases the media + * device instance. + */ + +#ifndef _MEDIA_DEV_ALLOCATOR_H +#define _MEDIA_DEV_ALLOCATOR_H + +struct usb_device; + +#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB) +/** + * media_device_usb_allocate() - Allocate and return struct &media device + * + * @udev: struct &usb_device pointer + * @module_name: should be filled with %KBUILD_MODNAME + * @owner: struct module pointer %THIS_MODULE for the driver. + * %THIS_MODULE is null for a built-in driver. + * It is safe even when %THIS_MODULE is null. + * + * This interface should be called to allocate a Media Device when multiple + * drivers share usb_device and the media device. This interface allocates + * &media_device structure and calls media_device_usb_init() to initialize + * it. + * + */ +struct media_device *media_device_usb_allocate(struct usb_device *udev, + const char *module_name, + struct module *owner); +/** + * media_device_delete() - Release media device. Calls kref_put(). + * + * @mdev: struct &media_device pointer + * @module_name: should be filled with %KBUILD_MODNAME + * @owner: struct module pointer %THIS_MODULE for the driver. + * %THIS_MODULE is null for a built-in driver. + * It is safe even when %THIS_MODULE is null. + * + * This interface should be called to put Media Device Instance kref. 
+ */ +void media_device_delete(struct media_device *mdev, const char *module_name, + struct module *owner); +#else +static inline struct media_device *media_device_usb_allocate( + struct usb_device *udev, const char *module_name, + struct module *owner) + { return NULL; } +static inline void media_device_delete( + struct media_device *mdev, const char *module_name, + struct module *owner) { } +#endif /* CONFIG_MEDIA_CONTROLLER && CONFIG_USB */ +#endif /* _MEDIA_DEV_ALLOCATOR_H */ diff --git a/6.18.0/include-overrides/media/media-device.h b/6.18.0/include-overrides/media/media-device.h new file mode 100644 index 0000000..53d2a16 --- /dev/null +++ b/6.18.0/include-overrides/media/media-device.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media device + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + */ + +#ifndef _MEDIA_DEVICE_H +#define _MEDIA_DEVICE_H + +#include +#include +#include +#include + +#include +#include + +struct ida; +struct media_device; + +/** + * struct media_entity_notify - Media Entity Notify + * + * @list: List head + * @notify_data: Input data to invoke the callback + * @notify: Callback function pointer + * + * Drivers may register a callback to take action when new entities get + * registered with the media device. This handler is intended for creating + * links between existing entities and should not create entities and register + * them. + */ +struct media_entity_notify { + struct list_head list; + void *notify_data; + void (*notify)(struct media_entity *entity, void *notify_data); +}; + +/** + * struct media_device_ops - Media device operations + * @link_notify: Link state change notification callback. This callback is + * called with the graph_mutex held. + * @req_alloc: Allocate a request. Set this if you need to allocate a struct + * larger then struct media_request. @req_alloc and @req_free must + * either both be set or both be NULL. 
+ * @req_free: Free a request. Set this if @req_alloc was set as well, leave + * to NULL otherwise. + * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex + * lock is held when this op is called. + * @req_queue: Queue a validated request, cannot fail. If something goes + * wrong when queueing this request then it should be marked + * as such internally in the driver and any related buffers + * must eventually return to vb2 with state VB2_BUF_STATE_ERROR. + * The req_queue_mutex lock is held when this op is called. + * It is important that vb2 buffer objects are queued last after + * all other object types are queued: queueing a buffer kickstarts + * the request processing, so all other objects related to the + * request (and thus the buffer) must be available to the driver. + * And once a buffer is queued, then the driver can complete + * or delete objects from the request before req_queue exits. + */ +struct media_device_ops { + int (*link_notify)(struct media_link *link, u32 flags, + unsigned int notification); + struct media_request *(*req_alloc)(struct media_device *mdev); + void (*req_free)(struct media_request *req); + int (*req_validate)(struct media_request *req); + void (*req_queue)(struct media_request *req); +}; + +/** + * struct media_device - Media device + * @dev: Parent device + * @devnode: Media device node + * @driver_name: Optional device driver name. If not set, calls to + * %MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``. + * This is needed for USB drivers for example, as otherwise + * they'll all appear as if the driver name was "usb". + * @model: Device model name + * @serial: Device serial number (optional) + * @bus_info: Unique and stable device location identifier + * @hw_revision: Hardware device revision + * @topology_version: Monotonic counter for storing the version of the graph + * topology. Should be incremented each time the topology changes. 
+ * @id: Unique ID used on the last registered graph object + * @entity_internal_idx: Unique internal entity ID used by the graph traversal + * algorithms + * @entity_internal_idx_max: Allocated internal entity indices + * @entities: List of registered entities + * @interfaces: List of registered interfaces + * @pads: List of registered pads + * @links: List of registered links + * @entity_notify: List of registered entity_notify callbacks + * @graph_mutex: Protects access to struct media_device data + * @pm_count_walk: Graph walk for power state walk. Access serialised using + * graph_mutex. + * + * @source_priv: Driver Private data for enable/disable source handlers + * @enable_source: Enable Source Handler function pointer + * @disable_source: Disable Source Handler function pointer + * + * @ops: Operation handler callbacks + * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t. + * other operations that stop or start streaming. + * @request_id: Used to generate unique request IDs + * + * This structure represents an abstract high-level media device. It allows easy + * access to entities and provides basic media device-level support. The + * structure can be allocated directly or embedded in a larger structure. + * + * The parent @dev is a physical device. It must be set before registering the + * media device. + * + * @model is a descriptive model name exported through sysfs. It doesn't have to + * be unique. + * + * @enable_source is a handler to find source entity for the + * sink entity and activate the link between them if source + * entity is free. Drivers should call this handler before + * accessing the source. + * + * @disable_source is a handler to find source entity for the + * sink entity and deactivate the link between them. Drivers + * should call this handler to release the source. 
+ * + * Use-case: find tuner entity connected to the decoder + * entity and check if it is available, and activate the + * link between them from @enable_source and deactivate + * from @disable_source. + * + * .. note:: + * + * Bridge driver is expected to implement and set the + * handler when &media_device is registered or when + * bridge driver finds the media_device during probe. + * Bridge driver sets source_priv with information + * necessary to run @enable_source and @disable_source handlers. + * Callers should hold graph_mutex to access and call @enable_source + * and @disable_source handlers. + */ +struct media_device { + /* dev->driver_data points to this struct. */ + struct device *dev; + struct media_devnode *devnode; + + char model[32]; + char driver_name[32]; + char serial[40]; + char bus_info[32]; + u32 hw_revision; + + u64 topology_version; + + u32 id; + struct ida entity_internal_idx; + int entity_internal_idx_max; + + struct list_head entities; + struct list_head interfaces; + struct list_head pads; + struct list_head links; + + /* notify callback list invoked when a new entity is registered */ + struct list_head entity_notify; + + /* Serializes graph operations. */ + struct mutex graph_mutex; + struct media_graph pm_count_walk; + + void *source_priv; + int (*enable_source)(struct media_entity *entity, + struct media_pipeline *pipe); + void (*disable_source)(struct media_entity *entity); + + const struct media_device_ops *ops; + + struct mutex req_queue_mutex; + atomic_t request_id; +}; + +/* We don't need to include usb.h here */ +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/* Supported link_notify @notification values. */ +#define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 +#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 + +/** + * media_device_init() - Initializes a media device element + * + * @mdev: pointer to struct &media_device + * + * This function initializes the media device prior to its registration. 
+ * The media device initialization and registration is split in two functions + * to avoid race conditions and make the media device available to user-space + * before the media graph has been completed. + * + * So drivers need to first initialize the media device, register any entity + * within the media device, create pad to pad links and then finally register + * the media device by calling media_device_register() as a final step. + * + * The caller is responsible for initializing the media device before + * registration. The following fields must be set: + * + * - dev must point to the parent device + * - model must be filled with the device model name + * + * The bus_info field is set by media_device_init() for PCI and platform devices + * if the field begins with '\0'. + */ +void media_device_init(struct media_device *mdev); + +/** + * media_device_cleanup() - Cleanups a media device element + * + * @mdev: pointer to struct &media_device + * + * This function that will destroy the graph_mutex that is + * initialized in media_device_init(). + */ +void media_device_cleanup(struct media_device *mdev); + +/** + * __media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * @owner: should be filled with %THIS_MODULE + * + * Users, should, instead, call the media_device_register() macro. + * + * The caller is responsible for initializing the &media_device structure + * before registration. The following fields of &media_device must be set: + * + * - &media_device.model must be filled with the device model name as a + * NUL-terminated UTF-8 string. The device/model revision must not be + * stored in this field. + * + * The following fields are optional: + * + * - &media_device.serial is a unique serial number stored as a + * NUL-terminated ASCII string. The field is big enough to store a GUID + * in text form. If the hardware doesn't provide a unique serial number + * this field must be left empty. 
+ * + * - &media_device.bus_info represents the location of the device in the + * system as a NUL-terminated ASCII string. For PCI/PCIe devices + * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by + * the value of pci_name(). For USB devices,the usb_make_path() function + * must be used. This field is used by applications to distinguish between + * otherwise identical devices that don't provide a serial number. + * + * - &media_device.hw_revision is the hardware device revision in a + * driver-specific format. When possible the revision should be formatted + * with the KERNEL_VERSION() macro. + * + * .. note:: + * + * #) Upon successful registration a character device named media[0-9]+ is created. The device major and minor numbers are dynamic. The model name is exported as a sysfs attribute. + * + * #) Unregistering a media device that hasn't been registered is **NOT** safe. + * + * Return: returns zero on success or a negative error code. + */ +int __must_check __media_device_register(struct media_device *mdev, + struct module *owner); + + +/** + * media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * + * This macro calls __media_device_register() passing %THIS_MODULE as + * the __media_device_register() second argument (**owner**). + */ +#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) + +/** + * media_device_unregister() - Unregisters a media device element + * + * @mdev: pointer to struct &media_device + * + * It is safe to call this function on an unregistered (but initialised) + * media device. + */ +void media_device_unregister(struct media_device *mdev); + +/** + * media_device_register_entity() - registers a media entity inside a + * previously registered media device. + * + * @mdev: pointer to struct &media_device + * @entity: pointer to struct &media_entity to be registered + * + * Entities are identified by a unique positive integer ID. 
The media + * controller framework will such ID automatically. IDs are not guaranteed + * to be contiguous, and the ID number can change on newer Kernel versions. + * So, neither the driver nor userspace should hardcode ID numbers to refer + * to the entities, but, instead, use the framework to find the ID, when + * needed. + * + * The media_entity name, type and flags fields should be initialized before + * calling media_device_register_entity(). Entities embedded in higher-level + * standard structures can have some of those fields set by the higher-level + * framework. + * + * If the device has pads, media_entity_pads_init() should be called before + * this function. Otherwise, the &media_entity.pad and &media_entity.num_pads + * should be zeroed before calling this function. + * + * Entities have flags that describe the entity capabilities and state: + * + * %MEDIA_ENT_FL_DEFAULT + * indicates the default entity for a given type. + * This can be used to report the default audio and video devices or the + * default camera sensor. + * + * .. note:: + * + * Drivers should set the entity function before calling this function. + * Please notice that the values %MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN and + * %MEDIA_ENT_F_UNKNOWN should not be used by the drivers. + */ +int __must_check media_device_register_entity(struct media_device *mdev, + struct media_entity *entity); + +/** + * media_device_unregister_entity() - unregisters a media entity. + * + * @entity: pointer to struct &media_entity to be unregistered + * + * All links associated with the entity and all PADs are automatically + * unregistered from the media_device when this function is called. + * + * Unregistering an entity will not change the IDs of the other entities and + * the previoully used ID will never be reused for a newly registered entities. + * + * When a media device is unregistered, all its entities are unregistered + * automatically. No manual entities unregistration is then required. + * + * .. 
note:: + * + * The media_entity instance itself must be freed explicitly by + * the driver if required. + */ +void media_device_unregister_entity(struct media_entity *entity); + +/** + * media_device_register_entity_notify() - Registers a media entity_notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + * .. note:: + * + * When a new entity is registered, all the registered + * media_entity_notify callbacks are invoked. + */ + +void media_device_register_entity_notify(struct media_device *mdev, + struct media_entity_notify *nptr); + +/** + * media_device_unregister_entity_notify() - Unregister a media entity notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + */ +void media_device_unregister_entity_notify(struct media_device *mdev, + struct media_entity_notify *nptr); + +/* Iterate over all entities. */ +#define media_device_for_each_entity(entity, mdev) \ + list_for_each_entry(entity, &(mdev)->entities, graph_obj.list) + +/* Iterate over all interfaces. */ +#define media_device_for_each_intf(intf, mdev) \ + list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list) + +/* Iterate over all pads. */ +#define media_device_for_each_pad(pad, mdev) \ + list_for_each_entry(pad, &(mdev)->pads, graph_obj.list) + +/* Iterate over all links. */ +#define media_device_for_each_link(link, mdev) \ + list_for_each_entry(link, &(mdev)->links, graph_obj.list) + +/** + * media_device_pci_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @pci_dev: pointer to struct pci_dev + * @name: media device name. If %NULL, the routine will use the default + * name for the pci device, given by pci_name() macro. + */ +void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + const char *name); +/** + * __media_device_usb_init() - create and initialize a + * struct &media_device from a PCI device. 
+ * + * @mdev: pointer to struct &media_device + * @udev: pointer to struct usb_device + * @board_name: media device name. If %NULL, the routine will use the usb + * product name, if available. + * @driver_name: name of the driver. if %NULL, the routine will use the name + * given by ``udev->dev->driver->name``, with is usually the wrong + * thing to do. + * + * .. note:: + * + * It is better to call media_device_usb_init() instead, as + * such macro fills driver_name with %KBUILD_MODNAME. + */ +void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + const char *board_name, + const char *driver_name); + +#else +static inline void media_device_init(struct media_device *mdev) +{ +} +static inline int media_device_register(struct media_device *mdev) +{ + return 0; +} +static inline void media_device_unregister(struct media_device *mdev) +{ +} +static inline void media_device_cleanup(struct media_device *mdev) +{ +} +static inline int media_device_register_entity(struct media_device *mdev, + struct media_entity *entity) +{ + return 0; +} +static inline void media_device_unregister_entity(struct media_entity *entity) +{ +} +static inline void media_device_register_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} +static inline void media_device_unregister_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} + +static inline void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + char *name) +{ +} + +static inline void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + char *board_name, + char *driver_name) +{ +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/** + * media_device_usb_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @udev: pointer to struct usb_device + * @name: media device name. 
If %NULL, the routine will use the usb + * product name, if available. + * + * This macro calls media_device_usb_init() passing the + * media_device_usb_init() **driver_name** parameter filled with + * %KBUILD_MODNAME. + */ +#define media_device_usb_init(mdev, udev, name) \ + __media_device_usb_init(mdev, udev, name, KBUILD_MODNAME) + +/** + * media_set_bus_info() - Set bus_info field + * + * @bus_info: Variable where to write the bus info (char array) + * @bus_info_size: Length of the bus_info + * @dev: Related struct device + * + * Sets bus information based on &dev. This is currently done for PCI and + * platform devices. dev is required to be non-NULL for this to happen. + * + * This function is not meant to be called from drivers. + */ +static inline void +media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev) +{ + if (!dev) + strscpy(bus_info, "no bus info", bus_info_size); + else if (dev_is_platform(dev)) + snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev)); + else if (dev_is_pci(dev)) + snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev)); +} + +#endif diff --git a/6.18.0/include-overrides/media/media-devnode.h b/6.18.0/include-overrides/media/media-devnode.h new file mode 100644 index 0000000..d27c1c6 --- /dev/null +++ b/6.18.0/include-overrides/media/media-devnode.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media device node + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + * + * -- + * + * Common functions for media-related drivers to register and unregister media + * device nodes. + */ + +#ifndef _MEDIA_DEVNODE_H +#define _MEDIA_DEVNODE_H + +#include +#include +#include +#include + +struct media_device; + +/* + * Flag to mark the media_devnode struct as registered. Drivers must not touch + * this flag directly, it will be set and cleared by media_devnode_register and + * media_devnode_unregister. 
+ */ +#define MEDIA_FLAG_REGISTERED 0 + +/** + * struct media_file_operations - Media device file operations + * + * @owner: should be filled with %THIS_MODULE + * @read: pointer to the function that implements read() syscall + * @write: pointer to the function that implements write() syscall + * @poll: pointer to the function that implements poll() syscall + * @ioctl: pointer to the function that implements ioctl() syscall + * @compat_ioctl: pointer to the function that will handle 32 bits userspace + * calls to the ioctl() syscall on a Kernel compiled with 64 bits. + * @open: pointer to the function that implements open() syscall + * @release: pointer to the function that will release the resources allocated + * by the @open function. + */ +struct media_file_operations { + struct module *owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*open) (struct file *); + int (*release) (struct file *); +}; + +/** + * struct media_devnode - Media device node + * @media_dev: pointer to struct &media_device + * @fops: pointer to struct &media_file_operations with media device ops + * @dev: pointer to struct &device containing the media controller device + * @cdev: struct cdev pointer character device + * @parent: parent device + * @minor: device node minor number + * @flags: flags, combination of the ``MEDIA_FLAG_*`` constants + * @release: release callback called at the end of ``media_devnode_release()`` + * routine at media-device.c. + * + * This structure represents a media-related device node. + * + * The @parent is a physical device. It must be set by core or device drivers + * before registering the node. 
+ */ +struct media_devnode { + struct media_device *media_dev; + + /* device ops */ + const struct media_file_operations *fops; + + /* sysfs */ + struct device dev; /* media device */ + struct cdev cdev; /* character device */ + struct device *parent; /* device parent */ + + /* device info */ + int minor; + unsigned long flags; /* Use bitops to access flags */ + + /* callbacks */ + void (*release)(struct media_devnode *devnode); +}; + +/* dev to media_devnode */ +#define to_media_devnode(cd) container_of(cd, struct media_devnode, dev) + +/** + * media_devnode_register - register a media device node + * + * @mdev: struct media_device we want to register a device node + * @devnode: media device node structure we want to register + * @owner: should be filled with %THIS_MODULE + * + * The registration code assigns minor numbers and registers the new device node + * with the kernel. An error is returned if no free minor number can be found, + * or if the registration of the device node fails. + * + * Zero is returned on success. + * + * Note that if the media_devnode_register call fails, the release() callback of + * the media_devnode structure is *not* called, so the caller is responsible for + * freeing any data. + */ +int __must_check media_devnode_register(struct media_device *mdev, + struct media_devnode *devnode, + struct module *owner); + +/** + * media_devnode_unregister_prepare - clear the media device node register bit + * @devnode: the device node to prepare for unregister + * + * This clears the passed device register bit. Future open calls will be met + * with errors. Should be called before media_devnode_unregister() to avoid + * races with unregister and device file open calls. + * + * This function can safely be called if the device node has never been + * registered or has already been unregistered. 
+ */ +void media_devnode_unregister_prepare(struct media_devnode *devnode); + +/** + * media_devnode_unregister - unregister a media device node + * @devnode: the device node to unregister + * + * This unregisters the passed device. Future open calls will be met with + * errors. + * + * Should be called after media_devnode_unregister_prepare() + */ +void media_devnode_unregister(struct media_devnode *devnode); + +/** + * media_devnode_data - returns a pointer to the &media_devnode + * + * @filp: pointer to struct &file + */ +static inline struct media_devnode *media_devnode_data(struct file *filp) +{ + return filp->private_data; +} + +/** + * media_devnode_is_registered - returns true if &media_devnode is registered; + * false otherwise. + * + * @devnode: pointer to struct &media_devnode. + * + * Note: If mdev is NULL, it also returns false. + */ +static inline int media_devnode_is_registered(struct media_devnode *devnode) +{ + if (!devnode) + return false; + + return test_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); +} + +#endif /* _MEDIA_DEVNODE_H */ diff --git a/6.18.0/include-overrides/media/media-entity.h b/6.18.0/include-overrides/media/media-entity.h new file mode 100644 index 0000000..64cf590 --- /dev/null +++ b/6.18.0/include-overrides/media/media-entity.h @@ -0,0 +1,1450 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media entity + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + */ + +#ifndef _MEDIA_ENTITY_H +#define _MEDIA_ENTITY_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Enums used internally at the media controller to represent graphs */ + +/** + * enum media_gobj_type - type of a graph object + * + * @MEDIA_GRAPH_ENTITY: Identify a media entity + * @MEDIA_GRAPH_PAD: Identify a media pad + * @MEDIA_GRAPH_LINK: Identify a media link + * @MEDIA_GRAPH_INTF_DEVNODE: Identify a media Kernel API interface via + * a device node + */ +enum media_gobj_type { 
+ MEDIA_GRAPH_ENTITY, + MEDIA_GRAPH_PAD, + MEDIA_GRAPH_LINK, + MEDIA_GRAPH_INTF_DEVNODE, +}; + +#define MEDIA_BITS_PER_TYPE 8 +#define MEDIA_BITS_PER_ID (32 - MEDIA_BITS_PER_TYPE) +#define MEDIA_ID_MASK GENMASK_ULL(MEDIA_BITS_PER_ID - 1, 0) + +/* Structs to represent the objects that belong to a media graph */ + +/** + * struct media_gobj - Define a graph object. + * + * @mdev: Pointer to the struct &media_device that owns the object + * @id: Non-zero object ID identifier. The ID should be unique + * inside a media_device, as it is composed by + * %MEDIA_BITS_PER_TYPE to store the type plus + * %MEDIA_BITS_PER_ID to store the ID + * @list: List entry stored in one of the per-type mdev object lists + * + * All objects on the media graph should have this struct embedded + */ +struct media_gobj { + struct media_device *mdev; + u32 id; + struct list_head list; +}; + +#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 + +/** + * struct media_entity_enum - An enumeration of media entities. + * + * @bmap: Bit map in which each bit represents one entity at struct + * media_entity->internal_idx. + * @idx_max: Number of bits in bmap + */ +struct media_entity_enum { + unsigned long *bmap; + int idx_max; +}; + +/** + * struct media_graph - Media graph traversal state + * + * @stack: Graph traversal stack; the stack contains information + * on the path the media entities to be walked and the + * links through which they were reached. + * @stack.entity: pointer to &struct media_entity at the graph. + * @stack.link: pointer to &struct list_head. 
+ * @ent_enum: Visited entities + * @top: The top of the stack + */ +struct media_graph { + struct { + struct media_entity *entity; + struct list_head *link; + } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; + + struct media_entity_enum ent_enum; + int top; +}; + +/** + * struct media_pipeline - Media pipeline related information + * + * @allocated: Media pipeline allocated and freed by the framework + * @mdev: The media device the pipeline is part of + * @pads: List of media_pipeline_pad + * @start_count: Media pipeline start - stop count + */ +struct media_pipeline { + bool allocated; + struct media_device *mdev; + struct list_head pads; + int start_count; +}; + +/** + * struct media_pipeline_pad - A pad part of a media pipeline + * + * @list: Entry in the media_pad pads list + * @pipe: The media_pipeline that the pad is part of + * @pad: The media pad + * + * This structure associate a pad with a media pipeline. Instances of + * media_pipeline_pad are created by media_pipeline_start() when it builds the + * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop() + * removes the entries from the list and deletes them. + */ +struct media_pipeline_pad { + struct list_head list; + struct media_pipeline *pipe; + struct media_pad *pad; +}; + +/** + * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad + * + * @cursor: The current element + */ +struct media_pipeline_pad_iter { + struct list_head *cursor; +}; + +/** + * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity + * + * @ent_enum: The entity enumeration tracker + * @cursor: The current element + */ +struct media_pipeline_entity_iter { + struct media_entity_enum ent_enum; + struct list_head *cursor; +}; + +/** + * struct media_link - A link object part of a media graph. + * + * @graph_obj: Embedded structure containing the media object common data + * @list: Linked list associated with an entity or an interface that + * owns the link. 
+ * @gobj0: Part of a union. Used to get the pointer for the first + * graph_object of the link. + * @source: Part of a union. Used only if the first object (gobj0) is + * a pad. In that case, it represents the source pad. + * @intf: Part of a union. Used only if the first object (gobj0) is + * an interface. + * @gobj1: Part of a union. Used to get the pointer for the second + * graph_object of the link. + * @sink: Part of a union. Used only if the second object (gobj1) is + * a pad. In that case, it represents the sink pad. + * @entity: Part of a union. Used only if the second object (gobj1) is + * an entity. + * @reverse: Pointer to the link for the reverse direction of a pad to pad + * link. + * @flags: Link flags, as defined in uapi/media.h (MEDIA_LNK_FL_*) + * @is_backlink: Indicate if the link is a backlink. + */ +struct media_link { + struct media_gobj graph_obj; + struct list_head list; + union { + struct media_gobj *gobj0; + struct media_pad *source; + struct media_interface *intf; + }; + union { + struct media_gobj *gobj1; + struct media_pad *sink; + struct media_entity *entity; + }; + struct media_link *reverse; + unsigned long flags; + bool is_backlink; +}; + +/** + * enum media_pad_signal_type - type of the signal inside a media pad + * + * @PAD_SIGNAL_DEFAULT: + * Default signal. Use this when all inputs or all outputs are + * uniquely identified by the pad number. + * @PAD_SIGNAL_ANALOG: + * The pad contains an analog signal. It can be Radio Frequency, + * Intermediate Frequency, a baseband signal or sub-carriers. + * Tuner inputs, IF-PLL demodulators, composite and s-video signals + * should use it. + * @PAD_SIGNAL_DV: + * Contains a digital video signal, with can be a bitstream of samples + * taken from an analog TV video source. On such case, it usually + * contains the VBI data on it. + * @PAD_SIGNAL_AUDIO: + * Contains an Intermediate Frequency analog signal from an audio + * sub-carrier or an audio bitstream. 
IF signals are provided by tuners + * and consumed by audio AM/FM decoders. Bitstream audio is provided by + * an audio decoder. + */ +enum media_pad_signal_type { + PAD_SIGNAL_DEFAULT = 0, + PAD_SIGNAL_ANALOG, + PAD_SIGNAL_DV, + PAD_SIGNAL_AUDIO, +}; + +/** + * struct media_pad - A media pad graph object. + * + * @graph_obj: Embedded structure containing the media object common data + * @entity: Entity this pad belongs to + * @index: Pad index in the entity pads array, numbered from 0 to n + * @num_links: Number of links connected to this pad + * @sig_type: Type of the signal inside a media pad + * @flags: Pad flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_PAD_FL_*``) + * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to + * access this field. + */ +struct media_pad { + struct media_gobj graph_obj; /* must be first field in struct */ + struct media_entity *entity; + u16 index; + u16 num_links; + enum media_pad_signal_type sig_type; + unsigned long flags; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + struct media_pipeline *pipe; +}; + +/** + * struct media_entity_operations - Media entity operations + * @get_fwnode_pad: Return the pad number based on a fwnode endpoint or + * a negative value on error. This operation can be used + * to map a fwnode to a media pad number. Optional. + * @link_setup: Notify the entity of link changes. The operation can + * return an error, in which case link setup will be + * cancelled. Optional. + * @link_validate: Return whether a link is valid from the entity point of + * view. The media_pipeline_start() function + * validates all links by calling this operation. Optional. + * @has_pad_interdep: Return whether two pads of the entity are + * interdependent. 
 If two pads are interdependent they are + * part of the same pipeline and enabling one of the pads + * means that the other pad will become "locked" and + * doesn't allow configuration changes. pad0 and pad1 are + * guaranteed to not both be sinks or sources. Never call + * the .has_pad_interdep() operation directly, always use + * media_entity_has_pad_interdep(). + * Optional: If the operation isn't implemented all pads + * will be considered as interdependent. + * + * .. note:: + * + * These callbacks are called with struct &media_device.graph_mutex + * mutex held. + */ +struct media_entity_operations { + int (*get_fwnode_pad)(struct media_entity *entity, + struct fwnode_endpoint *endpoint); + int (*link_setup)(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags); + int (*link_validate)(struct media_link *link); + bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0, + unsigned int pad1); +}; + +/** + * enum media_entity_type - Media entity type + * + * @MEDIA_ENTITY_TYPE_BASE: + * The entity isn't embedded in another subsystem structure. + * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE: + * The entity is embedded in a struct video_device instance. + * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV: + * The entity is embedded in a struct v4l2_subdev instance. + * + * Media entity objects are often not instantiated directly, but the media + * entity structure is inherited by (through embedding) other subsystem-specific + * structures. The media entity type identifies the type of the subclass + * structure that implements a media entity instance. + * + * This allows runtime type identification of media entities and safe casting to + * the correct object type. For instance, a media entity structure instance + * embedded in a v4l2_subdev structure instance will have the type + * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev + * structure using the container_of() macro. 
+ */ +enum media_entity_type { + MEDIA_ENTITY_TYPE_BASE, + MEDIA_ENTITY_TYPE_VIDEO_DEVICE, + MEDIA_ENTITY_TYPE_V4L2_SUBDEV, +}; + +/** + * struct media_entity - A media entity graph object. + * + * @graph_obj: Embedded structure containing the media object common data. + * @name: Entity name. + * @obj_type: Type of the object that implements the media_entity. + * @function: Entity main function, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_ENT_F_*``) + * @flags: Entity flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_ENT_FL_*``) + * @num_pads: Number of sink and source pads. + * @num_links: Total number of links, forward and back, enabled and disabled. + * @num_backlinks: Number of backlinks + * @internal_idx: An unique internal entity specific number. The numbers are + * re-used if entities are unregistered or registered again. + * @pads: Pads array with the size defined by @num_pads. + * @links: List of data links. + * @ops: Entity operations. + * @use_count: Use count for the entity. + * @info: Union with devnode information. Kept just for backward + * compatibility. + * @info.dev: Contains device major and minor info. + * @info.dev.major: device node major, if the device is a devnode. + * @info.dev.minor: device node minor, if the device is a devnode. + * + * .. note:: + * + * The @use_count reference count must never be negative, but is a signed + * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect + * reference count bugs that would make it negative. 
+ */ +struct media_entity { + struct media_gobj graph_obj; /* must be first field in struct */ + const char *name; + enum media_entity_type obj_type; + u32 function; + unsigned long flags; + + u16 num_pads; + u16 num_links; + u16 num_backlinks; + int internal_idx; + + struct media_pad *pads; + struct list_head links; + + const struct media_entity_operations *ops; + + int use_count; + + union { + struct { + u32 major; + u32 minor; + } dev; + } info; +}; + +/** + * media_entity_for_each_pad - Iterate on all pads in an entity + * @entity: The entity the pads belong to + * @iter: The iterator pad + * + * Iterate on all pads in a media entity. + */ +#define media_entity_for_each_pad(entity, iter) \ + for (iter = (entity)->pads; \ + iter < &(entity)->pads[(entity)->num_pads]; \ + ++iter) + +/** + * struct media_interface - A media interface graph object. + * + * @graph_obj: embedded graph object + * @links: List of links pointing to graph entities + * @type: Type of the interface as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_INTF_T_*``) + * @flags: Interface flags as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_INTF_FL_*``) + * + * .. note:: + * + * Currently, no flags for &media_interface is defined. + */ +struct media_interface { + struct media_gobj graph_obj; + struct list_head links; + u32 type; + u32 flags; +}; + +/** + * struct media_intf_devnode - A media interface via a device node. 
+ * + * @intf: embedded interface object + * @major: Major number of a device node + * @minor: Minor number of a device node + */ +struct media_intf_devnode { + struct media_interface intf; + + /* Should match the fields at media_v2_intf_devnode */ + u32 major; + u32 minor; +}; + +/** + * media_entity_id() - return the media entity graph object id + * + * @entity: pointer to &media_entity + */ +static inline u32 media_entity_id(struct media_entity *entity) +{ + return entity->graph_obj.id; +} + +/** + * media_type() - return the media object type + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +static inline enum media_gobj_type media_type(struct media_gobj *gobj) +{ + return gobj->id >> MEDIA_BITS_PER_ID; +} + +/** + * media_id() - return the media object ID + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +static inline u32 media_id(struct media_gobj *gobj) +{ + return gobj->id & MEDIA_ID_MASK; +} + +/** + * media_gobj_gen_id() - encapsulates type and ID on at the object ID + * + * @type: object type as define at enum &media_gobj_type. + * @local_id: next ID, from struct &media_device.id. + */ +static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id) +{ + u32 id; + + id = type << MEDIA_BITS_PER_ID; + id |= local_id & MEDIA_ID_MASK; + + return id; +} + +/** + * is_media_entity_v4l2_video_device() - Check if the entity is a video_device + * @entity: pointer to entity + * + * Return: %true if the entity is an instance of a video_device object and can + * safely be cast to a struct video_device using the container_of() macro, or + * %false otherwise. 
+ */ +static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity) +{ + return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE; +} + +/** + * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev + * @entity: pointer to entity + * + * Return: %true if the entity is an instance of a &v4l2_subdev object and can + * safely be cast to a struct &v4l2_subdev using the container_of() macro, or + * %false otherwise. + */ +static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) +{ + return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV; +} + +/** + * media_entity_enum_init - Initialise an entity enumeration + * + * @ent_enum: Entity enumeration to be initialised + * @mdev: The related media device + * + * Return: zero on success or a negative error code. + */ +__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev); + +/** + * media_entity_enum_cleanup - Release resources of an entity enumeration + * + * @ent_enum: Entity enumeration to be released + */ +void media_entity_enum_cleanup(struct media_entity_enum *ent_enum); + +/** + * media_entity_enum_zero - Clear the entire enum + * + * @ent_enum: Entity enumeration to be cleared + */ +static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) +{ + bitmap_zero(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_set - Mark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be marked + */ +static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_clear - Unmark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be unmarked + */ +static inline void media_entity_enum_clear(struct 
media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __clear_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test - Test whether the entity is marked + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns %true if the entity was marked. + */ +static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return test_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test_and_set - Test whether the entity is marked, + * and mark it + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns %true if the entity was marked, and mark it before doing so. + */ +static inline bool +media_entity_enum_test_and_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return __test_and_set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_empty - Test whether the entire enum is empty + * + * @ent_enum: Entity enumeration + * + * Return: %true if the entity was empty. + */ +static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum) +{ + return bitmap_empty(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_intersects - Test whether two enums intersect + * + * @ent_enum1: First entity enumeration + * @ent_enum2: Second entity enumeration + * + * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect, + * otherwise %false. 
+ */ +static inline bool media_entity_enum_intersects( + struct media_entity_enum *ent_enum1, + struct media_entity_enum *ent_enum2) +{ + WARN_ON(ent_enum1->idx_max != ent_enum2->idx_max); + + return bitmap_intersects(ent_enum1->bmap, ent_enum2->bmap, + min(ent_enum1->idx_max, ent_enum2->idx_max)); +} + +/** + * gobj_to_entity - returns the struct &media_entity pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_entity(gobj) \ + container_of(gobj, struct media_entity, graph_obj) + +/** + * gobj_to_pad - returns the struct &media_pad pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_pad(gobj) \ + container_of(gobj, struct media_pad, graph_obj) + +/** + * gobj_to_link - returns the struct &media_link pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_link(gobj) \ + container_of(gobj, struct media_link, graph_obj) + +/** + * gobj_to_intf - returns the struct &media_interface pointer from the + * @gobj contained on it. + * + * @gobj: Pointer to the struct &media_gobj graph object + */ +#define gobj_to_intf(gobj) \ + container_of(gobj, struct media_interface, graph_obj) + +/** + * intf_to_devnode - returns the struct media_intf_devnode pointer from the + * @intf contained on it. + * + * @intf: Pointer to struct &media_intf_devnode + */ +#define intf_to_devnode(intf) \ + container_of(intf, struct media_intf_devnode, intf) + +/** + * media_gobj_create - Initialize a graph object + * + * @mdev: Pointer to the &media_device that contains the object + * @type: Type of the object + * @gobj: Pointer to the struct &media_gobj graph object + * + * This routine initializes the embedded struct &media_gobj inside a + * media graph object. It is called automatically if ``media_*_create`` + * function calls are used. 
However, if the object (entity, link, pad, + * interface) is embedded on some other object, this function should be + * called before registering the object at the media controller. + */ +void media_gobj_create(struct media_device *mdev, + enum media_gobj_type type, + struct media_gobj *gobj); + +/** + * media_gobj_destroy - Stop using a graph object on a media device + * + * @gobj: Pointer to the struct &media_gobj graph object + * + * This should be called by all routines like media_device_unregister() + * that remove/destroy media graph objects. + */ +void media_gobj_destroy(struct media_gobj *gobj); + +/** + * media_entity_pads_init() - Initialize the entity pads + * + * @entity: entity where the pads belong + * @num_pads: total number of sink and source pads + * @pads: Array of @num_pads pads. + * + * The pads array is managed by the entity driver and passed to + * media_entity_pads_init() where its pointer will be stored in the + * &media_entity structure. + * + * If no pads are needed, drivers could either directly fill + * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call + * this function that will do the same. + * + * As the number of pads is known in advance, the pads array is not allocated + * dynamically but is managed by the entity driver. Most drivers will embed the + * pads array in a driver-specific structure, avoiding dynamic allocation. + * + * Drivers must set the direction of every pad in the pads array before calling + * media_entity_pads_init(). The function will initialize the other pads fields. + */ +int media_entity_pads_init(struct media_entity *entity, u16 num_pads, + struct media_pad *pads); + +/** + * media_entity_cleanup() - free resources associated with an entity + * + * @entity: entity where the pads belong + * + * This function must be called during the cleanup phase after unregistering + * the entity (currently, it does nothing). 
+ * + * Calling media_entity_cleanup() on a media_entity whose memory has been + * zeroed but that has not been initialized with media_entity_pads_init() is + * valid and is a no-op. + */ +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) +static inline void media_entity_cleanup(struct media_entity *entity) {} +#else +#define media_entity_cleanup(entity) do { } while (false) +#endif + +/** + * media_get_pad_index() - retrieves a pad index from an entity + * + * @entity: entity where the pads belong + * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types + * @sig_type: type of signal of the pad to be searched + * + * This helper function finds the first pad index inside an entity that + * satisfies both @pad_type and @sig_type conditions. + * + * Return: + * + * On success, return the pad number. If the pad was not found or the media + * entity is a NULL pointer, return -EINVAL. + */ +int media_get_pad_index(struct media_entity *entity, u32 pad_type, + enum media_pad_signal_type sig_type); + +/** + * media_create_pad_link() - creates a link between two entities. + * + * @source: pointer to &media_entity of the source pad. + * @source_pad: number of the source pad in the pads array + * @sink: pointer to &media_entity of the sink pad. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_LNK_FL_*``) + * + * Valid values for flags: + * + * %MEDIA_LNK_FL_ENABLED + * Indicates that the link is enabled and can be used to transfer media data. + * When two or more links target a sink pad, only one of them can be + * enabled at a time. + * + * %MEDIA_LNK_FL_IMMUTABLE + * Indicates that the link enabled state can't be modified at runtime. If + * %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be + * set, since an immutable link is always enabled. + * + * .. 
note:: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for both ends. + */ +__must_check int media_create_pad_link(struct media_entity *source, + u16 source_pad, struct media_entity *sink, + u16 sink_pad, u32 flags); + +/** + * media_create_pad_links() - creates a link between two entities. + * + * @mdev: Pointer to the media_device that contains the object + * @source_function: Function of the source entities. Used only if @source is + * NULL. + * @source: pointer to &media_entity of the source pad. If NULL, it will use + * all entities that matches the @source_function. + * @source_pad: number of the source pad in the pads array + * @sink_function: Function of the sink entities. Used only if @sink is NULL. + * @sink: pointer to &media_entity of the sink pad. If NULL, it will use + * all entities that matches the @sink_function. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in include/uapi/linux/media.h. + * @allow_both_undefined: if %true, then both @source and @sink can be NULL. + * In such case, it will create a crossbar between all entities that + * matches @source_function to all entities that matches @sink_function. + * If %false, it will return 0 and won't create any link if both @source + * and @sink are NULL. + * + * Valid values for flags: + * + * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be + * used to transfer media data. If multiple links are created and this + * flag is passed as an argument, only the first created link will have + * this flag. + * + * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't + * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then + * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is + * always enabled. 
+ * + * It is common for some devices to have multiple source and/or sink entities + * of the same type that should be linked. While media_create_pad_link() + * creates link by link, this function is meant to allow 1:n, n:1 and even + * cross-bar (n:n) links. + * + * .. note:: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for the + * entities to be linked. + */ +int media_create_pad_links(const struct media_device *mdev, + const u32 source_function, + struct media_entity *source, + const u16 source_pad, + const u32 sink_function, + struct media_entity *sink, + const u16 sink_pad, + u32 flags, + const bool allow_both_undefined); + +void __media_entity_remove_links(struct media_entity *entity); + +/** + * media_entity_remove_links() - remove all links associated with an entity + * + * @entity: pointer to &media_entity + * + * .. note:: + * + * This is called automatically when an entity is unregistered via + * media_device_unregister_entity(). + */ +void media_entity_remove_links(struct media_entity *entity); + +/** + * __media_entity_setup_link - Configure a media link without locking + * @link: The link being configured + * @flags: Link configuration flags + * + * The bulk of link setup is handled by the two entities connected through the + * link. This function notifies both entities of the link configuration change. + * + * If the link is immutable or if the current and new configuration are + * identical, return immediately. + * + * The user is expected to hold link->source->parent->mutex. If not, + * media_entity_setup_link() should be used instead. 
+ */ +int __media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_setup_link() - changes the link flags properties in runtime + * + * @link: pointer to &media_link + * @flags: the requested new link flags + * + * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag + * to enable/disable a link. Links marked with the + * %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled. + * + * When a link is enabled or disabled, the media framework calls the + * link_setup operation for the two entities at the source and sink of the + * link, in that order. If the second link_setup call fails, another + * link_setup call is made on the first entity to restore the original link + * flags. + * + * Media device drivers can be notified of link setup operations by setting the + * &media_device.link_notify pointer to a callback function. If provided, the + * notification callback will be called before enabling and after disabling + * links. + * + * Entity drivers must implement the link_setup operation if any of their links + * is non-immutable. The operation must either configure the hardware or store + * the configuration information to be applied later. + * + * Link configuration must not have any side effect on other links. If an + * enabled link at a sink pad prevents another link at the same pad from + * being enabled, the link_setup operation must return %-EBUSY and can't + * implicitly disable the first enabled link. + * + * .. note:: + * + * The valid values of the flags for the link is the same as described + * on media_create_pad_link(), for pad to pad links or the same as described + * on media_create_intf_link(), for interface to entity links. + */ +int media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_find_link - Find a link between two pads + * @source: Source pad + * @sink: Sink pad + * + * Return: returns a pointer to the link between the two entities. 
If no + * such link exists, return %NULL. + */ +struct media_link *media_entity_find_link(struct media_pad *source, + struct media_pad *sink); + +/** + * media_pad_remote_pad_first - Find the first pad at the remote end of a link + * @pad: Pad at the local end of the link + * + * Search for a remote pad connected to the given pad by iterating over all + * links originating or terminating at that pad until an enabled link is found. + * + * Return: returns a pointer to the pad at the remote end of the first found + * enabled link, or %NULL if no enabled link has been found. + */ +struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad); + +/** + * media_pad_remote_pad_unique - Find a remote pad connected to a pad + * @pad: The pad + * + * Search for and return a remote pad connected to @pad through an enabled + * link. If multiple (or no) remote pads are found, an error is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time on a given pad. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad); + +/** + * media_entity_remote_pad_unique - Find a remote pad connected to an entity + * @entity: The entity + * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE) + * + * Search for and return a remote pad of @type connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source or sink at a time. 
+ * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad * +media_entity_remote_pad_unique(const struct media_entity *entity, + unsigned int type); + +/** + * media_entity_remote_source_pad_unique - Find a remote source pad connected to + * an entity + * @entity: The entity + * + * Search for and return a remote source pad connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +static inline struct media_pad * +media_entity_remote_source_pad_unique(const struct media_entity *entity) +{ + return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE); +} + +/** + * media_pad_is_streaming - Test if a pad is part of a streaming pipeline + * @pad: The pad + * + * Return: True if the pad is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. + */ +static inline bool media_pad_is_streaming(const struct media_pad *pad) +{ + return pad->pipe; +} + +/** + * media_entity_is_streaming - Test if an entity is part of a streaming pipeline + * @entity: The entity + * + * Return: True if the entity is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. 
+ */ +static inline bool media_entity_is_streaming(const struct media_entity *entity) +{ + struct media_pad *pad; + + media_entity_for_each_pad(entity, pad) { + if (media_pad_is_streaming(pad)) + return true; + } + + return false; +} + +/** + * media_entity_pipeline - Get the media pipeline an entity is part of + * @entity: The entity + * + * DEPRECATED: use media_pad_pipeline() instead. + * + * This function returns the media pipeline that an entity has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * In general, entities can be part of multiple pipelines, when carrying + * multiple streams (either on different pads, or on the same pad using + * multiplexed streams). This function is to be used only for entities that + * do not support multiple pipelines. + * + * Return: The media_pipeline the entity is part of, or NULL if the entity is + * not part of any pipeline. + */ +struct media_pipeline *media_entity_pipeline(struct media_entity *entity); + +/** + * media_pad_pipeline - Get the media pipeline a pad is part of + * @pad: The pad + * + * This function returns the media pipeline that a pad has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * Return: The media_pipeline the pad is part of, or NULL if the pad is + * not part of any pipeline. + */ +struct media_pipeline *media_pad_pipeline(struct media_pad *pad); + +/** + * media_entity_get_fwnode_pad - Get pad number from fwnode + * + * @entity: The entity + * @fwnode: Pointer to the fwnode_handle which should be used to find the pad + * @direction_flags: Expected direction of the pad, as defined in + * :ref:`include/uapi/linux/media.h ` + * (seek for ``MEDIA_PAD_FL_*``) + * + * This function can be used to resolve the media pad number from + * a fwnode. 
This is useful for devices which use more complex + * mappings of media pads. + * + * If the entity does not implement the get_fwnode_pad() operation + * then this function searches the entity for the first pad that + * matches the @direction_flags. + * + * Return: returns the pad number on success or a negative error code. + */ +int media_entity_get_fwnode_pad(struct media_entity *entity, + const struct fwnode_handle *fwnode, + unsigned long direction_flags); + +/** + * media_graph_walk_init - Allocate resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + * @mdev: Pointer to the &media_device that contains the object + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * The caller is required to hold the media_device graph_mutex during the graph + * walk until the graph state is released. + * + * Returns zero on success or a negative error code otherwise. + */ +__must_check int media_graph_walk_init( + struct media_graph *graph, struct media_device *mdev); + +/** + * media_graph_walk_cleanup - Release resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + */ +void media_graph_walk_cleanup(struct media_graph *graph); + +/** + * media_graph_walk_start - Start walking the media graph at a + * given entity + * + * @graph: Media graph structure that will be used to walk the graph + * @entity: Starting entity + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * Before using this function, media_graph_walk_init() must be + * used to allocate resources used for walking the graph. This + * function initializes the graph traversal structure to walk the + * entities graph starting at the given entity. The traversal + * structure must not be modified by the caller during graph + * traversal. 
After the graph walk, the resources must be released + * using media_graph_walk_cleanup(). + */ +void media_graph_walk_start(struct media_graph *graph, + struct media_entity *entity); + +/** + * media_graph_walk_next - Get the next entity in the graph + * @graph: Media graph structure + * + * This function is deprecated, use media_pipeline_for_each_pad() instead. + * + * Perform a depth-first traversal of the given media entities graph. + * + * The graph structure must have been previously initialized with a call to + * media_graph_walk_start(). + * + * Return: returns the next entity in the graph or %NULL if the whole graph + * have been traversed. + */ +struct media_entity *media_graph_walk_next(struct media_graph *graph); + +/** + * media_pipeline_start - Mark a pipeline as streaming + * @origin: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. + * + * Mark all pads connected to pad @origin through enabled links, either + * directly or indirectly, as streaming. The given pipeline object is assigned + * to every pad in the pipeline and stored in the media_pad pipe field. + * + * Calls to this function can be nested, in which case the same number of + * media_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * media_pipeline_start(). + */ +__must_check int media_pipeline_start(struct media_pad *origin, + struct media_pipeline *pipe); +/** + * __media_pipeline_start - Mark a pipeline as streaming + * + * @origin: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. 
+ * + * ..note:: This is the non-locking version of media_pipeline_start() + */ +__must_check int __media_pipeline_start(struct media_pad *origin, + struct media_pipeline *pipe); + +/** + * media_pipeline_stop - Mark a pipeline as not streaming + * @pad: Starting pad + * + * Mark all pads connected to a given pad through enabled links, either + * directly or indirectly, as not streaming. The media_pad pipe field is + * reset to %NULL. + * + * If multiple calls to media_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + */ +void media_pipeline_stop(struct media_pad *pad); + +/** + * __media_pipeline_stop - Mark a pipeline as not streaming + * + * @pad: Starting pad + * + * .. note:: This is the non-locking version of media_pipeline_stop() + */ +void __media_pipeline_stop(struct media_pad *pad); + +struct media_pad * +__media_pipeline_pad_iter_next(struct media_pipeline *pipe, + struct media_pipeline_pad_iter *iter, + struct media_pad *pad); + +/** + * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline + * @pipe: The pipeline + * @iter: The iterator (struct media_pipeline_pad_iter) + * @pad: The iterator pad + * + * Iterate on all pads in a media pipeline. This is only valid after the + * pipeline has been built with media_pipeline_start() and before it gets + * destroyed with media_pipeline_stop(). + */ +#define media_pipeline_for_each_pad(pipe, iter, pad) \ + for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \ + pad != NULL; \ + pad = __media_pipeline_pad_iter_next((pipe), iter, pad)) + +/** + * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator + * @pipe: The pipeline + * @iter: The iterator + * + * This function must be called to initialize the iterator before using it in a + * media_pipeline_for_each_entity() loop. 
The iterator must be destroyed by a + * call to media_pipeline_entity_iter_cleanup after the loop (including in code + * paths that break from the loop). + * + * The same iterator can be used in multiple consecutive loops without being + * destroyed and reinitialized. + * + * Return: 0 on success or a negative error code otherwise. + */ +int media_pipeline_entity_iter_init(struct media_pipeline *pipe, + struct media_pipeline_entity_iter *iter); + +/** + * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator + * @iter: The iterator + * + * This function must be called to destroy iterators initialized with + * media_pipeline_entity_iter_init(). + */ +void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter); + +struct media_entity * +__media_pipeline_entity_iter_next(struct media_pipeline *pipe, + struct media_pipeline_entity_iter *iter, + struct media_entity *entity); + +/** + * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline + * @pipe: The pipeline + * @iter: The iterator (struct media_pipeline_entity_iter) + * @entity: The iterator entity + * + * Iterate on all entities in a media pipeline. This is only valid after the + * pipeline has been built with media_pipeline_start() and before it gets + * destroyed with media_pipeline_stop(). The iterator must be initialized with + * media_pipeline_entity_iter_init() before iteration, and destroyed with + * media_pipeline_entity_iter_cleanup() after (including in code paths that + * break from the loop). 
+ */ +#define media_pipeline_for_each_entity(pipe, iter, entity) \ + for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \ + entity != NULL; \ + entity = __media_pipeline_entity_iter_next((pipe), iter, entity)) + +/** + * media_pipeline_alloc_start - Mark a pipeline as streaming + * @pad: Starting pad + * + * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead + * of working on a given pipeline the function will use an existing pipeline if + * the pad is already part of a pipeline, or allocate a new pipeline. + * + * Calls to media_pipeline_alloc_start() must be matched with + * media_pipeline_stop(). + */ +__must_check int media_pipeline_alloc_start(struct media_pad *pad); + +/** + * media_devnode_create() - creates and initializes a device node interface + * + * @mdev: pointer to struct &media_device + * @type: type of the interface, as given by + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_INTF_T_*``) macros. + * @flags: Interface flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_INTF_FL_*``) + * @major: Device node major number. + * @minor: Device node minor number. + * + * Return: if succeeded, returns a pointer to the newly allocated + * &media_intf_devnode pointer. + * + * .. note:: + * + * Currently, no flags for &media_interface is defined. + */ +struct media_intf_devnode * +__must_check media_devnode_create(struct media_device *mdev, + u32 type, u32 flags, + u32 major, u32 minor); +/** + * media_devnode_remove() - removes a device node interface + * + * @devnode: pointer to &media_intf_devnode to be freed. + * + * When a device node interface is removed, all links to it are automatically + * removed. 
+ */ +void media_devnode_remove(struct media_intf_devnode *devnode); + +/** + * media_create_intf_link() - creates a link between an entity and an interface + * + * @entity: pointer to %media_entity + * @intf: pointer to %media_interface + * @flags: Link flags, as defined in + * :ref:`include/uapi/linux/media.h ` + * ( seek for ``MEDIA_LNK_FL_*``) + * + * + * Valid values for flags: + * + * %MEDIA_LNK_FL_ENABLED + * Indicates that the interface is connected to the entity hardware. + * That's the default value for interfaces. An interface may be disabled if + * the hardware is busy due to the usage of some other interface that it is + * currently controlling the hardware. + * + * A typical example is an hybrid TV device that handle only one type of + * stream on a given time. So, when the digital TV is streaming, + * the V4L2 interfaces won't be enabled, as such device is not able to + * also stream analog TV or radio. + * + * .. note:: + * + * Before calling this function, media_devnode_create() should be called for + * the interface and media_device_register_entity() should be called for the + * interface that will be part of the link. + */ +struct media_link * +__must_check media_create_intf_link(struct media_entity *entity, + struct media_interface *intf, + u32 flags); +/** + * __media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * .. note:: This is an unlocked version of media_remove_intf_link() + */ +void __media_remove_intf_link(struct media_link *link); + +/** + * media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * .. note:: Prefer to use this one, instead of __media_remove_intf_link() + */ +void media_remove_intf_link(struct media_link *link); + +/** + * __media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * .. note:: This is an unlocked version of media_remove_intf_links(). 
+ */ +void __media_remove_intf_links(struct media_interface *intf); + +/** + * media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * .. note:: + * + * #) This is called automatically when an entity is unregistered via + * media_device_register_entity() and by media_devnode_remove(). + * + * #) Prefer to use this one, instead of __media_remove_intf_links(). + */ +void media_remove_intf_links(struct media_interface *intf); + +/** + * media_entity_call - Calls a struct media_entity_operations operation on + * an entity + * + * @entity: entity where the @operation will be called + * @operation: type of the operation. Should be the name of a member of + * struct &media_entity_operations. + * + * This helper function will check if @operation is not %NULL. On such case, + * it will issue a call to @operation\(@entity, @args\). + */ + +#define media_entity_call(entity, operation, args...) \ + (((entity)->ops && (entity)->ops->operation) ? \ + (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD) + +/** + * media_create_ancillary_link() - create an ancillary link between two + * instances of &media_entity + * + * @primary: pointer to the primary &media_entity + * @ancillary: pointer to the ancillary &media_entity + * + * Create an ancillary link between two entities, indicating that they + * represent two connected pieces of hardware that form a single logical unit. + * A typical example is a camera lens controller being linked to the sensor that + * it is supporting. + * + * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for + * the new link. 
+ */ +struct media_link * +media_create_ancillary_link(struct media_entity *primary, + struct media_entity *ancillary); + +/** + * __media_entity_next_link() - Iterate through a &media_entity's links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags + * + * Return the next link against an entity matching a specific link type. This + * allows iteration through an entity's links whilst guaranteeing all of the + * returned links are of the given type. + */ +struct media_link *__media_entity_next_link(struct media_entity *entity, + struct media_link *link, + unsigned long link_type); + +/** + * for_each_media_entity_data_link() - Iterate through an entity's data links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * + * Iterate over a &media_entity's data links + */ +#define for_each_media_entity_data_link(entity, link) \ + for (link = __media_entity_next_link(entity, NULL, \ + MEDIA_LNK_FL_DATA_LINK); \ + link; \ + link = __media_entity_next_link(entity, link, \ + MEDIA_LNK_FL_DATA_LINK)) + +#endif diff --git a/6.18.0/include-overrides/media/media-request.h b/6.18.0/include-overrides/media/media-request.h new file mode 100644 index 0000000..bb500b2 --- /dev/null +++ b/6.18.0/include-overrides/media/media-request.h @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Media device request objects + * + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ * Copyright (C) 2018 Intel Corporation + * + * Author: Hans Verkuil + * Author: Sakari Ailus + */ + +#ifndef MEDIA_REQUEST_H +#define MEDIA_REQUEST_H + +#include +#include +#include +#include + +#include + +/** + * enum media_request_state - media request state + * + * @MEDIA_REQUEST_STATE_IDLE: Idle + * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes + * allowed + * @MEDIA_REQUEST_STATE_QUEUED: Queued + * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done + * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited + * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e. + * request objects are being added, + * modified or removed + * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used + * internally for sanity check purposes + */ +enum media_request_state { + MEDIA_REQUEST_STATE_IDLE, + MEDIA_REQUEST_STATE_VALIDATING, + MEDIA_REQUEST_STATE_QUEUED, + MEDIA_REQUEST_STATE_COMPLETE, + MEDIA_REQUEST_STATE_CLEANING, + MEDIA_REQUEST_STATE_UPDATING, + NR_OF_MEDIA_REQUEST_STATE, +}; + +struct media_request_object; + +/** + * struct media_request - Media device request + * @mdev: Media device this request belongs to + * @kref: Reference count + * @debug_str: Prefix for debug messages (process name:fd) + * @state: The state of the request + * @updating_count: count the number of request updates that are in progress + * @access_count: count the number of request accesses that are in progress + * @objects: List of @struct media_request_object request objects + * @num_incomplete_objects: The number of incomplete objects in the request + * @poll_wait: Wait queue for poll + * @lock: Serializes access to this struct + */ +struct media_request { + struct media_device *mdev; + struct kref kref; + char debug_str[TASK_COMM_LEN + 11]; + enum media_request_state state; + unsigned int updating_count; + unsigned int access_count; + struct list_head objects; + unsigned int num_incomplete_objects; + 
wait_queue_head_t poll_wait; + spinlock_t lock; +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * media_request_lock_for_access - Lock the request to access its objects + * + * @req: The media request + * + * Use before accessing a completed request. A reference to the request must + * be held during the access. This usually takes place automatically through + * a file handle. Use @media_request_unlock_for_access when done. + */ +static inline int __must_check +media_request_lock_for_access(struct media_request *req) +{ + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { + req->access_count++; + ret = 0; + } + spin_unlock_irqrestore(&req->lock, flags); + + return ret; +} + +/** + * media_request_unlock_for_access - Unlock a request previously locked for + * access + * + * @req: The media request + * + * Unlock a request that has previously been locked using + * @media_request_lock_for_access. + */ +static inline void media_request_unlock_for_access(struct media_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + if (!WARN_ON(!req->access_count)) + req->access_count--; + spin_unlock_irqrestore(&req->lock, flags); +} + +/** + * media_request_lock_for_update - Lock the request for updating its objects + * + * @req: The media request + * + * Use before updating a request, i.e. adding, modifying or removing a request + * object in it. A reference to the request must be held during the update. This + * usually takes place automatically through a file handle. Use + * @media_request_unlock_for_update when done. 
+ */ +static inline int __must_check +media_request_lock_for_update(struct media_request *req) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_IDLE || + req->state == MEDIA_REQUEST_STATE_UPDATING) { + req->state = MEDIA_REQUEST_STATE_UPDATING; + req->updating_count++; + } else { + ret = -EBUSY; + } + spin_unlock_irqrestore(&req->lock, flags); + + return ret; +} + +/** + * media_request_unlock_for_update - Unlock a request previously locked for + * update + * + * @req: The media request + * + * Unlock a request that has previously been locked using + * @media_request_lock_for_update. + */ +static inline void media_request_unlock_for_update(struct media_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + WARN_ON(req->updating_count <= 0); + if (!--req->updating_count) + req->state = MEDIA_REQUEST_STATE_IDLE; + spin_unlock_irqrestore(&req->lock, flags); +} + +/** + * media_request_get - Get the media request + * + * @req: The media request + * + * Get the media request. + */ +static inline void media_request_get(struct media_request *req) +{ + kref_get(&req->kref); +} + +/** + * media_request_put - Put the media request + * + * @req: The media request + * + * Put the media request. The media request will be released + * when the refcount reaches 0. + */ +void media_request_put(struct media_request *req); + +/** + * media_request_get_by_fd - Get a media request by fd + * + * @mdev: Media device this request belongs to + * @request_fd: The file descriptor of the request + * + * Get the request represented by @request_fd that is owned + * by the media device. + * + * Return a -EBADR error pointer if requests are not supported + * by this driver. Return -EINVAL if the request was not found. + * Return the pointer to the request if found: the caller will + * have to call @media_request_put when it finished using the + * request. 
 + */ +struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd); + +/** + * media_request_alloc - Allocate the media request + * + * @mdev: Media device this request belongs to + * @alloc_fd: Store the request's file descriptor in this int + * + * Allocate the media request and put the fd in @alloc_fd. + */ +int media_request_alloc(struct media_device *mdev, + int *alloc_fd); + +#else + +static inline void media_request_get(struct media_request *req) +{ +} + +static inline void media_request_put(struct media_request *req) +{ +} + +static inline struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd) +{ + return ERR_PTR(-EBADR); +} + +#endif + +/** + * struct media_request_object_ops - Media request object operations + * @prepare: Validate and prepare the request object, optional. + * @unprepare: Unprepare the request object, optional. + * @queue: Queue the request object, optional. + * @unbind: Unbind the request object, optional. + * @release: Release the request object, required. + */ +struct media_request_object_ops { + int (*prepare)(struct media_request_object *object); + void (*unprepare)(struct media_request_object *object); + void (*queue)(struct media_request_object *object); + void (*unbind)(struct media_request_object *object); + void (*release)(struct media_request_object *object); +}; + +/** + * struct media_request_object - An opaque object that belongs to a media + * request + * + * @ops: object's operations + * @priv: object's priv pointer + * @req: the request this object belongs to (can be NULL) + * @list: List entry of the object for @struct media_request + * @kref: Reference count of the object, acquire before releasing req->lock + * @completed: If true, then this object was completed. + * + * An object related to the request. This struct is always embedded in + * another struct that contains the actual data for this request object. 
+ */ +struct media_request_object { + const struct media_request_object_ops *ops; + void *priv; + struct media_request *req; + struct list_head list; + struct kref kref; + bool completed; +}; + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * media_request_object_get - Get a media request object + * + * @obj: The object + * + * Get a media request object. + */ +static inline void media_request_object_get(struct media_request_object *obj) +{ + kref_get(&obj->kref); +} + +/** + * media_request_object_put - Put a media request object + * + * @obj: The object + * + * Put a media request object. Once all references are gone, the + * object's memory is released. + */ +void media_request_object_put(struct media_request_object *obj); + +/** + * media_request_object_find - Find an object in a request + * + * @req: The media request + * @ops: Find an object with this ops value + * @priv: Find an object with this priv value + * + * Both @ops and @priv must be non-NULL. + * + * Returns the object pointer or NULL if not found. The caller must + * call media_request_object_put() once it finished using the object. + * + * Since this function needs to walk the list of objects it takes + * the @req->lock spin lock to make this safe. + */ +struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv); + +/** + * media_request_object_init - Initialise a media request object + * + * @obj: The object + * + * Initialise a media request object. The object will be released using the + * release callback of the ops once it has no references (this function + * initialises references to one). 
+ */ +void media_request_object_init(struct media_request_object *obj); + +/** + * media_request_object_bind - Bind a media request object to a request + * + * @req: The media request + * @ops: The object ops for this object + * @priv: A driver-specific priv pointer associated with this object + * @is_buffer: Set to true if the object a buffer object. + * @obj: The object + * + * Bind this object to the request and set the ops and priv values of + * the object so it can be found later with media_request_object_find(). + * + * Every bound object must be unbound or completed by the kernel at some + * point in time, otherwise the request will never complete. When the + * request is released all completed objects will be unbound by the + * request core code. + * + * Buffer objects will be added to the end of the request's object + * list, non-buffer objects will be added to the front of the list. + * This ensures that all buffer objects are at the end of the list + * and that all non-buffer objects that they depend on are processed + * first. + */ +int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj); + +/** + * media_request_object_unbind - Unbind a media request object + * + * @obj: The object + * + * Unbind the media request object from the request. + */ +void media_request_object_unbind(struct media_request_object *obj); + +/** + * media_request_object_complete - Mark the media request object as complete + * + * @obj: The object + * + * Mark the media request object as complete. Only bound objects can + * be completed. 
+ */ +void media_request_object_complete(struct media_request_object *obj); + +#else + +static inline int __must_check +media_request_lock_for_access(struct media_request *req) +{ + return -EINVAL; +} + +static inline void media_request_unlock_for_access(struct media_request *req) +{ +} + +static inline int __must_check +media_request_lock_for_update(struct media_request *req) +{ + return -EINVAL; +} + +static inline void media_request_unlock_for_update(struct media_request *req) +{ +} + +static inline void media_request_object_get(struct media_request_object *obj) +{ +} + +static inline void media_request_object_put(struct media_request_object *obj) +{ +} + +static inline struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv) +{ + return NULL; +} + +static inline void media_request_object_init(struct media_request_object *obj) +{ + obj->ops = NULL; + obj->req = NULL; +} + +static inline int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj) +{ + return 0; +} + +static inline void media_request_object_unbind(struct media_request_object *obj) +{ +} + +static inline void media_request_object_complete(struct media_request_object *obj) +{ +} + +#endif + +#endif diff --git a/6.18.0/include-overrides/media/mipi-csi2.h b/6.18.0/include-overrides/media/mipi-csi2.h new file mode 100644 index 0000000..40fc026 --- /dev/null +++ b/6.18.0/include-overrides/media/mipi-csi2.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MIPI CSI-2 Data Types + * + * Copyright (C) 2022 Laurent Pinchart + */ + +#ifndef _MEDIA_MIPI_CSI2_H +#define _MEDIA_MIPI_CSI2_H + +/* Short packet data types */ +#define MIPI_CSI2_DT_FS 0x00 +#define MIPI_CSI2_DT_FE 0x01 +#define MIPI_CSI2_DT_LS 0x02 +#define MIPI_CSI2_DT_LE 0x03 +#define MIPI_CSI2_DT_GENERIC_SHORT(n) (0x08 + (n)) /* 0..7 
*/ + +/* Long packet data types */ +#define MIPI_CSI2_DT_NULL 0x10 +#define MIPI_CSI2_DT_BLANKING 0x11 +#define MIPI_CSI2_DT_EMBEDDED_8B 0x12 +#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */ +#define MIPI_CSI2_DT_YUV420_8B 0x18 +#define MIPI_CSI2_DT_YUV420_10B 0x19 +#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a +#define MIPI_CSI2_DT_YUV420_8B_CS 0x1c +#define MIPI_CSI2_DT_YUV420_10B_CS 0x1d +#define MIPI_CSI2_DT_YUV422_8B 0x1e +#define MIPI_CSI2_DT_YUV422_10B 0x1f +#define MIPI_CSI2_DT_RGB444 0x20 +#define MIPI_CSI2_DT_RGB555 0x21 +#define MIPI_CSI2_DT_RGB565 0x22 +#define MIPI_CSI2_DT_RGB666 0x23 +#define MIPI_CSI2_DT_RGB888 0x24 +#define MIPI_CSI2_DT_RAW28 0x26 +#define MIPI_CSI2_DT_RAW24 0x27 +#define MIPI_CSI2_DT_RAW6 0x28 +#define MIPI_CSI2_DT_RAW7 0x29 +#define MIPI_CSI2_DT_RAW8 0x2a +#define MIPI_CSI2_DT_RAW10 0x2b +#define MIPI_CSI2_DT_RAW12 0x2c +#define MIPI_CSI2_DT_RAW14 0x2d +#define MIPI_CSI2_DT_RAW16 0x2e +#define MIPI_CSI2_DT_RAW20 0x2f +#define MIPI_CSI2_DT_USER_DEFINED(n) (0x30 + (n)) /* 0..7 */ + +#endif /* _MEDIA_MIPI_CSI2_H */ diff --git a/6.18.0/include-overrides/media/rc-core.h b/6.18.0/include-overrides/media/rc-core.h new file mode 100644 index 0000000..35c7a05 --- /dev/null +++ b/6.18.0/include-overrides/media/rc-core.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Remote Controller core header + * + * Copyright (C) 2009-2010 by Mauro Carvalho Chehab + */ + +#ifndef _RC_CORE +#define _RC_CORE + +#include +#include +#include +#include +#include +#include + +/** + * enum rc_driver_type - type of the RC driver. + * + * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode. + * @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences. + * It needs a Infra-Red pulse/space decoder + * @RC_DRIVER_IR_RAW_TX: Device transmitter only, + * driver requires pulse/space data sequence. 
+ */ +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW, + RC_DRIVER_IR_RAW_TX, +}; + +/** + * struct rc_scancode_filter - Filter scan codes. + * @data: Scancode data to match. + * @mask: Mask of bits of scancode to compare. + */ +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +/** + * enum rc_filter_type - Filter type constants. + * @RC_FILTER_NORMAL: Filter for normal operation. + * @RC_FILTER_WAKEUP: Filter for waking from suspend. + * @RC_FILTER_MAX: Number of filter types. + */ +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP, + + RC_FILTER_MAX +}; + +/** + * struct lirc_fh - represents an open lirc file + * @list: list of open file handles + * @rc: rcdev for this lirc chardev + * @rawir: queue for incoming raw IR + * @scancodes: queue for incoming decoded scancodes + * @wait_poll: poll struct for lirc device + * @carrier_low: when setting the carrier range, first the low end must be + * set with an ioctl and then the high end with another ioctl + * @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or + * LIRC_MODE_PULSE + * @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or + * LIRC_MODE_MODE2 + */ +struct lirc_fh { + struct list_head list; + struct rc_dev *rc; + DECLARE_KFIFO_PTR(rawir, unsigned int); + DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode); + wait_queue_head_t wait_poll; + u32 carrier_low; + u8 send_mode; + u8 rec_mode; +}; + +/** + * struct rc_dev - represents a remote control device + * @dev: driver model's view of this device + * @managed_alloc: devm_rc_allocate_device was used to create rc_dev + * @registered: set to true by rc_register_device(), false by + * rc_unregister_device + * @idle: used to keep track of RX state + * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed + * wakeup protocols is the set of all raw encoders + * @minor: unique minor remote control device number + * @sysfs_groups: sysfs attribute groups + * @device_name: name of 
the rc child device + * @input_phys: physical path to the input child device + * @input_id: id of the input child device (struct input_id) + * @driver_name: name of the hardware driver which registered this device + * @map_name: name of the default keymap + * @rc_map: current scan/key table + * @lock: used to ensure we've filled in all protocol details before + * anyone can call show_protocols or store_protocols + * @raw: additional data for raw pulse/space devices + * @input_dev: the input child device used to communicate events to userspace + * @driver_type: specifies if protocol decoding is done in hardware or software + * @users: number of current users of the device + * @allowed_protocols: bitmask with the supported RC_PROTO_BIT_* protocols + * @enabled_protocols: bitmask with the enabled RC_PROTO_BIT_* protocols + * @allowed_wakeup_protocols: bitmask with the supported RC_PROTO_BIT_* wakeup + * protocols + * @wakeup_protocol: the enabled RC_PROTO_* wakeup protocol or + * RC_PROTO_UNKNOWN if disabled. + * @scancode_filter: scancode filter + * @scancode_wakeup_filter: scancode wakeup filters + * @scancode_mask: some hardware decoders are not capable of providing the full + * scancode to the application. As this is a hardware limit, we can't do + * anything with it. Yet, as the same keycode table can be used with other + * devices, a mask is provided to allow its usage. Drivers should generally + * leave this field in blank + * @priv: driver-specific data + * @keylock: protects the remaining members of the struct + * @keypressed: whether a key is currently pressed + * @last_toggle: toggle value of last command + * @last_keycode: keycode of last keypress + * @last_protocol: protocol of last keypress + * @last_scancode: scancode of last keypress + * @keyup_jiffies: time (in jiffies) when the current keypress should be released + * @timer_keyup: timer for releasing a keypress + * @timer_repeat: timer for autorepeat events. 
This is needed for CEC, which + * has non-standard repeats. + * @timeout: optional time after which device stops sending data + * @min_timeout: minimum timeout supported by device + * @max_timeout: maximum timeout supported by device + * @rx_resolution : resolution (in us) of input sampler + * @lirc_dev: lirc device + * @lirc_cdev: lirc char cdev + * @gap_start: start time for gap after timeout if non-zero + * @lirc_fh_lock: protects lirc_fh list + * @lirc_fh: list of open files + * @change_protocol: allow changing the protocol used on hardware decoders + * @open: callback to allow drivers to enable polling/irq when IR input device + * is opened. + * @close: callback to allow drivers to disable polling/irq when IR input device + * is opened. + * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs) + * @s_tx_carrier: set transmit carrier frequency + * @s_tx_duty_cycle: set transmit duty cycle (0% - 100%) + * @s_rx_carrier_range: inform driver about carrier it is expected to handle + * @tx_ir: transmit IR + * @s_idle: enable/disable hardware idle mode, upon which, + * device doesn't interrupt host until it sees IR pulses + * @s_wideband_receiver: enable wide band receiver used for learning + * @s_carrier_report: enable carrier reports + * @s_filter: set the scancode filter + * @s_wakeup_filter: set the wakeup scancode filter. If the mask is zero + * then wakeup should be disabled. wakeup_protocol will be set to + * a valid protocol if mask is nonzero. 
+ * @s_timeout: set hardware timeout in us + */ +struct rc_dev { + struct device dev; + bool managed_alloc; + bool registered; + bool idle; + bool encode_wakeup; + unsigned int minor; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + u32 users; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + void *priv; + spinlock_t keylock; + bool keypressed; + u8 last_toggle; + u32 last_keycode; + enum rc_proto last_protocol; + u64 last_scancode; + unsigned long keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; +#ifdef CONFIG_LIRC + struct device lirc_dev; + struct cdev lirc_cdev; + ktime_t gap_start; + spinlock_t lirc_fh_lock; + struct list_head lirc_fh; +#endif + int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); + int (*open)(struct rc_dev *dev); + void (*close)(struct rc_dev *dev); + int (*s_tx_mask)(struct rc_dev *dev, u32 mask); + int (*s_tx_carrier)(struct rc_dev *dev, u32 carrier); + int (*s_tx_duty_cycle)(struct rc_dev *dev, u32 duty_cycle); + int (*s_rx_carrier_range)(struct rc_dev *dev, u32 min, u32 max); + int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n); + void (*s_idle)(struct rc_dev *dev, bool enable); + int (*s_wideband_receiver)(struct rc_dev *dev, int enable); + int (*s_carrier_report) (struct rc_dev *dev, int enable); + int (*s_filter)(struct rc_dev *dev, + struct rc_scancode_filter *filter); + int (*s_wakeup_filter)(struct rc_dev *dev, + struct rc_scancode_filter *filter); + int 
(*s_timeout)(struct rc_dev *dev, + unsigned int timeout); +}; + +#define to_rc_dev(d) container_of(d, struct rc_dev, dev) + +/* + * From rc-main.c + * Those functions can be used on any type of Remote Controller. They + * basically create an input_dev and properly report the device as a + * Remote Controller, at sys/class/rc. + */ + +/** + * rc_allocate_device - Allocates a RC device + * + * @rc_driver_type: specifies the type of the RC output to be allocated + * returns a pointer to struct rc_dev. + */ +struct rc_dev *rc_allocate_device(enum rc_driver_type); + +/** + * devm_rc_allocate_device - Managed RC device allocation + * + * @dev: pointer to struct device + * @rc_driver_type: specifies the type of the RC output to be allocated + * returns a pointer to struct rc_dev. + */ +struct rc_dev *devm_rc_allocate_device(struct device *dev, enum rc_driver_type); + +/** + * rc_free_device - Frees a RC device + * + * @dev: pointer to struct rc_dev. + */ +void rc_free_device(struct rc_dev *dev); + +/** + * rc_register_device - Registers a RC device + * + * @dev: pointer to struct rc_dev. + */ +int rc_register_device(struct rc_dev *dev); + +/** + * devm_rc_register_device - Managed registering of a RC device + * + * @parent: pointer to struct device. + * @dev: pointer to struct rc_dev. + */ +int devm_rc_register_device(struct device *parent, struct rc_dev *dev); + +/** + * rc_unregister_device - Unregisters a RC device + * + * @dev: pointer to struct rc_dev. + */ +void rc_unregister_device(struct rc_dev *dev); + +void rc_repeat(struct rc_dev *dev); +void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, + u8 toggle); +void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, + u64 scancode, u8 toggle); +void rc_keyup(struct rc_dev *dev); +u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode); + +/* + * From rc-raw.c + * The Raw interface is specific to InfraRed. It may be a good idea to + * split it later into a separate header. 
+ */ +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + + unsigned pulse:1; + unsigned overflow:1; + unsigned timeout:1; + unsigned carrier_report:1; +}; + +#define US_TO_NS(usec) ((usec) * 1000) +#define MS_TO_US(msec) ((msec) * 1000) +#define IR_MAX_DURATION MS_TO_US(500) +#define IR_DEFAULT_TIMEOUT MS_TO_US(125) +#define IR_MAX_TIMEOUT LIRC_VALUE_MASK + +void ir_raw_event_handle(struct rc_dev *dev); +int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); +int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse); +int ir_raw_event_store_with_filter(struct rc_dev *dev, + struct ir_raw_event *ev); +int ir_raw_event_store_with_timeout(struct rc_dev *dev, + struct ir_raw_event *ev); +void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); +int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, + struct ir_raw_event *events, unsigned int max); +int ir_raw_encode_carrier(enum rc_proto protocol); + +static inline void ir_raw_event_overflow(struct rc_dev *dev) +{ + ir_raw_event_store(dev, &((struct ir_raw_event) { .overflow = true })); + dev->idle = true; + ir_raw_event_handle(dev); +} + +/* extract mask bits out of data and pack them into the result */ +static inline u32 ir_extract_bits(u32 data, u32 mask) +{ + u32 vbit = 1, value = 0; + + do { + if (mask & 1) { + if (data & 1) + value |= vbit; + vbit <<= 1; + } + data >>= 1; + } while (mask >>= 1); + + return value; +} + +/* Get NEC scancode and protocol type from address and command bytes */ +static inline u32 ir_nec_bytes_to_scancode(u8 address, u8 not_address, + u8 command, u8 not_command, + enum rc_proto *protocol) +{ + u32 scancode; + + if ((command ^ not_command) != 0xff) { + /* NEC transport, but modified protocol, used by at + * least Apple and TiVo remotes + */ + scancode = not_address << 24 | + address << 16 | + not_command << 8 | + command; + *protocol = RC_PROTO_NEC32; + } else if ((address ^ not_address) != 0xff) { + /* Extended NEC */ + 
scancode = address << 16 | + not_address << 8 | + command; + *protocol = RC_PROTO_NECX; + } else { + /* Normal NEC */ + scancode = address << 8 | command; + *protocol = RC_PROTO_NEC; + } + + return scancode; +} + +#endif /* _RC_CORE */ diff --git a/6.18.0/include-overrides/media/rc-map.h b/6.18.0/include-overrides/media/rc-map.h new file mode 100644 index 0000000..d90e461 --- /dev/null +++ b/6.18.0/include-overrides/media/rc-map.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * rc-map.h - define RC map names used by RC drivers + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + */ + +#ifndef _MEDIA_RC_MAP_H +#define _MEDIA_RC_MAP_H + +#include +#include + +#define RC_PROTO_BIT_NONE 0ULL +#define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN) +#define RC_PROTO_BIT_OTHER BIT_ULL(RC_PROTO_OTHER) +#define RC_PROTO_BIT_RC5 BIT_ULL(RC_PROTO_RC5) +#define RC_PROTO_BIT_RC5X_20 BIT_ULL(RC_PROTO_RC5X_20) +#define RC_PROTO_BIT_RC5_SZ BIT_ULL(RC_PROTO_RC5_SZ) +#define RC_PROTO_BIT_JVC BIT_ULL(RC_PROTO_JVC) +#define RC_PROTO_BIT_SONY12 BIT_ULL(RC_PROTO_SONY12) +#define RC_PROTO_BIT_SONY15 BIT_ULL(RC_PROTO_SONY15) +#define RC_PROTO_BIT_SONY20 BIT_ULL(RC_PROTO_SONY20) +#define RC_PROTO_BIT_NEC BIT_ULL(RC_PROTO_NEC) +#define RC_PROTO_BIT_NECX BIT_ULL(RC_PROTO_NECX) +#define RC_PROTO_BIT_NEC32 BIT_ULL(RC_PROTO_NEC32) +#define RC_PROTO_BIT_SANYO BIT_ULL(RC_PROTO_SANYO) +#define RC_PROTO_BIT_MCIR2_KBD BIT_ULL(RC_PROTO_MCIR2_KBD) +#define RC_PROTO_BIT_MCIR2_MSE BIT_ULL(RC_PROTO_MCIR2_MSE) +#define RC_PROTO_BIT_RC6_0 BIT_ULL(RC_PROTO_RC6_0) +#define RC_PROTO_BIT_RC6_6A_20 BIT_ULL(RC_PROTO_RC6_6A_20) +#define RC_PROTO_BIT_RC6_6A_24 BIT_ULL(RC_PROTO_RC6_6A_24) +#define RC_PROTO_BIT_RC6_6A_32 BIT_ULL(RC_PROTO_RC6_6A_32) +#define RC_PROTO_BIT_RC6_MCE BIT_ULL(RC_PROTO_RC6_MCE) +#define RC_PROTO_BIT_SHARP BIT_ULL(RC_PROTO_SHARP) +#define RC_PROTO_BIT_XMP BIT_ULL(RC_PROTO_XMP) +#define RC_PROTO_BIT_CEC BIT_ULL(RC_PROTO_CEC) +#define RC_PROTO_BIT_IMON 
BIT_ULL(RC_PROTO_IMON) +#define RC_PROTO_BIT_RCMM12 BIT_ULL(RC_PROTO_RCMM12) +#define RC_PROTO_BIT_RCMM24 BIT_ULL(RC_PROTO_RCMM24) +#define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32) +#define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD) + +#if IS_ENABLED(CONFIG_IR_RC5_DECODER) +#define __RC_PROTO_RC5_CODEC \ + (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | RC_PROTO_BIT_RC5_SZ) +#else +#define __RC_PROTO_RC5_CODEC 0 +#endif + +#if IS_ENABLED(CONFIG_IR_JVC_DECODER) +#define __RC_PROTO_JVC_CODEC RC_PROTO_BIT_JVC +#else +#define __RC_PROTO_JVC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SONY_DECODER) +#define __RC_PROTO_SONY_CODEC \ + (RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20) +#else +#define __RC_PROTO_SONY_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_NEC_DECODER) +#define __RC_PROTO_NEC_CODEC \ + (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32) +#else +#define __RC_PROTO_NEC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SANYO_DECODER) +#define __RC_PROTO_SANYO_CODEC RC_PROTO_BIT_SANYO +#else +#define __RC_PROTO_SANYO_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_MCE_KBD_DECODER) +#define __RC_PROTO_MCE_KBD_CODEC \ + (RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE) +#else +#define __RC_PROTO_MCE_KBD_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RC6_DECODER) +#define __RC_PROTO_RC6_CODEC \ + (RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ + RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \ + RC_PROTO_BIT_RC6_MCE) +#else +#define __RC_PROTO_RC6_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SHARP_DECODER) +#define __RC_PROTO_SHARP_CODEC RC_PROTO_BIT_SHARP +#else +#define __RC_PROTO_SHARP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_XMP_DECODER) +#define __RC_PROTO_XMP_CODEC RC_PROTO_BIT_XMP +#else +#define __RC_PROTO_XMP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_IMON_DECODER) +#define __RC_PROTO_IMON_CODEC RC_PROTO_BIT_IMON +#else +#define __RC_PROTO_IMON_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RCMM_DECODER) +#define __RC_PROTO_RCMM_CODEC \ + 
(RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32) +#else +#define __RC_PROTO_RCMM_CODEC 0 +#endif + +/* All kernel-based codecs have encoders and decoders */ +#define RC_PROTO_BIT_ALL_IR_DECODER \ + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) + +#define RC_PROTO_BIT_ALL_IR_ENCODER \ + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) + +#define RC_SCANCODE_UNKNOWN(x) (x) +#define RC_SCANCODE_OTHER(x) (x) +#define RC_SCANCODE_NEC(addr, cmd) (((addr) << 8) | (cmd)) +#define RC_SCANCODE_NECX(addr, cmd) (((addr) << 8) | (cmd)) +#define RC_SCANCODE_NEC32(data) ((data) & 0xffffffff) +#define RC_SCANCODE_RC5(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC5_SZ(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC6_0(sys, cmd) (((sys) << 8) | (cmd)) +#define RC_SCANCODE_RC6_6A(vendor, sys, cmd) (((vendor) << 16) | ((sys) << 8) | (cmd)) + +/** + * struct rc_map_table - represents a scancode/keycode pair + * + * @scancode: scan code (u64) + * @keycode: Linux input keycode + */ +struct rc_map_table { + u64 scancode; + u32 keycode; +}; + +/** + * struct rc_map - represents a keycode map table + * + * @scan: pointer to struct &rc_map_table + * @size: Max number of entries + * @len: Number of entries that are in use + * @alloc: size of \*scan, in bytes + * @rc_proto: type of the remote controller protocol, as defined at + * enum &rc_proto + * @name: name of the key map table + * @lock: lock to protect access to this structure + */ +struct rc_map { + struct rc_map_table *scan; + unsigned int 
size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +/** + * struct rc_map_list - list of the registered &rc_map maps + * + * @list: pointer to struct &list_head + * @map: pointer to struct &rc_map + */ +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +#ifdef CONFIG_MEDIA_CEC_RC +/* + * rc_map_list from rc-cec.c + */ +extern struct rc_map_list cec_map; +#endif + +/* Routines from rc-map.c */ + +/** + * rc_map_register() - Registers a Remote Controller scancode map + * + * @map: pointer to struct rc_map_list + */ +int rc_map_register(struct rc_map_list *map); + +/** + * rc_map_unregister() - Unregisters a Remote Controller scancode map + * + * @map: pointer to struct rc_map_list + */ +void rc_map_unregister(struct rc_map_list *map); + +/** + * rc_map_get - gets an RC map from its name + * @name: name of the RC scancode map + */ +struct rc_map *rc_map_get(const char *name); + +/* Names of the several keytables defined in-kernel */ + +#define RC_MAP_ADSTECH_DVB_T_PCI "rc-adstech-dvb-t-pci" +#define RC_MAP_ALINK_DTU_M "rc-alink-dtu-m" +#define RC_MAP_ANYSEE "rc-anysee" +#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp" +#define RC_MAP_ASTROMETA_T2HYBRID "rc-astrometa-t2hybrid" +#define RC_MAP_ASUS_PC39 "rc-asus-pc39" +#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100" +#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600" +#define RC_MAP_ATI_X10 "rc-ati-x10" +#define RC_MAP_AVERMEDIA "rc-avermedia" +#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d" +#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus" +#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt" +#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a" +#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6" +#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks" +#define RC_MAP_AVERTV_303 "rc-avertv-303" +#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700" +#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1" +#define 
RC_MAP_BEELINK_MXIII "rc-beelink-mxiii" +#define RC_MAP_BEHOLD "rc-behold" +#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus" +#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old" +#define RC_MAP_CEC "rc-cec" +#define RC_MAP_CINERGY "rc-cinergy" +#define RC_MAP_CINERGY_1400 "rc-cinergy-1400" +#define RC_MAP_CT_90405 "rc-ct-90405" +#define RC_MAP_D680_DMB "rc-d680-dmb" +#define RC_MAP_DELOCK_61959 "rc-delock-61959" +#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec" +#define RC_MAP_DIB0700_RC5_TABLE "rc-dib0700-rc5" +#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin" +#define RC_MAP_DIGITTRADE "rc-digittrade" +#define RC_MAP_DM1105_NEC "rc-dm1105-nec" +#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" +#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" +#define RC_MAP_DREAMBOX "rc-dreambox" +#define RC_MAP_DTT200U "rc-dtt200u" +#define RC_MAP_DVBSKY "rc-dvbsky" +#define RC_MAP_DVICO_MCE "rc-dvico-mce" +#define RC_MAP_DVICO_PORTABLE "rc-dvico-portable" +#define RC_MAP_EMPTY "rc-empty" +#define RC_MAP_EM_TERRATEC "rc-em-terratec" +#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv" +#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2" +#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53" +#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube" +#define RC_MAP_EZTV "rc-eztv" +#define RC_MAP_FLYDVB "rc-flydvb" +#define RC_MAP_FLYVIDEO "rc-flyvideo" +#define RC_MAP_FUSIONHDTV_MCE "rc-fusionhdtv-mce" +#define RC_MAP_GADMEI_RM008Z "rc-gadmei-rm008z" +#define RC_MAP_GEEKBOX "rc-geekbox" +#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce" +#define RC_MAP_GOTVIEW7135 "rc-gotview7135" +#define RC_MAP_HAUPPAUGE "rc-hauppauge" +#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge" +#define RC_MAP_HISI_POPLAR "rc-hisi-poplar" +#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo" +#define RC_MAP_IMON_MCE "rc-imon-mce" +#define RC_MAP_IMON_PAD "rc-imon-pad" +#define RC_MAP_IMON_RSC "rc-imon-rsc" +#define RC_MAP_IODATA_BCTV7E "rc-iodata-bctv7e" +#define RC_MAP_IT913X_V1 "rc-it913x-v1" +#define 
RC_MAP_IT913X_V2 "rc-it913x-v2" +#define RC_MAP_KAIOMY "rc-kaiomy" +#define RC_MAP_KHADAS "rc-khadas" +#define RC_MAP_KHAMSIN "rc-khamsin" +#define RC_MAP_KWORLD_315U "rc-kworld-315u" +#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u" +#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog" +#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051" +#define RC_MAP_LME2510 "rc-lme2510" +#define RC_MAP_MANLI "rc-manli" +#define RC_MAP_MECOOL_KII_PRO "rc-mecool-kii-pro" +#define RC_MAP_MECOOL_KIII_PRO "rc-mecool-kiii-pro" +#define RC_MAP_MEDION_X10 "rc-medion-x10" +#define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer" +#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x" +#define RC_MAP_MINIX_NEO "rc-minix-neo" +#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii" +#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii" +#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere" +#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus" +#define RC_MAP_MYGICA_UTV3 "rc-mygica-utv3" +#define RC_MAP_NEBULA "rc-nebula" +#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs" +#define RC_MAP_NORWOOD "rc-norwood" +#define RC_MAP_NPGTECH "rc-npgtech" +#define RC_MAP_ODROID "rc-odroid" +#define RC_MAP_PCTV_SEDNA "rc-pctv-sedna" +#define RC_MAP_PINE64 "rc-pine64" +#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color" +#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey" +#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd" +#define RC_MAP_PIXELVIEW "rc-pixelview" +#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t" +#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12" +#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new" +#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel" +#define RC_MAP_PROTEUS_2309 "rc-proteus-2309" +#define RC_MAP_PURPLETV "rc-purpletv" +#define RC_MAP_PV951 "rc-pv951" +#define RC_MAP_RC5_TV "rc-rc5-tv" +#define RC_MAP_RC6_MCE "rc-rc6-mce" +#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys" +#define RC_MAP_REDDO "rc-reddo" +#define 
RC_MAP_SIEMENS_GIGASET_RC20 "rc-siemens-gigaset-rc20" +#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" +#define RC_MAP_STREAMZAP "rc-streamzap" +#define RC_MAP_SU3000 "rc-su3000" +#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini" +#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max" +#define RC_MAP_TBS_NEC "rc-tbs-nec" +#define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35" +#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2" +#define RC_MAP_TERRATEC_CINERGY_C_PCI "rc-terratec-cinergy-c-pci" +#define RC_MAP_TERRATEC_CINERGY_S2_HD "rc-terratec-cinergy-s2-hd" +#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs" +#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim" +#define RC_MAP_TERRATEC_SLIM_2 "rc-terratec-slim-2" +#define RC_MAP_TEVII_NEC "rc-tevii-nec" +#define RC_MAP_TIVO "rc-tivo" +#define RC_MAP_TOTAL_MEDIA_IN_HAND "rc-total-media-in-hand" +#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02" +#define RC_MAP_TREKSTOR "rc-trekstor" +#define RC_MAP_TT_1500 "rc-tt-1500" +#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci" +#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027" +#define RC_MAP_VEGA_S9X "rc-vega-s9x" +#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" +#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" +#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" +#define RC_MAP_KII_PRO "rc-videostrong-kii-pro" +#define RC_MAP_WETEK_HUB "rc-wetek-hub" +#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2" +#define RC_MAP_WINFAST "rc-winfast" +#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" +#define RC_MAP_X96MAX "rc-x96max" +#define RC_MAP_XBOX_360 "rc-xbox-360" +#define RC_MAP_XBOX_DVD "rc-xbox-dvd" +#define RC_MAP_ZX_IRDEC "rc-zx-irdec" + +/* + * Please, do not just append newer Remote Controller names at the end. 
+ * The names should be ordered in alphabetical order + */ + +#endif /* _MEDIA_RC_MAP_H */ diff --git a/6.18.0/include-overrides/media/rcar-fcp.h b/6.18.0/include-overrides/media/rcar-fcp.h new file mode 100644 index 0000000..6ac9be9 --- /dev/null +++ b/6.18.0/include-overrides/media/rcar-fcp.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rcar-fcp.h -- R-Car Frame Compression Processor Driver + * + * Copyright (C) 2016 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ +#ifndef __MEDIA_RCAR_FCP_H__ +#define __MEDIA_RCAR_FCP_H__ + +struct device_node; +struct rcar_fcp_device; + +#if IS_ENABLED(CONFIG_VIDEO_RENESAS_FCP) +struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np); +void rcar_fcp_put(struct rcar_fcp_device *fcp); +struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp); +int rcar_fcp_enable(struct rcar_fcp_device *fcp); +void rcar_fcp_disable(struct rcar_fcp_device *fcp); +int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp); +#else +static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np) +{ + return ERR_PTR(-ENOENT); +} +static inline void rcar_fcp_put(struct rcar_fcp_device *fcp) { } +static inline struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp) +{ + return NULL; +} +static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp) +{ + return 0; +} +static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { } +static inline int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp) +{ + return 0; +} +#endif + +#endif /* __MEDIA_RCAR_FCP_H__ */ diff --git a/6.18.0/include-overrides/media/tpg/v4l2-tpg.h b/6.18.0/include-overrides/media/tpg/v4l2-tpg.h new file mode 100644 index 0000000..a550889 --- /dev/null +++ b/6.18.0/include-overrides/media/tpg/v4l2-tpg.h @@ -0,0 +1,668 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-tpg.h - Test Pattern Generator + * + * Copyright 2014 Cisco Systems, Inc. 
and/or its affiliates. All rights reserved. + */ + +#ifndef _V4L2_TPG_H_ +#define _V4L2_TPG_H_ + +#include +#include +#include +#include +#include +#include + +struct tpg_rbg_color8 { + unsigned char r, g, b; +}; + +struct tpg_rbg_color16 { + __u16 r, g, b; +}; + +enum tpg_color { + TPG_COLOR_CSC_WHITE, + TPG_COLOR_CSC_YELLOW, + TPG_COLOR_CSC_CYAN, + TPG_COLOR_CSC_GREEN, + TPG_COLOR_CSC_MAGENTA, + TPG_COLOR_CSC_RED, + TPG_COLOR_CSC_BLUE, + TPG_COLOR_CSC_BLACK, + TPG_COLOR_75_YELLOW, + TPG_COLOR_75_CYAN, + TPG_COLOR_75_GREEN, + TPG_COLOR_75_MAGENTA, + TPG_COLOR_75_RED, + TPG_COLOR_75_BLUE, + TPG_COLOR_100_WHITE, + TPG_COLOR_100_YELLOW, + TPG_COLOR_100_CYAN, + TPG_COLOR_100_GREEN, + TPG_COLOR_100_MAGENTA, + TPG_COLOR_100_RED, + TPG_COLOR_100_BLUE, + TPG_COLOR_100_BLACK, + TPG_COLOR_TEXTFG, + TPG_COLOR_TEXTBG, + TPG_COLOR_RANDOM, + TPG_COLOR_RAMP, + TPG_COLOR_MAX = TPG_COLOR_RAMP + 256 +}; + +extern const struct tpg_rbg_color8 tpg_colors[TPG_COLOR_MAX]; +extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1]; +extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1]; +extern const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1] + [V4L2_XFER_FUNC_SMPTE2084 + 1] + [TPG_COLOR_CSC_BLACK + 1]; +enum tpg_pattern { + TPG_PAT_75_COLORBAR, + TPG_PAT_100_COLORBAR, + TPG_PAT_CSC_COLORBAR, + TPG_PAT_100_HCOLORBAR, + TPG_PAT_100_COLORSQUARES, + TPG_PAT_BLACK, + TPG_PAT_WHITE, + TPG_PAT_RED, + TPG_PAT_GREEN, + TPG_PAT_BLUE, + TPG_PAT_CHECKERS_16X16, + TPG_PAT_CHECKERS_2X2, + TPG_PAT_CHECKERS_1X1, + TPG_PAT_COLOR_CHECKERS_2X2, + TPG_PAT_COLOR_CHECKERS_1X1, + TPG_PAT_ALTERNATING_HLINES, + TPG_PAT_ALTERNATING_VLINES, + TPG_PAT_CROSS_1_PIXEL, + TPG_PAT_CROSS_2_PIXELS, + TPG_PAT_CROSS_10_PIXELS, + TPG_PAT_GRAY_RAMP, + + /* Must be the last pattern */ + TPG_PAT_NOISE, +}; + +extern const char * const tpg_pattern_strings[]; + +enum tpg_quality { + TPG_QUAL_COLOR, + TPG_QUAL_GRAY, + TPG_QUAL_NOISE +}; + +enum tpg_video_aspect { + TPG_VIDEO_ASPECT_IMAGE, + 
TPG_VIDEO_ASPECT_4X3, + TPG_VIDEO_ASPECT_14X9_CENTRE, + TPG_VIDEO_ASPECT_16X9_CENTRE, + TPG_VIDEO_ASPECT_16X9_ANAMORPHIC, +}; + +enum tpg_pixel_aspect { + TPG_PIXEL_ASPECT_SQUARE, + TPG_PIXEL_ASPECT_NTSC, + TPG_PIXEL_ASPECT_PAL, +}; + +enum tpg_move_mode { + TPG_MOVE_NEG_FAST, + TPG_MOVE_NEG, + TPG_MOVE_NEG_SLOW, + TPG_MOVE_NONE, + TPG_MOVE_POS_SLOW, + TPG_MOVE_POS, + TPG_MOVE_POS_FAST, +}; + +enum tgp_color_enc { + TGP_COLOR_ENC_RGB, + TGP_COLOR_ENC_YCBCR, + TGP_COLOR_ENC_HSV, + TGP_COLOR_ENC_LUMA, +}; + +extern const char * const tpg_aspect_strings[]; + +#define TPG_MAX_PLANES 3 +#define TPG_MAX_PAT_LINES 8 + +struct tpg_data { + /* Source frame size */ + unsigned src_width, src_height; + /* Buffer height */ + unsigned buf_height; + /* Scaled output frame size */ + unsigned scaled_width; + u32 field; + bool field_alternate; + /* crop coordinates are frame-based */ + struct v4l2_rect crop; + /* compose coordinates are format-based */ + struct v4l2_rect compose; + /* border and square coordinates are frame-based */ + struct v4l2_rect border; + struct v4l2_rect square; + + /* Color-related fields */ + enum tpg_quality qual; + unsigned qual_offset; + u8 alpha_component; + bool alpha_red_only; + u8 brightness; + u8 contrast; + u8 saturation; + s16 hue; + u32 fourcc; + enum tgp_color_enc color_enc; + u32 colorspace; + u32 xfer_func; + u32 ycbcr_enc; + u32 hsv_enc; + /* + * Stores the actual transfer function, i.e. will never be + * V4L2_XFER_FUNC_DEFAULT. + */ + u32 real_xfer_func; + /* + * Stores the actual Y'CbCr encoding, i.e. will never be + * V4L2_YCBCR_ENC_DEFAULT. + */ + u32 real_hsv_enc; + u32 real_ycbcr_enc; + u32 quantization; + /* + * Stores the actual quantization, i.e. will never be + * V4L2_QUANTIZATION_DEFAULT. 
+ */ + u32 real_quantization; + enum tpg_video_aspect vid_aspect; + enum tpg_pixel_aspect pix_aspect; + unsigned rgb_range; + unsigned real_rgb_range; + unsigned buffers; + unsigned planes; + bool interleaved; + u8 vdownsampling[TPG_MAX_PLANES]; + u8 hdownsampling[TPG_MAX_PLANES]; + /* + * horizontal positions must be ANDed with this value to enforce + * correct boundaries for packed YUYV values. + */ + unsigned hmask[TPG_MAX_PLANES]; + /* Used to store the colors in native format, either RGB or YUV */ + u8 colors[TPG_COLOR_MAX][3]; + u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8]; + /* size in bytes for two pixels in each plane */ + unsigned twopixelsize[TPG_MAX_PLANES]; + unsigned bytesperline[TPG_MAX_PLANES]; + + /* Configuration */ + enum tpg_pattern pattern; + bool hflip; + bool vflip; + unsigned perc_fill; + bool perc_fill_blank; + bool show_border; + bool show_square; + bool insert_sav; + bool insert_eav; + bool insert_hdmi_video_guard_band; + + /* Test pattern movement */ + enum tpg_move_mode mv_hor_mode; + int mv_hor_count; + int mv_hor_step; + enum tpg_move_mode mv_vert_mode; + int mv_vert_count; + int mv_vert_step; + + bool recalc_colors; + bool recalc_lines; + bool recalc_square_border; + + /* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */ + unsigned max_line_width; + u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES]; + u8 *downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES]; + u8 *random_line[TPG_MAX_PLANES]; + u8 *contrast_line[TPG_MAX_PLANES]; + u8 *black_line[TPG_MAX_PLANES]; +}; + +void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h); +int tpg_alloc(struct tpg_data *tpg, unsigned max_w); +void tpg_free(struct tpg_data *tpg); +void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height, + u32 field); +void tpg_log_status(struct tpg_data *tpg); + +void tpg_set_font(const u8 *f); +void tpg_gen_text(const struct tpg_data *tpg, + u8 *basep[TPG_MAX_PLANES][2], int y, int x, const char *text); +void 
tpg_calc_text_basep(struct tpg_data *tpg, + u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf); +unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line); +void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std, + unsigned p, u8 *vbuf); +void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, + unsigned p, u8 *vbuf); +bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc); +void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop, + const struct v4l2_rect *compose); +const char *tpg_g_color_order(const struct tpg_data *tpg); + +static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern) +{ + if (tpg->pattern == pattern) + return; + tpg->pattern = pattern; + tpg->recalc_colors = true; +} + +static inline void tpg_s_quality(struct tpg_data *tpg, + enum tpg_quality qual, unsigned qual_offset) +{ + if (tpg->qual == qual && tpg->qual_offset == qual_offset) + return; + tpg->qual = qual; + tpg->qual_offset = qual_offset; + tpg->recalc_colors = true; +} + +static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg) +{ + return tpg->qual; +} + +static inline void tpg_s_alpha_component(struct tpg_data *tpg, + u8 alpha_component) +{ + if (tpg->alpha_component == alpha_component) + return; + tpg->alpha_component = alpha_component; + tpg->recalc_colors = true; +} + +static inline void tpg_s_alpha_mode(struct tpg_data *tpg, + bool red_only) +{ + if (tpg->alpha_red_only == red_only) + return; + tpg->alpha_red_only = red_only; + tpg->recalc_colors = true; +} + +static inline void tpg_s_brightness(struct tpg_data *tpg, + u8 brightness) +{ + if (tpg->brightness == brightness) + return; + tpg->brightness = brightness; + tpg->recalc_colors = true; +} + +static inline void tpg_s_contrast(struct tpg_data *tpg, + u8 contrast) +{ + if (tpg->contrast == contrast) + return; + tpg->contrast = contrast; + tpg->recalc_colors = true; +} + +static inline void tpg_s_saturation(struct tpg_data *tpg, + u8 
saturation) +{ + if (tpg->saturation == saturation) + return; + tpg->saturation = saturation; + tpg->recalc_colors = true; +} + +static inline void tpg_s_hue(struct tpg_data *tpg, + s16 hue) +{ + hue = clamp_t(s16, hue, -128, 128); + if (tpg->hue == hue) + return; + tpg->hue = hue; + tpg->recalc_colors = true; +} + +static inline void tpg_s_rgb_range(struct tpg_data *tpg, + unsigned rgb_range) +{ + if (tpg->rgb_range == rgb_range) + return; + tpg->rgb_range = rgb_range; + tpg->recalc_colors = true; +} + +static inline void tpg_s_real_rgb_range(struct tpg_data *tpg, + unsigned rgb_range) +{ + if (tpg->real_rgb_range == rgb_range) + return; + tpg->real_rgb_range = rgb_range; + tpg->recalc_colors = true; +} + +static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace) +{ + if (tpg->colorspace == colorspace) + return; + tpg->colorspace = colorspace; + tpg->recalc_colors = true; +} + +static inline u32 tpg_g_colorspace(const struct tpg_data *tpg) +{ + return tpg->colorspace; +} + +static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc) +{ + if (tpg->ycbcr_enc == ycbcr_enc) + return; + tpg->ycbcr_enc = ycbcr_enc; + tpg->recalc_colors = true; +} + +static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg) +{ + return tpg->ycbcr_enc; +} + +static inline void tpg_s_hsv_enc(struct tpg_data *tpg, u32 hsv_enc) +{ + if (tpg->hsv_enc == hsv_enc) + return; + tpg->hsv_enc = hsv_enc; + tpg->recalc_colors = true; +} + +static inline u32 tpg_g_hsv_enc(const struct tpg_data *tpg) +{ + return tpg->hsv_enc; +} + +static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func) +{ + if (tpg->xfer_func == xfer_func) + return; + tpg->xfer_func = xfer_func; + tpg->recalc_colors = true; +} + +static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg) +{ + return tpg->xfer_func; +} + +static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization) +{ + if (tpg->quantization == quantization) + return; + tpg->quantization = 
quantization; + tpg->recalc_colors = true; +} + +static inline u32 tpg_g_quantization(const struct tpg_data *tpg) +{ + return tpg->quantization; +} + +static inline unsigned tpg_g_buffers(const struct tpg_data *tpg) +{ + return tpg->buffers; +} + +static inline unsigned tpg_g_planes(const struct tpg_data *tpg) +{ + return tpg->interleaved ? 1 : tpg->planes; +} + +static inline bool tpg_g_interleaved(const struct tpg_data *tpg) +{ + return tpg->interleaved; +} + +static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane) +{ + return tpg->twopixelsize[plane]; +} + +static inline unsigned tpg_hdiv(const struct tpg_data *tpg, + unsigned plane, unsigned x) +{ + return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) * + tpg->twopixelsize[plane] / 2; +} + +static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x) +{ + return (x * tpg->scaled_width) / tpg->src_width; +} + +static inline unsigned tpg_hscale_div(const struct tpg_data *tpg, + unsigned plane, unsigned x) +{ + return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x)); +} + +static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane) +{ + return tpg->bytesperline[plane]; +} + +static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl) +{ + unsigned p; + + if (tpg->buffers > 1) { + tpg->bytesperline[plane] = bpl; + return; + } + + for (p = 0; p < tpg_g_planes(tpg); p++) { + unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0]; + + tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p]; + } + if (tpg_g_interleaved(tpg)) + tpg->bytesperline[1] = tpg->bytesperline[0]; +} + + +static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane) +{ + unsigned w = 0; + unsigned p; + + if (tpg->buffers > 1) + return tpg_g_bytesperline(tpg, plane); + for (p = 0; p < tpg_g_planes(tpg); p++) { + unsigned plane_w = tpg_g_bytesperline(tpg, p); + + w += plane_w / tpg->vdownsampling[p]; + } + return w; 
+} + +static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg, + unsigned plane, unsigned bpl) +{ + unsigned w = 0; + unsigned p; + + if (tpg->buffers > 1) + return bpl; + for (p = 0; p < tpg_g_planes(tpg); p++) { + unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0]; + + plane_w /= tpg->hdownsampling[p]; + w += plane_w / tpg->vdownsampling[p]; + } + return w; +} + +static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane) +{ + if (plane >= tpg_g_planes(tpg)) + return 0; + + return tpg_g_bytesperline(tpg, plane) * tpg->buf_height / + tpg->vdownsampling[plane]; +} + +static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h) +{ + tpg->buf_height = h; +} + +static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate) +{ + tpg->field = field; + tpg->field_alternate = alternate; +} + +static inline void tpg_s_perc_fill(struct tpg_data *tpg, + unsigned perc_fill) +{ + tpg->perc_fill = perc_fill; +} + +static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg) +{ + return tpg->perc_fill; +} + +static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg, + bool perc_fill_blank) +{ + tpg->perc_fill_blank = perc_fill_blank; +} + +static inline void tpg_s_video_aspect(struct tpg_data *tpg, + enum tpg_video_aspect vid_aspect) +{ + if (tpg->vid_aspect == vid_aspect) + return; + tpg->vid_aspect = vid_aspect; + tpg->recalc_square_border = true; +} + +static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg) +{ + return tpg->vid_aspect; +} + +static inline void tpg_s_pixel_aspect(struct tpg_data *tpg, + enum tpg_pixel_aspect pix_aspect) +{ + if (tpg->pix_aspect == pix_aspect) + return; + tpg->pix_aspect = pix_aspect; + tpg->recalc_square_border = true; +} + +static inline void tpg_s_show_border(struct tpg_data *tpg, + bool show_border) +{ + tpg->show_border = show_border; +} + +static inline void tpg_s_show_square(struct tpg_data *tpg, + bool 
show_square) +{ + tpg->show_square = show_square; +} + +static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav) +{ + tpg->insert_sav = insert_sav; +} + +static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav) +{ + tpg->insert_eav = insert_eav; +} + +/* + * This inserts 4 pixels of the RGB color 0xab55ab at the left hand side of the + * image. This is only done for 3 or 4 byte RGB pixel formats. This pixel value + * equals the Video Guard Band value as defined by HDMI (see section 5.2.2.1 + * in the HDMI 1.3 Specification) that preceeds the first actual pixel. If the + * HDMI receiver doesn't handle this correctly, then it might keep skipping + * these Video Guard Band patterns and end up with a shorter video line. So this + * is a nice pattern to test with. + */ +static inline void tpg_s_insert_hdmi_video_guard_band(struct tpg_data *tpg, + bool insert_hdmi_video_guard_band) +{ + tpg->insert_hdmi_video_guard_band = insert_hdmi_video_guard_band; +} + +void tpg_update_mv_step(struct tpg_data *tpg); + +static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg, + enum tpg_move_mode mv_hor_mode) +{ + tpg->mv_hor_mode = mv_hor_mode; + tpg_update_mv_step(tpg); +} + +static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg, + enum tpg_move_mode mv_vert_mode) +{ + tpg->mv_vert_mode = mv_vert_mode; + tpg_update_mv_step(tpg); +} + +static inline void tpg_init_mv_count(struct tpg_data *tpg) +{ + tpg->mv_hor_count = tpg->mv_vert_count = 0; +} + +static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field) +{ + tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2); + tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 
1 : 2); +} + +static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip) +{ + if (tpg->hflip == hflip) + return; + tpg->hflip = hflip; + tpg_update_mv_step(tpg); + tpg->recalc_lines = true; +} + +static inline bool tpg_g_hflip(const struct tpg_data *tpg) +{ + return tpg->hflip; +} + +static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip) +{ + tpg->vflip = vflip; +} + +static inline bool tpg_g_vflip(const struct tpg_data *tpg) +{ + return tpg->vflip; +} + +static inline bool tpg_pattern_is_static(const struct tpg_data *tpg) +{ + return tpg->pattern != TPG_PAT_NOISE && + tpg->mv_hor_mode == TPG_MOVE_NONE && + tpg->mv_vert_mode == TPG_MOVE_NONE; +} + +#endif diff --git a/6.18.0/include-overrides/media/tuner-types.h b/6.18.0/include-overrides/media/tuner-types.h new file mode 100644 index 0000000..c79b773 --- /dev/null +++ b/6.18.0/include-overrides/media/tuner-types.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * descriptions for simple tuners. + */ + +#ifndef __TUNER_TYPES_H__ +#define __TUNER_TYPES_H__ + +/** + * enum param_type - type of the tuner parameters + * + * @TUNER_PARAM_TYPE_RADIO: Tuner params are for FM and/or AM radio + * @TUNER_PARAM_TYPE_PAL: Tuner params are for PAL color TV standard + * @TUNER_PARAM_TYPE_SECAM: Tuner params are for SECAM color TV standard + * @TUNER_PARAM_TYPE_NTSC: Tuner params are for NTSC color TV standard + * @TUNER_PARAM_TYPE_DIGITAL: Tuner params are for digital TV + */ +enum param_type { + TUNER_PARAM_TYPE_RADIO, + TUNER_PARAM_TYPE_PAL, + TUNER_PARAM_TYPE_SECAM, + TUNER_PARAM_TYPE_NTSC, + TUNER_PARAM_TYPE_DIGITAL, +}; + +/** + * struct tuner_range - define the frequencies supported by the tuner + * + * @limit: Max frequency supported by that range, in 62.5 kHz + * (TV) or 62.5 Hz (Radio), as defined by + * V4L2_TUNER_CAP_LOW. + * @config: Value of the band switch byte (BB) to setup this mode. + * @cb: Value of the CB byte to setup this mode. 
+ * + * Please notice that digital tuners like xc3028/xc4000/xc5000 don't use + * those ranges, as they're defined inside the driver. This is used by + * analog tuners that are compatible with the "Philips way" to setup the + * tuners. On those devices, the tuner set is done via 4 bytes: + * + * #) divider byte1 (DB1) + * #) divider byte 2 (DB2) + * #) Control byte (CB) + * #) band switch byte (BB) + * + * Some tuners also have an additional optional Auxiliary byte (AB). + */ +struct tuner_range { + unsigned short limit; + unsigned char config; + unsigned char cb; +}; + +/** + * struct tuner_params - Parameters to be used to setup the tuner. Those + * are used by drivers/media/tuners/tuner-types.c in + * order to specify the tuner properties. Most of + * the parameters are for tuners based on tda9887 IF-PLL + * multi-standard analog TV/Radio demodulator, with is + * very common on legacy analog tuners. + * + * @type: Type of the tuner parameters, as defined at + * enum param_type. If the tuner supports multiple + * standards, an array should be used, with one + * row per different standard. + * @cb_first_if_lower_freq: Many Philips-based tuners have a comment in + * their datasheet like + * "For channel selection involving band + * switching, and to ensure smooth tuning to the + * desired channel without causing unnecessary + * charge pump action, it is recommended to + * consider the difference between wanted channel + * frequency and the current channel frequency. + * Unnecessary charge pump action will result + * in very low tuning voltage which may drive the + * oscillator to extreme conditions". + * Set cb_first_if_lower_freq to 1, if this check + * is required for this tuner. I tested this for + * PAL by first setting the TV frequency to + * 203 MHz and then switching to 96.6 MHz FM + * radio. The result was static unless the + * control byte was sent first. 
+ * @has_tda9887: Set to 1 if this tuner uses a tda9887 + * @port1_fm_high_sensitivity: Many Philips tuners use tda9887 PORT1 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT1 to 1 to get proper FM reception. + * @port2_fm_high_sensitivity: Some Philips tuners use tda9887 PORT2 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT2 to 1 to get proper FM reception. + * @fm_gain_normal: Some Philips tuners use tda9887 cGainNormal to + * select the FM radio sensitivity. If this + * setting is 1, e register will use cGainNormal + * instead of cGainLow. + * @intercarrier_mode: Most tuners with a tda9887 use QSS mode. + * Some (cheaper) tuners use Intercarrier mode. + * If this setting is 1, then the tuner needs to + * be set to intercarrier mode. + * @port1_active: This setting sets the default value for PORT1. + * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B6 bit. + * @port2_active: This setting sets the default value for PORT2. + * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B7 bit. + * @port1_invert_for_secam_lc: Sometimes PORT1 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port2_invert_for_secam_lc: Sometimes PORT2 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port1_set_for_fm_mono: Some cards require PORT1 to be 1 for mono Radio + * FM and 0 for stereo. + * @default_pll_gating_18: Select 18% (or according to datasheet 0%) + * L standard PLL gating, vs the driver default + * of 36%. + * @radio_if: IF to use in radio mode. Tuners with a + * separate radio IF filter seem to use 10.7, + * while those without use 33.3 for PAL/SECAM + * tuners and 41.3 for NTSC tuners. 
+ * 0 = 10.7, 1 = 33.3, 2 = 41.3 + * @default_top_low: Default tda9887 TOP value in dB for the low + * band. Default is 0. Range: -16:+15 + * @default_top_mid: Default tda9887 TOP value in dB for the mid + * band. Default is 0. Range: -16:+15 + * @default_top_high: Default tda9887 TOP value in dB for the high + * band. Default is 0. Range: -16:+15 + * @default_top_secam_low: Default tda9887 TOP value in dB for SECAM-L/L' + * for the low band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_mid: Default tda9887 TOP value in dB for SECAM-L/L' + * for the mid band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_high: Default tda9887 TOP value in dB for SECAM-L/L' + * for the high band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @iffreq: Intermediate frequency (IF) used by the tuner + * on digital mode. + * @count: Size of the ranges array. + * @ranges: Array with the frequency ranges supported by + * the tuner. 
+ */ +struct tuner_params { + enum param_type type; + + unsigned int cb_first_if_lower_freq:1; + unsigned int has_tda9887:1; + unsigned int port1_fm_high_sensitivity:1; + unsigned int port2_fm_high_sensitivity:1; + unsigned int fm_gain_normal:1; + unsigned int intercarrier_mode:1; + unsigned int port1_active:1; + unsigned int port2_active:1; + unsigned int port1_invert_for_secam_lc:1; + unsigned int port2_invert_for_secam_lc:1; + unsigned int port1_set_for_fm_mono:1; + unsigned int default_pll_gating_18:1; + unsigned int radio_if:2; + signed int default_top_low:5; + signed int default_top_mid:5; + signed int default_top_high:5; + signed int default_top_secam_low:5; + signed int default_top_secam_mid:5; + signed int default_top_secam_high:5; + + u16 iffreq; + + unsigned int count; + const struct tuner_range *ranges; +}; + +/** + * struct tunertype - describes the known tuners. + * + * @name: string with the tuner's name. + * @count: size of &struct tuner_params array. + * @params: pointer to &struct tuner_params array. + * + * @min: minimal tuner frequency, in 62.5 kHz step. + * should be multiplied to 16 to convert to MHz. + * @max: maximal tuner frequency, in 62.5 kHz step. + * Should be multiplied to 16 to convert to MHz. + * @stepsize: frequency step, in Hz. + * @initdata: optional byte sequence to initialize the tuner. + * @sleepdata: optional byte sequence to power down the tuner. 
+ */ +struct tunertype { + char *name; + unsigned int count; + const struct tuner_params *params; + + u16 min; + u16 max; + u32 stepsize; + + u8 *initdata; + u8 *sleepdata; +}; + +extern const struct tunertype tuners[]; +extern unsigned const int tuner_count; + +#endif diff --git a/6.18.0/include-overrides/media/tuner.h b/6.18.0/include-overrides/media/tuner.h new file mode 100644 index 0000000..c5fd6fa --- /dev/null +++ b/6.18.0/include-overrides/media/tuner.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * tuner.h - definition for different tuners + * + * Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) + * minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de) + */ + +#ifndef _TUNER_H +#define _TUNER_H +#ifdef __KERNEL__ + +#include +#include + +#define ADDR_UNSET (255) + +#define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ +#define TUNER_PHILIPS_PAL_I 1 +#define TUNER_PHILIPS_NTSC 2 +#define TUNER_PHILIPS_SECAM 3 /* you must actively select B/G, L, L` */ + +#define TUNER_ABSENT 4 +#define TUNER_PHILIPS_PAL 5 +#define TUNER_TEMIC_NTSC 6 /* 4032 FY5 (3X 7004, 9498, 9789) */ +#define TUNER_TEMIC_PAL_I 7 /* 4062 FY5 (3X 8501, 9957) */ + +#define TUNER_TEMIC_4036FY5_NTSC 8 /* 4036 FY5 (3X 1223, 1981, 7686) */ +#define TUNER_ALPS_TSBH1_NTSC 9 +#define TUNER_ALPS_TSBE1_PAL 10 +#define TUNER_ALPS_TSBB5_PAL_I 11 + +#define TUNER_ALPS_TSBE5_PAL 12 +#define TUNER_ALPS_TSBC5_PAL 13 +#define TUNER_TEMIC_4006FH5_PAL 14 /* 4006 FH5 (3X 9500, 9501, 7291) */ +#define TUNER_ALPS_TSHC6_NTSC 15 + +#define TUNER_TEMIC_PAL_DK 16 /* 4016 FY5 (3X 1392, 1393) */ +#define TUNER_PHILIPS_NTSC_M 17 +#define TUNER_TEMIC_4066FY5_PAL_I 18 /* 4066 FY5 (3X 7032, 7035) */ +#define TUNER_TEMIC_4006FN5_MULTI_PAL 19 /* B/G, I and D/K autodetected (3X 7595, 7606, 7657) */ + +#define TUNER_TEMIC_4009FR5_PAL 20 /* incl. FM radio (3X 7607, 7488, 7711) */ +#define TUNER_TEMIC_4039FR5_NTSC 21 /* incl. 
FM radio (3X 7246, 7578, 7732) */ +#define TUNER_TEMIC_4046FM5 22 /* you must actively select B/G, D/K, I, L, L` ! (3X 7804, 7806, 8103, 8104) */ +#define TUNER_PHILIPS_PAL_DK 23 + +#define TUNER_PHILIPS_FQ1216ME 24 /* you must actively select B/G/D/K, I, L, L` */ +#define TUNER_LG_PAL_I_FM 25 +#define TUNER_LG_PAL_I 26 +#define TUNER_LG_NTSC_FM 27 + +#define TUNER_LG_PAL_FM 28 +#define TUNER_LG_PAL 29 +#define TUNER_TEMIC_4009FN5_MULTI_PAL_FM 30 /* B/G, I and D/K autodetected (3X 8155, 8160, 8163) */ +#define TUNER_SHARP_2U5JF5540_NTSC 31 + +#define TUNER_Samsung_PAL_TCPM9091PD27 32 +#define TUNER_MT2032 33 +#define TUNER_TEMIC_4106FH5 34 /* 4106 FH5 (3X 7808, 7865) */ +#define TUNER_TEMIC_4012FY5 35 /* 4012 FY5 (3X 0971, 1099) */ + +#define TUNER_TEMIC_4136FY5 36 /* 4136 FY5 (3X 7708, 7746) */ +#define TUNER_LG_PAL_NEW_TAPC 37 +#define TUNER_PHILIPS_FM1216ME_MK3 38 +#define TUNER_LG_NTSC_NEW_TAPC 39 + +#define TUNER_HITACHI_NTSC 40 +#define TUNER_PHILIPS_PAL_MK 41 +#define TUNER_PHILIPS_FCV1236D 42 +#define TUNER_PHILIPS_FM1236_MK3 43 + +#define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */ + /* + * Microtune merged with Temic 12/31/1999 partially financed by Alps. 
+ * these may be similar to Temic + */ +#define TUNER_MICROTUNE_4049FM5 45 +#define TUNER_PANASONIC_VP27 46 +#define TUNER_LG_NTSC_TAPE 47 + +#define TUNER_TNF_8831BGFF 48 +#define TUNER_MICROTUNE_4042FI5 49 /* DViCO FusionHDTV 3 Gold-Q - 4042 FI5 (3X 8147) */ +#define TUNER_TCL_2002N 50 +#define TUNER_PHILIPS_FM1256_IH3 51 + +#define TUNER_THOMSON_DTT7610 52 +#define TUNER_PHILIPS_FQ1286 53 +#define TUNER_PHILIPS_TDA8290 54 +#define TUNER_TCL_2002MB 55 /* Hauppauge PVR-150 PAL */ + +#define TUNER_PHILIPS_FQ1216AME_MK4 56 /* Hauppauge PVR-150 PAL */ +#define TUNER_PHILIPS_FQ1236A_MK4 57 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_YMEC_TVF_8531MF 58 +#define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */ + +#define TUNER_THOMSON_DTT761X 60 /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */ +#define TUNER_TENA_9533_DI 61 +#define TUNER_TEA5767 62 /* Only FM Radio Tuner */ +#define TUNER_PHILIPS_FMD1216ME_MK3 63 + +#define TUNER_LG_TDVS_H06XF 64 /* TDVS H061F, H062F, H064F */ +#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ +#define TUNER_LG_TALN 66 +#define TUNER_PHILIPS_TD1316 67 + +#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ +#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ +#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_XC2028 71 + +#define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ +#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ +#define TUNER_TDA9887 74 /* This tuner should be used only internally */ +#define TUNER_TEA5761 75 /* Only FM Radio Tuner */ +#define TUNER_XC5000 76 /* Xceive Silicon Tuner */ +#define TUNER_TCL_MF02GIP_5N 77 /* TCL MF02GIP_5N */ +#define TUNER_PHILIPS_FMD1216MEX_MK3 78 +#define TUNER_PHILIPS_FM1216MK5 79 +#define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */ + +#define TUNER_PARTSNIC_PTI_5NF05 81 +#define TUNER_PHILIPS_CU1216L 82 +#define TUNER_NXP_TDA18271 83 +#define TUNER_SONY_BTF_PXN01Z 84 +#define 
TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */ +#define TUNER_TENA_TNF_5337 86 + +#define TUNER_XC4000 87 /* Xceive Silicon Tuner */ +#define TUNER_XC5000C 88 /* Xceive Silicon Tuner */ + +#define TUNER_SONY_BTF_PG472Z 89 /* PAL+SECAM */ +#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */ +#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */ +#define TUNER_SI2157 92 +#define TUNER_TENA_TNF_931D_DFDR1 93 + +/* tv card specific */ +#define TDA9887_PRESENT (1<<0) +#define TDA9887_PORT1_INACTIVE (1<<1) +#define TDA9887_PORT2_INACTIVE (1<<2) +#define TDA9887_QSS (1<<3) +#define TDA9887_INTERCARRIER (1<<4) +#define TDA9887_PORT1_ACTIVE (1<<5) +#define TDA9887_PORT2_ACTIVE (1<<6) +#define TDA9887_INTERCARRIER_NTSC (1<<7) +/* Tuner takeover point adjustment, in dB, -16 <= top <= 15 */ +#define TDA9887_TOP_MASK (0x3f << 8) +#define TDA9887_TOP_SET (1 << 13) +#define TDA9887_TOP(top) (TDA9887_TOP_SET | \ + (((16 + (top)) & 0x1f) << 8)) + +/* config options */ +#define TDA9887_DEEMPHASIS_MASK (3<<16) +#define TDA9887_DEEMPHASIS_NONE (1<<16) +#define TDA9887_DEEMPHASIS_50 (2<<16) +#define TDA9887_DEEMPHASIS_75 (3<<16) +#define TDA9887_AUTOMUTE (1<<18) +#define TDA9887_GATING_18 (1<<19) +#define TDA9887_GAIN_NORMAL (1<<20) +#define TDA9887_RIF_41_3 (1<<21) /* radio IF1 41.3 vs 33.3 */ + +/** + * enum tuner_mode - Mode of the tuner + * + * @T_RADIO: Tuner core will work in radio mode + * @T_ANALOG_TV: Tuner core will work in analog TV mode + * + * Older boards only had a single tuner device, but some devices have a + * separate tuner for radio. In any case, the tuner-core needs to know if + * the tuner chip(s) will be used in radio mode or analog TV mode, as, on + * radio mode, frequencies are specified on a different range than on TV + * mode. This enum is used by the tuner core in order to work with the + * proper tuner range and eventually use a different tuner chip while in + * radio mode. 
+ */ +enum tuner_mode { + T_RADIO = 1 << V4L2_TUNER_RADIO, + T_ANALOG_TV = 1 << V4L2_TUNER_ANALOG_TV, + /* Don't map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */ +}; + +/** + * struct tuner_setup - setup the tuner chipsets + * + * @addr: I2C address used to control the tuner device/chipset + * @type: Type of the tuner, as defined at the TUNER_* macros. + * Each different tuner model should have an unique + * identifier. + * @mode_mask: Mask with the allowed tuner modes: V4L2_TUNER_RADIO, + * V4L2_TUNER_ANALOG_TV and/or V4L2_TUNER_DIGITAL_TV, + * describing if the tuner should be used to support + * Radio, analog TV and/or digital TV. + * @config: Used to send tuner-specific configuration for complex + * tuners that require extra parameters to be set. + * Only a very few tuners require it and its usage on + * newer tuners should be avoided. + * @tuner_callback: Some tuners require to call back the bridge driver, + * in order to do some tasks like rising a GPIO at the + * bridge chipset, in order to do things like resetting + * the device. + * + * Older boards only had a single tuner device. Nowadays multiple tuner + * devices may be present on a single board. Using TUNER_SET_TYPE_ADDR + * to pass the tuner_setup structure it is possible to setup each tuner + * device in turn. + * + * Since multiple devices may be present it is no longer sufficient to + * send a command to a single i2c device. Instead you should broadcast + * the command to all i2c devices. + * + * By setting the mode_mask correctly you can select which commands are + * accepted by a specific tuner device. For example, set mode_mask to + * T_RADIO if the device is a radio-only tuner. That specific tuner will + * only accept commands when the tuner is in radio mode and ignore them + * when the tuner is set to TV mode. 
+ */ + +struct tuner_setup { + unsigned short addr; + unsigned int type; + unsigned int mode_mask; + void *config; + int (*tuner_callback)(void *dev, int component, int cmd, int arg); +}; + +#endif /* __KERNEL__ */ + +#endif /* _TUNER_H */ diff --git a/6.18.0/include-overrides/media/tveeprom.h b/6.18.0/include-overrides/media/tveeprom.h new file mode 100644 index 0000000..f37c9b1 --- /dev/null +++ b/6.18.0/include-overrides/media/tveeprom.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * tveeprom - Contains structures and functions to work with Hauppauge + * eeproms. + */ + +#include + +/** + * enum tveeprom_audio_processor - Specifies the type of audio processor + * used on a Hauppauge device. + * + * @TVEEPROM_AUDPROC_NONE: No audio processor present + * @TVEEPROM_AUDPROC_INTERNAL: The audio processor is internal to the + * video processor + * @TVEEPROM_AUDPROC_MSP: The audio processor is a MSPXXXX device + * @TVEEPROM_AUDPROC_OTHER: The audio processor is another device + */ +enum tveeprom_audio_processor { + TVEEPROM_AUDPROC_NONE, + TVEEPROM_AUDPROC_INTERNAL, + TVEEPROM_AUDPROC_MSP, + TVEEPROM_AUDPROC_OTHER, +}; + +/** + * struct tveeprom - Contains the fields parsed from Hauppauge eeproms + * + * @has_radio: 1 if the device has radio; 0 otherwise. + * + * @has_ir: If has_ir == 0, then it is unknown what the IR + * capabilities are. Otherwise: + * bit 0) 1 (= IR capabilities are known); + * bit 1) IR receiver present; + * bit 2) IR transmitter (blaster) present. + * + * @has_MAC_address: 0: no MAC, 1: MAC present, 2: unknown. + * @tuner_type: type of the tuner (TUNER_*, as defined at + * include/media/tuner.h). + * + * @tuner_formats: Supported analog TV standards (V4L2_STD_*). + * @tuner_hauppauge_model: Hauppauge's code for the device model number. + * @tuner2_type: type of the second tuner (TUNER_*, as defined + * at include/media/tuner.h). + * + * @tuner2_formats: Tuner 2 supported analog TV standards + * (V4L2_STD_*). 
+ * + * @tuner2_hauppauge_model: tuner 2 Hauppauge's code for the device model + * number. + * + * @audio_processor: analog audio decoder, as defined by enum + * tveeprom_audio_processor. + * + * @decoder_processor: Hauppauge's code for the decoder chipset. + * Unused by the drivers, as they probe the + * decoder based on the PCI or USB ID. + * + * @model: Hauppauge's model number + * + * @revision: Card revision number + * + * @serial_number: Card's serial number + * + * @rev_str: Card revision converted to number + * + * @MAC_address: MAC address for the network interface + */ +struct tveeprom { + u32 has_radio; + u32 has_ir; + u32 has_MAC_address; + + u32 tuner_type; + u32 tuner_formats; + u32 tuner_hauppauge_model; + + u32 tuner2_type; + u32 tuner2_formats; + u32 tuner2_hauppauge_model; + + u32 audio_processor; + u32 decoder_processor; + + u32 model; + u32 revision; + u32 serial_number; + char rev_str[5]; + u8 MAC_address[ETH_ALEN]; +}; + +/** + * tveeprom_hauppauge_analog - Fill struct tveeprom using the contents + * of the eeprom previously filled at + * @eeprom_data field. + * + * @tvee: Struct to where the eeprom parsed data will be filled; + * @eeprom_data: Array with the contents of the eeprom_data. It should + * contain 256 bytes filled with the contents of the + * eeprom read from the Hauppauge device. + */ +void tveeprom_hauppauge_analog(struct tveeprom *tvee, + unsigned char *eeprom_data); + +/** + * tveeprom_read - Reads the contents of the eeprom found at the Hauppauge + * devices. + * + * @c: I2C client struct + * @eedata: Array where the eeprom content will be stored. + * @len: Size of @eedata array. If the eeprom content will be latter + * be parsed by tveeprom_hauppauge_analog(), len should be, at + * least, 256. 
+ */ +int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); diff --git a/6.18.0/include-overrides/media/v4l2-async.h b/6.18.0/include-overrides/media/v4l2-async.h new file mode 100644 index 0000000..f26c323 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-async.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 asynchronous subdevice registration API + * + * Copyright (C) 2012-2013, Guennadi Liakhovetski + */ + +#ifndef V4L2_ASYNC_H +#define V4L2_ASYNC_H + +#include +#include + +struct dentry; +struct device; +struct device_node; +struct v4l2_device; +struct v4l2_subdev; +struct v4l2_async_notifier; + +/** + * enum v4l2_async_match_type - type of asynchronous subdevice logic to be used + * in order to identify a match + * + * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address + * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node + * + * This enum is used by the asynchronous connection logic to define the + * algorithm that will be used to match an asynchronous device. + */ +enum v4l2_async_match_type { + V4L2_ASYNC_MATCH_TYPE_I2C, + V4L2_ASYNC_MATCH_TYPE_FWNODE, +}; + +/** + * struct v4l2_async_match_desc - async connection match information + * + * @type: type of match that will be used + * @fwnode: pointer to &struct fwnode_handle to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE. + * @i2c: embedded struct with I2C parameters to be matched. + * Both @match.i2c.adapter_id and @match.i2c.address + * should be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. + * @i2c.adapter_id: + * I2C adapter ID to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. + * @i2c.address: + * I2C address to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C. 
+ */ +struct v4l2_async_match_desc { + enum v4l2_async_match_type type; + union { + struct fwnode_handle *fwnode; + struct { + int adapter_id; + unsigned short address; + } i2c; + }; +}; + +/** + * struct v4l2_async_connection - sub-device connection descriptor, as known to + * a bridge + * + * @match: struct of match type and per-bus type matching data sets + * @notifier: the async notifier the connection is related to + * @asc_entry: used to add struct v4l2_async_connection objects to the + * notifier @waiting_list or @done_list + * @asc_subdev_entry: entry in struct v4l2_async_subdev.asc_list list + * @sd: the related sub-device + * + * When this struct is used as a member in a driver specific struct, the driver + * specific struct shall contain the &struct v4l2_async_connection as its first + * member. + */ +struct v4l2_async_connection { + struct v4l2_async_match_desc match; + struct v4l2_async_notifier *notifier; + struct list_head asc_entry; + struct list_head asc_subdev_entry; + struct v4l2_subdev *sd; +}; + +/** + * struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations + * @bound: a sub-device has been bound by the given connection + * @complete: All connections have been bound successfully. The complete + * callback is only executed for the root notifier. 
+ * @unbind: a subdevice is leaving + * @destroy: the asc is about to be freed + */ +struct v4l2_async_notifier_operations { + int (*bound)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc); + int (*complete)(struct v4l2_async_notifier *notifier); + void (*unbind)(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asc); + void (*destroy)(struct v4l2_async_connection *asc); +}; + +/** + * struct v4l2_async_notifier - v4l2_device notifier data + * + * @ops: notifier operations + * @v4l2_dev: v4l2_device of the root notifier, NULL otherwise + * @sd: sub-device that registered the notifier, NULL otherwise + * @parent: parent notifier + * @waiting_list: list of struct v4l2_async_connection, waiting for their + * drivers + * @done_list: list of struct v4l2_subdev, already probed + * @notifier_entry: member in a global list of notifiers + */ +struct v4l2_async_notifier { + const struct v4l2_async_notifier_operations *ops; + struct v4l2_device *v4l2_dev; + struct v4l2_subdev *sd; + struct v4l2_async_notifier *parent; + struct list_head waiting_list; + struct list_head done_list; + struct list_head notifier_entry; +}; + +/** + * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list + * + * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of + * &struct v4l2_subdev + * @endpoint: Endpoint fwnode agains which to match the sub-device + */ +struct v4l2_async_subdev_endpoint { + struct list_head async_subdev_endpoint_entry; + struct fwnode_handle *endpoint; +}; + +/** + * v4l2_async_debug_init - Initialize debugging tools. + * + * @debugfs_dir: pointer to the parent debugfs &struct dentry + */ +void v4l2_async_debug_init(struct dentry *debugfs_dir); + +/** + * v4l2_async_nf_init - Initialize a notifier. 
+ * + * @notifier: pointer to &struct v4l2_async_notifier + * @v4l2_dev: pointer to &struct v4l2_device + * + * This function initializes the notifier @asc_entry. It must be called + * before adding a subdevice to a notifier, using one of: + * v4l2_async_nf_add_fwnode_remote(), + * v4l2_async_nf_add_fwnode() or + * v4l2_async_nf_add_i2c(). + */ +void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev); + +/** + * v4l2_async_subdev_nf_init - Initialize a sub-device notifier. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @sd: pointer to &struct v4l2_subdev + * + * This function initializes the notifier @asc_list. It must be called + * before adding a subdevice to a notifier, using one of: + * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or + * v4l2_async_nf_add_i2c(). + */ +void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd); + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier, + struct fwnode_handle *fwnode, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_fwnode - Allocate and add a fwnode async + * subdev to the notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @fwnode: fwnode handle of the sub-device to be matched, pointer to + * &struct fwnode_handle + * @type: Type of the driver's async sub-device or connection struct. The + * &struct v4l2_async_connection shall be the first member of the + * driver's async struct, i.e. both begin at the same memory address. + * + * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the + * notifiers @asc_list. The function also gets a reference of the fwnode which + * is released later at notifier cleanup time. 
+ */ +#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \ + ((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type))) + +struct v4l2_async_connection * +__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode + * remote async subdev to the + * notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @ep: local endpoint pointing to the remote connection to be matched, + * pointer to &struct fwnode_handle + * @type: Type of the driver's async connection struct. The &struct + * v4l2_async_connection shall be the first member of the driver's async + * connection struct, i.e. both begin at the same memory address. + * + * Gets the remote endpoint of a given local endpoint, set it up for fwnode + * matching and adds the async connection to the notifier's @asc_list. The + * function also gets a reference of the fwnode which is released later at + * notifier cleanup time. + * + * This is just like v4l2_async_nf_add_fwnode(), but with the + * exception that the fwnode refers to a local endpoint, not the remote one. + */ +#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \ + ((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type))) + +struct v4l2_async_connection * +__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, + int adapter_id, unsigned short address, + unsigned int asc_struct_size); +/** + * v4l2_async_nf_add_i2c - Allocate and add an i2c async + * subdev to the notifier's master asc_list. + * + * @notifier: pointer to &struct v4l2_async_notifier + * @adapter: I2C adapter ID to be matched + * @address: I2C address of connection to be matched + * @type: Type of the driver's async connection struct. The &struct + * v4l2_async_connection shall be the first member of the driver's async + * connection struct, i.e. 
both begin at the same memory address. + * + * Same as v4l2_async_nf_add_fwnode() but for I2C matched + * connections. + */ +#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \ + ((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \ + sizeof(type))) + +/** + * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device + * matching list + * + * @sd: the sub-device + * @fwnode: the endpoint fwnode to match + * + * Add a fwnode to the async sub-device's matching list. This allows registering + * multiple async sub-devices from a single device. + * + * Note that calling v4l2_subdev_cleanup() as part of the sub-device's cleanup + * if endpoints have been added to the sub-device's fwnode matching list. + * + * Returns an error on failure, 0 on success. + */ +int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd, + struct fwnode_handle *fwnode); + +/** + * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection + * for a sub-device + * @sd: the sub-device + * + * Return an async connection for a sub-device, when there is a single + * one only. + */ +struct v4l2_async_connection * +v4l2_async_connection_unique(struct v4l2_subdev *sd); + +/** + * v4l2_async_nf_register - registers a subdevice asynchronous notifier + * + * @notifier: pointer to &struct v4l2_async_notifier + */ +int v4l2_async_nf_register(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_nf_unregister - unregisters a subdevice + * asynchronous notifier + * + * @notifier: pointer to &struct v4l2_async_notifier + */ +void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_nf_cleanup - clean up notifier resources + * @notifier: the notifier the resources of which are to be cleaned up + * + * Release memory resources related to a notifier, including the async + * connections allocated for the purposes of the notifier but not the notifier + * itself. 
The user is responsible for calling this function to clean up the + * notifier after calling v4l2_async_nf_add_fwnode_remote(), + * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c(). + * + * There is no harm from calling v4l2_async_nf_cleanup() in other + * cases as long as its memory has been zeroed after it has been + * allocated. + */ +void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier); + +/** + * v4l2_async_register_subdev - registers a sub-device to the asynchronous + * subdevice framework + * + * @sd: pointer to &struct v4l2_subdev + */ +#define v4l2_async_register_subdev(sd) \ + __v4l2_async_register_subdev(sd, THIS_MODULE) +int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module); + +/** + * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the + * asynchronous sub-device framework and + * parse set up common sensor related + * devices + * + * @sd: pointer to struct &v4l2_subdev + * + * This function is just like v4l2_async_register_subdev() with the exception + * that calling it will also parse firmware interfaces for remote references + * using v4l2_async_nf_parse_fwnode_sensor() and registers the + * async sub-devices. The sub-device is similarly unregistered by calling + * v4l2_async_unregister_subdev(). + * + * While registered, the subdev module is marked as in-use. + * + * An error is returned if the module is no longer loaded on any attempts + * to register it. 
+ */ +int __must_check +v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd); + +/** + * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous + * subdevice framework + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_async_unregister_subdev(struct v4l2_subdev *sd); +#endif diff --git a/6.18.0/include-overrides/media/v4l2-cci.h b/6.18.0/include-overrides/media/v4l2-cci.h new file mode 100644 index 0000000..4e96e90 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-cci.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MIPI Camera Control Interface (CCI) register access helpers. + * + * Copyright (C) 2023 Hans de Goede + */ +#ifndef _V4L2_CCI_H +#define _V4L2_CCI_H + +#include +#include +#include + +struct i2c_client; +struct regmap; + +/** + * struct cci_reg_sequence - An individual write from a sequence of CCI writes + * + * @reg: Register address, use CCI_REG#() macros to encode reg width + * @val: Register value + * + * Register/value pairs for sequences of writes. + */ +struct cci_reg_sequence { + u32 reg; + u64 val; +}; + +/* + * Macros to define register address with the register width encoded + * into the higher bits. + */ +#define CCI_REG_ADDR_MASK GENMASK(15, 0) +#define CCI_REG_WIDTH_SHIFT 16 +#define CCI_REG_WIDTH_MASK GENMASK(19, 16) +/* + * Private CCI register flags, for the use of drivers. 
+ */ +#define CCI_REG_PRIVATE_SHIFT 28U +#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT) + +#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x) +#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3) +#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x) +#define CCI_REG_LE BIT(20) + +#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x)) + +/** + * cci_read() - Read a value from a single CCI register + * + * @map: Register map to read from + * @reg: Register address to read, use CCI_REG#() macros to encode reg width + * @val: Pointer to store read value + * @err: Optional pointer to store errors, if a previous error is set + * then the read will be skipped + * + * Return: %0 on success or a negative error code on failure. + */ +int cci_read(struct regmap *map, u32 reg, u64 *val, int *err); + +/** + * cci_write() - Write a value to a single CCI register + * + * @map: Register map to write to + * @reg: Register address to write, use CCI_REG#() macros to encode reg width + * @val: Value to be written + * @err: Optional pointer to store errors, if a previous error is set + * then the write will be skipped + * + * Return: %0 on success or a negative error code on failure. 
+ */ +int cci_write(struct regmap *map, u32 reg, u64 val, int *err); + +/** + * cci_update_bits() - Perform a read/modify/write cycle on + * a single CCI register + * + * @map: Register map to update + * @reg: Register address to update, use CCI_REG#() macros to encode reg width + * @mask: Bitmask to change + * @val: New value for bitmask + * @err: Optional pointer to store errors, if a previous error is set + * then the update will be skipped + * + * Note this uses read-modify-write to update the bits, atomicity with regards + * to other cci_*() register access functions is NOT guaranteed. + * + * Return: %0 on success or a negative error code on failure. + */ +int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err); + +/** + * cci_multi_reg_write() - Write multiple registers to the device + * + * @map: Register map to write to + * @regs: Array of structures containing register-address, -value pairs to be + * written, register-addresses use CCI_REG#() macros to encode reg width + * @num_regs: Number of registers to write + * @err: Optional pointer to store errors, if a previous error is set + * then the write will be skipped + * + * Write multiple registers to the device where the set of register, value + * pairs are supplied in any order, possibly not all in a single range. + * + * Use of the CCI_REG#() macros to encode reg width is mandatory. + * + * For raw lists of register-address, -value pairs with only 8 bit + * wide writes regmap_multi_reg_write() can be used instead. + * + * Return: %0 on success or a negative error code on failure. 
+ */ +int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs, + unsigned int num_regs, int *err); + +#if IS_ENABLED(CONFIG_V4L2_CCI_I2C) +/** + * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register + * access functions + * + * @client: i2c_client to create the regmap for + * @reg_addr_bits: register address width to use (8 or 16) + * + * Note the memory for the created regmap is devm() managed, tied to the client. + * + * Return: %0 on success or a negative error code on failure. + */ +struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client, + int reg_addr_bits); +#endif + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-common.h b/6.18.0/include-overrides/media/v4l2-common.h new file mode 100644 index 0000000..5c0a7f6 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-common.h @@ -0,0 +1,737 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + v4l2 common internal API header + + This header contains internal shared ioctl definitions for use by the + internal low-level v4l2 drivers. + Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal + define, + + Copyright (C) 2005 Hans Verkuil + + */ + +#ifndef V4L2_COMMON_H_ +#define V4L2_COMMON_H_ + +#include +#include + +/* Common printk constructs for v4l-i2c drivers. These macros create a unique + prefix consisting of the driver name, the adapter number and the i2c + address. */ +#define v4l_printk(level, name, adapter, addr, fmt, arg...) \ + printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg) + +#define v4l_client_printk(level, client, fmt, arg...) \ + v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \ + (client)->addr, fmt , ## arg) + +#define v4l_err(client, fmt, arg...) \ + v4l_client_printk(KERN_ERR, client, fmt , ## arg) + +#define v4l_warn(client, fmt, arg...) \ + v4l_client_printk(KERN_WARNING, client, fmt , ## arg) + +#define v4l_info(client, fmt, arg...) 
\ + v4l_client_printk(KERN_INFO, client, fmt , ## arg) + +/* These three macros assume that the debug level is set with a module + parameter called 'debug'. */ +#define v4l_dbg(level, debug, client, fmt, arg...) \ + do { \ + if (debug >= (level)) \ + v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \ + } while (0) + +/* Add a version of v4l_dbg to be used on drivers using dev_foo() macros */ +#define dev_dbg_lvl(__dev, __level, __debug, __fmt, __arg...) \ + do { \ + if (__debug >= (__level)) \ + dev_printk(KERN_DEBUG, __dev, __fmt, ##__arg); \ + } while (0) + +/* ------------------------------------------------------------------------- */ + +/* These printk constructs can be used with v4l2_device and v4l2_subdev */ +#define v4l2_printk(level, dev, fmt, arg...) \ + printk(level "%s: " fmt, (dev)->name , ## arg) + +#define v4l2_err(dev, fmt, arg...) \ + v4l2_printk(KERN_ERR, dev, fmt , ## arg) + +#define v4l2_warn(dev, fmt, arg...) \ + v4l2_printk(KERN_WARNING, dev, fmt , ## arg) + +#define v4l2_info(dev, fmt, arg...) \ + v4l2_printk(KERN_INFO, dev, fmt , ## arg) + +/* These three macros assume that the debug level is set with a module + parameter called 'debug'. */ +#define v4l2_dbg(level, debug, dev, fmt, arg...) \ + do { \ + if (debug >= (level)) \ + v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \ + } while (0) + +/** + * v4l2_ctrl_query_fill- Fill in a struct v4l2_queryctrl + * + * @qctrl: pointer to the &struct v4l2_queryctrl to be filled + * @min: minimum value for the control + * @max: maximum value for the control + * @step: control step + * @def: default value for the control + * + * Fills the &struct v4l2_queryctrl fields for the query control. + * + * .. note:: + * + * This function assumes that the @qctrl->id field is filled. + * + * Returns -EINVAL if the control is not known by the V4L2 core, 0 on success. 
+ */ + +int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, + s32 min, s32 max, s32 step, s32 def); + +/* ------------------------------------------------------------------------- */ + +struct clk; +struct v4l2_device; +struct v4l2_subdev; +struct v4l2_subdev_ops; + +/* I2C Helper functions */ +#include + +/** + * enum v4l2_i2c_tuner_type - specifies the range of tuner address that + * should be used when seeking for I2C devices. + * + * @ADDRS_RADIO: Radio tuner addresses. + * Represent the following I2C addresses: + * 0x10 (if compiled with tea5761 support) + * and 0x60. + * @ADDRS_DEMOD: Demod tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a and 0x4b. + * @ADDRS_TV: TV tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62, + * 0x63 and 0x64. + * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this + * excludes addresses used by the demodulator + * from the list of candidates. + * Represent the following I2C addresses: + * 0x60, 0x61, 0x62, 0x63 and 0x64. + * + * NOTE: All I2C addresses above use the 7-bit notation. + */ +enum v4l2_i2c_tuner_type { + ADDRS_RADIO, + ADDRS_DEMOD, + ADDRS_TV, + ADDRS_TV_WITH_DEMOD, +}; + +#if defined(CONFIG_VIDEO_V4L2_I2C) + +/** + * v4l2_i2c_new_subdev - Load an i2c module and return an initialized + * &struct v4l2_subdev. + * + * @v4l2_dev: pointer to &struct v4l2_device + * @adapter: pointer to struct i2c_adapter + * @client_type: name of the chip that's on the adapter. + * @addr: I2C address. If zero, it will use @probe_addrs + * @probe_addrs: array with a list of address. The last entry at such + * array should be %I2C_CLIENT_END. + * + * returns a &struct v4l2_subdev pointer. 
+ */ +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs); + +/** + * v4l2_i2c_new_subdev_board - Load an i2c module and return an initialized + * &struct v4l2_subdev. + * + * @v4l2_dev: pointer to &struct v4l2_device + * @adapter: pointer to struct i2c_adapter + * @info: pointer to struct i2c_board_info used to replace the irq, + * platform_data and addr arguments. + * @probe_addrs: array with a list of address. The last entry at such + * array should be %I2C_CLIENT_END. + * + * returns a &struct v4l2_subdev pointer. + */ +struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs); + +/** + * v4l2_i2c_subdev_set_name - Set name for an I²C sub-device + * + * @sd: pointer to &struct v4l2_subdev + * @client: pointer to struct i2c_client + * @devname: the name of the device; if NULL, the I²C device drivers's name + * will be used + * @postfix: sub-device specific string to put right after the I²C device name; + * may be NULL + */ +void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix); + +/** + * v4l2_i2c_subdev_init - Initializes a &struct v4l2_subdev with data from + * an i2c_client struct. + * + * @sd: pointer to &struct v4l2_subdev + * @client: pointer to struct i2c_client + * @ops: pointer to &struct v4l2_subdev_ops + */ +void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops); + +/** + * v4l2_i2c_subdev_addr - returns i2c client address of &struct v4l2_subdev. + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the address of an I2C sub-device + */ +unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd); + +/** + * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe. 
+ * + * @type: type of the tuner to seek, as defined by + * &enum v4l2_i2c_tuner_type. + * + * NOTE: Use only if the tuner addresses are unknown. + */ +const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); + +/** + * v4l2_i2c_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline void +v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix) +{} + +static inline void +v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{} + +static inline unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + return I2C_CLIENT_END; +} + +static inline const unsigned short * +v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + return NULL; +} + +static inline void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{} + +#endif + +/* ------------------------------------------------------------------------- */ + +/* SPI Helper functions */ + +#include + +#if defined(CONFIG_SPI) + +/** + * v4l2_spi_new_subdev - Load an spi module and return an initialized + * &struct v4l2_subdev. + * + * + * @v4l2_dev: pointer to &struct v4l2_device. + * @ctlr: pointer to struct spi_controller. + * @info: pointer to struct spi_board_info. + * + * returns a &struct v4l2_subdev pointer. 
+ */ +struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, struct spi_board_info *info); + +/** + * v4l2_spi_subdev_init - Initialize a v4l2_subdev with data from an + * spi_device struct. + * + * @sd: pointer to &struct v4l2_subdev + * @spi: pointer to struct spi_device. + * @ops: pointer to &struct v4l2_subdev_ops + */ +void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops); + +/** + * v4l2_spi_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_controller *ctlr, struct spi_board_info *info) +{ + return NULL; +} + +static inline void +v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{} + +static inline void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{} +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * FIXME: these remaining ioctls/structs should be removed as well, but they + * are still used in tuner-simple.c (TUNER_SET_CONFIG) and cx18/ivtv (RESET). + * To remove these ioctls some more cleanup is needed in those modules. + * + * It doesn't make much sense on documenting them, as what we really want is + * to get rid of them. + */ + +/* s_config */ +struct v4l2_priv_tun_config { + int tuner; + void *priv; +}; +#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) + +#define VIDIOC_INT_RESET _IOW ('d', 102, u32) + +/* ------------------------------------------------------------------------- */ + +/* Miscellaneous helper functions */ + +/** + * v4l_bound_align_image - adjust video dimensions according to + * a given constraints. + * + * @width: pointer to width that will be adjusted if needed. + * @wmin: minimum width. 
+ * @wmax: maximum width. + * @walign: least significant bit on width. + * @height: pointer to height that will be adjusted if needed. + * @hmin: minimum height. + * @hmax: maximum height. + * @halign: least significant bit on height. + * @salign: least significant bit for the image size (e. g. + * :math:`width * height`). + * + * Clip an image to have @width between @wmin and @wmax, and @height between + * @hmin and @hmax, inclusive. + * + * Additionally, the @width will be a multiple of :math:`2^{walign}`, + * the @height will be a multiple of :math:`2^{halign}`, and the overall + * size :math:`width * height` will be a multiple of :math:`2^{salign}`. + * + * .. note:: + * + * #. The clipping rectangle may be shrunk or enlarged to fit the alignment + * constraints. + * #. @wmax must not be smaller than @wmin. + * #. @hmax must not be smaller than @hmin. + * #. The alignments must not be so high there are no possible image + * sizes within the allowed bounds. + * #. @wmin and @hmin must be at least 1 (don't use 0). + * #. For @walign, @halign and @salign, if you don't care about a certain + * alignment, specify ``0``, as :math:`2^0 = 1` and one byte alignment + * is equivalent to no alignment. + * #. If you only want to adjust downward, specify a maximum that's the + * same as the initial value. 
+ */ +void v4l_bound_align_image(unsigned int *width, unsigned int wmin, + unsigned int wmax, unsigned int walign, + unsigned int *height, unsigned int hmin, + unsigned int hmax, unsigned int halign, + unsigned int salign); + +/** + * v4l2_find_nearest_size_conditional - Find the nearest size among a discrete + * set of resolutions contained in an array of a driver specific struct, + * with conditionally exlusion of certain modes + * + * @array: a driver specific array of image sizes + * @array_size: the length of the driver specific array of image sizes + * @width_field: the name of the width field in the driver specific struct + * @height_field: the name of the height field in the driver specific struct + * @width: desired width + * @height: desired height + * @func: ignores mode if returns false + * @context: context for the function + * + * Finds the closest resolution to minimize the width and height differences + * between what requested and the supported resolutions. The size of the width + * and height fields in the driver specific must equal to that of u32, i.e. four + * bytes. @func is called for each mode considered, a mode is ignored if @func + * returns false for it. + * + * Returns the best match or NULL if the length of the array is zero. 
+ */ +#define v4l2_find_nearest_size_conditional(array, array_size, width_field, \ + height_field, width, height, \ + func, context) \ + ({ \ + BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \ + sizeof((array)->height_field) != sizeof(u32)); \ + (typeof(&(array)[0]))__v4l2_find_nearest_size_conditional( \ + (array), array_size, sizeof(*(array)), \ + offsetof(typeof(*(array)), width_field), \ + offsetof(typeof(*(array)), height_field), \ + width, height, func, context); \ + }) +const void * +__v4l2_find_nearest_size_conditional(const void *array, size_t array_size, + size_t entry_size, size_t width_offset, + size_t height_offset, s32 width, + s32 height, + bool (*func)(const void *array, + size_t index, + const void *context), + const void *context); + +/** + * v4l2_find_nearest_size - Find the nearest size among a discrete set of + * resolutions contained in an array of a driver specific struct + * + * @array: a driver specific array of image sizes + * @array_size: the length of the driver specific array of image sizes + * @width_field: the name of the width field in the driver specific struct + * @height_field: the name of the height field in the driver specific struct + * @width: desired width + * @height: desired height + * + * Finds the closest resolution to minimize the width and height differences + * between what requested and the supported resolutions. The size of the width + * and height fields in the driver specific must equal to that of u32, i.e. four + * bytes. + * + * Returns the best match or NULL if the length of the array is zero. + */ +#define v4l2_find_nearest_size(array, array_size, width_field, \ + height_field, width, height) \ + v4l2_find_nearest_size_conditional(array, array_size, width_field, \ + height_field, width, height, NULL, \ + NULL) + +/** + * v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by + * calling the get_frame_interval op of the given subdev. 
It only works + * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the + * function name. + * + * @vdev: the struct video_device pointer. Used to determine the device caps. + * @sd: the sub-device pointer. + * @a: the VIDIOC_G_PARM argument. + */ +int v4l2_g_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a); + +/** + * v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by + * calling the set_frame_interval op of the given subdev. It only works + * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the + * function name. + * + * @vdev: the struct video_device pointer. Used to determine the device caps. + * @sd: the sub-device pointer. + * @a: the VIDIOC_S_PARM argument. + */ +int v4l2_s_parm_cap(struct video_device *vdev, + struct v4l2_subdev *sd, struct v4l2_streamparm *a); + +/* Compare two v4l2_fract structs */ +#define V4L2_FRACT_COMPARE(a, OP, b) \ + ((u64)(a).numerator * (b).denominator OP \ + (u64)(b).numerator * (a).denominator) + +/* ------------------------------------------------------------------------- */ + +/* Pixel format and FourCC helpers */ + +/** + * enum v4l2_pixel_encoding - specifies the pixel encoding value + * + * @V4L2_PIXEL_ENC_UNKNOWN: Pixel encoding is unknown/un-initialized + * @V4L2_PIXEL_ENC_YUV: Pixel encoding is YUV + * @V4L2_PIXEL_ENC_RGB: Pixel encoding is RGB + * @V4L2_PIXEL_ENC_BAYER: Pixel encoding is Bayer + */ +enum v4l2_pixel_encoding { + V4L2_PIXEL_ENC_UNKNOWN = 0, + V4L2_PIXEL_ENC_YUV = 1, + V4L2_PIXEL_ENC_RGB = 2, + V4L2_PIXEL_ENC_BAYER = 3, +}; + +/** + * struct v4l2_format_info - information about a V4L2 format + * @format: 4CC format identifier (V4L2_PIX_FMT_*) + * @pixel_enc: Pixel encoding (see enum v4l2_pixel_encoding above) + * @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4). + * @comp_planes: Number of component planes, which includes the alpha plane (1 to 4). 
+ * @bpp: Array of per-plane bytes per pixel + * @bpp_div: Array of per-plane bytes per pixel divisors to support fractional pixel sizes. + * @hdiv: Horizontal chroma subsampling factor + * @vdiv: Vertical chroma subsampling factor + * @block_w: Per-plane macroblock pixel width (optional) + * @block_h: Per-plane macroblock pixel height (optional) + */ +struct v4l2_format_info { + u32 format; + u8 pixel_enc; + u8 mem_planes; + u8 comp_planes; + u8 bpp[4]; + u8 bpp_div[4]; + u8 hdiv; + u8 vdiv; + u8 block_w[4]; + u8 block_h[4]; +}; + +static inline bool v4l2_is_format_rgb(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_RGB; +} + +static inline bool v4l2_is_format_yuv(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_YUV; +} + +static inline bool v4l2_is_format_bayer(const struct v4l2_format_info *f) +{ + return f && f->pixel_enc == V4L2_PIXEL_ENC_BAYER; +} + +const struct v4l2_format_info *v4l2_format_info(u32 format); +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize); +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height); +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, + u32 width, u32 height); + +/** + * v4l2_get_link_freq - Get link rate from transmitter + * + * @pad: The transmitter's media pad + * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on + * D-PHY, samples per clock on parallel. 0 otherwise. + * @div: The divisor between pixel rate and link frequency. Number of data lanes + * times two on D-PHY, 1 on parallel. 0 otherwise. + * + * This function obtains and returns the link frequency from the transmitter + * sub-device's pad. The link frequency is retrieved using the get_mbus_config + * sub-device pad operation. 
If this fails, the function falls back to obtaining + * the frequency either directly from the V4L2_CID_LINK_FREQ control if + * implemented by the transmitter, or by calculating it from the pixel rate + * obtained from the V4L2_CID_PIXEL_RATE control. + * + * Return: + * * >0: Link frequency + * * %-ENOENT: Link frequency or pixel rate control not found + * * %-EINVAL: Invalid link frequency value + */ +#ifdef CONFIG_MEDIA_CONTROLLER +s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul, + unsigned int div); +#endif + +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold); +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator); + +/** + * v4l2_link_freq_to_bitmap - Figure out platform-supported link frequencies + * @dev: The struct device + * @fw_link_freqs: Array of link frequencies from firmware + * @num_of_fw_link_freqs: Number of entries in @fw_link_freqs + * @driver_link_freqs: Array of link frequencies supported by the driver + * @num_of_driver_link_freqs: Number of entries in @driver_link_freqs + * @bitmap: Bitmap of driver-supported link frequencies found in @fw_link_freqs + * + * This function checks which driver-supported link frequencies are enabled in + * system firmware and sets the corresponding bits in @bitmap (after first + * zeroing it). + * + * Return: + * * %0: Success + * * %-ENOENT: No match found between driver-supported link frequencies and + * those available in firmware. + * * %-ENODATA: No link frequencies were specified in firmware. 
+ */ +int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, + unsigned int num_of_fw_link_freqs, + const s64 *driver_link_freqs, + unsigned int num_of_driver_link_freqs, + unsigned long *bitmap); + +struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id, + bool legacy, bool fixed_rate, + unsigned long clk_rate); + +/** + * devm_v4l2_sensor_clk_get - lookup and obtain a reference to a clock producer + * for a camera sensor + * + * @dev: device for v4l2 sensor clock "consumer" + * @id: clock consumer ID + * + * This function behaves the same way as devm_clk_get() except where there + * is no clock producer like in ACPI-based platforms. + * + * For ACPI-based platforms, the function will read the "clock-frequency" + * ACPI _DSD property and register a fixed-clock with the frequency indicated + * in the property. + * + * This function also handles the special ACPI-based system case where: + * + * * The clock-frequency _DSD property is present. + * * A reference to the clock producer is present, where the clock is provided + * by a camera sensor PMIC driver (e.g. int3472/tps68470.c) + * + * In this case try to set the clock-frequency value to the provided clock. + * + * As the name indicates, this function may only be used on camera sensor + * devices. This is because generally only camera sensors do need a clock to + * query the frequency from, due to the requirement to configure the PLL for a + * given CSI-2 interface frequency where the sensor's external clock frequency + * is a factor. Additionally, the clock frequency tends to be available on ACPI + * firmware based systems for camera sensors specifically (if e.g. DisCo for + * Imaging compliant). + * + * Returns a pointer to a struct clk on success or an error pointer on failure. 
+ */ +static inline struct clk * +devm_v4l2_sensor_clk_get(struct device *dev, const char *id) +{ + return __devm_v4l2_sensor_clk_get(dev, id, false, false, 0); +} + +/** + * devm_v4l2_sensor_clk_get_legacy - lookup and obtain a reference to a clock + * producer for a camera sensor. + * + * @dev: device for v4l2 sensor clock "consumer" + * @id: clock consumer ID + * @fixed_rate: interpret the @clk_rate as a fixed rate or default rate + * @clk_rate: the clock rate + * + * This function behaves the same way as devm_v4l2_sensor_clk_get() except that + * it extends the behaviour on ACPI platforms to all platforms. + * + * The function also provides the ability to set the clock rate to a fixed + * frequency by setting @fixed_rate to true and specifying the fixed frequency + * in @clk_rate, or to use a default clock rate when the "clock-frequency" + * property is absent by setting @fixed_rate to false and specifying the default + * frequency in @clk_rate. Setting @fixed_rate to true and @clk_rate to 0 is an + * error. + * + * This function is meant to support legacy behaviour in existing drivers only. + * It must not be used in any new driver. + * + * Returns a pointer to a struct clk on success or an error pointer on failure. + */ +static inline struct clk * +devm_v4l2_sensor_clk_get_legacy(struct device *dev, const char *id, + bool fixed_rate, unsigned long clk_rate) +{ + return __devm_v4l2_sensor_clk_get(dev, id, true, fixed_rate, clk_rate); +} + +static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf) +{ + /* + * When the timestamp comes from 32-bit user space, there may be + * uninitialized data in tv_usec, so cast it to u32. + * Otherwise allow invalid input for backwards compatibility. 
+ */ + return buf->timestamp.tv_sec * NSEC_PER_SEC + + (u32)buf->timestamp.tv_usec * NSEC_PER_USEC; +} + +static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf, + u64 timestamp) +{ + struct timespec64 ts = ns_to_timespec64(timestamp); + + buf->timestamp.tv_sec = ts.tv_sec; + buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} + +static inline bool v4l2_is_colorspace_valid(__u32 colorspace) +{ + return colorspace > V4L2_COLORSPACE_DEFAULT && + colorspace < V4L2_COLORSPACE_LAST; +} + +static inline bool v4l2_is_xfer_func_valid(__u32 xfer_func) +{ + return xfer_func > V4L2_XFER_FUNC_DEFAULT && + xfer_func < V4L2_XFER_FUNC_LAST; +} + +static inline bool v4l2_is_ycbcr_enc_valid(__u8 ycbcr_enc) +{ + return ycbcr_enc > V4L2_YCBCR_ENC_DEFAULT && + ycbcr_enc < V4L2_YCBCR_ENC_LAST; +} + +static inline bool v4l2_is_hsv_enc_valid(__u8 hsv_enc) +{ + return hsv_enc == V4L2_HSV_ENC_180 || hsv_enc == V4L2_HSV_ENC_256; +} + +static inline bool v4l2_is_quant_valid(__u8 quantization) +{ + return quantization == V4L2_QUANTIZATION_FULL_RANGE || + quantization == V4L2_QUANTIZATION_LIM_RANGE; +} + +#endif /* V4L2_COMMON_H_ */ diff --git a/6.18.0/include-overrides/media/v4l2-ctrls.h b/6.18.0/include-overrides/media/v4l2-ctrls.h new file mode 100644 index 0000000..31fc1be --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-ctrls.h @@ -0,0 +1,1633 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 controls support header. + * + * Copyright (C) 2010 Hans Verkuil + */ + +#ifndef _V4L2_CTRLS_H +#define _V4L2_CTRLS_H + +#include +#include +#include +#include + +/* forward references */ +struct file; +struct poll_table_struct; +struct v4l2_ctrl; +struct v4l2_ctrl_handler; +struct v4l2_ctrl_helper; +struct v4l2_fh; +struct v4l2_fwnode_device_properties; +struct v4l2_subdev; +struct v4l2_subscribed_event; +struct video_device; + +/** + * union v4l2_ctrl_ptr - A pointer to a control value. + * @p_s32: Pointer to a 32-bit signed value. 
+ * @p_s64: Pointer to a 64-bit signed value. + * @p_u8: Pointer to a 8-bit unsigned value. + * @p_u16: Pointer to a 16-bit unsigned value. + * @p_u32: Pointer to a 32-bit unsigned value. + * @p_char: Pointer to a string. + * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure. + * @p_mpeg2_picture: Pointer to a MPEG2 picture structure. + * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure. + * @p_fwht_params: Pointer to a FWHT stateless parameters structure. + * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. + * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. + * @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix. + * @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params. + * @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params. + * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights. + * @p_vp8_frame: Pointer to a VP8 frame params structure. + * @p_vp9_compressed_hdr_probs: Pointer to a VP9 frame compressed header probs structure. + * @p_vp9_frame: Pointer to a VP9 frame params structure. + * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure. + * @p_hevc_pps: Pointer to an HEVC picture parameter set structure. + * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure. + * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure. + * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure. + * @p_area: Pointer to an area. + * @p_av1_sequence: Pointer to an AV1 sequence structure. + * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure. + * @p_av1_frame: Pointer to an AV1 frame structure. + * @p_av1_film_grain: Pointer to an AV1 film grain structure. + * @p_rect: Pointer to a rectangle. + * @p: Pointer to a compound value. + * @p_const: Pointer to a constant compound value. 
+ */ +union v4l2_ctrl_ptr { + s32 *p_s32; + s64 *p_s64; + u8 *p_u8; + u16 *p_u16; + u32 *p_u32; + char *p_char; + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; + struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; + struct v4l2_area *p_area; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry; + struct v4l2_ctrl_av1_frame *p_av1_frame; + struct v4l2_ctrl_av1_film_grain *p_av1_film_grain; + struct v4l2_rect *p_rect; + void *p; + const void *p_const; +}; + +/** + * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a + * void pointer + * @ptr: The void pointer + */ +static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr) +{ + union v4l2_ctrl_ptr p = { .p = ptr }; + + return p; +} + +/** + * struct v4l2_ctrl_ops - The control operations that the driver has to provide. + * + * @g_volatile_ctrl: Get a new value for this control. Generally only relevant + * for volatile (and usually read-only) controls such as a control + * that returns the current signal strength which changes + * continuously. 
+ * If not set, then the currently cached value will be returned. + * @try_ctrl: Test whether the control's value is valid. Only relevant when + * the usual min/max/step checks are not sufficient. + * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The + * ctrl->handler->lock is held when these ops are called, so no + * one else can access controls owned by that handler. + */ +struct v4l2_ctrl_ops { + int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl); + int (*try_ctrl)(struct v4l2_ctrl *ctrl); + int (*s_ctrl)(struct v4l2_ctrl *ctrl); +}; + +/** + * struct v4l2_ctrl_type_ops - The control type operations that the driver + * has to provide. + * + * @equal: return true if all ctrl->elems array elements are equal. + * @init: initialize the value for array elements from from_idx to ctrl->elems. + * @minimum: set the value to the minimum value of the control. + * @maximum: set the value to the maximum value of the control. + * @log: log the value. + * @validate: validate the value for ctrl->new_elems array elements. + * Return 0 on success and a negative value otherwise. + */ +struct v4l2_ctrl_type_ops { + bool (*equal)(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr); + void (*minimum)(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr); + void (*maximum)(const struct v4l2_ctrl *ctrl, u32 idx, + union v4l2_ctrl_ptr ptr); + void (*log)(const struct v4l2_ctrl *ctrl); + int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); +}; + +/** + * typedef v4l2_ctrl_notify_fnc - typedef for a notify argument with a function + * that should be called when a control value has changed. + * + * @ctrl: pointer to struct &v4l2_ctrl + * @priv: control private data + * + * This typedef definition is used as an argument to v4l2_ctrl_notify() + * and as an argument at struct &v4l2_ctrl_handler. 
+ */ +typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); + +/** + * struct v4l2_ctrl - The control structure. + * + * @node: The list node. + * @ev_subs: The list of control event subscriptions. + * @handler: The handler that owns the control. + * @cluster: Point to start of cluster array. + * @ncontrols: Number of controls in cluster array. + * @done: Internal flag: set for each processed control. + * @is_new: Set when the user specified a new value for this control. It + * is also set when called from v4l2_ctrl_handler_setup(). Drivers + * should never set this flag. + * @has_changed: Set when the current value differs from the new value. Drivers + * should never use this flag. + * @is_private: If set, then this control is private to its handler and it + * will not be added to any other handlers. Drivers can set + * this flag. + * @is_auto: If set, then this control selects whether the other cluster + * members are in 'automatic' mode or 'manual' mode. This is + * used for autogain/gain type clusters. Drivers should never + * set this flag directly. + * @is_int: If set, then this control has a simple integer value (i.e. it + * uses ctrl->val). + * @is_string: If set, then this control has type %V4L2_CTRL_TYPE_STRING. + * @is_ptr: If set, then this control is an array and/or has type >= + * %V4L2_CTRL_COMPOUND_TYPES + * and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct + * v4l2_ext_control uses field p to point to the data. + * @is_array: If set, then this control contains an N-dimensional array. + * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array. + * If this is set, then @is_array is also set. + * @has_volatiles: If set, then one or more members of the cluster are volatile. + * Drivers should never touch this flag. + * @call_notify: If set, then call the handler's notify function whenever the + * control's value changes. 
+ * @manual_mode_value: If the is_auto flag is set, then this is the value
+ * of the auto control that determines if that control is in
+ * manual mode. So if the value of the auto control equals this
+ * value, then the whole cluster is in manual mode. Drivers should
+ * never set this flag directly.
+ * @ops: The control ops.
+ * @type_ops: The control type ops.
+ * @id: The control ID.
+ * @name: The control name.
+ * @type: The control type.
+ * @minimum: The control's minimum value.
+ * @maximum: The control's maximum value.
+ * @default_value: The control's default value.
+ * @step: The control's step value for non-menu controls.
+ * @elems: The number of elements in the N-dimensional array.
+ * @elem_size: The size in bytes of the control.
+ * @new_elems: The number of elements in p_new. This is the same as @elems,
+ * except for dynamic arrays. In that case it is in the range of
+ * 1 to @p_array_alloc_elems.
+ * @dims: The size of each dimension.
+ * @nr_of_dims: The number of dimensions in @dims.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 32 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a u64 or a bit array.
+ * @qmenu: A const char * array for all menu items. Array entries that are
+ * empty strings ("") correspond to non-existing menu items (this
+ * is in addition to the menu_skip_mask above). The last entry
+ * must be NULL.
+ * Used only if the @type is %V4L2_CTRL_TYPE_MENU.
+ * @qmenu_int: A 64-bit integer array with integer menu items.
+ * The size of array must be equal to the menu size, e. g.:
+ * :math:`ceil(\frac{maximum - minimum}{step}) + 1`.
+ * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU.
+ * @flags: The control's flags.
+ * @priv: The control's private pointer. For use by the driver. It is + * untouched by the control framework. Note that this pointer is + * not freed when the control is deleted. Should this be needed + * then a new internal bitfield can be added to tell the framework + * to free this pointer. + * @p_array: Pointer to the allocated array. Only valid if @is_array is true. + * @p_array_alloc_elems: The number of elements in the allocated + * array for both the cur and new values. So @p_array is actually + * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if + * @is_array is true. + * @cur: Structure to store the current value. + * @cur.val: The control's current value, if the @type is represented via + * a u32 integer (see &enum v4l2_ctrl_type). + * @val: The control's new s32 value. + * @p_def: The control's default value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). + * @p_min: The control's minimum value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). + * @p_max: The control's maximum value represented via a union which + * provides a standard way of accessing control types + * through a pointer (for compound controls only). + * @p_cur: The control's current value represented via a union which + * provides a standard way of accessing control types + * through a pointer. + * @p_new: The control's new value represented via a union which provides + * a standard way of accessing control types + * through a pointer. 
+ */ +struct v4l2_ctrl { + /* Administrative fields */ + struct list_head node; + struct list_head ev_subs; + struct v4l2_ctrl_handler *handler; + struct v4l2_ctrl **cluster; + unsigned int ncontrols; + + unsigned int done:1; + + unsigned int is_new:1; + unsigned int has_changed:1; + unsigned int is_private:1; + unsigned int is_auto:1; + unsigned int is_int:1; + unsigned int is_string:1; + unsigned int is_ptr:1; + unsigned int is_array:1; + unsigned int is_dyn_array:1; + unsigned int has_volatiles:1; + unsigned int call_notify:1; + unsigned int manual_mode_value:8; + + const struct v4l2_ctrl_ops *ops; + const struct v4l2_ctrl_type_ops *type_ops; + u32 id; + const char *name; + enum v4l2_ctrl_type type; + s64 minimum, maximum, default_value; + u32 elems; + u32 elem_size; + u32 new_elems; + u32 dims[V4L2_CTRL_MAX_DIMS]; + u32 nr_of_dims; + union { + u64 step; + u64 menu_skip_mask; + }; + union { + const char * const *qmenu; + const s64 *qmenu_int; + }; + unsigned long flags; + void *priv; + void *p_array; + u32 p_array_alloc_elems; + s32 val; + struct { + s32 val; + } cur; + + union v4l2_ctrl_ptr p_def; + union v4l2_ctrl_ptr p_min; + union v4l2_ctrl_ptr p_max; + union v4l2_ctrl_ptr p_new; + union v4l2_ctrl_ptr p_cur; +}; + +/** + * struct v4l2_ctrl_ref - The control reference. + * + * @node: List node for the sorted list. + * @next: Single-link list node for the hash. + * @ctrl: The actual control information. + * @helper: Pointer to helper struct. Used internally in + * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``. + * @from_other_dev: If true, then @ctrl was defined in another + * device than the &struct v4l2_ctrl_handler. + * @req_done: Internal flag: if the control handler containing this control + * reference is bound to a media request, then this is set when + * the control has been applied. This prevents applying controls + * from a cluster with multiple controls twice (when the first + * control of a cluster is applied, they all are). 
+ * @p_req_valid: If set, then p_req contains the control value for the request. + * @p_req_array_enomem: If set, then p_req is invalid since allocating space for + * an array failed. Attempting to read this value shall + * result in ENOMEM. Only valid if ctrl->is_array is true. + * @p_req_array_alloc_elems: The number of elements allocated for the + * array. Only valid if @p_req_valid and ctrl->is_array are + * true. + * @p_req_elems: The number of elements in @p_req. This is the same as + * ctrl->elems, except for dynamic arrays. In that case it is in + * the range of 1 to @p_req_array_alloc_elems. Only valid if + * @p_req_valid is true. + * @p_req: If the control handler containing this control reference + * is bound to a media request, then this points to the + * value of the control that must be applied when the request + * is executed, or to the value of the control at the time + * that the request was completed. If @p_req_valid is false, + * then this control was never set for this request and the + * control will not be updated when this request is applied. + * + * Each control handler has a list of these refs. The list_head is used to + * keep a sorted-by-control-ID list of all controls, while the next pointer + * is used to link the control in the hash's bucket. + */ +struct v4l2_ctrl_ref { + struct list_head node; + struct v4l2_ctrl_ref *next; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_helper *helper; + bool from_other_dev; + bool req_done; + bool p_req_valid; + bool p_req_array_enomem; + u32 p_req_array_alloc_elems; + u32 p_req_elems; + union v4l2_ctrl_ptr p_req; +}; + +/** + * struct v4l2_ctrl_handler - The control handler keeps track of all the + * controls: both the controls owned by the handler and those inherited + * from other handlers. + * + * @_lock: Default for "lock". + * @lock: Lock to control access to this handler and its controls. + * May be replaced by the user right after init. + * @ctrls: The list of controls owned by this handler. 
+ * @ctrl_refs: The list of control references. + * @cached: The last found control reference. It is common that the same + * control is needed multiple times, so this is a simple + * optimization. + * @buckets: Buckets for the hashing. Allows for quick control lookup. + * @notify: A notify callback that is called whenever the control changes + * value. + * Note that the handler's lock is held when the notify function + * is called! + * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. + * @nr_of_buckets: Total number of buckets in the array. + * @error: The error code of the first failed control addition. + * @request_is_queued: True if the request was queued. + * @requests: List to keep track of open control handler request objects. + * For the parent control handler (@req_obj.ops == NULL) this + * is the list header. When the parent control handler is + * removed, it has to unbind and put all these requests since + * they refer to the parent. + * @requests_queued: List of the queued requests. This determines the order + * in which these controls are applied. Once the request is + * completed it is removed from this list. + * @req_obj: The &struct media_request_object, used to link into a + * &struct media_request. This request object has a refcount. + */ +struct v4l2_ctrl_handler { + struct mutex _lock; + struct mutex *lock; + struct list_head ctrls; + struct list_head ctrl_refs; + struct v4l2_ctrl_ref *cached; + struct v4l2_ctrl_ref **buckets; + v4l2_ctrl_notify_fnc notify; + void *notify_priv; + u16 nr_of_buckets; + int error; + bool request_is_queued; + struct list_head requests; + struct list_head requests_queued; + struct media_request_object req_obj; +}; + +/** + * struct v4l2_ctrl_config - Control configuration structure. + * + * @ops: The control ops. + * @type_ops: The control type ops. Only needed for compound controls. + * @id: The control ID. + * @name: The control name. + * @type: The control type. 
+ * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value for non-menu controls. + * @def: The control's default value. + * @p_def: The control's default value for compound controls. + * @p_min: The control's minimum value for compound controls. + * @p_max: The control's maximum value for compound controls. + * @dims: The size of each dimension. + * @elem_size: The size in bytes of the control. + * @flags: The control's flags. + * @menu_skip_mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @qmenu: A const char * array for all menu items. Array entries that are + * empty strings ("") correspond to non-existing menu items (this + * is in addition to the menu_skip_mask above). The last entry + * must be NULL. + * @qmenu_int: A const s64 integer array for all menu items of the type + * V4L2_CTRL_TYPE_INTEGER_MENU. + * @is_private: If set, then this control is private to its handler and it + * will not be added to any other handlers. + */ +struct v4l2_ctrl_config { + const struct v4l2_ctrl_ops *ops; + const struct v4l2_ctrl_type_ops *type_ops; + u32 id; + const char *name; + enum v4l2_ctrl_type type; + s64 min; + s64 max; + u64 step; + s64 def; + union v4l2_ctrl_ptr p_def; + union v4l2_ctrl_ptr p_min; + union v4l2_ctrl_ptr p_max; + u32 dims[V4L2_CTRL_MAX_DIMS]; + u32 elem_size; + u32 flags; + u64 menu_skip_mask; + const char * const *qmenu; + const s64 *qmenu_int; + unsigned int is_private:1; +}; + +/** + * v4l2_ctrl_fill - Fill in the control fields based on the control ID. 
+ * + * @id: ID of the control + * @name: pointer to be filled with a string with the name of the control + * @type: pointer for storing the type of the control + * @min: pointer for storing the minimum value for the control + * @max: pointer for storing the maximum value for the control + * @step: pointer for storing the control step + * @def: pointer for storing the default value for the control + * @flags: pointer for storing the flags to be used on the control + * + * This works for all standard V4L2 controls. + * For non-standard controls it will only fill in the given arguments + * and @name content will be set to %NULL. + * + * This function will overwrite the contents of @name, @type and @flags. + * The contents of @min, @max, @step and @def may be modified depending on + * the type. + * + * .. note:: + * + * Do not use in drivers! It is used internally for backwards compatibility + * control handling only. Once all drivers are converted to use the new + * control framework this function will no longer be exported. + */ +void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags); + + +/** + * v4l2_ctrl_handler_init_class() - Initialize the control handler. + * @hdl: The control handler. + * @nr_of_controls_hint: A hint of how many controls this handler is + * expected to refer to. This is the total number, so including + * any inherited controls. It doesn't have to be precise, but if + * it is way off, then you either waste memory (too many buckets + * are allocated) or the control lookup becomes slower (not enough + * buckets are allocated, so there are more slow list lookups). + * It will always work, though. + * @key: Used by the lock validator if CONFIG_LOCKDEP is set. + * @name: Used by the lock validator if CONFIG_LOCKDEP is set. + * + * .. attention:: + * + * Never use this call directly, always use the v4l2_ctrl_handler_init() + * macro that hides the @key and @name arguments. 
+ *
+ * Return: returns an error if the buckets could not be allocated. This
+ * error will also be stored in @hdl->error.
+ */
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned int nr_of_controls_hint,
+ struct lock_class_key *key, const char *name);
+
+#ifdef CONFIG_LOCKDEP
+
+/**
+ * v4l2_ctrl_handler_init - helper function to create a static struct
+ * &lock_class_key and calls v4l2_ctrl_handler_init_class()
+ *
+ * @hdl: The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ * expected to refer to. This is the total number, so including
+ * any inherited controls. It doesn't have to be precise, but if
+ * it is way off, then you either waste memory (too many buckets
+ * are allocated) or the control lookup becomes slower (not enough
+ * buckets are allocated, so there are more slow list lookups).
+ * It will always work, though.
+ *
+ * This helper function creates a static struct &lock_class_key and
+ * calls v4l2_ctrl_handler_init_class(), providing a proper name for the lock
+ * validator.
+ *
+ * Use this helper function to initialize a control handler.
+ */
+#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, \
+ &_key, \
+ KBUILD_BASENAME ":" \
+ __stringify(__LINE__) ":" \
+ "(" #hdl ")->_lock"); \
+ }) \
+)
+#else
+#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \
+ v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL)
+#endif
+
+/**
+ * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
+ * the control list.
+ * @hdl: The control handler.
+ *
+ * Does nothing if @hdl == NULL.
+ *
+ * Return: @hdl's error field or 0 if @hdl is NULL.
+ */
+int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
+
+/**
+ * v4l2_ctrl_lock() - Helper function to lock the handler
+ * associated with the control.
+ * @ctrl: The control to lock.
+ */ +static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl) +{ + mutex_lock(ctrl->handler->lock); +} + +/** + * v4l2_ctrl_unlock() - Helper function to unlock the handler + * associated with the control. + * @ctrl: The control to unlock. + */ +static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl) +{ + mutex_unlock(ctrl->handler->lock); +} + +/** + * __v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging + * to the handler to initialize the hardware to the current control values. The + * caller is responsible for acquiring the control handler mutex on behalf of + * __v4l2_ctrl_handler_setup(). + * @hdl: The control handler. + * + * Button controls will be skipped, as are read-only controls. + * + * If @hdl == NULL, then this just returns 0. + */ +int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging + * to the handler to initialize the hardware to the current control values. + * @hdl: The control handler. + * + * Button controls will be skipped, as are read-only controls. + * + * If @hdl == NULL, then this just returns 0. + */ +int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler. + * @hdl: The control handler. + * @prefix: The prefix to use when logging the control values. If the + * prefix does not end with a space, then ": " will be added + * after the prefix. If @prefix == NULL, then no prefix will be + * used. + * + * For use with VIDIOC_LOG_STATUS. + * + * Does nothing if @hdl == NULL. + */ +void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, + const char *prefix); + +/** + * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2 + * control. + * + * @hdl: The control handler. + * @cfg: The control's configuration data. + * @priv: The control's driver-specific private data. 
+ * + * If the &v4l2_ctrl struct could not be allocated then NULL is returned + * and @hdl->error is set to the error code (if it wasn't set already). + */ +struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_config *cfg, + void *priv); + +/** + * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu + * control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * If the &v4l2_ctrl struct could not be allocated, or the control + * ID is not known, then NULL is returned and @hdl->error is set to the + * appropriate error code (if it wasn't set already). + * + * If @id refers to a menu control, then this function will return NULL. + * + * Use v4l2_ctrl_new_std_menu() when adding menu controls. + */ +struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, s64 min, s64 max, u64 step, + s64 def); + +/** + * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 + * menu control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @mask: The control's skip mask for menu controls. This makes it + * easy to skip menu items that are not valid. If bit X is set, + * then menu item X is skipped. Of course, this only works for + * menus with <= 64 menu items. There are no menus that come + * close to that number, so this is OK. Should we ever need more, + * then this will have to be extended to a bit array. + * @def: The control's default value. + * + * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value + * determines which menu items are to be skipped. + * + * If @id refers to a non-menu control, then this function will return NULL. 
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 max, u64 mask, u8 def);
+
+/**
+ * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
+ * with driver specific menu.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @def: The control's default value.
+ * @qmenu: The new menu.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
+ * menu of this control.
+ *
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 max,
+ u64 mask, u8 def,
+ const char * const *qmenu);
+
+/**
+ * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2
+ * compound control.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @p_def: The control's default value.
+ * @p_min: The control's minimum value.
+ * @p_max: The control's maximum value.
+ *
+ * Same as v4l2_ctrl_new_std(), but with support for compound controls.
+ * To fill in the @p_def, @p_min and @p_max fields, use v4l2_ctrl_ptr_create()
+ * to convert a pointer to a const union v4l2_ctrl_ptr.
+ * Use v4l2_ctrl_ptr_create(NULL) if you want the default, minimum or maximum
+ * value of the compound control to be all zeroes.
+ * If the compound control does not set the ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX``
+ * flag, then it does not have minimum and maximum values.
In that case just use + * v4l2_ctrl_ptr_create(NULL) for the @p_min and @p_max arguments. + * + */ +struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, + const union v4l2_ctrl_ptr p_def, + const union v4l2_ctrl_ptr p_min, + const union v4l2_ctrl_ptr p_max); + +/** + * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control. + * + * @hdl: The control handler. + * @ops: The control ops. + * @id: The control ID. + * @max: The control's maximum value. + * @def: The control's default value. + * @qmenu_int: The control's menu entries. + * + * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally + * takes as an argument an array of integers determining the menu items. + * + * If @id refers to a non-integer-menu control, then this function will + * return %NULL. + */ +struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ops, + u32 id, u8 max, u8 def, + const s64 *qmenu_int); + +/** + * typedef v4l2_ctrl_filter - Typedef to define the filter function to be + * used when adding a control handler. + * + * @ctrl: pointer to struct &v4l2_ctrl. + */ + +typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_add_handler() - Add all controls from handler @add to + * handler @hdl. + * + * @hdl: The control handler. + * @add: The control handler whose controls you want to add to + * the @hdl control handler. + * @filter: This function will filter which controls should be added. + * @from_other_dev: If true, then the controls in @add were defined in another + * device than @hdl. + * + * Does nothing if either of the two handlers is a NULL pointer. + * If @filter is NULL, then all controls are added. Otherwise only those + * controls for which @filter returns true will be added. + * In case of an error @hdl->error will be set to the error code (if it + * wasn't set already). 
+ */ +int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *add, + v4l2_ctrl_filter filter, + bool from_other_dev); + +/** + * v4l2_ctrl_radio_filter() - Standard filter for radio controls. + * + * @ctrl: The control that is filtered. + * + * This will return true for any controls that are valid for radio device + * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM + * transmitter class controls. + * + * This function is to be used with v4l2_ctrl_add_handler(). + */ +bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging + * to that cluster. + * + * @ncontrols: The number of controls in this cluster. + * @controls: The cluster control array of size @ncontrols. + */ +void v4l2_ctrl_cluster(unsigned int ncontrols, struct v4l2_ctrl **controls); + + +/** + * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging + * to that cluster and set it up for autofoo/foo-type handling. + * + * @ncontrols: The number of controls in this cluster. + * @controls: The cluster control array of size @ncontrols. The first control + * must be the 'auto' control (e.g. autogain, autoexposure, etc.) + * @manual_val: The value for the first control in the cluster that equals the + * manual setting. + * @set_volatile: If true, then all controls except the first auto control will + * be volatile. + * + * Use for control groups where one control selects some automatic feature and + * the other controls are only active whenever the automatic feature is turned + * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs + * red and blue balance, etc. + * + * The behavior of such controls is as follows: + * + * When the autofoo control is set to automatic, then any manual controls + * are set to inactive and any reads will call g_volatile_ctrl (if the control + * was marked volatile). 
+ * + * When the autofoo control is set to manual, then any manual controls will + * be marked active, and any reads will just return the current value without + * going through g_volatile_ctrl. + * + * In addition, this function will set the %V4L2_CTRL_FLAG_UPDATE flag + * on the autofoo control and %V4L2_CTRL_FLAG_INACTIVE on the foo control(s) + * if autofoo is in auto mode. + */ +void v4l2_ctrl_auto_cluster(unsigned int ncontrols, + struct v4l2_ctrl **controls, + u8 manual_val, bool set_volatile); + + +/** + * v4l2_ctrl_find() - Find a control with the given ID. + * + * @hdl: The control handler. + * @id: The control ID to find. + * + * If @hdl == NULL this will return NULL as well. Will lock the handler so + * do not use from inside &v4l2_ctrl_ops. + */ +struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); + +/** + * v4l2_ctrl_activate() - Make the control active or inactive. + * @ctrl: The control to (de)activate. + * @active: True if the control should become active. + * + * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically. + * Does nothing if @ctrl == NULL. + * This will usually be called from within the s_ctrl op. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * + * This function assumes that the control handler is locked. + */ +void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active); + +/** + * __v4l2_ctrl_grab() - Unlocked variant of v4l2_ctrl_grab. + * + * @ctrl: The control to (de)activate. + * @grabbed: True if the control should become grabbed. + * + * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. + * Does nothing if @ctrl == NULL. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * This will usually be called when starting or stopping streaming in the + * driver. + * + * This function assumes that the control handler is locked by the caller. 
+ */ +void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed); + +/** + * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed. + * + * @ctrl: The control to (de)activate. + * @grabbed: True if the control should become grabbed. + * + * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. + * Does nothing if @ctrl == NULL. + * The V4L2_EVENT_CTRL event will be generated afterwards. + * This will usually be called when starting or stopping streaming in the + * driver. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. + */ +static inline void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) +{ + if (!ctrl) + return; + + v4l2_ctrl_lock(ctrl); + __v4l2_ctrl_grab(ctrl, grabbed); + v4l2_ctrl_unlock(ctrl); +} + +/** + *__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() + * + * @ctrl: The control to update. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * Update the range of a control on the fly. This works for control types + * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the + * @step value is interpreted as a menu_skip_mask. + * + * An error is returned if one of the range arguments is invalid for this + * control type. + * + * The caller is responsible for acquiring the control handler mutex on behalf + * of __v4l2_ctrl_modify_range(). + */ +int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def); + +/** + * v4l2_ctrl_modify_range() - Update the range of a control. + * + * @ctrl: The control to update. + * @min: The control's minimum value. + * @max: The control's maximum value. + * @step: The control's step value + * @def: The control's default value. + * + * Update the range of a control on the fly. This works for control types + * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. 
For menu controls the + * @step value is interpreted as a menu_skip_mask. + * + * An error is returned if one of the range arguments is invalid for this + * control type. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. + */ +static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, + s64 min, s64 max, u64 step, s64 def) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_modify_range(ctrl, min, max, step, def); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + *__v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions() + * + * @ctrl: The control to update. + * @dims: The control's new dimensions. + * + * Update the dimensions of an array control on the fly. The elements of the + * array are reset to their default value, even if the dimensions are + * unchanged. + * + * An error is returned if @dims is invalid for this control. + * + * The caller is responsible for acquiring the control handler mutex on behalf + * of __v4l2_ctrl_modify_dimensions(). + * + * Note: calling this function when the same control is used in pending requests + * is untested. It should work (a request with the wrong size of the control + * will drop that control silently), but it will be very confusing. + */ +int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]); + +/** + * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control. + * + * @ctrl: The control to update. + * @dims: The control's new dimensions. + * + * Update the dimensions of an array control on the fly. The elements of the + * array are reset to their default value, even if the dimensions are + * unchanged. + * + * An error is returned if @dims is invalid for this control type. + * + * This function assumes that the control handler is not locked and will + * take the lock itself. 
+ * + * Note: calling this function when the same control is used in pending requests + * is untested. It should work (a request with the wrong size of the control + * will drop that control silently), but it will be very confusing. + */ +static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, + u32 dims[V4L2_CTRL_MAX_DIMS]) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_modify_dimensions(ctrl, dims); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * v4l2_ctrl_notify() - Function to set a notify callback for a control. + * + * @ctrl: The control. + * @notify: The callback function. + * @priv: The callback private handle, passed as argument to the callback. + * + * This function sets a callback function for the control. If @ctrl is NULL, + * then it will do nothing. If @notify is NULL, then the notify callback will + * be removed. + * + * There can be only one notify. If another already exists, then a WARN_ON + * will be issued and the function will do nothing. + */ +void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, + void *priv); + +/** + * v4l2_ctrl_get_name() - Get the name of the control + * + * @id: The control ID. + * + * This function returns the name of the given control ID or NULL if it isn't + * a known control. + */ +const char *v4l2_ctrl_get_name(u32 id); + +/** + * v4l2_ctrl_get_menu() - Get the menu string array of the control + * + * @id: The control ID. + * + * This function returns the NULL-terminated menu string array name of the + * given control ID or NULL if it isn't a known menu control. + */ +const char * const *v4l2_ctrl_get_menu(u32 id); + +/** + * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control + * + * @id: The control ID. + * @len: The size of the integer array. + * + * This function returns the integer array of the given control ID or NULL if it + * isn't a known integer menu control. 
+ */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); + +/** + * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from + * within a driver. + * + * @ctrl: The control. + * + * This returns the control's value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for integer type controls only. + */ +s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl); + +/** + * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for integer type controls only. + */ +int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val); + +/** + * v4l2_ctrl_s_ctrl() - Helper function to set the control's value from + * within a driver. + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for integer type controls only. + */ +static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl(ctrl, val); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value + * from within a driver. + * + * @ctrl: The control. + * + * This returns the control's value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. 
+ */ +s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl); + +/** + * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val); + +/** + * v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value + * from within a driver. + * + * @ctrl: The control. + * @val: The new value. + * + * This sets the control's new value safely by going through the control + * framework. This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for 64-bit integer type controls only. + */ +static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_int64(ctrl, val); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). + * + * @ctrl: The control. + * @s: The new string. + * + * This sets the control's new string safely by going through the control + * framework. This function assumes the control's handler is already locked, + * allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for string type controls only. + */ +int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s); + +/** + * v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value + * from within a driver. + * + * @ctrl: The control. + * @s: The new string. + * + * This sets the control's new string safely by going through the control + * framework. 
This function will lock the control's handler, so it cannot be + * used from within the &v4l2_ctrl_ops functions. + * + * This function is for string type controls only. + */ +static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_string(ctrl, s); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/** + * __v4l2_ctrl_s_ctrl_compound() - Unlocked variant to set a compound control + * + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. + * + * This sets the control's new compound payload safely by going through the + * control framework. This function assumes the control's handler is already + * locked, allowing it to be used from within the &v4l2_ctrl_ops functions. + * + * This function is for compound type controls only. + */ +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p); + +/** + * v4l2_ctrl_s_ctrl_compound() - Helper function to set a compound control + * from within a driver. + * + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. + * + * This sets the control's new compound payload safely by going through the + * control framework. This function will lock the control's handler, so it + * cannot be used from within the &v4l2_ctrl_ops functions. + * + * This function is for compound type controls only. 
+ */ +static inline int v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, + const void *p) +{ + int rval; + + v4l2_ctrl_lock(ctrl); + rval = __v4l2_ctrl_s_ctrl_compound(ctrl, type, p); + v4l2_ctrl_unlock(ctrl); + + return rval; +} + +/* Helper defines for area type controls */ +#define __v4l2_ctrl_s_ctrl_area(ctrl, area) \ + __v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) +#define v4l2_ctrl_s_ctrl_area(ctrl, area) \ + v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) + +/* Internal helper functions that deal with control events. */ +extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops; + +/** + * v4l2_ctrl_replace - Function to be used as a callback to + * &struct v4l2_subscribed_event_ops replace\(\) + * + * @old: pointer to struct &v4l2_event with the reported + * event; + * @new: pointer to struct &v4l2_event with the modified + * event; + */ +void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new); + +/** + * v4l2_ctrl_merge - Function to be used as a callback to + * &struct v4l2_subscribed_event_ops merge(\) + * + * @old: pointer to struct &v4l2_event with the reported + * event; + * @new: pointer to struct &v4l2_event with the merged + * event; + */ +void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new); + +/** + * v4l2_ctrl_log_status - helper function to implement %VIDIOC_LOG_STATUS ioctl + * + * @file: pointer to struct file + * @priv: unused. Kept just to be compatible to the arguments expected by + * &struct v4l2_ioctl_ops.vidioc_log_status. + * + * Can be used as a vidioc_log_status function that just dumps all controls + * associated with the filehandle. 
+ */ +int v4l2_ctrl_log_status(struct file *file, void *priv); + +/** + * v4l2_ctrl_subscribe_event - Subscribes to an event + * + * + * @fh: pointer to struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * + * Can be used as a vidioc_subscribe_event function that just subscribes + * control events. + */ +int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + +/** + * v4l2_ctrl_poll - function to be used as a callback to the poll() + * That just polls for control events. + * + * @file: pointer to struct file + * @wait: pointer to struct poll_table_struct + */ +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); + +/** + * v4l2_ctrl_request_setup - helper function to apply control values in a request + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This is a helper function to call the control handler's s_ctrl callback with + * the control values contained in the request. Do note that this approach of + * applying control values in a request is only applicable to memory-to-memory + * devices. + */ +int v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *parent); + +/** + * v4l2_ctrl_request_complete - Complete a control handler request object + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This function is to be called on each control handler that may have had a + * request object associated with it, i.e. control handlers of a driver that + * supports requests. + * + * The function first obtains the values of any volatile controls in the control + * handler and attach them to the request. Then, the function completes the + * request object. 
+ */ +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *parent); + +/** + * v4l2_ctrl_request_hdl_find - Find the control handler in the request + * + * @req: The request + * @parent: The parent control handler ('priv' in media_request_object_find()) + * + * This function finds the control handler in the request. It may return + * NULL if not found. When done, you must call v4l2_ctrl_request_hdl_put() + * with the returned handler pointer. + * + * If the request is not in state VALIDATING or QUEUED, then this function + * will always return NULL. + * + * Note that in state VALIDATING the req_queue_mutex is held, so + * no objects can be added or deleted from the request. + * + * In state QUEUED it is the driver that will have to ensure this. + */ +struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, + struct v4l2_ctrl_handler *parent); + +/** + * v4l2_ctrl_request_hdl_put - Put the control handler + * + * @hdl: Put this control handler + * + * This function releases the control handler previously obtained from + * v4l2_ctrl_request_hdl_find(). + */ +static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl) +{ + if (hdl) + media_request_object_put(&hdl->req_obj); +} + +/** + * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID. + * + * @hdl: The control handler from the request. + * @id: The ID of the control to find. + * + * This function returns a pointer to the control if this control is + * part of the request or NULL otherwise. + */ +struct v4l2_ctrl * +v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); + +/* Helpers for ioctl_ops */ + +/** + * v4l2_queryctrl - Helper function to implement + * :ref:`VIDIOC_QUERYCTRL <vidioc_queryctrl>` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @qc: pointer to &struct v4l2_queryctrl + * + * If hdl == NULL then they will all return -EINVAL. 
+ */ +int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc); + +/** + * v4l2_query_ext_ctrl_to_v4l2_queryctrl - Convert a qec to qe. + * + * @to: The v4l2_queryctrl to write to. + * @from: The v4l2_query_ext_ctrl to read from. + * + * This function is a helper to convert a v4l2_query_ext_ctrl into a + * v4l2_queryctrl. + */ +void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to, + const struct v4l2_query_ext_ctrl *from); + +/** + * v4l2_query_ext_ctrl - Helper function to implement + * :ref:`VIDIOC_QUERY_EXT_CTRL ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @qc: pointer to &struct v4l2_query_ext_ctrl + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, + struct v4l2_query_ext_ctrl *qc); + +/** + * v4l2_querymenu - Helper function to implement + * :ref:`VIDIOC_QUERYMENU ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @qm: pointer to &struct v4l2_querymenu + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm); + +/** + * v4l2_g_ctrl - Helper function to implement + * :ref:`VIDIOC_G_CTRL ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @ctrl: pointer to &struct v4l2_control + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *ctrl); + +/** + * v4l2_s_ctrl - Helper function to implement + * :ref:`VIDIOC_S_CTRL ` ioctl + * + * @fh: pointer to &struct v4l2_fh + * @hdl: pointer to &struct v4l2_ctrl_handler + * + * @ctrl: pointer to &struct v4l2_control + * + * If hdl == NULL then they will all return -EINVAL. 
+ */ +int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct v4l2_control *ctrl); + +/** + * v4l2_g_ext_ctrls - Helper function to implement + * :ref:`VIDIOC_G_EXT_CTRLS ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device + * @mdev: pointer to &struct media_device + * @c: pointer to &struct v4l2_ext_controls + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *c); + +/** + * v4l2_try_ext_ctrls - Helper function to implement + * :ref:`VIDIOC_TRY_EXT_CTRLS ` ioctl + * + * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device + * @mdev: pointer to &struct media_device + * @c: pointer to &struct v4l2_ext_controls + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *c); + +/** + * v4l2_s_ext_ctrls - Helper function to implement + * :ref:`VIDIOC_S_EXT_CTRLS ` ioctl + * + * @fh: pointer to &struct v4l2_fh + * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device + * @mdev: pointer to &struct media_device + * @c: pointer to &struct v4l2_ext_controls + * + * If hdl == NULL then they will all return -EINVAL. + */ +int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, + struct media_device *mdev, + struct v4l2_ext_controls *c); + +/** + * v4l2_ctrl_subdev_subscribe_event - Helper function to implement + * as a &struct v4l2_subdev_core_ops subscribe_event function + * that just subscribes control events. 
+ * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); + +/** + * v4l2_ctrl_subdev_log_status - Log all controls owned by subdev's control + * handler. + * + * @sd: pointer to &struct v4l2_subdev + */ +int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd); + +/** + * v4l2_ctrl_new_fwnode_properties() - Register controls for the device + * properties + * + * @hdl: pointer to &struct v4l2_ctrl_handler to register controls on + * @ctrl_ops: pointer to &struct v4l2_ctrl_ops to register controls with + * @p: pointer to &struct v4l2_fwnode_device_properties + * + * This function registers controls associated to device properties, using the + * property values contained in @p parameter, if the property has been set to + * a value. + * + * Currently the following v4l2 controls are parsed and registered: + * - V4L2_CID_CAMERA_ORIENTATION + * - V4L2_CID_CAMERA_SENSOR_ROTATION; + * + * Controls already registered by the caller with the @hdl control handler are + * not overwritten. Callers should register the controls they want to handle + * themselves before calling this function. + * + * Return: 0 on success, a negative error code on failure. + */ +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p); + +/** + * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @ptr1: A v4l2 control value. + * @ptr2: A v4l2 control value. + * + * Return: true if values are equal, otherwise false. + */ +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + +/** + * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback. + * + * @ctrl: The v4l2_ctrl pointer. 
+ * @from_idx: Starting element index. + * @ptr: The v4l2 control value. + * + * Return: void + */ +void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, + union v4l2_ctrl_ptr ptr); + +/** + * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback. + * + * @ctrl: The v4l2_ctrl pointer. + * + * Return: void + */ +void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl); + +/** + * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback. + * + * @ctrl: The v4l2_ctrl pointer. + * @ptr: The v4l2 control value. + * + * Return: 0 on success, a negative error code on failure. + */ +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-dev.h b/6.18.0/include-overrides/media/v4l2-dev.h new file mode 100644 index 0000000..a213c33 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-dev.h @@ -0,0 +1,665 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * V 4 L 2 D R I V E R H E L P E R A P I + * + * Moved from videodev2.h + * + * Some commonly needed functions for drivers (v4l2-common.o module) + */ +#ifndef _V4L2_DEV_H +#define _V4L2_DEV_H + +#include +#include +#include +#include +#include +#include + +#include + +#define VIDEO_MAJOR 81 + +/** + * enum vfl_devnode_type - type of V4L2 device node + * + * @VFL_TYPE_VIDEO: for video input/output devices + * @VFL_TYPE_VBI: for vertical blank data (i.e. 
closed captions, teletext) + * @VFL_TYPE_RADIO: for radio tuners + * @VFL_TYPE_SUBDEV: for V4L2 subdevices + * @VFL_TYPE_SDR: for Software Defined Radio tuners + * @VFL_TYPE_TOUCH: for touch sensors + * @VFL_TYPE_MAX: number of VFL types, must always be last in the enum + */ +enum vfl_devnode_type { + VFL_TYPE_VIDEO, + VFL_TYPE_VBI, + VFL_TYPE_RADIO, + VFL_TYPE_SUBDEV, + VFL_TYPE_SDR, + VFL_TYPE_TOUCH, + VFL_TYPE_MAX /* Shall be the last one */ +}; + +/** + * enum vfl_devnode_direction - Identifies if a &struct video_device + * corresponds to a receiver, a transmitter or a mem-to-mem device. + * + * @VFL_DIR_RX: device is a receiver. + * @VFL_DIR_TX: device is a transmitter. + * @VFL_DIR_M2M: device is a memory to memory device. + * + * Note: Ignored if &enum vfl_devnode_type is %VFL_TYPE_SUBDEV. + */ +enum vfl_devnode_direction { + VFL_DIR_RX, + VFL_DIR_TX, + VFL_DIR_M2M, +}; + +struct v4l2_ioctl_callbacks; +struct video_device; +struct v4l2_device; +struct v4l2_ctrl_handler; +struct dentry; + +/** + * enum v4l2_video_device_flags - Flags used by &struct video_device + * + * @V4L2_FL_REGISTERED: + * indicates that a &struct video_device is registered. + * Drivers can clear this flag if they want to block all future + * device access. It is cleared by video_unregister_device. + * @V4L2_FL_USES_V4L2_FH: + * indicates that file->private_data points to &struct v4l2_fh. + * This flag is set by the core when v4l2_fh_init() is called. + * All drivers must use it. + * @V4L2_FL_QUIRK_INVERTED_CROP: + * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and + * compose are swapped. If this flag is set, then the selection + * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c. + * This allows those drivers to correctly implement the selection API, + * but the old crop API will still work as expected in order to preserve + * backwards compatibility. + * Never set this flag for new drivers. 
+ * @V4L2_FL_SUBDEV_RO_DEVNODE: + * indicates that the video device node is registered in read-only mode. + * The flag only applies to device nodes registered for sub-devices, it is + * set by the core when the sub-devices device nodes are registered with + * v4l2_device_register_ro_subdev_nodes() and used by the sub-device ioctl + * handler to restrict access to some ioctl calls. + */ +enum v4l2_video_device_flags { + V4L2_FL_REGISTERED = 0, + V4L2_FL_USES_V4L2_FH = 1, + V4L2_FL_QUIRK_INVERTED_CROP = 2, + V4L2_FL_SUBDEV_RO_DEVNODE = 3, +}; + +/* Priority helper functions */ + +/** + * struct v4l2_prio_state - stores the priority states + * + * @prios: array with elements to store the array priorities + * + * + * .. note:: + * The size of @prios array matches the number of priority types defined + * by enum &v4l2_priority. + */ +struct v4l2_prio_state { + atomic_t prios[4]; +}; + +/** + * v4l2_prio_init - initializes a struct v4l2_prio_state + * + * @global: pointer to &struct v4l2_prio_state + */ +void v4l2_prio_init(struct v4l2_prio_state *global); + +/** + * v4l2_prio_change - changes the v4l2 file handler priority + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: pointer to the desired priority, as defined by enum &v4l2_priority + * @new: Priority type requested, as defined by enum &v4l2_priority. + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, + enum v4l2_priority new); + +/** + * v4l2_prio_open - Implements the priority logic for a file handler open + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: pointer to the desired priority, as defined by enum &v4l2_priority + * + * .. note:: + * This function should be used only by the V4L2 core. 
+ */ +void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local); + +/** + * v4l2_prio_close - Implements the priority logic for a file handler close + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: priority to be released, as defined by enum &v4l2_priority + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local); + +/** + * v4l2_prio_max - Return the maximum priority, as stored at the @global array. + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global); + +/** + * v4l2_prio_check - Implements the priority logic for a file handler close + * + * @global: pointer to the &struct v4l2_prio_state of the device node. + * @local: desired priority, as defined by enum &v4l2_priority local + * + * .. note:: + * This function should be used only by the V4L2 core. + */ +int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local); + +/** + * struct v4l2_file_operations - fs operations used by a V4L2 device + * + * @owner: pointer to struct module + * @read: operations needed to implement the read() syscall + * @write: operations needed to implement the write() syscall + * @poll: operations needed to implement the poll() syscall + * @unlocked_ioctl: operations needed to implement the ioctl() syscall + * @compat_ioctl32: operations needed to implement the ioctl() syscall for + * the special case where the Kernel uses 64 bits instructions, but + * the userspace uses 32 bits. 
+ * @get_unmapped_area: called by the mmap() syscall, used when %!CONFIG_MMU + * @mmap: operations needed to implement the mmap() syscall + * @open: operations needed to implement the open() syscall + * @release: operations needed to implement the release() syscall + * + * .. note:: + * + * Those operations are used to implement the fs struct file_operations + * at the V4L2 drivers. The V4L2 core overrides the fs ops with some + * extra logic needed by the subsystem. + */ +struct v4l2_file_operations { + struct module *owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); +#ifdef CONFIG_COMPAT + long (*compat_ioctl32) (struct file *, unsigned int, unsigned long); +#endif + unsigned long (*get_unmapped_area) (struct file *, unsigned long, + unsigned long, unsigned long, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + int (*open) (struct file *); + int (*release) (struct file *); +}; + +/* + * Newer version of video_device, handled by videodev2.c + * This version moves redundant code from video device code to + * the common handler + */ + +/** + * struct video_device - Structure used to create and manage the V4L2 device + * nodes. + * + * @entity: &struct media_entity + * @intf_devnode: pointer to &struct media_intf_devnode + * @pipe: &struct media_pipeline + * @fops: pointer to &struct v4l2_file_operations for the video device + * @device_caps: device capabilities as used in v4l2_capabilities + * @dev: &struct device for the video device + * @cdev: character device + * @v4l2_dev: pointer to &struct v4l2_device parent + * @dev_parent: pointer to &struct device parent + * @ctrl_handler: Control handler associated with this device node. + * May be NULL. + * @queue: &struct vb2_queue associated with this device node. 
May be NULL. + * @prio: pointer to &struct v4l2_prio_state with device's Priority state. + * If NULL, then v4l2_dev->prio will be used. + * @name: video device name + * @vfl_type: V4L device type, as defined by &enum vfl_devnode_type + * @vfl_dir: V4L receiver, transmitter or m2m + * @minor: device node 'minor'. It is set to -1 if the registration failed + * @num: number of the video device node + * @flags: video device flags. Use bitops to set/clear/test flags. + * Contains a set of &enum v4l2_video_device_flags. + * @index: attribute to differentiate multiple indices on one physical device + * @fh_lock: Lock for all v4l2_fhs + * @fh_list: List of &struct v4l2_fh + * @dev_debug: Internal device debug flags, not for use by drivers + * @tvnorms: Supported tv norms + * + * @release: video device release() callback + * @ioctl_ops: pointer to &struct v4l2_ioctl_ops with ioctl callbacks + * + * @valid_ioctls: bitmap with the valid ioctls for this device + * @lock: pointer to &struct mutex serialization lock + * + * .. note:: + * Only set @dev_parent if that can't be deduced from @v4l2_dev. 
+ */ + +struct video_device { +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_entity entity; + struct media_intf_devnode *intf_devnode; + struct media_pipeline pipe; +#endif + const struct v4l2_file_operations *fops; + + u32 device_caps; + + /* sysfs */ + struct device dev; + struct cdev *cdev; + + struct v4l2_device *v4l2_dev; + struct device *dev_parent; + + struct v4l2_ctrl_handler *ctrl_handler; + + struct vb2_queue *queue; + + struct v4l2_prio_state *prio; + + /* device info */ + char name[64]; + enum vfl_devnode_type vfl_type; + enum vfl_devnode_direction vfl_dir; + int minor; + u16 num; + unsigned long flags; + int index; + + /* V4L2 file handles */ + spinlock_t fh_lock; + struct list_head fh_list; + + int dev_debug; + + v4l2_std_id tvnorms; + + /* callbacks */ + void (*release)(struct video_device *vdev); + const struct v4l2_ioctl_ops *ioctl_ops; + DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); + + struct mutex *lock; +}; + +/** + * media_entity_to_video_device - Returns a &struct video_device from + * the &struct media_entity embedded on it. + * + * @__entity: pointer to &struct media_entity, may be NULL + */ +#define media_entity_to_video_device(__entity) \ +({ \ + typeof(__entity) __me_vdev_ent = __entity; \ + \ + __me_vdev_ent ? \ + container_of(__me_vdev_ent, struct video_device, entity) : \ + NULL; \ +}) + +/** + * to_video_device - Returns a &struct video_device from the + * &struct device embedded on it. + * + * @cd: pointer to &struct device + */ +#define to_video_device(cd) container_of(cd, struct video_device, dev) + +/** + * __video_register_device - register video4linux devices + * + * @vdev: struct video_device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type + * @nr: which device node number is desired: + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * @warn_if_nr_in_use: warn if the desired device node number + * was already in use and another number was chosen instead. 
+ * @owner: module that owns the video device node
+ *
+ * The registration code assigns minor numbers and device node numbers
+ * based on the requested type and registers the new device node with
+ * the kernel.
+ *
+ * This function assumes that struct video_device was zeroed when it
+ * was allocated and does not contain any stale data.
+ *
+ * An error is returned if no free minor or device node number could be
+ * found, or if the registration of the device node failed.
+ *
+ * Returns 0 on success.
+ *
+ * .. note::
+ *
+ *	This function is meant to be used only inside the V4L2 core.
+ *	Drivers should use video_register_device() or
+ *	video_register_device_no_warn().
+ */
+int __must_check __video_register_device(struct video_device *vdev,
+					 enum vfl_devnode_type type,
+					 int nr, int warn_if_nr_in_use,
+					 struct module *owner);
+
+/**
+ * video_register_device - register video4linux devices
+ *
+ * @vdev: struct video_device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
+ * @nr: which device node number is desired:
+ *	(0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ *
+ * Internally, it calls __video_register_device(). Please see its
+ * documentation for more details.
+ *
+ * .. note::
+ *	if video_register_device fails, the release() callback of
+ *	&struct video_device structure is *not* called, so the caller
+ *	is responsible for freeing any data. Usually that means that
+ *	you should call video_device_release() on failure.
+ */ +static inline int __must_check video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr) +{ + return __video_register_device(vdev, type, nr, 1, vdev->fops->owner); +} + +/** + * video_register_device_no_warn - register video4linux devices + * + * @vdev: struct video_device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type + * @nr: which device node number is desired: + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * + * This function is identical to video_register_device() except that no + * warning is issued if the desired device node number was already in use. + * + * Internally, it calls __video_register_device(). Please see its + * documentation for more details. + * + * .. note:: + * if video_register_device fails, the release() callback of + * &struct video_device structure is *not* called, so the caller + * is responsible for freeing any data. Usually that means that + * you video_device_release() should be called on failure. + */ +static inline int __must_check +video_register_device_no_warn(struct video_device *vdev, + enum vfl_devnode_type type, int nr) +{ + return __video_register_device(vdev, type, nr, 0, vdev->fops->owner); +} + +/** + * video_unregister_device - Unregister video devices. + * + * @vdev: &struct video_device to register + * + * Does nothing if vdev == NULL or if video_is_registered() returns false. + */ +void video_unregister_device(struct video_device *vdev); + +/** + * video_device_alloc - helper function to alloc &struct video_device + * + * Returns NULL if %-ENOMEM or a &struct video_device on success. + */ +struct video_device * __must_check video_device_alloc(void); + +/** + * video_device_release - helper function to release &struct video_device + * + * @vdev: pointer to &struct video_device + * + * Can also be used for video_device->release\(\). 
+ */ +void video_device_release(struct video_device *vdev); + +/** + * video_device_release_empty - helper function to implement the + * video_device->release\(\) callback. + * + * @vdev: pointer to &struct video_device + * + * This release function does nothing. + * + * It should be used when the video_device is a static global struct. + * + * .. note:: + * Having a static video_device is a dubious construction at best. + */ +void video_device_release_empty(struct video_device *vdev); + +/** + * v4l2_disable_ioctl- mark that a given command isn't implemented. + * shouldn't use core locking + * + * @vdev: pointer to &struct video_device + * @cmd: ioctl command + * + * This function allows drivers to provide just one v4l2_ioctl_ops struct, but + * disable ioctls based on the specific card that is actually found. + * + * .. note:: + * + * This must be called before video_register_device. + * See also the comments for determine_valid_ioctls(). + */ +static inline void v4l2_disable_ioctl(struct video_device *vdev, + unsigned int cmd) +{ + if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + set_bit(_IOC_NR(cmd), vdev->valid_ioctls); +} + +/** + * video_get_drvdata - gets private data from &struct video_device. + * + * @vdev: pointer to &struct video_device + * + * returns a pointer to the private data + */ +static inline void *video_get_drvdata(struct video_device *vdev) +{ + return dev_get_drvdata(&vdev->dev); +} + +/** + * video_set_drvdata - sets private data from &struct video_device. + * + * @vdev: pointer to &struct video_device + * @data: private data pointer + */ +static inline void video_set_drvdata(struct video_device *vdev, void *data) +{ + dev_set_drvdata(&vdev->dev, data); +} + +/** + * video_devdata - gets &struct video_device from struct file. + * + * @file: pointer to struct file + */ +struct video_device *video_devdata(struct file *file); + +/** + * video_drvdata - gets private data from &struct video_device using the + * struct file. 
+ * + * @file: pointer to struct file + * + * This is function combines both video_get_drvdata() and video_devdata() + * as this is used very often. + */ +static inline void *video_drvdata(struct file *file) +{ + return video_get_drvdata(video_devdata(file)); +} + +/** + * video_device_node_name - returns the video device name + * + * @vdev: pointer to &struct video_device + * + * Returns the device name string + */ +static inline const char *video_device_node_name(struct video_device *vdev) +{ + return dev_name(&vdev->dev); +} + +/** + * video_is_registered - returns true if the &struct video_device is registered. + * + * + * @vdev: pointer to &struct video_device + */ +static inline int video_is_registered(struct video_device *vdev) +{ + return test_bit(V4L2_FL_REGISTERED, &vdev->flags); +} + +/** + * v4l2_debugfs_root - returns the dentry of the top-level "v4l2" debugfs dir + * + * If this directory does not yet exist, then it will be created. + */ +#ifdef CONFIG_DEBUG_FS +struct dentry *v4l2_debugfs_root(void); +#else +static inline struct dentry *v4l2_debugfs_root(void) +{ + return NULL; +} +#endif + +#if defined(CONFIG_MEDIA_CONTROLLER) + +/** + * video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as streaming. The given pipeline object is + * assigned to every pad in the pipeline and stored in the media_pad pipe + * field. + * + * Calls to this function can be nested, in which case the same number of + * video_device_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * video_device_pipeline_start(). + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_start(). 
+ */ +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * __video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * ..note:: This is the non-locking version of video_device_pipeline_start() + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around __media_pipeline_start(). + */ +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * video_device_pipeline_stop - Mark a pipeline as not streaming + * @vdev: Starting video device + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as not streaming. The media_pad pipe field + * is reset to %NULL. + * + * If multiple calls to media_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_stop(). + */ +void video_device_pipeline_stop(struct video_device *vdev); + +/** + * __video_device_pipeline_stop - Mark a pipeline as not streaming + * @vdev: Starting video device + * + * .. note:: This is the non-locking version of media_pipeline_stop() + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around __media_pipeline_stop(). + */ +void __video_device_pipeline_stop(struct video_device *vdev); + +/** + * video_device_pipeline_alloc_start - Mark a pipeline as streaming + * @vdev: Starting video device + * + * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start() + * but instead of working on a given pipeline the function will use an + * existing pipeline if the video device is already part of a pipeline, or + * allocate a new pipeline. 
+ * + * Calls to video_device_pipeline_alloc_start() must be matched with + * video_device_pipeline_stop(). + */ +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev); + +/** + * video_device_pipeline - Get the media pipeline a video device is part of + * @vdev: The video device + * + * This function returns the media pipeline that a video device has been + * associated with when constructing the pipeline with + * video_device_pipeline_start(). The pointer remains valid until + * video_device_pipeline_stop() is called. + * + * Return: The media_pipeline the video device is part of, or NULL if the video + * device is not part of any pipeline. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_entity_pipeline(). + */ +struct media_pipeline *video_device_pipeline(struct video_device *vdev); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +#endif /* _V4L2_DEV_H */ diff --git a/6.18.0/include-overrides/media/v4l2-device.h b/6.18.0/include-overrides/media/v4l2-device.h new file mode 100644 index 0000000..25f69b1 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-device.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + V4L2 device support header. + + Copyright (C) 2008 Hans Verkuil + + */ + +#ifndef _V4L2_DEVICE_H +#define _V4L2_DEVICE_H + +#include +#include +#include + +struct v4l2_ctrl_handler; + +/** + * struct v4l2_device - main struct to for V4L2 device drivers + * + * @dev: pointer to struct device. + * @mdev: pointer to struct media_device, may be NULL. + * @subdevs: used to keep track of the registered subdevs + * @lock: lock this struct; can be used by the driver as well + * if this struct is embedded into a larger struct. + * @name: unique device name, by default the driver name + bus ID + * @notify: notify operation called by some sub-devices. + * @ctrl_handler: The control handler. May be %NULL. 
+ * @prio: Device's priority state + * @ref: Keep track of the references to this struct. + * @release: Release function that is called when the ref count + * goes to 0. + * + * Each instance of a V4L2 device should create the v4l2_device struct, + * either stand-alone or embedded in a larger struct. + * + * It allows easy access to sub-devices (see v4l2-subdev.h) and provides + * basic V4L2 device-level support. + * + * .. note:: + * + * #) @dev->driver_data points to this struct. + * #) @dev might be %NULL if there is no parent device + */ +struct v4l2_device { + struct device *dev; + struct media_device *mdev; + struct list_head subdevs; + spinlock_t lock; + char name[36]; + void (*notify)(struct v4l2_subdev *sd, + unsigned int notification, void *arg); + struct v4l2_ctrl_handler *ctrl_handler; + struct v4l2_prio_state prio; + struct kref ref; + void (*release)(struct v4l2_device *v4l2_dev); +}; + +/** + * v4l2_device_get - gets a V4L2 device reference + * + * @v4l2_dev: pointer to struct &v4l2_device + * + * This is an ancillary routine meant to increment the usage for the + * struct &v4l2_device pointed by @v4l2_dev. + */ +static inline void v4l2_device_get(struct v4l2_device *v4l2_dev) +{ + kref_get(&v4l2_dev->ref); +} + +/** + * v4l2_device_put - puts a V4L2 device reference + * + * @v4l2_dev: pointer to struct &v4l2_device + * + * This is an ancillary routine meant to decrement the usage for the + * struct &v4l2_device pointed by @v4l2_dev. + */ +int v4l2_device_put(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_register - Initialize v4l2_dev and make @dev->driver_data + * point to @v4l2_dev. + * + * @dev: pointer to struct &device + * @v4l2_dev: pointer to struct &v4l2_device + * + * .. note:: + * @dev may be %NULL in rare cases (ISA devices). + * In such case the caller must fill in the @v4l2_dev->name field + * before calling this function. 
+ */ +int __must_check v4l2_device_register(struct device *dev, + struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_set_name - Optional function to initialize the + * name field of struct &v4l2_device + * + * @v4l2_dev: pointer to struct &v4l2_device + * @basename: base name for the device name + * @instance: pointer to a static atomic_t var with the instance usage for + * the device driver. + * + * v4l2_device_set_name() initializes the name field of struct &v4l2_device + * using the driver name and a driver-global atomic_t instance. + * + * This function will increment the instance counter and returns the + * instance value used in the name. + * + * Example: + * + * static atomic_t drv_instance = ATOMIC_INIT(0); + * + * ... + * + * instance = v4l2_device_set_name(&\ v4l2_dev, "foo", &\ drv_instance); + * + * The first time this is called the name field will be set to foo0 and + * this function returns 0. If the name ends with a digit (e.g. cx18), + * then the name will be set to cx18-0 since cx180 would look really odd. + */ +int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, + atomic_t *instance); + +/** + * v4l2_device_disconnect - Change V4L2 device state to disconnected. + * + * @v4l2_dev: pointer to struct v4l2_device + * + * Should be called when the USB parent disconnects. + * Since the parent disappears, this ensures that @v4l2_dev doesn't have + * an invalid parent pointer. + * + * .. note:: This function sets @v4l2_dev->dev to NULL. + */ +void v4l2_device_disconnect(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_unregister - Unregister all sub-devices and any other + * resources related to @v4l2_dev. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +void v4l2_device_unregister(struct v4l2_device *v4l2_dev); + +/** + * v4l2_device_register_subdev - Registers a subdev with a v4l2 device. 
+ * + * @v4l2_dev: pointer to struct &v4l2_device + * @sd: pointer to &struct v4l2_subdev + * + * While registered, the subdev module is marked as in-use. + * + * An error is returned if the module is no longer loaded on any attempts + * to register it. + */ +#define v4l2_device_register_subdev(v4l2_dev, sd) \ + __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE) +int __must_check __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct module *module); + +/** + * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. + * + * @sd: pointer to &struct v4l2_subdev + * + * .. note :: + * + * Can also be called if the subdev wasn't registered. In such + * case, it will do nothing. + */ +void v4l2_device_unregister_subdev(struct v4l2_subdev *sd); + +/** + * __v4l2_device_register_subdev_nodes - Registers device nodes for + * all subdevs of the v4l2 device that are marked with the + * %V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * + * @v4l2_dev: pointer to struct v4l2_device + * @read_only: subdevices read-only flag. True to register the subdevices + * device nodes in read-only mode, false to allow full access to the + * subdevice userspace API. + */ +int __must_check +__v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only); + +/** + * v4l2_device_register_subdev_nodes - Registers subdevices device nodes with + * unrestricted access to the subdevice userspace operations + * + * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation + * for more details. 
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline int __must_check
+v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+	return __v4l2_device_register_subdev_nodes(v4l2_dev, false);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * v4l2_device_register_ro_subdev_nodes - Registers subdevices device nodes
+ *	in read-only mode
+ *
+ * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation
+ * for more details.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline int __must_check
+v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+	return __v4l2_device_register_subdev_nodes(v4l2_dev, true);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * v4l2_subdev_notify - Sends a notification to v4l2_device.
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @notification: type of notification. Please notice that the notification
+ *	type is driver-specific.
+ * @arg: arguments for the notification. Those are specific to each
+ *	notification type.
+ */
+static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
+				      unsigned int notification, void *arg)
+{
+	if (sd && sd->v4l2_dev && sd->v4l2_dev->notify)
+		sd->v4l2_dev->notify(sd, notification, arg);
+}
+
+/**
+ * v4l2_device_supports_requests - Test if requests are supported.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+{
+	return v4l2_dev->mdev && v4l2_dev->mdev->ops &&
+	       v4l2_dev->mdev->ops->req_queue;
+}
+
+/* Helper macros to iterate over all subdevs. */
+
+/**
+ * v4l2_device_for_each_subdev - Helper macro that iterates over all
+ *	sub-devices of a given &v4l2_device.
+ *
+ * @sd: pointer that will be filled by the macro with all
+ *	&struct v4l2_subdev pointer used as an iterator by the loop.
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * + * This macro iterates over all sub-devices owned by the @v4l2_dev device. + * It acts as a for loop iterator and executes the next statement with + * the @sd variable pointing to each sub-device in turn. + */ +#define v4l2_device_for_each_subdev(sd, v4l2_dev) \ + list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) + +/** + * __v4l2_device_call_subdevs_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev pointer used as an iterator by the loop. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \ + do { \ + list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \ + if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ + (sd)->ops->o->f((sd) , ##args); \ + } while (0) + +/** + * __v4l2_device_call_subdevs - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. 
+ * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, cond, o, \ + f , ##args); \ + } while (0) + +/** + * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev sub-devices associated with @v4l2_dev. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, zero + * otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \ +({ \ + long __err = 0; \ + \ + list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) { \ + if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ + __err = (sd)->ops->o->f((sd) , ##args); \ + if (__err && __err != -ENOIOCTLCMD) \ + break; \ + } \ + (__err == -ENOIOCTLCMD) ? 0 : __err; \ +}) + +/** + * __v4l2_device_call_subdevs_until_err - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. 
+ * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ + f , ##args); \ +}) + +/** + * v4l2_device_call_all - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ + ##args); \ + } while (0) + +/** + * v4l2_device_call_until_err - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver, until an error occurs. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. 
+ * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ + ##args); \ +}) + +/** + * v4l2_device_mask_call_all - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) 
\ + do { \ + struct v4l2_subdev *__sd; \ + \ + __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ + } while (0) + +/** + * v4l2_device_mask_call_until_err - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ +#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \ +({ \ + struct v4l2_subdev *__sd; \ + __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ +}) + + +/** + * v4l2_device_has_op - checks if any subdev with matching grpid has a + * given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. 
+ */ +#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \ +({ \ + struct v4l2_subdev *__sd; \ + bool __result = false; \ + list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ + if ((grpid) && __sd->grp_id != (grpid)) \ + continue; \ + if (v4l2_subdev_has_op(__sd, o, f)) { \ + __result = true; \ + break; \ + } \ + } \ + __result; \ +}) + +/** + * v4l2_device_mask_has_op - checks if any subdev with matching group + * mask has a given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + */ +#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \ +({ \ + struct v4l2_subdev *__sd; \ + bool __result = false; \ + list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ + if ((grpmsk) && !(__sd->grp_id & (grpmsk))) \ + continue; \ + if (v4l2_subdev_has_op(__sd, o, f)) { \ + __result = true; \ + break; \ + } \ + } \ + __result; \ +}) + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-dv-timings.h b/6.18.0/include-overrides/media/v4l2-dv-timings.h new file mode 100644 index 0000000..2b42e5d --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-dv-timings.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-dv-timings - Internal header with dv-timings helper functions + * + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef __V4L2_DV_TIMINGS_H +#define __V4L2_DV_TIMINGS_H + +#include +#include + +/** + * v4l2_calc_timeperframe - helper function to calculate timeperframe based + * v4l2_dv_timings fields. 
+ * @t: Timings for the video mode. + * + * Calculates the expected timeperframe using the pixel clock value and + * horizontal/vertical measures. This means that v4l2_dv_timings structure + * must be correctly and fully filled. + */ +struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t); + +/* + * v4l2_dv_timings_presets: list of all dv_timings presets. + */ +extern const struct v4l2_dv_timings v4l2_dv_timings_presets[]; + +/** + * typedef v4l2_check_dv_timings_fnc - timings check callback + * + * @t: the v4l2_dv_timings struct. + * @handle: a handle from the driver. + * + * Returns true if the given timings are valid. + */ +typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle); + +/** + * v4l2_valid_dv_timings() - are these timings valid? + * + * @t: the v4l2_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fnc: callback to check if this timing is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * Returns true if the given dv_timings struct is supported by the + * hardware capabilities and the callback function (if non-NULL), returns + * false otherwise. + */ +bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV + * timings based on capabilities + * + * @t: the v4l2_enum_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fnc: callback to check if this timing is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * This enumerates dv_timings using the full list of possible CEA-861 and DMT + * timings, filtering out any timings that are not supported based on the + * hardware capabilities and the callback function (if non-NULL). 
+ * + * If a valid timing for the given index is found, it will fill in @t and + * return 0, otherwise it returns -EINVAL. + */ +int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_find_dv_timings_cap() - Find the closest timings struct + * + * @t: the v4l2_enum_dv_timings struct. + * @cap: the v4l2_dv_timings_cap capabilities. + * @pclock_delta: maximum delta between t->pixelclock and the timing struct + * under consideration. + * @fnc: callback to check if a given timings struct is OK. May be NULL. + * @fnc_handle: a handle that is passed on to @fnc. + * + * This function tries to map the given timings to an entry in the + * full list of possible CEA-861 and DMT timings, filtering out any timings + * that are not supported based on the hardware capabilities and the callback + * function (if non-NULL). + * + * On success it will fill in @t with the found timings and it returns true. + * On failure it will return false. + */ +bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, + const struct v4l2_dv_timings_cap *cap, + unsigned pclock_delta, + v4l2_check_dv_timings_fnc fnc, + void *fnc_handle); + +/** + * v4l2_find_dv_timings_cea861_vic() - find timings based on CEA-861 VIC + * @t: the timings data. + * @vic: CEA-861 VIC code + * + * On success it will fill in @t with the found timings and it returns true. + * On failure it will return false. + */ +bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic); + +/** + * v4l2_match_dv_timings() - do two timings match? + * + * @measured: the measured timings data. + * @standard: the timings according to the standard. + * @pclock_delta: maximum delta in Hz between standard->pixelclock and + * the measured timings. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. + * + * Returns true if the two timings match, returns false otherwise. 
+ */ +bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured, + const struct v4l2_dv_timings *standard, + unsigned pclock_delta, bool match_reduced_fps); + +/** + * v4l2_print_dv_timings() - log the contents of a dv_timings struct + * @dev_prefix:device prefix for each log line. + * @prefix: additional prefix for each log line, may be NULL. + * @t: the timings data. + * @detailed: if true, give a detailed log. + */ +void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, + const struct v4l2_dv_timings *t, bool detailed); + +/** + * v4l2_detect_cvt - detect if the given timings follow the CVT standard + * + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. + * @active_width: active width of image (does not include blanking). This + * information is needed only in case of version 2 of reduced blanking. + * In other cases, this parameter does not have any effect on timings. + * @polarities: the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced: if this flag is true, it indicates interlaced format + * @cap: the v4l2_dv_timings_cap capabilities. + * @fmt: the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid CVT format. If so, then it will return true, and fmt will be filled + * in with the found CVT timings. + */ +bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, unsigned int active_width, + u32 polarities, bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); + +/** + * v4l2_detect_gtf - detect if the given timings follow the GTF standard + * + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. 
+ * @polarities: the horizontal and vertical polarities (same as struct + * v4l2_bt_timings polarities). + * @interlaced: if this flag is true, it indicates interlaced format + * @aspect: preferred aspect ratio. GTF has no method of determining the + * aspect ratio in order to derive the image width from the + * image height, so it has to be passed explicitly. Usually + * the native screen aspect ratio is used for this. If it + * is not filled in correctly, then 16:9 will be assumed. + * @cap: the v4l2_dv_timings_cap capabilities. + * @fmt: the resulting timings. + * + * This function will attempt to detect if the given values correspond to a + * valid GTF format. If so, then it will return true, and fmt will be filled + * in with the found GTF timings. + */ +bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, u32 polarities, bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); + +/** + * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes + * 0x15 and 0x16 from the EDID. + * + * @hor_landscape: byte 0x15 from the EDID. + * @vert_portrait: byte 0x16 from the EDID. + * + * Determines the aspect ratio from the EDID. + * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: + * "Horizontal and Vertical Screen Size or Aspect Ratio" + */ +struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait); + +/** + * v4l2_dv_timings_aspect_ratio - calculate the aspect ratio based on the + * v4l2_dv_timings information. + * + * @t: the timings data. + */ +struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t); + +/** + * can_reduce_fps - check if conditions for reduced fps are true. + * @bt: v4l2 timing structure + * + * For different timings reduced fps is allowed if the following conditions + * are met: + * + * - For CVT timings: if reduced blanking v2 (vsync == 8) is true. 
+ * - For CEA861 timings: if %V4L2_DV_FL_CAN_REDUCE_FPS flag is true. + */ +static inline bool can_reduce_fps(struct v4l2_bt_timings *bt) +{ + if ((bt->standards & V4L2_DV_BT_STD_CVT) && (bt->vsync == 8)) + return true; + + if ((bt->standards & V4L2_DV_BT_STD_CEA861) && + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS)) + return true; + + return false; +} + +/** + * struct v4l2_hdmi_colorimetry - describes the HDMI colorimetry information + * @colorspace: enum v4l2_colorspace, the colorspace + * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @quantization: enum v4l2_quantization, colorspace quantization + * @xfer_func: enum v4l2_xfer_func, colorspace transfer function + */ +struct v4l2_hdmi_colorimetry { + enum v4l2_colorspace colorspace; + enum v4l2_ycbcr_encoding ycbcr_enc; + enum v4l2_quantization quantization; + enum v4l2_xfer_func xfer_func; +}; + +struct hdmi_avi_infoframe; +struct hdmi_vendor_infoframe; + +struct v4l2_hdmi_colorimetry +v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, + const struct hdmi_vendor_infoframe *hdmi, + unsigned int height); + +unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks); +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset); +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr); +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input); +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); + +/* Add support for exporting InfoFrames to debugfs */ + +/* + * HDMI InfoFrames start with a 3 byte header, then a checksum, + * followed by the actual IF payload. + * + * The payload length is limited to 30 bytes according to the HDMI spec, + * but since the length is encoded in 5 bits, it can be 31 bytes theoretically. + * So set the max length as 31 + 3 (header) + 1 (checksum) = 35. 
+ */ +#define V4L2_DEBUGFS_IF_MAX_LEN (35) + +#define V4L2_DEBUGFS_IF_AVI BIT(0) +#define V4L2_DEBUGFS_IF_AUDIO BIT(1) +#define V4L2_DEBUGFS_IF_SPD BIT(2) +#define V4L2_DEBUGFS_IF_HDMI BIT(3) +#define V4L2_DEBUGFS_IF_DRM BIT(4) + +typedef ssize_t (*v4l2_debugfs_if_read_t)(u32 type, void *priv, + struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos); + +struct v4l2_debugfs_if { + struct dentry *if_dir; + void *priv; + + v4l2_debugfs_if_read_t if_read; +}; + +#ifdef CONFIG_DEBUG_FS +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read); +void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes); +#else +static inline +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read) +{ + return NULL; +} + +static inline void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes) +{ +} +#endif + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-event.h b/6.18.0/include-overrides/media/v4l2-event.h new file mode 100644 index 0000000..3a0e258 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-event.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-event.h + * + * V4L2 events. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#ifndef V4L2_EVENT_H +#define V4L2_EVENT_H + +#include +#include +#include + +struct v4l2_fh; +struct v4l2_subdev; +struct v4l2_subscribed_event; +struct video_device; + +/** + * struct v4l2_kevent - Internal kernel event struct. + * @list: List node for the v4l2_fh->available list. + * @sev: Pointer to parent v4l2_subscribed_event. + * @event: The event itself. + * @ts: The timestamp of the event. + */ +struct v4l2_kevent { + struct list_head list; + struct v4l2_subscribed_event *sev; + struct v4l2_event event; + u64 ts; +}; + +/** + * struct v4l2_subscribed_event_ops - Subscribed event operations. 
+ * + * @add: Optional callback, called when a new listener is added + * @del: Optional callback, called when a listener stops listening + * @replace: Optional callback that can replace event 'old' with event 'new'. + * @merge: Optional callback that can merge event 'old' into event 'new'. + */ +struct v4l2_subscribed_event_ops { + int (*add)(struct v4l2_subscribed_event *sev, unsigned int elems); + void (*del)(struct v4l2_subscribed_event *sev); + void (*replace)(struct v4l2_event *old, const struct v4l2_event *new); + void (*merge)(const struct v4l2_event *old, struct v4l2_event *new); +}; + +/** + * struct v4l2_subscribed_event - Internal struct representing a subscribed + * event. + * + * @list: List node for the v4l2_fh->subscribed list. + * @type: Event type. + * @id: Associated object ID (e.g. control ID). 0 if there isn't any. + * @flags: Copy of v4l2_event_subscription->flags. + * @fh: Filehandle that subscribed to this event. + * @node: List node that hooks into the object's event list + * (if there is one). + * @ops: v4l2_subscribed_event_ops + * @elems: The number of elements in the events array. + * @first: The index of the events containing the oldest available event. + * @in_use: The number of queued events. + * @events: An array of @elems events. + */ +struct v4l2_subscribed_event { + struct list_head list; + u32 type; + u32 id; + u32 flags; + struct v4l2_fh *fh; + struct list_head node; + const struct v4l2_subscribed_event_ops *ops; + unsigned int elems; + unsigned int first; + unsigned int in_use; + struct v4l2_kevent events[] __counted_by(elems); +}; + +/** + * v4l2_event_dequeue - Dequeue events from video device. + * + * @fh: pointer to struct v4l2_fh + * @event: pointer to struct v4l2_event + * @nonblocking: if not zero, waits for an event to arrive + */ +int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, + int nonblocking); + +/** + * v4l2_event_queue - Queue events to video device. 
+ * + * @vdev: pointer to &struct video_device + * @ev: pointer to &struct v4l2_event + * + * The event will be queued for all &struct v4l2_fh file handlers. + * + * .. note:: + * The driver's only responsibility is to fill in the type and the data + * fields. The other fields will be filled in by V4L2. + */ +void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev); + +/** + * v4l2_event_queue_fh - Queue events to video device. + * + * @fh: pointer to &struct v4l2_fh + * @ev: pointer to &struct v4l2_event + * + * + * The event will be queued only for the specified &struct v4l2_fh file handler. + * + * .. note:: + * The driver's only responsibility is to fill in the type and the data + * fields. The other fields will be filled in by V4L2. + */ +void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev); + +/** + * v4l2_event_wake_all - Wake all filehandles. + * + * Used when unregistering a video device. + * + * @vdev: pointer to &struct video_device + */ +void v4l2_event_wake_all(struct video_device *vdev); + +/** + * v4l2_event_pending - Check if an event is available + * + * @fh: pointer to &struct v4l2_fh + * + * Returns the number of pending events. + */ +int v4l2_event_pending(struct v4l2_fh *fh); + +/** + * v4l2_event_subscribe - Subscribes to an event + * + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * @elems: size of the events queue + * @ops: pointer to &v4l2_subscribed_event_ops + * + * .. note:: + * + * if @elems is zero, the framework will fill in a default value, + * with is currently 1 element. 
+ */ +int v4l2_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub, + unsigned int elems, + const struct v4l2_subscribed_event_ops *ops); +/** + * v4l2_event_unsubscribe - Unsubscribes to an event + * + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); +/** + * v4l2_event_unsubscribe_all - Unsubscribes to all events + * + * @fh: pointer to &struct v4l2_fh + */ +void v4l2_event_unsubscribe_all(struct v4l2_fh *fh); + +/** + * v4l2_event_subdev_unsubscribe - Subdev variant of v4l2_event_unsubscribe() + * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + * + * .. note:: + * + * This function should be used for the &struct v4l2_subdev_core_ops + * %unsubscribe_event field. + */ +int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +/** + * v4l2_src_change_event_subscribe - helper function that calls + * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE. + * + * @fh: pointer to struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_src_change_event_subscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); +/** + * v4l2_src_change_event_subdev_subscribe - Variant of v4l2_event_subscribe(), + * meant to subscribe only events of the type %V4L2_EVENT_SOURCE_CHANGE. 
+ * + * @sd: pointer to &struct v4l2_subdev + * @fh: pointer to &struct v4l2_fh + * @sub: pointer to &struct v4l2_event_subscription + */ +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +#endif /* V4L2_EVENT_H */ diff --git a/6.18.0/include-overrides/media/v4l2-fh.h b/6.18.0/include-overrides/media/v4l2-fh.h new file mode 100644 index 0000000..aad4b36 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-fh.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-fh.h + * + * V4L2 file handle. Store per file handle data for the V4L2 + * framework. Using file handles is mandatory for the drivers. + * + * Copyright (C) 2009--2010 Nokia Corporation. + * + * Contact: Sakari Ailus + */ + +#ifndef V4L2_FH_H +#define V4L2_FH_H + +#include +#include +#include +#include + +struct video_device; +struct v4l2_ctrl_handler; + +/** + * struct v4l2_fh - Describes a V4L2 file handler + * + * @list: list of file handlers + * @vdev: pointer to &struct video_device + * @ctrl_handler: pointer to &struct v4l2_ctrl_handler + * @prio: priority of the file handler, as defined by &enum v4l2_priority + * + * @wait: event' s wait queue + * @subscribe_lock: serialise changes to the subscribed list; guarantee that + * the add and del event callbacks are orderly called + * @subscribed: list of subscribed events + * @available: list of events waiting to be dequeued + * @navailable: number of available events at @available list + * @sequence: event sequence number + * + * @m2m_ctx: pointer to &struct v4l2_m2m_ctx + */ +struct v4l2_fh { + struct list_head list; + struct video_device *vdev; + struct v4l2_ctrl_handler *ctrl_handler; + enum v4l2_priority prio; + + /* Events */ + wait_queue_head_t wait; + struct mutex subscribe_lock; + struct list_head subscribed; + struct list_head available; + unsigned int navailable; + u32 sequence; + + struct v4l2_m2m_ctx *m2m_ctx; +}; + +/** + * 
file_to_v4l2_fh - Return the v4l2_fh associated with a struct file + * + * @filp: pointer to &struct file + * + * This function should be used by drivers to retrieve the &struct v4l2_fh + * instance pointer stored in the file private_data instead of accessing the + * private_data field directly. + */ +static inline struct v4l2_fh *file_to_v4l2_fh(struct file *filp) +{ + return filp->private_data; +} + +/** + * v4l2_fh_init - Initialise the file handle. + * + * @fh: pointer to &struct v4l2_fh + * @vdev: pointer to &struct video_device + * + * Parts of the V4L2 framework using the + * file handles should be initialised in this function. Must be called + * from driver's v4l2_file_operations->open\(\) handler if the driver + * uses &struct v4l2_fh. + */ +void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev); + +/** + * v4l2_fh_add - Add the fh to the list of file handles on a video_device. + * + * @fh: pointer to &struct v4l2_fh + * @filp: pointer to &struct file associated with @fh + * + * The function sets filp->private_data to point to @fh. + * + * .. note:: + * The @fh file handle must be initialised first. + */ +void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp); + +/** + * v4l2_fh_open - Ancillary routine that can be used as the open\(\) op + * of v4l2_file_operations. + * + * @filp: pointer to struct file + * + * It allocates a v4l2_fh and inits and adds it to the &struct video_device + * associated with the file pointer. + * + * On error filp->private_data will be %NULL, otherwise it will point to + * the &struct v4l2_fh. + */ +int v4l2_fh_open(struct file *filp); + +/** + * v4l2_fh_del - Remove file handle from the list of file handles. + * + * @fh: pointer to &struct v4l2_fh + * @filp: pointer to &struct file associated with @fh + * + * The function resets filp->private_data to NULL. + * + * .. note:: + * Must be called in v4l2_file_operations->release\(\) handler if the driver + * uses &struct v4l2_fh. 
+ */ +void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp); + +/** + * v4l2_fh_exit - Release resources related to a file handle. + * + * @fh: pointer to &struct v4l2_fh + * + * Parts of the V4L2 framework using the v4l2_fh must release their + * resources here, too. + * + * .. note:: + * Must be called in v4l2_file_operations->release\(\) handler if the + * driver uses &struct v4l2_fh. + */ +void v4l2_fh_exit(struct v4l2_fh *fh); + +/** + * v4l2_fh_release - Ancillary routine that can be used as the release\(\) op + * of v4l2_file_operations. + * + * @filp: pointer to struct file + * + * It deletes and exits the v4l2_fh associated with the file pointer and + * frees it. It will do nothing if filp->private_data (the pointer to the + * v4l2_fh struct) is %NULL. + * + * This function always returns 0. + */ +int v4l2_fh_release(struct file *filp); + +/** + * v4l2_fh_is_singular - Returns 1 if this filehandle is the only filehandle + * opened for the associated video_device. + * + * @fh: pointer to &struct v4l2_fh + * + * If @fh is NULL, then it returns 0. + */ +int v4l2_fh_is_singular(struct v4l2_fh *fh); + +/** + * v4l2_fh_is_singular_file - Returns 1 if this filehandle is the only + * filehandle opened for the associated video_device. + * + * @filp: pointer to struct file + * + * This is a helper function variant of v4l2_fh_is_singular() with uses + * struct file as argument. + * + * If filp->private_data is %NULL, then it will return 0. + */ +static inline int v4l2_fh_is_singular_file(struct file *filp) +{ + return v4l2_fh_is_singular(filp->private_data); +} + +#endif /* V4L2_EVENT_H */ diff --git a/6.18.0/include-overrides/media/v4l2-flash-led-class.h b/6.18.0/include-overrides/media/v4l2-flash-led-class.h new file mode 100644 index 0000000..b106e7a --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-flash-led-class.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 flash LED sub-device registration helpers. 
+ * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski + */ + +#ifndef _V4L2_FLASH_H +#define _V4L2_FLASH_H + +#include +#include + +struct led_classdev_flash; +struct led_classdev; +struct v4l2_flash; +enum led_brightness; + +/** + * struct v4l2_flash_ctrl_data - flash control initialization data, filled + * basing on the features declared by the LED flash + * class driver in the v4l2_flash_config + * @config: initialization data for a control + * @cid: contains v4l2 flash control id if the config + * field was initialized, 0 otherwise + */ +struct v4l2_flash_ctrl_data { + struct v4l2_ctrl_config config; + u32 cid; +}; + +/** + * struct v4l2_flash_ops - V4L2 flash operations + * + * @external_strobe_set: Setup strobing the flash by hardware pin state + * assertion. + * @intensity_to_led_brightness: Convert intensity to brightness in a device + * specific manner + * @led_brightness_to_intensity: convert brightness to intensity in a device + * specific manner. 
+ */ +struct v4l2_flash_ops { + int (*external_strobe_set)(struct v4l2_flash *v4l2_flash, + bool enable); + enum led_brightness (*intensity_to_led_brightness) + (struct v4l2_flash *v4l2_flash, s32 intensity); + s32 (*led_brightness_to_intensity) + (struct v4l2_flash *v4l2_flash, enum led_brightness); +}; + +/** + * struct v4l2_flash_config - V4L2 Flash sub-device initialization data + * @dev_name: the name of the media entity, + * unique in the system + * @intensity: non-flash strobe constraints for the LED + * @flash_faults: bitmask of flash faults that the LED flash class + * device can report; corresponding LED_FAULT* bit + * definitions are available in the header file + * + * @has_external_strobe: external strobe capability + */ +struct v4l2_flash_config { + char dev_name[32]; + struct led_flash_setting intensity; + u32 flash_faults; + unsigned int has_external_strobe:1; +}; + +/** + * struct v4l2_flash - Flash sub-device context + * @fled_cdev: LED flash class device controlled by this sub-device + * @iled_cdev: LED class device representing indicator LED associated + * with the LED flash class device + * @ops: V4L2 specific flash ops + * @sd: V4L2 sub-device + * @hdl: flash controls handler + * @ctrls: array of pointers to controls, whose values define + * the sub-device state + */ +struct v4l2_flash { + struct led_classdev_flash *fled_cdev; + struct led_classdev *iled_cdev; + const struct v4l2_flash_ops *ops; + + struct v4l2_subdev sd; + struct v4l2_ctrl_handler hdl; + struct v4l2_ctrl **ctrls; +}; + +/** + * v4l2_subdev_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_subdev embedded on it. + * + * @sd: pointer to &struct v4l2_subdev + */ +static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash( + struct v4l2_subdev *sd) +{ + return container_of(sd, struct v4l2_flash, sd); +} + +/** + * v4l2_ctrl_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_ctrl embedded on it. 
+ * + * @c: pointer to &struct v4l2_ctrl + */ +static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) +{ + return container_of(c->handler, struct v4l2_flash, hdl); +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +/** + * v4l2_flash_init - initialize V4L2 flash led sub-device + * @dev: flash device, e.g. an I2C device + * @fwn: fwnode_handle of the LED, may be NULL if the same as device's + * @fled_cdev: LED flash class device to wrap + * @ops: V4L2 Flash device ops + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * The ops pointer is stored by the V4L2 flash framework. No + * references are held to config nor its contents once this function + * has returned. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. + */ +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config); + +/** + * v4l2_flash_indicator_init - initialize V4L2 indicator sub-device + * @dev: flash device, e.g. an I2C device + * @fwn: fwnode_handle of the LED, may be NULL if the same as device's + * @iled_cdev: LED flash class device representing the indicator LED + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * The ops pointer is stored by the V4L2 flash framework. No + * references are held to config nor its contents once this function + * has returned. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. 
+ */ +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, struct v4l2_flash_config *config); + +/** + * v4l2_flash_release - release V4L2 Flash sub-device + * @v4l2_flash: the V4L2 Flash sub-device to release + * + * Release V4L2 Flash sub-device. + */ +void v4l2_flash_release(struct v4l2_flash *v4l2_flash); + +#else +static inline struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ +} +#endif /* CONFIG_V4L2_FLASH_LED_CLASS */ + +#endif /* _V4L2_FLASH_H */ diff --git a/6.18.0/include-overrides/media/v4l2-fwnode.h b/6.18.0/include-overrides/media/v4l2-fwnode.h new file mode 100644 index 0000000..f7c57c7 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-fwnode.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 fwnode binding parsing library + * + * Copyright (c) 2016 Intel Corporation. + * Author: Sakari Ailus + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#ifndef _V4L2_FWNODE_H +#define _V4L2_FWNODE_H + +#include +#include +#include +#include + +#include + +/** + * struct v4l2_fwnode_endpoint - the endpoint data structure + * @base: fwnode endpoint of the v4l2_fwnode + * @bus_type: bus type + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel. 
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). + * @link_frequencies: array of supported link frequencies + * @nr_of_link_frequencies: number of elements in link_frequenccies array + */ +struct v4l2_fwnode_endpoint { + struct fwnode_endpoint base; + enum v4l2_mbus_type bus_type; + struct { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; + u64 *link_frequencies; + unsigned int nr_of_link_frequencies; +}; + +/** + * V4L2_FWNODE_PROPERTY_UNSET - identify a non initialized property + * + * All properties in &struct v4l2_fwnode_device_properties are initialized + * to this value. + */ +#define V4L2_FWNODE_PROPERTY_UNSET (-1U) + +/** + * enum v4l2_fwnode_orientation - possible device orientation + * @V4L2_FWNODE_ORIENTATION_FRONT: device installed on the front side + * @V4L2_FWNODE_ORIENTATION_BACK: device installed on the back side + * @V4L2_FWNODE_ORIENTATION_EXTERNAL: device externally located + */ +enum v4l2_fwnode_orientation { + V4L2_FWNODE_ORIENTATION_FRONT, + V4L2_FWNODE_ORIENTATION_BACK, + V4L2_FWNODE_ORIENTATION_EXTERNAL +}; + +/** + * struct v4l2_fwnode_device_properties - fwnode device properties + * @orientation: device orientation. 
See &enum v4l2_fwnode_orientation + * @rotation: device rotation + */ +struct v4l2_fwnode_device_properties { + enum v4l2_fwnode_orientation orientation; + unsigned int rotation; +}; + +/** + * struct v4l2_fwnode_link - a link between two endpoints + * @local_node: pointer to device_node of this endpoint + * @local_port: identifier of the port this endpoint belongs to + * @local_id: identifier of the id this endpoint belongs to + * @remote_node: pointer to device_node of the remote endpoint + * @remote_port: identifier of the port the remote endpoint belongs to + * @remote_id: identifier of the id the remote endpoint belongs to + */ +struct v4l2_fwnode_link { + struct fwnode_handle *local_node; + unsigned int local_port; + unsigned int local_id; + struct fwnode_handle *remote_node; + unsigned int remote_port; + unsigned int remote_id; +}; + +/** + * enum v4l2_connector_type - connector type + * @V4L2_CONN_UNKNOWN: unknown connector type, no V4L2 connector configuration + * @V4L2_CONN_COMPOSITE: analog composite connector + * @V4L2_CONN_SVIDEO: analog svideo connector + */ +enum v4l2_connector_type { + V4L2_CONN_UNKNOWN, + V4L2_CONN_COMPOSITE, + V4L2_CONN_SVIDEO, +}; + +/** + * struct v4l2_connector_link - connector link data structure + * @head: structure to be used to add the link to the + * &struct v4l2_fwnode_connector + * @fwnode_link: &struct v4l2_fwnode_link link between the connector and the + * device the connector belongs to. + */ +struct v4l2_connector_link { + struct list_head head; + struct v4l2_fwnode_link fwnode_link; +}; + +/** + * struct v4l2_fwnode_connector_analog - analog connector data structure + * @sdtv_stds: sdtv standards this connector supports, set to V4L2_STD_ALL + * if no restrictions are specified. 
+ */ +struct v4l2_fwnode_connector_analog { + v4l2_std_id sdtv_stds; +}; + +/** + * struct v4l2_fwnode_connector - the connector data structure + * @name: the connector device name + * @label: optional connector label + * @type: connector type + * @links: list of all connector &struct v4l2_connector_link links + * @nr_of_links: total number of links + * @connector: connector configuration + * @connector.analog: analog connector configuration + * &struct v4l2_fwnode_connector_analog + */ +struct v4l2_fwnode_connector { + const char *name; + const char *label; + enum v4l2_connector_type type; + struct list_head links; + unsigned int nr_of_links; + + union { + struct v4l2_fwnode_connector_analog analog; + /* future connectors */ + } connector; +}; + +/** + * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties + * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property + * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus + * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus + * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus + * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type + * @V4L2_FWNODE_BUS_TYPE_DPI: Video Parallel Interface bus + * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types + */ +enum v4l2_fwnode_bus_type { + V4L2_FWNODE_BUS_TYPE_GUESS = 0, + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_FWNODE_BUS_TYPE_BT656, + V4L2_FWNODE_BUS_TYPE_DPI, + NR_OF_V4L2_FWNODE_BUS_TYPE +}; + +/** + * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties + * @fwnode: pointer to the endpoint's fwnode handle + * @vep: pointer to the V4L2 fwnode data structure + * + * This function parses the V4L2 fwnode endpoint specific parameters from the 
+ * firmware. There are two ways to use this function, either by letting it + * obtain the type of the bus (by setting the @vep.bus_type field to + * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum + * v4l2_mbus_type types. + * + * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type" + * property to determine the type when it is available. The caller is + * responsible for validating the contents of @vep.bus_type field after the call + * returns. + * + * As a deprecated functionality to support older DT bindings without "bus-type" + * property for devices that support multiple types, if the "bus-type" property + * does not exist, the function will attempt to guess the type based on the + * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW + * DRIVERS OR BINDINGS. + * + * It is also possible to set @vep.bus_type corresponding to an actual bus. In + * this case the function will only attempt to parse properties related to this + * bus, and it will return an error if the value of the "bus-type" property + * corresponds to a different bus. + * + * The caller is required to initialise all fields of @vep, either with + * explicitly values, or by zeroing them. + * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * NOTE: This function does not parse "link-frequencies" property as its size is + * not known in advance. Please use v4l2_fwnode_endpoint_alloc_parse() if you + * need properties of variable size. 
+ * + * Return: %0 on success or a negative error code on failure: + * %-ENOMEM on memory allocation failure + * %-EINVAL on parsing failure + * %-ENXIO on mismatching bus types + */ +int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by + * v4l2_fwnode_endpoint_alloc_parse() + * @vep: the V4L2 fwnode the resources of which are to be released + * + * It is safe to call this function with NULL argument or on a V4L2 fwnode the + * parsing of which failed. + */ +void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties + * @fwnode: pointer to the endpoint's fwnode handle + * @vep: pointer to the V4L2 fwnode data structure + * + * This function parses the V4L2 fwnode endpoint specific parameters from the + * firmware. There are two ways to use this function, either by letting it + * obtain the type of the bus (by setting the @vep.bus_type field to + * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum + * v4l2_mbus_type types. + * + * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type" + * property to determine the type when it is available. The caller is + * responsible for validating the contents of @vep.bus_type field after the call + * returns. + * + * As a deprecated functionality to support older DT bindings without "bus-type" + * property for devices that support multiple types, if the "bus-type" property + * does not exist, the function will attempt to guess the type based on the + * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW + * DRIVERS OR BINDINGS. + * + * It is also possible to set @vep.bus_type corresponding to an actual bus. 
In + * this case the function will only attempt to parse properties related to this + * bus, and it will return an error if the value of the "bus-type" property + * corresponds to a different bus. + * + * The caller is required to initialise all fields of @vep, either with + * explicitly values, or by zeroing them. + * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * v4l2_fwnode_endpoint_alloc_parse() has two important differences to + * v4l2_fwnode_endpoint_parse(): + * + * 1. It also parses variable size data. + * + * 2. The memory it has allocated to store the variable size data must be freed + * using v4l2_fwnode_endpoint_free() when no longer needed. + * + * Return: %0 on success or a negative error code on failure: + * %-ENOMEM on memory allocation failure + * %-EINVAL on parsing failure + * %-ENXIO on mismatching bus types + */ +int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep); + +/** + * v4l2_fwnode_parse_link() - parse a link between two endpoints + * @fwnode: pointer to the endpoint's fwnode at the local end of the link + * @link: pointer to the V4L2 fwnode link data structure + * + * Fill the link structure with the local and remote nodes and port numbers. + * The local_node and remote_node fields are set to point to the local and + * remote port's parent nodes respectively (the port parent node being the + * parent node of the port node if that node isn't a 'ports' node, or the + * grand-parent node of the port node otherwise). + * + * A reference is taken to both the local and remote nodes, the caller must use + * v4l2_fwnode_put_link() to drop the references when done with the + * link. + * + * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be + * found. 
+ */ +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_link *link); + +/** + * v4l2_fwnode_put_link() - drop references to nodes in a link + * @link: pointer to the V4L2 fwnode link data structure + * + * Drop references to the local and remote nodes in the link. This function + * must be called on every link parsed with v4l2_fwnode_parse_link(). + */ +void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link); + +/** + * v4l2_fwnode_connector_free() - free the V4L2 connector acquired memory + * @connector: the V4L2 connector resources of which are to be released + * + * Free all allocated memory and put all links acquired by + * v4l2_fwnode_connector_parse() and v4l2_fwnode_connector_add_link(). + * + * It is safe to call this function with NULL argument or on a V4L2 connector + * the parsing of which failed. + */ +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_parse() - initialize the 'struct v4l2_fwnode_connector' + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to or to the connector endpoint fwnode handle. + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Fill the &struct v4l2_fwnode_connector with the connector type, label and + * all &enum v4l2_connector_type specific connector data. The label is optional + * so it is set to %NULL if no one was found. The function initialize the links + * to zero. Adding links to the connector is done by calling + * v4l2_fwnode_connector_add_link(). + * + * The memory allocated for the label must be freed when no longer needed. + * Freeing the memory is done by v4l2_fwnode_connector_free(). 
+ * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode is invalid + * * %-ENOTCONN if connector type is unknown or connector device can't be found + */ +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_add_link - add a link between a connector node and + * a v4l2-subdev node. + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Add a new &struct v4l2_connector_link link to the + * &struct v4l2_fwnode_connector connector links list. The link local_node + * points to the connector node, the remote_node to the host v4l2 (sub)dev. + * + * The taken references to remote_node and local_node must be dropped and the + * allocated memory must be freed when no longer needed. Both is done by calling + * v4l2_fwnode_connector_free(). + * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode or @connector is invalid or @connector type is unknown + * * %-ENOMEM on link memory allocation failure + * * %-ENOTCONN if remote connector device can't be found + * * %-ENOLINK if link parsing between v4l2 (sub)dev and connector fails + */ +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_device_parse() - parse fwnode device properties + * @dev: pointer to &struct device + * @props: pointer to &struct v4l2_fwnode_device_properties where to store the + * parsed properties values + * + * This function parses and validates the V4L2 fwnode device properties from the + * firmware interface, and fills the @struct v4l2_fwnode_device_properties + * provided by the caller. 
+ *
+ * Return:
+ * % 0 on success
+ * %-EINVAL if a parsed property value is not valid
+ */
+int v4l2_fwnode_device_parse(struct device *dev,
+ struct v4l2_fwnode_device_properties *props);
+
+/* Helper macros to access the connector links. */
+
+/** v4l2_connector_first_link - Helper macro to get the first
+ * &struct v4l2_fwnode_connector link
+ * @v4l2c: &struct v4l2_fwnode_connector owning the connector links
+ *
+ * This macro returns the first added &struct v4l2_connector_link connector
+ * link or @NULL if the connector has no links.
+ */
+#define v4l2_connector_first_link(v4l2c) \
+ list_first_entry_or_null(&(v4l2c)->links, \
+ struct v4l2_connector_link, head)
+
+/** v4l2_connector_last_link - Helper macro to get the last
+ * &struct v4l2_fwnode_connector link
+ * @v4l2c: &struct v4l2_fwnode_connector owning the connector links
+ *
+ * This macro returns the last &struct v4l2_connector_link added connector link.
+ */
+#define v4l2_connector_last_link(v4l2c) \
+ list_last_entry(&(v4l2c)->links, struct v4l2_connector_link, head)
+
+#endif /* _V4L2_FWNODE_H */
diff --git a/6.18.0/include-overrides/media/v4l2-h264.h b/6.18.0/include-overrides/media/v4l2-h264.h
new file mode 100644
index 0000000..0d9eaa9
--- /dev/null
+++ b/6.18.0/include-overrides/media/v4l2-h264.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper functions for H264 codecs.
+ *
+ * Copyright (c) 2019 Collabora, Ltd.
+ *
+ * Author: Boris Brezillon
+ */
+
+#ifndef _MEDIA_V4L2_H264_H
+#define _MEDIA_V4L2_H264_H
+
+#include <media/v4l2-ctrls.h>
+
+/**
+ * struct v4l2_h264_reflist_builder - Reference list builder object
+ *
+ * @refs.top_field_order_cnt: top field order count
+ * @refs.bottom_field_order_cnt: bottom field order count
+ * @refs.frame_num: reference frame number
+ * @refs.longterm: set to true for a long term reference
+ * @refs: array of references
+ * @cur_pic_order_count: picture order count of the frame being decoded
+ * @cur_pic_fields: fields present in the frame being decoded
+ * @unordered_reflist: unordered list of references. Will be used to generate
+ * ordered P/B0/B1 lists
+ * @num_valid: number of valid references in the refs array
+ *
+ * This object stores the context of the P/B0/B1 reference list builder.
+ * This procedure is described in section '8.2.4 Decoding process for reference
+ * picture lists construction' of the H264 spec.
+ */
+struct v4l2_h264_reflist_builder {
+ struct {
+ s32 top_field_order_cnt;
+ s32 bottom_field_order_cnt;
+ int frame_num;
+ u16 longterm : 1;
+ } refs[V4L2_H264_NUM_DPB_ENTRIES];
+
+ s32 cur_pic_order_count;
+ u8 cur_pic_fields;
+
+ struct v4l2_h264_reference unordered_reflist[V4L2_H264_REF_LIST_LEN];
+ u8 num_valid;
+};
+
+void
+v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_ctrl_h264_decode_params *dec_params,
+ const struct v4l2_ctrl_h264_sps *sps,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]);
+
+/**
+ * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
+ *
+ * @builder: reference list builder context
+ * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ * @b1_reflist: 32 sized array used to store the B1 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ *
+ * This function builds the B0/B1 reference lists.
This procedure is described
+ * in section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass B0/B1 reference lists to the hardware.
+ */
+void
+v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *b0_reflist,
+ struct v4l2_h264_reference *b1_reflist);
+
+/**
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
+ *
+ * @builder: reference list builder context
+ * @reflist: 32 sized array used to store the P reference list. Each entry
+ * is a v4l2_h264_reference structure
+ *
+ * This function builds the P reference list. This procedure is described in
+ * section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass a P reference list to the hardware.
+ */
+void
+v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist);
+
+#endif /* _MEDIA_V4L2_H264_H */
diff --git a/6.18.0/include-overrides/media/v4l2-image-sizes.h b/6.18.0/include-overrides/media/v4l2-image-sizes.h
new file mode 100644
index 0000000..24a7a0b
--- /dev/null
+++ b/6.18.0/include-overrides/media/v4l2-image-sizes.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Standard image size definitions
+ *
+ * Copyright (C) 2013, Sylwester Nawrocki
+ */
+#ifndef _IMAGE_SIZES_H
+#define _IMAGE_SIZES_H
+
+#define CIF_WIDTH 352
+#define CIF_HEIGHT 288
+
+#define HD_720_WIDTH 1280
+#define HD_720_HEIGHT 720
+
+#define HD_1080_WIDTH 1920
+#define HD_1080_HEIGHT 1080
+
+#define QCIF_WIDTH 176
+#define QCIF_HEIGHT 144
+
+#define QQCIF_WIDTH 88
+#define QQCIF_HEIGHT 72
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+#define QVGA_WIDTH 320
+#define QVGA_HEIGHT 240
+
+#define SVGA_WIDTH 800
+#define SVGA_HEIGHT 600
+
+#define SXGA_WIDTH 1280
+#define SXGA_HEIGHT 1024
+
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#define UXGA_WIDTH 1600
+#define UXGA_HEIGHT 1200
+
+#define XGA_WIDTH 1024
+#define XGA_HEIGHT 768
+
+#endif /* _IMAGE_SIZES_H */
diff --git a/6.18.0/include-overrides/media/v4l2-ioctl.h b/6.18.0/include-overrides/media/v4l2-ioctl.h
new file mode 100644
index 0000000..6f7a583
--- /dev/null
+++ b/6.18.0/include-overrides/media/v4l2-ioctl.h
@@ -0,0 +1,785 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * V 4 L 2 D R I V E R H E L P E R A P I
+ *
+ * Moved from videodev2.h
+ *
+ * Some commonly needed functions for drivers (v4l2-common.o module)
+ */
+#ifndef _V4L2_IOCTL_H
+#define _V4L2_IOCTL_H
+
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/sched/signal.h>
+#include <linux/compiler.h> /* need __user */
+#include <linux/videodev2.h>
+
+struct v4l2_fh;
+
+/**
+ * struct v4l2_ioctl_ops - describe operations for each V4L2 ioctl
+ *
+ * @vidioc_querycap: pointer to the function that implements
+ * :ref:`VIDIOC_QUERYCAP ` ioctl
+ * @vidioc_enum_fmt_vid_cap: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for video capture in single and multi plane mode
+ * @vidioc_enum_fmt_vid_overlay: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for video overlay
+ * @vidioc_enum_fmt_vid_out: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for video output in single and multi plane mode
+ * @vidioc_enum_fmt_sdr_cap: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for Software Defined Radio capture
+ * @vidioc_enum_fmt_sdr_out: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for Software Defined Radio output
+ * @vidioc_enum_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for metadata capture
+ * @vidioc_enum_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT ` ioctl logic
+ * for metadata output
+ *
@vidioc_g_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video capture + * in single plane mode + * @vidioc_g_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video overlay + * @vidioc_g_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_g_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video overlay output + * @vidioc_g_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for raw VBI capture + * @vidioc_g_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for raw VBI output + * @vidioc_g_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for sliced VBI capture + * @vidioc_g_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for sliced VBI output + * @vidioc_g_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_g_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_g_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_g_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_g_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for metadata capture + * @vidioc_g_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT ` ioctl logic for metadata output + * @vidioc_s_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT 
` ioctl logic for video capture + * in single plane mode + * @vidioc_s_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video overlay + * @vidioc_s_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_s_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video overlay output + * @vidioc_s_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for raw VBI capture + * @vidioc_s_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for raw VBI output + * @vidioc_s_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for sliced VBI capture + * @vidioc_s_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for sliced VBI output + * @vidioc_s_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_s_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_s_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_s_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_s_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for metadata capture + * @vidioc_s_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT ` ioctl logic for metadata output + * @vidioc_try_fmt_vid_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video capture + * in single plane mode + * 
@vidioc_try_fmt_vid_overlay: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video overlay + * @vidioc_try_fmt_vid_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video out + * in single plane mode + * @vidioc_try_fmt_vid_out_overlay: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video overlay + * output + * @vidioc_try_fmt_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for raw VBI capture + * @vidioc_try_fmt_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for raw VBI output + * @vidioc_try_fmt_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for sliced VBI + * capture + * @vidioc_try_fmt_sliced_vbi_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for sliced VBI output + * @vidioc_try_fmt_vid_cap_mplane: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video capture + * in multiple plane mode + * @vidioc_try_fmt_vid_out_mplane: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for video out + * in multiplane plane mode + * @vidioc_try_fmt_sdr_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for Software Defined + * Radio capture + * @vidioc_try_fmt_sdr_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for Software Defined + * Radio output + * @vidioc_try_fmt_meta_cap: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for metadata capture + * @vidioc_try_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT ` ioctl logic for metadata output + * @vidioc_reqbufs: pointer to the function that implements + * :ref:`VIDIOC_REQBUFS ` ioctl + * @vidioc_querybuf: pointer to the function that implements + * 
:ref:`VIDIOC_QUERYBUF ` ioctl + * @vidioc_qbuf: pointer to the function that implements + * :ref:`VIDIOC_QBUF ` ioctl + * @vidioc_expbuf: pointer to the function that implements + * :ref:`VIDIOC_EXPBUF ` ioctl + * @vidioc_dqbuf: pointer to the function that implements + * :ref:`VIDIOC_DQBUF ` ioctl + * @vidioc_create_bufs: pointer to the function that implements + * :ref:`VIDIOC_CREATE_BUFS ` ioctl + * @vidioc_prepare_buf: pointer to the function that implements + * :ref:`VIDIOC_PREPARE_BUF ` ioctl + * @vidioc_remove_bufs: pointer to the function that implements + * :ref:`VIDIOC_REMOVE_BUFS ` ioctl + * @vidioc_overlay: pointer to the function that implements + * :ref:`VIDIOC_OVERLAY ` ioctl + * @vidioc_g_fbuf: pointer to the function that implements + * :ref:`VIDIOC_G_FBUF ` ioctl + * @vidioc_s_fbuf: pointer to the function that implements + * :ref:`VIDIOC_S_FBUF ` ioctl + * @vidioc_streamon: pointer to the function that implements + * :ref:`VIDIOC_STREAMON ` ioctl + * @vidioc_streamoff: pointer to the function that implements + * :ref:`VIDIOC_STREAMOFF ` ioctl + * @vidioc_g_std: pointer to the function that implements + * :ref:`VIDIOC_G_STD ` ioctl + * @vidioc_s_std: pointer to the function that implements + * :ref:`VIDIOC_S_STD ` ioctl + * @vidioc_querystd: pointer to the function that implements + * :ref:`VIDIOC_QUERYSTD ` ioctl + * @vidioc_enum_input: pointer to the function that implements + * :ref:`VIDIOC_ENUM_INPUT ` ioctl + * @vidioc_g_input: pointer to the function that implements + * :ref:`VIDIOC_G_INPUT ` ioctl + * @vidioc_s_input: pointer to the function that implements + * :ref:`VIDIOC_S_INPUT ` ioctl + * @vidioc_enum_output: pointer to the function that implements + * :ref:`VIDIOC_ENUM_OUTPUT ` ioctl + * @vidioc_g_output: pointer to the function that implements + * :ref:`VIDIOC_G_OUTPUT ` ioctl + * @vidioc_s_output: pointer to the function that implements + * :ref:`VIDIOC_S_OUTPUT ` ioctl + * @vidioc_query_ext_ctrl: pointer to the function that 
implements + * :ref:`VIDIOC_QUERY_EXT_CTRL ` ioctl + * @vidioc_g_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_G_EXT_CTRLS ` ioctl + * @vidioc_s_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_S_EXT_CTRLS ` ioctl + * @vidioc_try_ext_ctrls: pointer to the function that implements + * :ref:`VIDIOC_TRY_EXT_CTRLS ` ioctl + * @vidioc_querymenu: pointer to the function that implements + * :ref:`VIDIOC_QUERYMENU ` ioctl + * @vidioc_enumaudio: pointer to the function that implements + * :ref:`VIDIOC_ENUMAUDIO ` ioctl + * @vidioc_g_audio: pointer to the function that implements + * :ref:`VIDIOC_G_AUDIO ` ioctl + * @vidioc_s_audio: pointer to the function that implements + * :ref:`VIDIOC_S_AUDIO ` ioctl + * @vidioc_enumaudout: pointer to the function that implements + * :ref:`VIDIOC_ENUMAUDOUT ` ioctl + * @vidioc_g_audout: pointer to the function that implements + * :ref:`VIDIOC_G_AUDOUT ` ioctl + * @vidioc_s_audout: pointer to the function that implements + * :ref:`VIDIOC_S_AUDOUT ` ioctl + * @vidioc_g_modulator: pointer to the function that implements + * :ref:`VIDIOC_G_MODULATOR ` ioctl + * @vidioc_s_modulator: pointer to the function that implements + * :ref:`VIDIOC_S_MODULATOR ` ioctl + * @vidioc_g_pixelaspect: pointer to the function that implements + * the pixelaspect part of the :ref:`VIDIOC_CROPCAP ` ioctl + * @vidioc_g_selection: pointer to the function that implements + * :ref:`VIDIOC_G_SELECTION ` ioctl + * @vidioc_s_selection: pointer to the function that implements + * :ref:`VIDIOC_S_SELECTION ` ioctl + * @vidioc_g_jpegcomp: pointer to the function that implements + * :ref:`VIDIOC_G_JPEGCOMP ` ioctl + * @vidioc_s_jpegcomp: pointer to the function that implements + * :ref:`VIDIOC_S_JPEGCOMP ` ioctl + * @vidioc_g_enc_index: pointer to the function that implements + * :ref:`VIDIOC_G_ENC_INDEX ` ioctl + * @vidioc_encoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_ENCODER_CMD ` ioctl + * 
@vidioc_try_encoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_TRY_ENCODER_CMD ` ioctl + * @vidioc_decoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_DECODER_CMD ` ioctl + * @vidioc_try_decoder_cmd: pointer to the function that implements + * :ref:`VIDIOC_TRY_DECODER_CMD ` ioctl + * @vidioc_g_parm: pointer to the function that implements + * :ref:`VIDIOC_G_PARM ` ioctl + * @vidioc_s_parm: pointer to the function that implements + * :ref:`VIDIOC_S_PARM ` ioctl + * @vidioc_g_tuner: pointer to the function that implements + * :ref:`VIDIOC_G_TUNER ` ioctl + * @vidioc_s_tuner: pointer to the function that implements + * :ref:`VIDIOC_S_TUNER ` ioctl + * @vidioc_g_frequency: pointer to the function that implements + * :ref:`VIDIOC_G_FREQUENCY ` ioctl + * @vidioc_s_frequency: pointer to the function that implements + * :ref:`VIDIOC_S_FREQUENCY ` ioctl + * @vidioc_enum_freq_bands: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FREQ_BANDS ` ioctl + * @vidioc_g_sliced_vbi_cap: pointer to the function that implements + * :ref:`VIDIOC_G_SLICED_VBI_CAP ` ioctl + * @vidioc_log_status: pointer to the function that implements + * :ref:`VIDIOC_LOG_STATUS ` ioctl + * @vidioc_s_hw_freq_seek: pointer to the function that implements + * :ref:`VIDIOC_S_HW_FREQ_SEEK ` ioctl + * @vidioc_g_register: pointer to the function that implements + * :ref:`VIDIOC_DBG_G_REGISTER ` ioctl + * @vidioc_s_register: pointer to the function that implements + * :ref:`VIDIOC_DBG_S_REGISTER ` ioctl + * @vidioc_g_chip_info: pointer to the function that implements + * :ref:`VIDIOC_DBG_G_CHIP_INFO ` ioctl + * @vidioc_enum_framesizes: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FRAMESIZES ` ioctl + * @vidioc_enum_frameintervals: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FRAMEINTERVALS ` ioctl + * @vidioc_s_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_S_DV_TIMINGS ` ioctl + * @vidioc_g_dv_timings: 
pointer to the function that implements + * :ref:`VIDIOC_G_DV_TIMINGS ` ioctl + * @vidioc_query_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_QUERY_DV_TIMINGS ` ioctl + * @vidioc_enum_dv_timings: pointer to the function that implements + * :ref:`VIDIOC_ENUM_DV_TIMINGS ` ioctl + * @vidioc_dv_timings_cap: pointer to the function that implements + * :ref:`VIDIOC_DV_TIMINGS_CAP ` ioctl + * @vidioc_g_edid: pointer to the function that implements + * :ref:`VIDIOC_G_EDID ` ioctl + * @vidioc_s_edid: pointer to the function that implements + * :ref:`VIDIOC_S_EDID ` ioctl + * @vidioc_subscribe_event: pointer to the function that implements + * :ref:`VIDIOC_SUBSCRIBE_EVENT ` ioctl + * @vidioc_unsubscribe_event: pointer to the function that implements + * :ref:`VIDIOC_UNSUBSCRIBE_EVENT ` ioctl + * @vidioc_default: pointed used to allow other ioctls + */ +struct v4l2_ioctl_ops { + /* ioctl callbacks */ + + /* VIDIOC_QUERYCAP handler */ + int (*vidioc_querycap)(struct file *file, void *priv, + struct v4l2_capability *cap); + + /* VIDIOC_ENUM_FMT handlers */ + int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_vid_out)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_out)(struct file *file, void *priv, + struct v4l2_fmtdesc *f); + + /* VIDIOC_G_FMT handlers */ + int (*vidioc_g_fmt_vid_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out)(struct file *file, void *priv, + 
struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_vbi_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_meta_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_g_fmt_meta_out)(struct file *file, void *priv, + struct v4l2_format *f); + + /* VIDIOC_S_FMT handlers */ + int (*vidioc_s_fmt_vid_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vbi_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_sdr_cap)(struct 
file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_sdr_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_meta_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_s_fmt_meta_out)(struct file *file, void *priv, + struct v4l2_format *f); + + /* VIDIOC_TRY_FMT handlers */ + int (*vidioc_try_fmt_vid_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vbi_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_out)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_meta_cap)(struct file *file, void *priv, + struct v4l2_format *f); + int (*vidioc_try_fmt_meta_out)(struct file *file, void *priv, + struct v4l2_format *f); + + /* Buffer handlers */ + int (*vidioc_reqbufs)(struct file *file, void *priv, + struct v4l2_requestbuffers *b); + int (*vidioc_querybuf)(struct file *file, void *priv, + struct v4l2_buffer *b); + int (*vidioc_qbuf)(struct file *file, void *priv, + struct v4l2_buffer *b); + int (*vidioc_expbuf)(struct file *file, void *priv, + struct v4l2_exportbuffer *e); + 
int (*vidioc_dqbuf)(struct file *file, void *priv, + struct v4l2_buffer *b); + + int (*vidioc_create_bufs)(struct file *file, void *priv, + struct v4l2_create_buffers *b); + int (*vidioc_prepare_buf)(struct file *file, void *priv, + struct v4l2_buffer *b); + int (*vidioc_remove_bufs)(struct file *file, void *priv, + struct v4l2_remove_buffers *d); + + int (*vidioc_overlay)(struct file *file, void *priv, unsigned int i); + int (*vidioc_g_fbuf)(struct file *file, void *priv, + struct v4l2_framebuffer *a); + int (*vidioc_s_fbuf)(struct file *file, void *priv, + const struct v4l2_framebuffer *a); + + /* Stream on/off */ + int (*vidioc_streamon)(struct file *file, void *priv, + enum v4l2_buf_type i); + int (*vidioc_streamoff)(struct file *file, void *priv, + enum v4l2_buf_type i); + + /* + * Standard handling + * + * Note: ENUMSTD is handled by videodev.c + */ + int (*vidioc_g_std)(struct file *file, void *priv, v4l2_std_id *norm); + int (*vidioc_s_std)(struct file *file, void *priv, v4l2_std_id norm); + int (*vidioc_querystd)(struct file *file, void *priv, v4l2_std_id *a); + + /* Input handling */ + int (*vidioc_enum_input)(struct file *file, void *priv, + struct v4l2_input *inp); + int (*vidioc_g_input)(struct file *file, void *priv, unsigned int *i); + int (*vidioc_s_input)(struct file *file, void *priv, unsigned int i); + + /* Output handling */ + int (*vidioc_enum_output)(struct file *file, void *priv, + struct v4l2_output *a); + int (*vidioc_g_output)(struct file *file, void *priv, unsigned int *i); + int (*vidioc_s_output)(struct file *file, void *priv, unsigned int i); + + /* Control handling */ + int (*vidioc_query_ext_ctrl)(struct file *file, void *priv, + struct v4l2_query_ext_ctrl *a); + int (*vidioc_g_ext_ctrls)(struct file *file, void *priv, + struct v4l2_ext_controls *a); + int (*vidioc_s_ext_ctrls)(struct file *file, void *priv, + struct v4l2_ext_controls *a); + int (*vidioc_try_ext_ctrls)(struct file *file, void *priv, + struct v4l2_ext_controls *a); + 
int (*vidioc_querymenu)(struct file *file, void *priv, + struct v4l2_querymenu *a); + + /* Audio ioctls */ + int (*vidioc_enumaudio)(struct file *file, void *priv, + struct v4l2_audio *a); + int (*vidioc_g_audio)(struct file *file, void *priv, + struct v4l2_audio *a); + int (*vidioc_s_audio)(struct file *file, void *priv, + const struct v4l2_audio *a); + + /* Audio out ioctls */ + int (*vidioc_enumaudout)(struct file *file, void *priv, + struct v4l2_audioout *a); + int (*vidioc_g_audout)(struct file *file, void *priv, + struct v4l2_audioout *a); + int (*vidioc_s_audout)(struct file *file, void *priv, + const struct v4l2_audioout *a); + int (*vidioc_g_modulator)(struct file *file, void *priv, + struct v4l2_modulator *a); + int (*vidioc_s_modulator)(struct file *file, void *priv, + const struct v4l2_modulator *a); + /* Crop ioctls */ + int (*vidioc_g_pixelaspect)(struct file *file, void *priv, + int buf_type, struct v4l2_fract *aspect); + int (*vidioc_g_selection)(struct file *file, void *priv, + struct v4l2_selection *s); + int (*vidioc_s_selection)(struct file *file, void *priv, + struct v4l2_selection *s); + /* Compression ioctls */ + int (*vidioc_g_jpegcomp)(struct file *file, void *priv, + struct v4l2_jpegcompression *a); + int (*vidioc_s_jpegcomp)(struct file *file, void *priv, + const struct v4l2_jpegcompression *a); + int (*vidioc_g_enc_index)(struct file *file, void *priv, + struct v4l2_enc_idx *a); + int (*vidioc_encoder_cmd)(struct file *file, void *priv, + struct v4l2_encoder_cmd *a); + int (*vidioc_try_encoder_cmd)(struct file *file, void *priv, + struct v4l2_encoder_cmd *a); + int (*vidioc_decoder_cmd)(struct file *file, void *priv, + struct v4l2_decoder_cmd *a); + int (*vidioc_try_decoder_cmd)(struct file *file, void *priv, + struct v4l2_decoder_cmd *a); + + /* Stream type-dependent parameter ioctls */ + int (*vidioc_g_parm)(struct file *file, void *priv, + struct v4l2_streamparm *a); + int (*vidioc_s_parm)(struct file *file, void *priv, + struct 
v4l2_streamparm *a); + + /* Tuner ioctls */ + int (*vidioc_g_tuner)(struct file *file, void *priv, + struct v4l2_tuner *a); + int (*vidioc_s_tuner)(struct file *file, void *priv, + const struct v4l2_tuner *a); + int (*vidioc_g_frequency)(struct file *file, void *priv, + struct v4l2_frequency *a); + int (*vidioc_s_frequency)(struct file *file, void *priv, + const struct v4l2_frequency *a); + int (*vidioc_enum_freq_bands)(struct file *file, void *priv, + struct v4l2_frequency_band *band); + + /* Sliced VBI cap */ + int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *priv, + struct v4l2_sliced_vbi_cap *a); + + /* Log status ioctl */ + int (*vidioc_log_status)(struct file *file, void *priv); + + int (*vidioc_s_hw_freq_seek)(struct file *file, void *priv, + const struct v4l2_hw_freq_seek *a); + + /* Debugging ioctls */ +#ifdef CONFIG_VIDEO_ADV_DEBUG + int (*vidioc_g_register)(struct file *file, void *priv, + struct v4l2_dbg_register *reg); + int (*vidioc_s_register)(struct file *file, void *priv, + const struct v4l2_dbg_register *reg); + + int (*vidioc_g_chip_info)(struct file *file, void *priv, + struct v4l2_dbg_chip_info *chip); +#endif + + int (*vidioc_enum_framesizes)(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize); + + int (*vidioc_enum_frameintervals)(struct file *file, void *priv, + struct v4l2_frmivalenum *fival); + + /* DV Timings IOCTLs */ + int (*vidioc_s_dv_timings)(struct file *file, void *priv, + struct v4l2_dv_timings *timings); + int (*vidioc_g_dv_timings)(struct file *file, void *priv, + struct v4l2_dv_timings *timings); + int (*vidioc_query_dv_timings)(struct file *file, void *priv, + struct v4l2_dv_timings *timings); + int (*vidioc_enum_dv_timings)(struct file *file, void *priv, + struct v4l2_enum_dv_timings *timings); + int (*vidioc_dv_timings_cap)(struct file *file, void *priv, + struct v4l2_dv_timings_cap *cap); + int (*vidioc_g_edid)(struct file *file, void *priv, + struct v4l2_edid *edid); + int (*vidioc_s_edid)(struct file 
*file, void *priv, + struct v4l2_edid *edid); + + int (*vidioc_subscribe_event)(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + + /* For other private ioctls */ + long (*vidioc_default)(struct file *file, void *priv, + bool valid_prio, unsigned int cmd, void *arg); +}; + + +/* v4l debugging and diagnostics */ + +/* Device debug flags to be used with the video device debug attribute */ + +/* Just log the ioctl name + error code */ +#define V4L2_DEV_DEBUG_IOCTL 0x01 +/* Log the ioctl name arguments + error code */ +#define V4L2_DEV_DEBUG_IOCTL_ARG 0x02 +/* Log the file operations open, release, mmap and get_unmapped_area */ +#define V4L2_DEV_DEBUG_FOP 0x04 +/* Log the read and write file operations and the VIDIOC_(D)QBUF ioctls */ +#define V4L2_DEV_DEBUG_STREAMING 0x08 +/* Log poll() */ +#define V4L2_DEV_DEBUG_POLL 0x10 +/* Log controls */ +#define V4L2_DEV_DEBUG_CTRL 0x20 + +/* Video standard functions */ + +/** + * v4l2_norm_to_name - Ancillary routine to analog TV standard name from its ID. + * + * @id: analog TV standard ID. + * + * Return: returns a string with the name of the analog TV standard. + * If the standard is not found or if @id points to multiple standard, + * it returns "Unknown". + */ +const char *v4l2_norm_to_name(v4l2_std_id id); + +/** + * v4l2_video_std_frame_period - Ancillary routine that fills a + * struct &v4l2_fract pointer with the default framerate fraction. + * + * @id: analog TV standard ID. + * @frameperiod: struct &v4l2_fract pointer to be filled + * + */ +void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod); + +/** + * v4l2_video_std_construct - Ancillary routine that fills in the fields of + * a &v4l2_standard structure according to the @id parameter. + * + * @vs: struct &v4l2_standard pointer to be filled + * @id: analog TV standard ID. + * @name: name of the standard to be used + * + * .. 
note:: + * + * This ancillary routine is obsolete. Shouldn't be used on newer drivers. + */ +int v4l2_video_std_construct(struct v4l2_standard *vs, + int id, const char *name); + +/** + * v4l_video_std_enumstd - Ancillary routine that fills in the fields of + * a &v4l2_standard structure according to the @id and @vs->index + * parameters. + * + * @vs: struct &v4l2_standard pointer to be filled. + * @id: analog TV standard ID. + * + */ +int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id); + +/** + * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a + * human-readable format. + * + * @prefix: prefix to be added at the ioctl prints. + * @cmd: ioctl name + * + * .. note:: + * + * If prefix != %NULL, then it will issue a + * ``printk(KERN_DEBUG "%s: ", prefix)`` first. + */ +void v4l_printk_ioctl(const char *prefix, unsigned int cmd); + +struct video_device; + +/* names for fancy debug output */ +extern const char *v4l2_field_names[]; +extern const char *v4l2_type_names[]; + +#ifdef CONFIG_COMPAT +/** + * v4l2_compat_ioctl32 -32 Bits compatibility layer for 64 bits processors + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + */ +long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +unsigned int v4l2_compat_translate_cmd(unsigned int cmd); +unsigned int v4l2_translate_cmd(unsigned int cmd); +int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd); +int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd); +int v4l2_compat_get_array_args(struct file *file, void *mbuf, + void __user *user_ptr, size_t array_size, + unsigned int cmd, void *arg); +int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr, + void *mbuf, size_t array_size, + unsigned int cmd, void *arg); + +/** + * typedef v4l2_kioctl - Typedef used to pass an ioctl handler. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. 
+ * @arg: Ioctl argument. + */ +typedef long (*v4l2_kioctl)(struct file *file, unsigned int cmd, void *arg); + +/** + * video_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will handle the ioctl + * + * .. note:: + * + * This routine should be used only inside the V4L2 core. + */ +long int video_usercopy(struct file *file, unsigned int cmd, + unsigned long int arg, v4l2_kioctl func); + +/** + * video_ioctl2 - Handles a V4L2 ioctl. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Method used to handle an ioctl. Should be used to fill the + * &v4l2_ioctl_ops.unlocked_ioctl on all V4L2 drivers. + */ +long int video_ioctl2(struct file *file, + unsigned int cmd, unsigned long int arg); + +/* + * The user space interpretation of the 'v4l2_event' differs + * based on the 'time_t' definition on 32-bit architectures, so + * the kernel has to handle both. + * This is the old version for 32-bit architectures. 
+ */ +struct v4l2_event_time32 { + __u32 type; + union { + struct v4l2_event_vsync vsync; + struct v4l2_event_ctrl ctrl; + struct v4l2_event_frame_sync frame_sync; + struct v4l2_event_src_change src_change; + struct v4l2_event_motion_det motion_det; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + +#define VIDIOC_DQEVENT_TIME32 _IOR('V', 89, struct v4l2_event_time32) + +struct v4l2_buffer_time32 { + __u32 index; + __u32 type; + __u32 bytesused; + __u32 flags; + __u32 field; + struct old_timeval32 timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; + union { + __u32 offset; + unsigned long userptr; + struct v4l2_plane *planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + union { + __s32 request_fd; + __u32 reserved; + }; +}; +#define VIDIOC_QUERYBUF_TIME32 _IOWR('V', 9, struct v4l2_buffer_time32) +#define VIDIOC_QBUF_TIME32 _IOWR('V', 15, struct v4l2_buffer_time32) +#define VIDIOC_DQBUF_TIME32 _IOWR('V', 17, struct v4l2_buffer_time32) +#define VIDIOC_PREPARE_BUF_TIME32 _IOWR('V', 93, struct v4l2_buffer_time32) + +#endif /* _V4L2_IOCTL_H */ diff --git a/6.18.0/include-overrides/media/v4l2-jpeg.h b/6.18.0/include-overrides/media/v4l2-jpeg.h new file mode 100644 index 0000000..62dda15 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-jpeg.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 JPEG helpers header + * + * Copyright (C) 2019 Pengutronix, Philipp Zabel + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) + */ + +#ifndef _V4L2_JPEG_H +#define _V4L2_JPEG_H + +#include + +#define V4L2_JPEG_MAX_COMPONENTS 4 +#define V4L2_JPEG_MAX_TABLES 4 +/* + * Prefixes used to generate huffman table class and destination identifiers as + * described below: + * + * V4L2_JPEG_LUM_HT | V4L2_JPEG_DC_HT : Prefix for Luma DC coefficients + * huffman table + * V4L2_JPEG_LUM_HT | V4L2_JPEG_AC_HT : 
Prefix for Luma AC coefficients + * huffman table + * V4L2_JPEG_CHR_HT | V4L2_JPEG_DC_HT : Prefix for Chroma DC coefficients + * huffman table + * V4L2_JPEG_CHR_HT | V4L2_JPEG_AC_HT : Prefix for Chroma AC coefficients + * huffman table + */ +#define V4L2_JPEG_LUM_HT 0x00 +#define V4L2_JPEG_CHR_HT 0x01 +#define V4L2_JPEG_DC_HT 0x00 +#define V4L2_JPEG_AC_HT 0x10 + +/* Length of reference huffman tables as provided in Table K.3 of ITU-T.81 */ +#define V4L2_JPEG_REF_HT_AC_LEN 178 +#define V4L2_JPEG_REF_HT_DC_LEN 28 + +/* Array size for 8x8 block of samples or DCT coefficient */ +#define V4L2_JPEG_PIXELS_IN_BLOCK 64 + +/** + * struct v4l2_jpeg_reference - reference into the JPEG buffer + * @start: pointer to the start of the referenced segment or table + * @length: size of the referenced segment or table + * + * When referencing marker segments, start points right after the marker code, + * and length is the size of the segment parameters, excluding the marker code. + */ +struct v4l2_jpeg_reference { + u8 *start; + size_t length; +}; + +/* B.2.2 Frame header syntax */ + +/** + * struct v4l2_jpeg_frame_component_spec - frame component-specification + * @component_identifier: C[i] + * @horizontal_sampling_factor: H[i] + * @vertical_sampling_factor: V[i] + * @quantization_table_selector: quantization table destination selector Tq[i] + */ +struct v4l2_jpeg_frame_component_spec { + u8 component_identifier; + u8 horizontal_sampling_factor; + u8 vertical_sampling_factor; + u8 quantization_table_selector; +}; + +/** + * struct v4l2_jpeg_frame_header - JPEG frame header + * @height: Y + * @width: X + * @precision: P + * @num_components: Nf + * @component: component-specification, see v4l2_jpeg_frame_component_spec + * @subsampling: decoded subsampling from component-specification + */ +struct v4l2_jpeg_frame_header { + u16 height; + u16 width; + u8 precision; + u8 num_components; + struct v4l2_jpeg_frame_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + enum 
v4l2_jpeg_chroma_subsampling subsampling; +}; + +/* B.2.3 Scan header syntax */ + +/** + * struct v4l2_jpeg_scan_component_spec - scan component-specification + * @component_selector: Cs[j] + * @dc_entropy_coding_table_selector: Td[j] + * @ac_entropy_coding_table_selector: Ta[j] + */ +struct v4l2_jpeg_scan_component_spec { + u8 component_selector; + u8 dc_entropy_coding_table_selector; + u8 ac_entropy_coding_table_selector; +}; + +/** + * struct v4l2_jpeg_scan_header - JPEG scan header + * @num_components: Ns + * @component: component-specification, see v4l2_jpeg_scan_component_spec + */ +struct v4l2_jpeg_scan_header { + u8 num_components; /* Ns */ + struct v4l2_jpeg_scan_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + /* Ss, Se, Ah, and Al are not used by any driver */ +}; + +/** + * enum v4l2_jpeg_app14_tf - APP14 transform flag + * According to Rec. ITU-T T.872 (06/2012) 6.5.3 + * APP14 segment is for color encoding, it contains a transform flag, + * which may have values of 0, 1 and 2 and are interpreted as follows: + * @V4L2_JPEG_APP14_TF_CMYK_RGB: CMYK for images encoded with four components + * RGB for images encoded with three components + * @V4L2_JPEG_APP14_TF_YCBCR: an image encoded with three components using YCbCr + * @V4L2_JPEG_APP14_TF_YCCK: an image encoded with four components using YCCK + * @V4L2_JPEG_APP14_TF_UNKNOWN: indicate app14 is not present + */ +enum v4l2_jpeg_app14_tf { + V4L2_JPEG_APP14_TF_CMYK_RGB = 0, + V4L2_JPEG_APP14_TF_YCBCR = 1, + V4L2_JPEG_APP14_TF_YCCK = 2, + V4L2_JPEG_APP14_TF_UNKNOWN = -1, +}; + +/** + * struct v4l2_jpeg_header - parsed JPEG header + * @sof: pointer to frame header and size + * @sos: pointer to scan header and size + * @num_dht: number of entries in @dht + * @dht: pointers to huffman tables and sizes + * @num_dqt: number of entries in @dqt + * @dqt: pointers to quantization tables and sizes + * @frame: parsed frame header + * @scan: pointer to parsed scan header, optional + * @quantization_tables: 
references to four quantization tables, optional + * @huffman_tables: references to four Huffman tables in DC0, DC1, AC0, AC1 + * order, optional + * @restart_interval: number of MCU per restart interval, Ri + * @ecs_offset: buffer offset in bytes to the entropy coded segment + * @app14_tf: transform flag from app14 data + * + * When this structure is passed to v4l2_jpeg_parse_header, the optional scan, + * quantization_tables, and huffman_tables pointers must be initialized to NULL + * or point at valid memory. + */ +struct v4l2_jpeg_header { + struct v4l2_jpeg_reference sof; + struct v4l2_jpeg_reference sos; + unsigned int num_dht; + struct v4l2_jpeg_reference dht[V4L2_JPEG_MAX_TABLES]; + unsigned int num_dqt; + struct v4l2_jpeg_reference dqt[V4L2_JPEG_MAX_TABLES]; + + struct v4l2_jpeg_frame_header frame; + struct v4l2_jpeg_scan_header *scan; + struct v4l2_jpeg_reference *quantization_tables; + struct v4l2_jpeg_reference *huffman_tables; + u16 restart_interval; + size_t ecs_offset; + enum v4l2_jpeg_app14_tf app14_tf; +}; + +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out); + +extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; +extern const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN]; +extern const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN]; +extern const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN]; +extern const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN]; + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-mc.h b/6.18.0/include-overrides/media/v4l2-mc.h new file mode 100644 index 0000000..1837c9f --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-mc.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * v4l2-mc.h - Media Controller V4L2 types and prototypes + * + * 
Copyright (C) 2016 Mauro Carvalho Chehab + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + */ + +#ifndef _V4L2_MC_H +#define _V4L2_MC_H + +#include <media/media-device.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-subdev.h> +#include <linux/types.h> + +/* We don't need to include pci.h or usb.h here */ +struct pci_dev; +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER +/** + * v4l2_mc_create_media_graph() - create Media Controller links at the graph. + * + * @mdev: pointer to the &media_device struct. + * + * Add links between the entities commonly found on PC customer's hardware at + * the V4L2 side: camera sensors, audio and video PLL-IF decoders, tuners, + * analog TV decoder and I/O entities (video, VBI and Software Defined Radio). + * + * .. note:: + * + * Webcams are modelled in a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW are hidden. While such mapping is enough for v4l2 + * interface centric PC-consumer's hardware, V4L2 subdev centric camera + * hardware should not use this routine, as it will not build the right graph. + */ +int v4l2_mc_create_media_graph(struct media_device *mdev); + +/** + * v4l_enable_media_source() - Hold media source for exclusive use + * if free + * + * @vdev: pointer to struct video_device + * + * This interface calls enable_source handler to determine if + * media source is free for use. The enable_source handler is + * responsible for checking if the media source is free and + * start a pipeline between the media source and the media + * entity associated with the video device. This interface + * should be called from v4l2-core and dvb-core interfaces + * that change the source configuration. + * + * Return: returns zero on success or a negative error code. 
+ */ +int v4l_enable_media_source(struct video_device *vdev); + +/** + * v4l_disable_media_source() - Release media source + * + * @vdev: pointer to struct video_device + * + * This interface calls disable_source handler to release + * the media source. The disable_source handler stops the + * active media pipeline between the media source and the + * media entity associated with the video device. + * + * Return: returns zero on success or a negative error code. + */ +void v4l_disable_media_source(struct video_device *vdev); + +/* + * v4l_vb2q_enable_media_source - Hold media source for exclusive use + * if free. + * @q - pointer to struct vb2_queue + * + * Wrapper for v4l_enable_media_source(). This function should + * be called from v4l2-core to enable the media source with + * pointer to struct vb2_queue as the input argument. Some + * v4l2-core interfaces don't have access to video device and + * this interface finds the struct video_device for the q and + * calls v4l_enable_media_source(). + */ +int v4l_vb2q_enable_media_source(struct vb2_queue *q); + +/** + * v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a + * source subdev to a sink pad. + * + * @src_sd: pointer to a source subdev + * @sink: pointer to a sink pad + * @flags: the link flags + * + * This function searches for fwnode endpoint connections from a source + * subdevice to a single sink pad, and if suitable connections are found, + * translates them into media links to that pad. The function can be + * called by the sink, in its v4l2-async notifier bound callback, to create + * links from a bound source subdevice. + * + * The @flags argument specifies the link flags. The caller shall ensure that + * the flags are valid regardless of the number of links that may be created. + * For instance, setting the MEDIA_LNK_FL_ENABLED flag will cause all created + * links to be enabled, which isn't valid if more than one link is created. + * + * .. 
note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink, u32 flags); + +/** + * v4l2_create_fwnode_links - Create fwnode-based links from a source + * subdev to a sink subdev. + * + * @src_sd: pointer to a source subdevice + * @sink_sd: pointer to a sink subdevice + * + * This function searches for any and all fwnode endpoint connections + * between source and sink subdevices, and translates them into media + * links. The function can be called by the sink subdevice, in its + * v4l2-async notifier subdev bound callback, to create all links from + * a bound source subdevice. + * + * .. note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd); + +/** + * v4l2_pipeline_pm_get - Increase the use count of a pipeline + * @entity: The root entity of a pipeline + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * Update the use count of all entities in the pipeline and power entities on. + * + * This function is intended to be called in video node open. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + * + * Return 0 on success or a negative error code on failure. 
+ */ +int v4l2_pipeline_pm_get(struct media_entity *entity); + +/** + * v4l2_pipeline_pm_put - Decrease the use count of a pipeline + * @entity: The root entity of a pipeline + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * Update the use count of all entities in the pipeline and power entities off. + * + * This function is intended to be called in video node release. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + */ +void v4l2_pipeline_pm_put(struct media_entity *entity); + + +/** + * v4l2_pipeline_link_notify - Link management notification callback + * @link: The link + * @flags: New link flags that will be applied + * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*) + * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * + * React to link management on powered pipelines by updating the use count of + * all entities in the source and sink sides of the link. Entities are powered + * on or off accordingly. The use of this function should be paired + * with v4l2_pipeline_pm_{get,put}(). + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. This function will not fail for disconnection + * events. 
+ */ +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification); + +#else /* CONFIG_MEDIA_CONTROLLER */ + +static inline int v4l2_mc_create_media_graph(struct media_device *mdev) +{ + return 0; +} + +static inline int v4l_enable_media_source(struct video_device *vdev) +{ + return 0; +} + +static inline void v4l_disable_media_source(struct video_device *vdev) +{ +} + +static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + return 0; +} + +static inline int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return 0; +} + +static inline void v4l2_pipeline_pm_put(struct media_entity *entity) +{} + +static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + return 0; +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ +#endif /* _V4L2_MC_H */ diff --git a/6.18.0/include-overrides/media/v4l2-mediabus.h b/6.18.0/include-overrides/media/v4l2-mediabus.h new file mode 100644 index 0000000..24c738c --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-mediabus.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski + */ + +#ifndef V4L2_MEDIABUS_H +#define V4L2_MEDIABUS_H + +#include +#include + +/* + * How to use the V4L2_MBUS_* flags: + * Flags are defined for each of the possible states and values of a media + * bus configuration parameter. One and only one bit of each group of flags + * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config + * operation to ensure that no conflicting settings are specified when + * reporting the media bus configuration. For example, it is invalid to set or + * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the + * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag + * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set. 
+ * + * TODO: replace the existing V4L2_MBUS_* flags with structures of fields + * to avoid conflicting settings. + * + * In example: + * #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) + * #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) + * will be replaced by a field whose value reports the intended active state of + * the signal: + * unsigned int v4l2_mbus_hsync_active : 1; + */ + +/* Parallel flags */ +/* + * The client runs in master or in slave mode. By "Master mode" an operation + * mode is meant, when the client (e.g., a camera sensor) is producing + * horizontal and vertical synchronisation. In "Slave mode" the host is + * providing these signals to the slave. + */ +#define V4L2_MBUS_MASTER BIT(0) +#define V4L2_MBUS_SLAVE BIT(1) +/* + * Signal polarity flags + * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused + * V4L2_MBUS_[HV]SYNC* flags should be also used for specifying + * configuration of hardware that uses [HV]REF signals + */ +#define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) +#define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) +#define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4) +#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5) +#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6) +#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7) +#define V4L2_MBUS_PCLK_SAMPLE_DUALEDGE BIT(8) +#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(9) +#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(10) +/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */ +#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(11) +/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */ +#define V4L2_MBUS_FIELD_EVEN_LOW BIT(12) +/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */ +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(13) +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(14) +#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(15) +#define V4L2_MBUS_DATA_ENABLE_LOW BIT(16) + +/* Serial flags */ +/* Clock non-continuous mode support. 
*/ +#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0) + +#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8 + +/** + * enum v4l2_mbus_csi2_cphy_line_orders_type - CSI-2 C-PHY line order + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC: C-PHY line order ABC (default) + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB: C-PHY line order ACB + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC: C-PHY line order BAC + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA: C-PHY line order BCA + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB: C-PHY line order CAB + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA: C-PHY line order CBA + */ +enum v4l2_mbus_csi2_cphy_line_orders_type { + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA, +}; + +/** + * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @data_lanes: an array of physical data lane indexes + * @clock_lane: physical lane index of the clock lane + * @num_data_lanes: number of data lanes + * @lane_polarities: polarity of the lanes. The order is the same of + * the physical lanes. + * @line_orders: line order of the data lanes. The order is the same of the + * physical lanes. 
+ */ +struct v4l2_mbus_config_mipi_csi2 { + unsigned int flags; + unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES]; + unsigned char clock_lane; + unsigned char num_data_lanes; + bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; + enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[V4L2_MBUS_CSI2_MAX_DATA_LANES]; +}; + +/** + * struct v4l2_mbus_config_parallel - parallel data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @bus_width: bus width in bits + * @data_shift: data shift in bits + */ +struct v4l2_mbus_config_parallel { + unsigned int flags; + unsigned char bus_width; + unsigned char data_shift; +}; + +/** + * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration + * @clock_inv: polarity of clock/strobe signal + * false - not inverted, true - inverted + * @strobe: false - data/clock, true - data/strobe + * @lane_polarity: the polarities of the clock (index 0) and data lanes + * index (1) + * @data_lane: the number of the data lane + * @clock_lane: the number of the clock lane + */ +struct v4l2_mbus_config_mipi_csi1 { + unsigned char clock_inv:1; + unsigned char strobe:1; + bool lane_polarity[2]; + unsigned char data_lane; + unsigned char clock_lane; +}; + +/** + * enum v4l2_mbus_type - media bus type + * @V4L2_MBUS_UNKNOWN: unknown bus type, no V4L2 mediabus configuration + * @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync + * @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can + * also be used for BT.1120 + * @V4L2_MBUS_CSI1: MIPI CSI-1 serial interface + * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2) + * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY + * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY + * @V4L2_MBUS_DPI: MIPI VIDEO DPI interface + * @V4L2_MBUS_INVALID: invalid bus type (keep as last) + */ +enum v4l2_mbus_type { + V4L2_MBUS_UNKNOWN, + V4L2_MBUS_PARALLEL, + V4L2_MBUS_BT656, + V4L2_MBUS_CSI1, + V4L2_MBUS_CCP2, + V4L2_MBUS_CSI2_DPHY, 
+ V4L2_MBUS_CSI2_CPHY, + V4L2_MBUS_DPI, + V4L2_MBUS_INVALID, +}; + +/** + * struct v4l2_mbus_config - media bus configuration + * @type: interface type + * @link_freq: The link frequency. See also V4L2_CID_LINK_FREQ control. + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel or BT.656. + * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). + */ +struct v4l2_mbus_config { + enum v4l2_mbus_type type; + u64 link_freq; + union { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; +}; + +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_pix_format fields from a &struct v4l2_mbus_framefmt. + * + * @pix_fmt: pointer to &struct v4l2_pix_format to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ +static inline void +v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_fmt->width = mbus_fmt->width; + pix_fmt->height = mbus_fmt->height; + pix_fmt->field = mbus_fmt->field; + pix_fmt->colorspace = mbus_fmt->colorspace; + pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_fmt->quantization = mbus_fmt->quantization; + pix_fmt->xfer_func = mbus_fmt->xfer_func; +} + +/** + * v4l2_fill_mbus_format - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a + * data format code. 
+ * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model + * @code: data format code (from &enum v4l2_mbus_pixelcode) + */ +static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format *pix_fmt, + u32 code) +{ + mbus_fmt->width = pix_fmt->width; + mbus_fmt->height = pix_fmt->height; + mbus_fmt->field = pix_fmt->field; + mbus_fmt->colorspace = pix_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_fmt->quantization; + mbus_fmt->xfer_func = pix_fmt->xfer_func; + mbus_fmt->code = code; +} + +/** + * v4l2_fill_pix_format_mplane - Ancillary routine that fills a &struct + * v4l2_pix_format_mplane fields from a media bus structure. + * + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ +static inline void +v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_mp_fmt->width = mbus_fmt->width; + pix_mp_fmt->height = mbus_fmt->height; + pix_mp_fmt->field = mbus_fmt->field; + pix_mp_fmt->colorspace = mbus_fmt->colorspace; + pix_mp_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_mp_fmt->quantization = mbus_fmt->quantization; + pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; +} + +/** + * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane. 
+ * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model + */ +static inline void +v4l2_fill_mbus_format_mplane(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format_mplane *pix_mp_fmt) +{ + mbus_fmt->width = pix_mp_fmt->width; + mbus_fmt->height = pix_mp_fmt->height; + mbus_fmt->field = pix_mp_fmt->field; + mbus_fmt->colorspace = pix_mp_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_mp_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_mp_fmt->quantization; + mbus_fmt->xfer_func = pix_mp_fmt->xfer_func; +} + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-mem2mem.h b/6.18.0/include-overrides/media/v4l2-mem2mem.h new file mode 100644 index 0000000..09c6164 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-mem2mem.h @@ -0,0 +1,902 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Memory-to-memory device framework for Video for Linux 2. + * + * Helper functions for devices that use memory buffers for both source + * and destination. + * + * Copyright (c) 2009 Samsung Electronics Co., Ltd. + * Pawel Osciak, + * Marek Szyprowski, + */ + +#ifndef _MEDIA_V4L2_MEM2MEM_H +#define _MEDIA_V4L2_MEM2MEM_H + +#include + +/** + * struct v4l2_m2m_ops - mem-to-mem device driver callbacks + * @device_run: required. Begin the actual job (transaction) inside this + * callback. + * The job does NOT have to end before this callback returns + * (and it will be the usual case). When the job finishes, + * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() + * has to be called. + * @job_ready: optional. Should return 0 if the driver does not have a job + * fully prepared to run yet (i.e. it will not be able to finish a + * transaction without sleeping). If not provided, it will be + * assumed that one source and one destination buffer are all + * that is required for the driver to perform one full transaction. + * This method may not sleep. 
+ * @job_abort: optional. Informs the driver that it has to abort the currently + * running transaction as soon as possible (i.e. as soon as it can + * stop the device safely; e.g. in the next interrupt handler), + * even if the transaction would not have been finished by then. + * After the driver performs the necessary steps, it has to call + * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as + * if the transaction ended normally. + * This function does not have to (and will usually not) wait + * until the device enters a state when it can be stopped. + */ +struct v4l2_m2m_ops { + void (*device_run)(void *priv); + int (*job_ready)(void *priv); + void (*job_abort)(void *priv); +}; + +struct video_device; +struct v4l2_m2m_dev; + +/** + * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be + * processed + * + * @q: pointer to struct &vb2_queue + * @rdy_queue: List of V4L2 mem-to-mem queues + * @rdy_spinlock: spin lock to protect the struct usage + * @num_rdy: number of buffers ready to be processed + * @buffered: is the queue buffered? + * + * Queue for buffers ready to be processed as soon as this + * instance receives access to the device. + */ + +struct v4l2_m2m_queue_ctx { + struct vb2_queue q; + + struct list_head rdy_queue; + spinlock_t rdy_spinlock; + u8 num_rdy; + bool buffered; +}; + +/** + * struct v4l2_m2m_ctx - Memory to memory context structure + * + * @q_lock: struct &mutex lock + * @new_frame: valid in the device_run callback: if true, then this + * starts a new frame; if false, then this is a new slice + * for an existing frame. This is always true unless + * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which + * indicates slicing support. 
+ * @is_draining: indicates device is in draining phase + * @last_src_buf: indicate the last source buffer for draining + * @next_buf_last: next capture queud buffer will be tagged as last + * @has_stopped: indicate the device has been stopped + * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE + * queue is not streaming. This allows firmware to + * analyze the bitstream header which arrives on the + * OUTPUT queue. The driver must implement the job_ready + * callback correctly to make sure that the requirements + * for actual decoding are met. + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @cap_q_ctx: Capture (output to memory) queue context + * @out_q_ctx: Output (input from memory) queue context + * @queue: List of memory to memory contexts + * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c: + * %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT. + * @finished: Wait queue used to signalize when a job queue finished. + * @priv: Instance private data + * + * The memory to memory context is specific to a file handle, NOT to e.g. + * a device. 
+ */ +struct v4l2_m2m_ctx { + /* optional cap/out vb2 queues lock */ + struct mutex *q_lock; + + bool new_frame; + + bool is_draining; + struct vb2_v4l2_buffer *last_src_buf; + bool next_buf_last; + bool has_stopped; + bool ignore_cap_streaming; + + /* internal use only */ + struct v4l2_m2m_dev *m2m_dev; + + struct v4l2_m2m_queue_ctx cap_q_ctx; + + struct v4l2_m2m_queue_ctx out_q_ctx; + + /* For device job queue */ + struct list_head queue; + unsigned long job_flags; + wait_queue_head_t finished; + + void *priv; +}; + +/** + * struct v4l2_m2m_buffer - Memory to memory buffer + * + * @vb: pointer to struct &vb2_v4l2_buffer + * @list: list of m2m buffers + */ +struct v4l2_m2m_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +/** + * v4l2_m2m_get_curr_priv() - return driver private data for the currently + * running instance or NULL if no instance is running + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + */ +void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_get_vq() - return vb2_queue for the given type + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to + * the pending job queue and add it if so. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * There are three basic requirements an instance has to meet to be able to run: + * 1) at least one source buffer has to be queued, + * 2) at least one destination buffer has to be queued, + * 3) streaming has to be on. + * + * If a queue is buffered (for example a decoder hardware ringbuffer that has + * to be drained before doing streamoff), allow scheduling without v4l2 buffers + * on that queue. 
+ * + * There may also be additional, custom requirements. In such case the driver + * should supply a custom callback (job_ready in v4l2_m2m_ops) that should + * return 1 if the instance is ready. + * An example of the above could be an instance that requires more than one + * src/dst buffer per transaction. + */ +void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_job_finish() - inform the framework that a job has been finished + * and have it clean up + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * Called by a driver to yield back the device after it has finished with it. + * Should be called as soon as possible after reaching a state which allows + * other instances to take control of the device. + * + * This function has to be called only after &v4l2_m2m_ops->device_run + * callback has been called on the driver. To prevent recursion, it should + * not be called directly from the &v4l2_m2m_ops->device_run callback though. + */ +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with + * state and inform the framework that a job has been finished and have it + * clean up + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @state: vb2 buffer state passed to v4l2_m2m_buf_done(). + * + * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this + * function instead of job_finish() to take held buffers into account. It is + * optional for other drivers. + * + * This function removes the source buffer from the ready list and returns + * it with the given state. The same is done for the destination buffer, unless + * it is marked 'held'. In that case the buffer is kept on the ready list. 
+ * + * After that the job is finished (see job_finish()). + * + * This allows for multiple output buffers to be used to fill in a single + * capture buffer. This is typically used by stateless decoders where + * multiple e.g. H.264 slices contribute to a single decoded frame. + */ +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, + struct v4l2_m2m_ctx *m2m_ctx, + enum vb2_buffer_state state); + +static inline void +v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) +{ + vb2_buffer_done(&buf->vb2_buf, state); +} + +/** + * v4l2_m2m_clear_state() - clear encoding/decoding state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = false; +} + +/** + * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = true; +} + +/** + * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session + * draining management state of next queued capture buffer + * + * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify + * the end of the capture session. 
+ * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->is_draining && m2m_ctx->next_buf_last; +} + +/** + * v4l2_m2m_has_stopped() - return the current encoding/decoding session + * stopped state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->has_stopped; +} + +/** + * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining + * state in the current encoding/decoding session + * + * This will identify the last output buffer queued before a session stop + * was required, leading to an actual encoding/decoding session stop state + * in the encoding/decoding process after being processed. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +static inline bool +v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf; +} + +/** + * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job + * to finish + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Called by a driver in the suspend hook. Stop new jobs from being run, and + * wait for current running job to finish. 
+ */ +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_resume() - resume job running and try to run a queued job + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Called by a driver in the resume hook. This reverts the operation of + * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if + * there is any. + */ +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @reqbufs: pointer to struct &v4l2_requestbuffers + */ +int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_requestbuffers *reqbufs); + +/** + * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + * + * See v4l2_m2m_mmap() documentation for details. 
+ */ +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @buf: pointer to struct &v4l2_buffer + */ +int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf); + +/** + * v4l2_m2m_create_bufs() - create a source or destination buffer, depending + * on the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @create: pointer to struct &v4l2_create_buffers + */ +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create); + +/** + * v4l2_m2m_expbuf() - export a source or destination buffer, depending on + * the type + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @eb: pointer to struct &v4l2_exportbuffer + */ +int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_exportbuffer *eb); + +/** + * v4l2_m2m_streamon() - turn on 
streaming for a video queue + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_streamoff() - turn off streaming for a video queue + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type + */ +int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + enum v4l2_buf_type type); + +/** + * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding + * session state when a start of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding + * session state when a stop of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_encoder_cmd() - execute an encoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @ec: pointer to the encoder command + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec); + +/** + * v4l2_m2m_decoder_cmd() - execute a decoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @dc: pointer to the decoder command + */ +int v4l2_m2m_decoder_cmd(struct 
file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc); + +/** + * v4l2_m2m_poll() - poll replacement, for destination buffers only + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @wait: pointer to struct &poll_table_struct + * + * Call from the driver's poll() function. Will poll both queues. If a buffer + * is available to dequeue (with dqbuf) from the source queue, this will + * indicate that a non-blocking write can be performed, while read will be + * returned in case of the destination queue. + */ +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct poll_table_struct *wait); + +/** + * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vma: pointer to struct &vm_area_struct + * + * Call from driver's mmap() function. Will handle mmap() for both queues + * seamlessly for the video buffer, which will receive normal per-queue offsets + * and proper vb2 queue pointers. The differentiation is made outside + * vb2 by adding a predefined offset to buffers from one of the queues + * and subtracting it before passing it back to vb2. Only drivers (and + * thus applications) receive modified offsets. + */ +int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#endif +/** + * v4l2_m2m_init() - initialize per-driver m2m data + * + * @m2m_ops: pointer to struct v4l2_m2m_ops + * + * Usually called from driver's ``probe()`` function. 
+ * + * Return: returns an opaque pointer to the internal data to handle M2M context + */ +struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); + +#if defined(CONFIG_MEDIA_CONTROLLER) +void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev); +int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function); +#else +static inline void +v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) +{ +} + +static inline int +v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, + struct video_device *vdev, int function) +{ + return 0; +} +#endif + +/** + * v4l2_m2m_release() - cleans up and frees a m2m_dev structure + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * + * Usually called from driver's ``remove()`` function. + */ +void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); + +/** + * v4l2_m2m_ctx_init() - allocate and initialize a m2m context + * + * @m2m_dev: opaque pointer to the internal data to handle M2M context + * @drv_priv: driver's instance private data + * @queue_init: a callback for queue type-specific initialization function + * to be used for initializing vb2_queues + * + * Usually called from driver's ``open()`` function. + */ +struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, + void *drv_priv, + int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); + +static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx, + bool buffered) +{ + m2m_ctx->out_q_ctx.buffered = buffered; +} + +static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx, + bool buffered) +{ + m2m_ctx->cap_q_ctx.buffered = buffered; +} + +/** + * v4l2_m2m_ctx_release() - release m2m context + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * + * Usually called from driver's release() function. 
+ */ +void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); + +/** + * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &vb2_v4l2_buffer + * + * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback. + */ +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for + * use + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; +} + +/** + * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers + * ready for use + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) +{ + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; +} + +/** + * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx 
*m2m_ctx) +{ + return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx); +} + +/** + * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_last_src_buf() - return last source buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx); +} + +/** + * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of + * ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_src_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** 
+ * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) +{ + return &m2m_ctx->out_q_ctx.q; +} + +/** + * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline +struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) +{ + return &m2m_ctx->cap_q_ctx.q; +} + +/** + * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and + * return it + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + */ +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); + +/** + * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready + * buffers and return it + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx); +} + +/** + * 
v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of + * ready buffers and return it + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) +{ + return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); +} + +/** + * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready + * buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + * @vbuf: the buffer to be removed + */ +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list + * of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf); +} + +/** + * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the + * list of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf); +} + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx); + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx); +} + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx); +} + +/** + * 
v4l2_m2m_buf_copy_metadata() - copy buffer metadata from + * the output buffer to the capture buffer + * + * @out_vb: the output buffer that is the source of the metadata. + * @cap_vb: the capture buffer that will receive the metadata. + * @copy_frame_flags: copy the KEY/B/PFRAME flags as well. + * + * This helper function copies the timestamp, timecode (if the TIMECODE + * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME + * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb. + * + * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME + * flags are not copied. This is typically needed for encoders that + * set this bits explicitly. + */ +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags); + +/* v4l2 request helper */ + +void v4l2_m2m_request_queue(struct media_request *req); + +/* v4l2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb); +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create); +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *d); +int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb); +int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *buf); +int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, + enum v4l2_buf_type type); +int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, + enum v4l2_buf_type type); +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, 
+ struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc); +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait); + +#endif /* _MEDIA_V4L2_MEM2MEM_H */ + diff --git a/6.18.0/include-overrides/media/v4l2-rect.h b/6.18.0/include-overrides/media/v4l2-rect.h new file mode 100644 index 0000000..bd587d0 --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-rect.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * v4l2-rect.h - v4l2_rect helper functions + * + * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _V4L2_RECT_H_ +#define _V4L2_RECT_H_ + +#include + +/** + * v4l2_rect_set_size_to() - copy the width/height values. + * @r: rect whose width and height fields will be set + * @size: rect containing the width and height fields you need. + */ +static inline void v4l2_rect_set_size_to(struct v4l2_rect *r, + const struct v4l2_rect *size) +{ + r->width = size->width; + r->height = size->height; +} + +/** + * v4l2_rect_set_min_size() - width and height of r should be >= min_size. 
+ * @r: rect whose width and height will be modified + * @min_size: rect containing the minimal width and height + */ +static inline void v4l2_rect_set_min_size(struct v4l2_rect *r, + const struct v4l2_rect *min_size) +{ + if (r->width < min_size->width) + r->width = min_size->width; + if (r->height < min_size->height) + r->height = min_size->height; +} + +/** + * v4l2_rect_set_max_size() - width and height of r should be <= max_size + * @r: rect whose width and height will be modified + * @max_size: rect containing the maximum width and height + */ +static inline void v4l2_rect_set_max_size(struct v4l2_rect *r, + const struct v4l2_rect *max_size) +{ + if (r->width > max_size->width) + r->width = max_size->width; + if (r->height > max_size->height) + r->height = max_size->height; +} + +/** + * v4l2_rect_map_inside()- r should be inside boundary. + * @r: rect that will be modified + * @boundary: rect containing the boundary for @r + */ +static inline void v4l2_rect_map_inside(struct v4l2_rect *r, + const struct v4l2_rect *boundary) +{ + v4l2_rect_set_max_size(r, boundary); + if (r->left < boundary->left) + r->left = boundary->left; + if (r->top < boundary->top) + r->top = boundary->top; + if (r->left + r->width > boundary->left + boundary->width) + r->left = boundary->left + boundary->width - r->width; + if (r->top + r->height > boundary->top + boundary->height) + r->top = boundary->top + boundary->height - r->height; +} + +/** + * v4l2_rect_same_size() - return true if r1 has the same size as r2 + * @r1: rectangle. + * @r2: rectangle. + * + * Return true if both rectangles have the same size. + */ +static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return r1->width == r2->width && r1->height == r2->height; +} + +/** + * v4l2_rect_same_position() - return true if r1 has the same position as r2 + * @r1: rectangle. + * @r2: rectangle. 
+ * + * Return true if both rectangles have the same position + */ +static inline bool v4l2_rect_same_position(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return r1->top == r2->top && r1->left == r2->left; +} + +/** + * v4l2_rect_equal() - return true if r1 equals r2 + * @r1: rectangle. + * @r2: rectangle. + * + * Return true if both rectangles have the same size and position. + */ +static inline bool v4l2_rect_equal(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + return v4l2_rect_same_size(r1, r2) && v4l2_rect_same_position(r1, r2); +} + +/** + * v4l2_rect_intersect() - calculate the intersection of two rects. + * @r: intersection of @r1 and @r2. + * @r1: rectangle. + * @r2: rectangle. + */ +static inline void v4l2_rect_intersect(struct v4l2_rect *r, + const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + int right, bottom; + + r->top = max(r1->top, r2->top); + r->left = max(r1->left, r2->left); + bottom = min(r1->top + r1->height, r2->top + r2->height); + right = min(r1->left + r1->width, r2->left + r2->width); + r->height = max(0, bottom - r->top); + r->width = max(0, right - r->left); +} + +/** + * v4l2_rect_scale() - scale rect r by to/from + * @r: rect to be scaled. + * @from: from rectangle. + * @to: to rectangle. + * + * This scales rectangle @r horizontally by @to->width / @from->width and + * vertically by @to->height / @from->height. + * + * Typically @r is a rectangle inside @from and you want the rectangle as + * it would appear after scaling @from to @to. So the resulting @r will + * be the scaled rectangle inside @to. 
+ */ +static inline void v4l2_rect_scale(struct v4l2_rect *r, + const struct v4l2_rect *from, + const struct v4l2_rect *to) +{ + if (from->width == 0 || from->height == 0) { + r->left = r->top = r->width = r->height = 0; + return; + } + r->left = (((r->left - from->left) * to->width) / from->width) & ~1; + r->width = ((r->width * to->width) / from->width) & ~1; + r->top = ((r->top - from->top) * to->height) / from->height; + r->height = (r->height * to->height) / from->height; +} + +/** + * v4l2_rect_overlap() - do r1 and r2 overlap? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 and @r2 overlap. + */ +static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1, + const struct v4l2_rect *r2) +{ + /* + * IF the left side of r1 is to the right of the right side of r2 OR + * the left side of r2 is to the right of the right side of r1 THEN + * they do not overlap. + */ + if (r1->left >= r2->left + r2->width || + r2->left >= r1->left + r1->width) + return false; + /* + * IF the top side of r1 is below the bottom of r2 OR + * the top side of r2 is below the bottom of r1 THEN + * they do not overlap. + */ + if (r1->top >= r2->top + r2->height || + r2->top >= r1->top + r1->height) + return false; + return true; +} + +/** + * v4l2_rect_enclosed() - is r1 enclosed in r2? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 is enclosed in @r2. 
+ */ +static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1, + struct v4l2_rect *r2) +{ + if (r1->left < r2->left || r1->top < r2->top) + return false; + if (r1->left + r1->width > r2->left + r2->width) + return false; + if (r1->top + r1->height > r2->top + r2->height) + return false; + + return true; +} + +#endif diff --git a/6.18.0/include-overrides/media/v4l2-subdev.h b/6.18.0/include-overrides/media/v4l2-subdev.h new file mode 100644 index 0000000..e0bb58c --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-subdev.h @@ -0,0 +1,1998 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * V4L2 sub-device support header. + * + * Copyright (C) 2008 Hans Verkuil + */ + +#ifndef _V4L2_SUBDEV_H +#define _V4L2_SUBDEV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* generic v4l2_device notify callback notification values */ +#define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32) +#define V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ 0x00000001 +#define V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED 0x00000002 +#define V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN 0x00000004 +#define V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN 0x00000008 + +#define V4L2_SUBDEV_IR_TX_NOTIFY _IOW('v', 1, u32) +#define V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ 0x00000001 + +#define V4L2_DEVICE_NOTIFY_EVENT _IOW('v', 2, struct v4l2_event) + +struct v4l2_device; +struct v4l2_ctrl_handler; +struct v4l2_event; +struct v4l2_event_subscription; +struct v4l2_fh; +struct v4l2_subdev; +struct v4l2_subdev_fh; +struct v4l2_subdev_stream_config; +struct tuner_setup; +struct v4l2_mbus_frame_desc; +struct led_classdev; + +/** + * struct v4l2_decode_vbi_line - used to decode_vbi_line + * + * @is_second_field: Set to 0 for the first (odd) field; + * set to 1 for the second (even) field. + * @p: Pointer to the sliced VBI data from the decoder. On exit, points to + * the start of the payload. + * @line: Line number of the sliced VBI data (1-23) + * @type: VBI service type (V4L2_SLICED_*). 
0 if no service found + */ +struct v4l2_decode_vbi_line { + u32 is_second_field; + u8 *p; + u32 line; + u32 type; +}; + +/* + * Sub-devices are devices that are connected somehow to the main bridge + * device. These devices are usually audio/video muxers/encoders/decoders or + * sensors and webcam controllers. + * + * Usually these devices are controlled through an i2c bus, but other buses + * may also be used. + * + * The v4l2_subdev struct provides a way of accessing these devices in a + * generic manner. Most operations that these sub-devices support fall in + * a few categories: core ops, audio ops, video ops and tuner ops. + * + * More categories can be added if needed, although this should remain a + * limited set (no more than approx. 8 categories). + * + * Each category has its own set of ops that subdev drivers can implement. + * + * A subdev driver can leave the pointer to the category ops NULL if + * it does not implement them (e.g. an audio subdev will generally not + * implement the video category ops). The exception is the core category: + * this must always be present. + * + * These ops are all used internally so it is no problem to change, remove + * or add ops or move ops from one to another category. Currently these + * ops are based on the original ioctls, but since ops are not limited to + * one argument there is room for improvement here once all i2c subdev + * drivers are converted to use these ops. + */ + +/* + * Core ops: it is highly recommended to implement at least these ops: + * + * log_status + * g_register + * s_register + * + * This provides basic debugging support. + * + * The ioctl ops is meant for generic ioctl-like commands. Depending on + * the use-case it might be better to use subdev-specific ops (currently + * not yet implemented) since ops provide proper type-checking. + */ + +/** + * enum v4l2_subdev_io_pin_bits - Subdevice external IO pin configuration + * bits + * + * @V4L2_SUBDEV_IO_PIN_DISABLE: disables a pin config. 
ENABLE assumed. + * @V4L2_SUBDEV_IO_PIN_OUTPUT: set it if pin is an output. + * @V4L2_SUBDEV_IO_PIN_INPUT: set it if pin is an input. + * @V4L2_SUBDEV_IO_PIN_SET_VALUE: to set the output value via + * &struct v4l2_subdev_io_pin_config->value. + * @V4L2_SUBDEV_IO_PIN_ACTIVE_LOW: pin active is bit 0. + * Otherwise, ACTIVE HIGH is assumed. + */ +enum v4l2_subdev_io_pin_bits { + V4L2_SUBDEV_IO_PIN_DISABLE = 0, + V4L2_SUBDEV_IO_PIN_OUTPUT = 1, + V4L2_SUBDEV_IO_PIN_INPUT = 2, + V4L2_SUBDEV_IO_PIN_SET_VALUE = 3, + V4L2_SUBDEV_IO_PIN_ACTIVE_LOW = 4, +}; + +/** + * struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration + * + * @flags: bitmask with flags for this pin's config, whose bits are defined by + * &enum v4l2_subdev_io_pin_bits. + * @pin: Chip external IO pin to configure + * @function: Internal signal pad/function to route to IO pin + * @value: Initial value for pin - e.g. GPIO output value + * @strength: Pin drive strength + */ +struct v4l2_subdev_io_pin_config { + u32 flags; + u8 pin; + u8 function; + u8 value; + u8 strength; +}; + +/** + * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs + * + * @log_status: callback for VIDIOC_LOG_STATUS() ioctl handler code. + * + * @s_io_pin_config: configure one or more chip I/O pins for chips that + * multiplex different internal signal pads out to IO pins. This function + * takes a pointer to an array of 'n' pin configuration entries, one for + * each pin being configured. This function could be called at times + * other than just subdevice initialization. + * + * @init: initialize the sensor registers to some sort of reasonable default + * values. Do not use for new drivers and should be removed in existing + * drivers. + * + * @load_fw: load firmware. + * + * @reset: generic reset command. The argument selects which subsystems to + * reset. Passing 0 will always reset the whole chip. Do not use for new + * drivers without discussing this first on the linux-media mailinglist. 
+ * There should be no reason normally to reset a device. + * + * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with + * a direction argument if needed. + * + * @command: called by in-kernel drivers in order to call functions internal + * to subdev drivers driver that have a separate callback. + * + * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core. + * used to provide support for private ioctls used on the driver. + * + * @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel, + * in order to fix data passed from/to userspace. + * + * @g_register: callback for VIDIOC_DBG_G_REGISTER() ioctl handler code. + * + * @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code. + * + * @s_power: puts subdevice in power saving mode (on == 0) or normal operation + * mode (on == 1). DEPRECATED. See + * Documentation/driver-api/media/camera-sensor.rst . pre_streamon and + * post_streamoff callbacks can be used for e.g. setting the bus to LP-11 + * mode before s_stream is called. + * + * @interrupt_service_routine: Called by the bridge chip's interrupt service + * handler, when an interrupt status has be raised due to this subdev, + * so that this subdev can handle the details. It may schedule work to be + * performed later. It must not sleep. **Called from an IRQ context**. + * + * @subscribe_event: used by the drivers to request the control framework that + * for it to be warned when the value of a control changes. + * + * @unsubscribe_event: remove event subscription from the control framework. 
+ */ +struct v4l2_subdev_core_ops { + int (*log_status)(struct v4l2_subdev *sd); + int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n, + struct v4l2_subdev_io_pin_config *pincfg); + int (*init)(struct v4l2_subdev *sd, u32 val); + int (*load_fw)(struct v4l2_subdev *sd); + int (*reset)(struct v4l2_subdev *sd, u32 val); + int (*s_gpio)(struct v4l2_subdev *sd, u32 val); + long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); + long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); +#ifdef CONFIG_COMPAT + long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd, + unsigned long arg); +#endif +#ifdef CONFIG_VIDEO_ADV_DEBUG + int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg); + int (*s_register)(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg); +#endif + int (*s_power)(struct v4l2_subdev *sd, int on); + int (*interrupt_service_routine)(struct v4l2_subdev *sd, + u32 status, bool *handled); + int (*subscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); + int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub); +}; + +/** + * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened + * in radio mode. + * + * @standby: puts the tuner in standby mode. It will be woken up + * automatically the next time it is used. + * + * @s_radio: callback that switches the tuner to radio mode. + * drivers should explicitly call it when a tuner ops should + * operate on radio mode, before being able to handle it. + * Used on devices that have both AM/FM radio receiver and TV. + * + * @s_frequency: callback for VIDIOC_S_FREQUENCY() ioctl handler code. + * + * @g_frequency: callback for VIDIOC_G_FREQUENCY() ioctl handler code. + * freq->type must be filled in. Normally done by video_ioctl2() + * or the bridge driver. + * + * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS() ioctl handler code. 
+ * + * @g_tuner: callback for VIDIOC_G_TUNER() ioctl handler code. + * + * @s_tuner: callback for VIDIOC_S_TUNER() ioctl handler code. @vt->type must be + * filled in. Normally done by video_ioctl2 or the + * bridge driver. + * + * @g_modulator: callback for VIDIOC_G_MODULATOR() ioctl handler code. + * + * @s_modulator: callback for VIDIOC_S_MODULATOR() ioctl handler code. + * + * @s_type_addr: sets tuner type and its I2C addr. + * + * @s_config: sets tda9887 specific stuff, like port1, port2 and qss + * + * .. note:: + * + * On devices that have both AM/FM and TV, it is up to the driver + * to explicitly call s_radio when the tuner should be switched to + * radio mode, before handling other &struct v4l2_subdev_tuner_ops + * that would require it. An example of such usage is:: + * + * static void s_frequency(void *priv, const struct v4l2_frequency *f) + * { + * ... + * if (f.type == V4L2_TUNER_RADIO) + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_radio); + * ... + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency); + * } + */ +struct v4l2_subdev_tuner_ops { + int (*standby)(struct v4l2_subdev *sd); + int (*s_radio)(struct v4l2_subdev *sd); + int (*s_frequency)(struct v4l2_subdev *sd, const struct v4l2_frequency *freq); + int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq); + int (*enum_freq_bands)(struct v4l2_subdev *sd, struct v4l2_frequency_band *band); + int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); + int (*s_tuner)(struct v4l2_subdev *sd, const struct v4l2_tuner *vt); + int (*g_modulator)(struct v4l2_subdev *sd, struct v4l2_modulator *vm); + int (*s_modulator)(struct v4l2_subdev *sd, const struct v4l2_modulator *vm); + int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type); + int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); +}; + +/** + * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings + * + * @s_clock_freq: set the frequency (in Hz) of the audio 
clock output. + * Used to slave an audio processor to the video decoder, ensuring that + * audio and video remain synchronized. Usual values for the frequency + * are 48000, 44100 or 32000 Hz. If the frequency is not supported, then + * -EINVAL is returned. + * + * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard + * way to select I2S clock used by driving digital audio streams at some + * board designs. Usual values for the frequency are 1024000 and 2048000. + * If the frequency is not supported, then %-EINVAL is returned. + * + * @s_routing: used to define the input and/or output pins of an audio chip, + * and any additional configuration data. + * Never attempt to use user-level input IDs (e.g. Composite, S-Video, + * Tuner) at this level. An i2c device shouldn't know about whether an + * input pin is connected to a Composite connector, become on another + * board or platform it might be connected to something else entirely. + * The calling driver is responsible for mapping a user-level input to + * the right pins on the i2c device. + * + * @s_stream: used to notify the audio code that stream will start or has + * stopped. + */ +struct v4l2_subdev_audio_ops { + int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); + int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq); + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_stream)(struct v4l2_subdev *sd, int enable); +}; + +/** + * struct v4l2_mbus_frame_desc_entry_csi2 + * + * @vc: CSI-2 virtual channel + * @dt: CSI-2 data type ID + */ +struct v4l2_mbus_frame_desc_entry_csi2 { + u8 vc; + u8 dt; +}; + +/** + * enum v4l2_mbus_frame_desc_flags - media bus frame description flags + * + * @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX: + * Indicates that &struct v4l2_mbus_frame_desc_entry->length field + * specifies maximum data length. + * @V4L2_MBUS_FRAME_DESC_FL_BLOB: + * Indicates that the format does not have line offsets, i.e. 
+ * the receiver should use 1D DMA. + */ +enum v4l2_mbus_frame_desc_flags { + V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = BIT(0), + V4L2_MBUS_FRAME_DESC_FL_BLOB = BIT(1), +}; + +/** + * struct v4l2_mbus_frame_desc_entry - media bus frame description structure + * + * @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags. + * @stream: stream in routing configuration + * @pixelcode: media bus pixel code, valid if @flags + * %FRAME_DESC_FL_BLOB is not set. + * @length: number of octets per frame, valid if @flags + * %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set. + * @bus: Bus-specific frame descriptor parameters + * @bus.csi2: CSI-2-specific bus configuration + */ +struct v4l2_mbus_frame_desc_entry { + enum v4l2_mbus_frame_desc_flags flags; + u32 stream; + u32 pixelcode; + u32 length; + union { + struct v4l2_mbus_frame_desc_entry_csi2 csi2; + } bus; +}; + + /* + * If this number is too small, it should be dropped altogether and the + * API switched to a dynamic number of frame descriptor entries. + */ +#define V4L2_FRAME_DESC_ENTRY_MAX 8 + +/** + * enum v4l2_mbus_frame_desc_type - media bus frame description type + * + * @V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED: + * Undefined frame desc type. Drivers should not use this, it is + * for backwards compatibility. + * @V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL: + * Parallel media bus. + * @V4L2_MBUS_FRAME_DESC_TYPE_CSI2: + * CSI-2 media bus. Frame desc parameters must be set in + * &struct v4l2_mbus_frame_desc_entry->csi2. 
+ */ +enum v4l2_mbus_frame_desc_type { + V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED = 0, + V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL, + V4L2_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +/** + * struct v4l2_mbus_frame_desc - media bus data frame description + * @type: type of the bus (enum v4l2_mbus_frame_desc_type) + * @entry: frame descriptors array + * @num_entries: number of entries in @entry array + */ +struct v4l2_mbus_frame_desc { + enum v4l2_mbus_frame_desc_type type; + struct v4l2_mbus_frame_desc_entry entry[V4L2_FRAME_DESC_ENTRY_MAX]; + unsigned short num_entries; +}; + +/** + * enum v4l2_subdev_pre_streamon_flags - Flags for pre_streamon subdev core op + * + * @V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP: Set the transmitter to either LP-11 + * or LP-111 mode before call to s_stream(). + */ +enum v4l2_subdev_pre_streamon_flags { + V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP = BIT(0), +}; + +/** + * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened + * in video mode. + * + * @s_routing: see s_routing in audio_ops, except this version is for video + * devices. + * + * @s_crystal_freq: sets the frequency of the crystal used to generate the + * clocks in Hz. An extra flags field allows device specific configuration + * regarding clock frequency dividers, etc. If not used, then set flags + * to 0. If the frequency is not supported, then -EINVAL is returned. + * + * @g_std: callback for VIDIOC_G_STD() ioctl handler code. + * + * @s_std: callback for VIDIOC_S_STD() ioctl handler code. + * + * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by + * video input devices. + * + * @g_std_output: get current standard for video OUTPUT devices. This is ignored + * by video input devices. + * + * @querystd: callback for VIDIOC_QUERYSTD() ioctl handler code. + * + * @g_tvnorms: get &v4l2_std_id with all standards supported by the video + * CAPTURE device. This is ignored by video output devices. 
+ * + * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video + * OUTPUT device. This is ignored by video capture devices. + * + * @g_input_status: get input status. Same as the status field in the + * &struct v4l2_input + * + * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the + * sub-device. Failure on stop will remove any resources acquired in + * streaming start, while the error code is still returned by the driver. + * The caller shall track the subdev state, and shall not start or stop an + * already started or stopped subdev. Also see call_s_stream wrapper in + * v4l2-subdev.c. + * + * This callback is DEPRECATED. New drivers should instead implement + * &v4l2_subdev_pad_ops.enable_streams and + * &v4l2_subdev_pad_ops.disable_streams operations, and use + * v4l2_subdev_s_stream_helper for the &v4l2_subdev_video_ops.s_stream + * operation to support legacy users. + * + * Drivers should also not call the .s_stream() subdev operation directly, + * but use the v4l2_subdev_enable_streams() and + * v4l2_subdev_disable_streams() helpers. + * + * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev + * can adjust @size to a lower value and must not write more data to the + * buffer starting at @data than the original value of @size. + * + * @pre_streamon: May be called before streaming is actually started, to help + * initialising the bus. Current usage is to set a CSI-2 transmitter to + * LP-11 or LP-111 mode before streaming. See &enum + * v4l2_subdev_pre_streamon_flags. + * + * pre_streamon shall return error if it cannot perform the operation as + * indicated by the flags argument. In particular, -EACCES indicates lack + * of support for the operation. The caller shall call post_streamoff for + * each successful call of pre_streamon. + * + * @post_streamoff: Called after streaming is stopped, but if and only if + * pre_streamon was called earlier. 
+ */ +struct v4l2_subdev_video_ops { + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags); + int (*g_std)(struct v4l2_subdev *sd, v4l2_std_id *norm); + int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm); + int (*s_std_output)(struct v4l2_subdev *sd, v4l2_std_id std); + int (*g_std_output)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*querystd)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_tvnorms)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std); + int (*g_input_status)(struct v4l2_subdev *sd, u32 *status); + int (*s_stream)(struct v4l2_subdev *sd, int enable); + int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf, + unsigned int *size); + int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags); + int (*post_streamoff)(struct v4l2_subdev *sd); +}; + +/** + * struct v4l2_subdev_vbi_ops - Callbacks used when v4l device was opened + * in video mode via the vbi device node. + * + * @decode_vbi_line: video decoders that support sliced VBI need to implement + * this ioctl. Field p of the &struct v4l2_decode_vbi_line is set to the + * start of the VBI data that was generated by the decoder. The driver + * then parses the sliced VBI data and sets the other fields in the + * struct accordingly. The pointer p is updated to point to the start of + * the payload which can be copied verbatim into the data field of the + * &struct v4l2_sliced_vbi_data. If no valid VBI data was found, then the + * type field is set to 0 on return. + * + * @s_vbi_data: used to generate VBI signals on a video signal. + * &struct v4l2_sliced_vbi_data is filled with the data packets that + * should be output. Note that if you set the line field to 0, then that + * VBI signal is disabled. If no valid VBI data was found, then the type + * field is set to 0 on return. 
+ * + * @g_vbi_data: used to obtain the sliced VBI packet from a readback register. + * Not all video decoders support this. If no data is available because + * the readback register contains invalid or erroneous data %-EIO is + * returned. Note that you must fill in the 'id' member and the 'field' + * member (to determine whether CC data from the first or second field + * should be obtained). + * + * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP() ioctl handler + * code. + * + * @s_raw_fmt: setup the video encoder/decoder for raw VBI. + * + * @g_sliced_fmt: retrieve the current sliced VBI settings. + * + * @s_sliced_fmt: setup the sliced VBI settings. + */ +struct v4l2_subdev_vbi_ops { + int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); + int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data); + int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data); + int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap); + int (*s_raw_fmt)(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt); + int (*g_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt); + int (*s_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt); +}; + +/** + * struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations + * @g_skip_top_lines: number of lines at the top of the image to be skipped. + * This is needed for some sensors, which always corrupt + * several top lines of the output image, or which send their + * metadata in them. + * @g_skip_frames: number of frames to skip at stream start. This is needed for + * buggy sensors that generate faulty frames when they are + * turned on. 
+ */
+struct v4l2_subdev_sensor_ops {
+	int (*g_skip_top_lines)(struct v4l2_subdev *sd, u32 *lines);
+	int (*g_skip_frames)(struct v4l2_subdev *sd, u32 *frames);
+};
+
+/**
+ * enum v4l2_subdev_ir_mode - describes the type of IR supported
+ *
+ * @V4L2_SUBDEV_IR_MODE_PULSE_WIDTH: IR uses struct ir_raw_event records
+ */
+enum v4l2_subdev_ir_mode {
+	V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
+};
+
+/**
+ * struct v4l2_subdev_ir_parameters - Parameters for IR TX or RX
+ *
+ * @bytes_per_data_element: bytes per data element of data in read or
+ *	write call.
+ * @mode: IR mode as defined by &enum v4l2_subdev_ir_mode.
+ * @enable: device is active if true
+ * @interrupt_enable: IR interrupts are enabled if true
+ * @shutdown: if true: set hardware to low/no power, false: normal mode
+ *
+ * @modulation: if true, it uses carrier, if false: baseband
+ * @max_pulse_width: maximum pulse width in ns, valid only for baseband signal
+ * @carrier_freq: carrier frequency in Hz, valid only for modulated signal
+ * @duty_cycle: duty cycle percentage, valid only for modulated signal
+ * @invert_level: invert signal level
+ *
+ * @invert_carrier_sense: Send 0/space as a carrier burst. used only in TX.
+ *
+ * @noise_filter_min_width: min time of a valid pulse, in ns. Used only for RX.
+ * @carrier_range_lower: Lower carrier range, in Hz, valid only for modulated
+ *	signal. Used only for RX.
+ * @carrier_range_upper: Upper carrier range, in Hz, valid only for modulated
+ *	signal. Used only for RX.
+ * @resolution: The receive resolution, in ns. Used only for RX.
+ */ +struct v4l2_subdev_ir_parameters { + unsigned int bytes_per_data_element; + enum v4l2_subdev_ir_mode mode; + + bool enable; + bool interrupt_enable; + bool shutdown; + + bool modulation; + u32 max_pulse_width; + unsigned int carrier_freq; + unsigned int duty_cycle; + bool invert_level; + + /* Tx only */ + bool invert_carrier_sense; + + /* Rx only */ + u32 noise_filter_min_width; + unsigned int carrier_range_lower; + unsigned int carrier_range_upper; + u32 resolution; +}; + +/** + * struct v4l2_subdev_ir_ops - operations for IR subdevices + * + * @rx_read: Reads received codes or pulse width data. + * The semantics are similar to a non-blocking read() call. + * @rx_g_parameters: Get the current operating parameters and state of + * the IR receiver. + * @rx_s_parameters: Set the current operating parameters and state of + * the IR receiver. It is recommended to call + * [rt]x_g_parameters first to fill out the current state, and only change + * the fields that need to be changed. Upon return, the actual device + * operating parameters and state will be returned. Note that hardware + * limitations may prevent the actual settings from matching the requested + * settings - e.g. an actual carrier setting of 35,904 Hz when 36,000 Hz + * was requested. An exception is when the shutdown parameter is true. + * The last used operational parameters will be returned, but the actual + * state of the hardware be different to minimize power consumption and + * processing when shutdown is true. + * + * @tx_write: Writes codes or pulse width data for transmission. + * The semantics are similar to a non-blocking write() call. + * @tx_g_parameters: Get the current operating parameters and state of + * the IR transmitter. + * @tx_s_parameters: Set the current operating parameters and state of + * the IR transmitter. It is recommended to call + * [rt]x_g_parameters first to fill out the current state, and only change + * the fields that need to be changed. 
Upon return, the actual device + * operating parameters and state will be returned. Note that hardware + * limitations may prevent the actual settings from matching the requested + * settings - e.g. an actual carrier setting of 35,904 Hz when 36,000 Hz + * was requested. An exception is when the shutdown parameter is true. + * The last used operational parameters will be returned, but the actual + * state of the hardware be different to minimize power consumption and + * processing when shutdown is true. + */ +struct v4l2_subdev_ir_ops { + /* Receiver */ + int (*rx_read)(struct v4l2_subdev *sd, u8 *buf, size_t count, + ssize_t *num); + + int (*rx_g_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); + int (*rx_s_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); + + /* Transmitter */ + int (*tx_write)(struct v4l2_subdev *sd, u8 *buf, size_t count, + ssize_t *num); + + int (*tx_g_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); + int (*tx_s_parameters)(struct v4l2_subdev *sd, + struct v4l2_subdev_ir_parameters *params); +}; + +/** + * struct v4l2_subdev_pad_config - Used for storing subdev pad information. + * + * @format: &struct v4l2_mbus_framefmt + * @crop: &struct v4l2_rect to be used for crop + * @compose: &struct v4l2_rect to be used for compose + * @interval: frame interval + */ +struct v4l2_subdev_pad_config { + struct v4l2_mbus_framefmt format; + struct v4l2_rect crop; + struct v4l2_rect compose; + struct v4l2_fract interval; +}; + +/** + * struct v4l2_subdev_stream_configs - A collection of stream configs. + * + * @num_configs: number of entries in @config. + * @configs: an array of &struct v4l2_subdev_stream_configs. 
+ */ +struct v4l2_subdev_stream_configs { + u32 num_configs; + struct v4l2_subdev_stream_config *configs; +}; + +/** + * struct v4l2_subdev_krouting - subdev routing table + * + * @len_routes: length of routes array, in routes + * @num_routes: number of routes + * @routes: &struct v4l2_subdev_route + * + * This structure contains the routing table for a subdev. + */ +struct v4l2_subdev_krouting { + unsigned int len_routes; + unsigned int num_routes; + struct v4l2_subdev_route *routes; +}; + +/** + * struct v4l2_subdev_state - Used for storing subdev state information. + * + * @_lock: default for 'lock' + * @lock: mutex for the state. May be replaced by the user. + * @sd: the sub-device which the state is related to + * @pads: &struct v4l2_subdev_pad_config array + * @routing: routing table for the subdev + * @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS) + * + * This structure only needs to be passed to the pad op if the 'which' field + * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For + * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL. + */ +struct v4l2_subdev_state { + /* lock for the struct v4l2_subdev_state fields */ + struct mutex _lock; + struct mutex *lock; + struct v4l2_subdev *sd; + struct v4l2_subdev_pad_config *pads; + struct v4l2_subdev_krouting routing; + struct v4l2_subdev_stream_configs stream_configs; +}; + +/** + * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations + * + * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler + * code. + * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler + * code. + * + * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL() ioctl + * handler code. + * + * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT() ioctl handler code. + * + * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT() ioctl handler code. + * + * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION() ioctl handler code. 
+ * + * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code. + * + * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL() + * ioctl handler code. + * + * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL() + * ioctl handler code. + * + * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code. + * + * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code. + * + * @s_dv_timings: Set custom dv timings in the sub device. This is used + * when sub device is capable of setting detailed timing information + * in the hardware to generate/detect the video signal. + * + * @g_dv_timings: Get custom dv timings in the sub device. + * + * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code. + * + * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler + * code. + * + * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS() ioctl handler + * code. + * + * @link_validate: used by the media controller code to check if the links + * that belongs to a pipeline can be used for stream. + * + * @get_frame_desc: get the current low level media bus frame parameters. + * + * @set_frame_desc: set the low level media bus frame parameters, @fd array + * may be adjusted by the subdev driver to device capabilities. + * + * @get_mbus_config: get the media bus configuration of a remote sub-device. + * The media bus configuration is usually retrieved from the + * firmware interface at sub-device probe time, immediately + * applied to the hardware and eventually adjusted by the + * driver. Remote sub-devices (usually video receivers) shall + * use this operation to query the transmitting end bus + * configuration in order to adjust their own one accordingly. + * Callers should make sure they get the most up-to-date as + * possible configuration from the remote end, likely calling + * this operation as close as possible to stream on time. 
The + * operation shall fail if the pad index it has been called on + * is not valid or in case of unrecoverable failures. The + * config argument has been memset to 0 just before calling + * the op. + * + * @set_routing: Enable or disable data connection routes described in the + * subdevice routing table. Subdevs that implement this operation + * must set the V4L2_SUBDEV_FL_STREAMS flag. + * + * @enable_streams: Enable the streams defined in streams_mask on the given + * source pad. Subdevs that implement this operation must use the active + * state management provided by the subdev core (enabled through a call to + * v4l2_subdev_init_finalize() at initialization time). Do not call + * directly, use v4l2_subdev_enable_streams() instead. + * + * Drivers that support only a single stream without setting the + * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask + * argument. + * + * @disable_streams: Disable the streams defined in streams_mask on the given + * source pad. Subdevs that implement this operation must use the active + * state management provided by the subdev core (enabled through a call to + * v4l2_subdev_init_finalize() at initialization time). Do not call + * directly, use v4l2_subdev_disable_streams() instead. + * + * Drivers that support only a single stream without setting the + * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask + * argument. 
+ */ +struct v4l2_subdev_pad_ops { + int (*enum_mbus_code)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code); + int (*enum_frame_size)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse); + int (*enum_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie); + int (*get_fmt)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + int (*set_fmt)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + int (*get_selection)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel); + int (*set_selection)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_selection *sel); + int (*get_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*set_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*s_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*g_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*query_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*dv_timings_cap)(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap); + int (*enum_dv_timings)(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *timings); +#ifdef CONFIG_MEDIA_CONTROLLER + int (*link_validate)(struct v4l2_subdev *sd, struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); +#endif /* CONFIG_MEDIA_CONTROLLER */ 
+ int (*get_frame_desc)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd); + int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd); + int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config); + int (*set_routing)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *route); + int (*enable_streams)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask); + int (*disable_streams)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask); +}; + +/** + * struct v4l2_subdev_ops - Subdev operations + * + * @core: pointer to &struct v4l2_subdev_core_ops. Can be %NULL + * @tuner: pointer to &struct v4l2_subdev_tuner_ops. Can be %NULL + * @audio: pointer to &struct v4l2_subdev_audio_ops. Can be %NULL + * @video: pointer to &struct v4l2_subdev_video_ops. Can be %NULL + * @vbi: pointer to &struct v4l2_subdev_vbi_ops. Can be %NULL + * @ir: pointer to &struct v4l2_subdev_ir_ops. Can be %NULL + * @sensor: pointer to &struct v4l2_subdev_sensor_ops. Can be %NULL + * @pad: pointer to &struct v4l2_subdev_pad_ops. Can be %NULL + */ +struct v4l2_subdev_ops { + const struct v4l2_subdev_core_ops *core; + const struct v4l2_subdev_tuner_ops *tuner; + const struct v4l2_subdev_audio_ops *audio; + const struct v4l2_subdev_video_ops *video; + const struct v4l2_subdev_vbi_ops *vbi; + const struct v4l2_subdev_ir_ops *ir; + const struct v4l2_subdev_sensor_ops *sensor; + const struct v4l2_subdev_pad_ops *pad; +}; + +/** + * struct v4l2_subdev_internal_ops - V4L2 subdev internal ops + * + * @init_state: initialize the subdev state to default values + * + * @registered: called when this subdev is registered. When called the v4l2_dev + * field is set to the correct v4l2_device. + * + * @unregistered: called when this subdev is unregistered. 
When called the + * v4l2_dev field is still set to the correct v4l2_device. + * + * @open: called when the subdev device node is opened by an application. + * + * @close: called when the subdev device node is closed. Please note that + * it is possible for @close to be called after @unregistered! + * + * @release: called when the last user of the subdev device is gone. This + * happens after the @unregistered callback and when the last open + * filehandle to the v4l-subdevX device node was closed. If no device + * node was created for this sub-device, then the @release callback + * is called right after the @unregistered callback. + * The @release callback is typically used to free the memory containing + * the v4l2_subdev structure. It is almost certainly required for any + * sub-device that sets the V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * + * .. note:: + * Never call this from drivers, only the v4l2 framework can call + * these ops. + */ +struct v4l2_subdev_internal_ops { + int (*init_state)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state); + int (*registered)(struct v4l2_subdev *sd); + void (*unregistered)(struct v4l2_subdev *sd); + int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); + int (*close)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); + void (*release)(struct v4l2_subdev *sd); +}; + +/* Set this flag if this subdev is a i2c device. */ +#define V4L2_SUBDEV_FL_IS_I2C (1U << 0) +/* Set this flag if this subdev is a spi device. */ +#define V4L2_SUBDEV_FL_IS_SPI (1U << 1) +/* Set this flag if this subdev needs a device node. */ +#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2) +/* + * Set this flag if this subdev generates events. + * Note controls can send events, thus drivers exposing controls + * should set this flag. + */ +#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3) +/* + * Set this flag if this subdev supports multiplexed streams. 
This means
+ * that the driver supports routing and handles the stream parameter in its
+ * v4l2_subdev_pad_ops handlers. More specifically, this means:
+ *
+ * - Centrally managed subdev active state is enabled
+ * - Legacy pad config is _not_ supported (state->pads is NULL)
+ * - Routing ioctls are available
+ * - Multiple streams per pad are supported
+ */
+#define V4L2_SUBDEV_FL_STREAMS (1U << 4)
+
+struct regulator_bulk_data;
+
+/**
+ * struct v4l2_subdev_platform_data - regulators config struct
+ *
+ * @regulators: Optional regulators used to power on/off the subdevice
+ * @num_regulators: Number of regulators
+ * @host_priv: Per-subdevice data, specific for a certain video host device
+ */
+struct v4l2_subdev_platform_data {
+	struct regulator_bulk_data *regulators;
+	int num_regulators;
+
+	void *host_priv;
+};
+
+/**
+ * struct v4l2_subdev - describes a V4L2 sub-device
+ *
+ * @entity: pointer to &struct media_entity
+ * @list: List of sub-devices
+ * @owner: The owner is the same as the driver's &struct device owner.
+ * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
+ *	owner. Initialized by v4l2_device_register_subdev().
+ * @flags: subdev flags. Can be:
+ *	%V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device;
+ *	%V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is a spi device;
+ *	%V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if this subdev needs a
+ *	device node;
+ *	%V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if this subdev generates
+ *	events.
+ *
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @ops: pointer to struct &v4l2_subdev_ops
+ * @internal_ops: pointer to struct &v4l2_subdev_internal_ops.
+ *	Never call these internal ops from within a driver!
+ * @ctrl_handler: The control handler of this subdev. May be NULL.
+ * @name: Name of the sub-device. Please notice that the name must be unique.
+ * @grp_id: can be used to group similar subdevs.
Value is driver-specific + * @dev_priv: pointer to private data + * @host_priv: pointer to private data used by the device where the subdev + * is attached. + * @devnode: subdev device node + * @dev: pointer to the physical device, if any + * @fwnode: The fwnode_handle of the subdev, usually the same as + * either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL). + * @async_list: Links this subdev to a global subdev_list or + * @notifier->done_list list. + * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of + * &struct v4l2_async_subdev_endpoint. + * @subdev_notifier: A sub-device notifier implicitly registered for the sub- + * device using v4l2_async_register_subdev_sensor(). + * @asc_list: Async connection list, of &struct + * v4l2_async_connection.subdev_entry. + * @pdata: common part of subdevice platform data + * @state_lock: A pointer to a lock used for all the subdev's states, set by the + * driver. This is optional. If NULL, each state instance will get + * a lock of its own. + * @privacy_led: Optional pointer to a LED classdev for the privacy LED for sensors. + * @active_state: Active state for the subdev (NULL for subdevs tracking the + * state internally). Initialized by calling + * v4l2_subdev_init_finalize(). + * @enabled_pads: Bitmask of enabled pads used by v4l2_subdev_enable_streams() + * and v4l2_subdev_disable_streams() helper functions for + * fallback cases. + * @s_stream_enabled: Tracks whether streaming has been enabled with s_stream. + * This is only for call_s_stream() internal use. + * + * Each instance of a subdev driver should create this struct, either + * stand-alone or embedded in a larger struct. + * + * This structure should be initialized by v4l2_subdev_init() or one of + * its variants: v4l2_spi_subdev_init(), v4l2_i2c_subdev_init(). 
+ */ +struct v4l2_subdev { +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_entity entity; +#endif + struct list_head list; + struct module *owner; + bool owner_v4l2_dev; + u32 flags; + struct v4l2_device *v4l2_dev; + const struct v4l2_subdev_ops *ops; + const struct v4l2_subdev_internal_ops *internal_ops; + struct v4l2_ctrl_handler *ctrl_handler; + char name[52]; + u32 grp_id; + void *dev_priv; + void *host_priv; + struct video_device *devnode; + struct device *dev; + struct fwnode_handle *fwnode; + struct list_head async_list; + struct list_head async_subdev_endpoint_list; + struct v4l2_async_notifier *subdev_notifier; + struct list_head asc_list; + struct v4l2_subdev_platform_data *pdata; + struct mutex *state_lock; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + + struct led_classdev *privacy_led; + + /* + * TODO: active_state should most likely be changed from a pointer to an + * embedded field. For the time being it's kept as a pointer to more + * easily catch uses of active_state in the cases where the driver + * doesn't support it. + */ + struct v4l2_subdev_state *active_state; + u64 enabled_pads; + bool s_stream_enabled; +}; + + +/** + * media_entity_to_v4l2_subdev - Returns a &struct v4l2_subdev from + * the &struct media_entity embedded in it. + * + * @ent: pointer to &struct media_entity. + */ +#define media_entity_to_v4l2_subdev(ent) \ +({ \ + typeof(ent) __me_sd_ent = (ent); \ + \ + __me_sd_ent ? \ + container_of(__me_sd_ent, struct v4l2_subdev, entity) : \ + NULL; \ +}) + +/** + * vdev_to_v4l2_subdev - Returns a &struct v4l2_subdev from + * the &struct video_device embedded on it. 
+ * + * @vdev: pointer to &struct video_device + */ +#define vdev_to_v4l2_subdev(vdev) \ + ((struct v4l2_subdev *)video_get_drvdata(vdev)) + +/** + * struct v4l2_subdev_fh - Used for storing subdev information per file handle + * + * @vfh: pointer to &struct v4l2_fh + * @state: pointer to &struct v4l2_subdev_state + * @owner: module pointer to the owner of this file handle + * @client_caps: bitmask of ``V4L2_SUBDEV_CLIENT_CAP_*`` + */ +struct v4l2_subdev_fh { + struct v4l2_fh vfh; + struct module *owner; +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + struct v4l2_subdev_state *state; + u64 client_caps; +#endif +}; + +/** + * to_v4l2_subdev_fh - Returns a &struct v4l2_subdev_fh from + * the &struct v4l2_fh embedded on it. + * + * @fh: pointer to &struct v4l2_fh + */ +#define to_v4l2_subdev_fh(fh) \ + container_of(fh, struct v4l2_subdev_fh, vfh) + +extern const struct v4l2_file_operations v4l2_subdev_fops; + +/** + * v4l2_set_subdevdata - Sets V4L2 dev private device data + * + * @sd: pointer to &struct v4l2_subdev + * @p: pointer to the private device data to be stored. + */ +static inline void v4l2_set_subdevdata(struct v4l2_subdev *sd, void *p) +{ + sd->dev_priv = p; +} + +/** + * v4l2_get_subdevdata - Gets V4L2 dev private device data + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the pointer to the private device data to be stored. + */ +static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd) +{ + return sd->dev_priv; +} + +/** + * v4l2_set_subdev_hostdata - Sets V4L2 dev private host data + * + * @sd: pointer to &struct v4l2_subdev + * @p: pointer to the private data to be stored. + */ +static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p) +{ + sd->host_priv = p; +} + +/** + * v4l2_get_subdev_hostdata - Gets V4L2 dev private data + * + * @sd: pointer to &struct v4l2_subdev + * + * Returns the pointer to the private host data to be stored. 
+ */ +static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd) +{ + return sd->host_priv; +} + +#ifdef CONFIG_MEDIA_CONTROLLER + +/** + * v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode + * endpoint, assuming 1:1 port:pad + * + * @entity: Pointer to the subdev entity + * @endpoint: Pointer to a parsed fwnode endpoint + * + * This function can be used as the .get_fwnode_pad operation for + * subdevices that map port numbers and pad indexes 1:1. If the endpoint + * is owned by the subdevice, the function returns the endpoint port + * number. + * + * Returns the endpoint port number on success or a negative error code. + */ +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint); + +/** + * v4l2_subdev_link_validate_default - validates a media link + * + * @sd: pointer to &struct v4l2_subdev + * @link: pointer to &struct media_link + * @source_fmt: pointer to &struct v4l2_subdev_format + * @sink_fmt: pointer to &struct v4l2_subdev_format + * + * This function ensures that width, height and the media bus pixel + * code are equal on both source and sink of the link. + */ +int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); + +/** + * v4l2_subdev_link_validate - validates a media link + * + * @link: pointer to &struct media_link + * + * This function calls the subdev's link_validate ops to validate + * if a media link is valid for streaming. It also internally + * calls v4l2_subdev_link_validate_default() to ensure that + * width, height and the media bus pixel code are equal on both + * source and sink of the link. + * + * The function can be used as a drop-in &media_entity_ops.link_validate + * implementation for v4l2_subdev instances. 
It supports all links between + * subdevs, as well as links between subdevs and video devices, provided that + * the video devices also implement their &media_entity_ops.link_validate + * operation. + */ +int v4l2_subdev_link_validate(struct media_link *link); + +/** + * v4l2_subdev_has_pad_interdep - MC has_pad_interdep implementation for subdevs + * + * @entity: pointer to &struct media_entity + * @pad0: pad number for the first pad + * @pad1: pad number for the second pad + * + * This function is an implementation of the + * media_entity_operations.has_pad_interdep operation for subdevs that + * implement the multiplexed streams API (as indicated by the + * V4L2_SUBDEV_FL_STREAMS subdev flag). + * + * It considers two pads interdependent if there is an active route between pad0 + * and pad1. + */ +bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1); + +/** + * __v4l2_subdev_state_alloc - allocate v4l2_subdev_state + * + * @sd: pointer to &struct v4l2_subdev for which the state is being allocated. + * @lock_name: name of the state lock + * @key: lock_class_key for the lock + * + * Must call __v4l2_subdev_state_free() when state is no longer needed. + * + * Not to be called directly by the drivers. + */ +struct v4l2_subdev_state *__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, + const char *lock_name, + struct lock_class_key *key); + +/** + * __v4l2_subdev_state_free - free a v4l2_subdev_state + * + * @state: v4l2_subdev_state to be freed. + * + * Not to be called directly by the drivers. + */ +void __v4l2_subdev_state_free(struct v4l2_subdev_state *state); + +/** + * v4l2_subdev_init_finalize() - Finalizes the initialization of the subdevice + * @sd: The subdev + * + * This function finalizes the initialization of the subdev, including + * allocation of the active state for the subdev. 
+ * + * This function must be called by the subdev drivers that use the centralized + * active state, after the subdev struct has been initialized and + * media_entity_pads_init() has been called, but before registering the + * subdev. + * + * The user must call v4l2_subdev_cleanup() when the subdev is being removed. + */ +#define v4l2_subdev_init_finalize(sd) \ + ({ \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":sd->active_state->lock"; \ + __v4l2_subdev_init_finalize(sd, name, &__key); \ + }) + +int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, + struct lock_class_key *key); + +/** + * v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice + * @sd: The subdevice + * + * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of + * its release if resources have been associated with it using + * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize(). + */ +void v4l2_subdev_cleanup(struct v4l2_subdev *sd); + +/* + * A macro to generate the macro or function name for sub-devices state access + * wrapper macros below. + */ +#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \ + __v4l2_subdev_state_get_ ## NAME ## ARG + +/* + * A macro to constify the return value of the state accessors when the state + * parameter is const. + */ +#define __v4l2_subdev_state_constify_ret(state, value) \ + _Generic(state, \ + const struct v4l2_subdev_state *: (const typeof(*(value)) *)(value), \ + struct v4l2_subdev_state *: (value) \ + ) + +/** + * v4l2_subdev_state_get_format() - Get pointer to a stream format + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad + + * stream in the subdev state. + * + * For stream-unaware drivers the format for the corresponding pad is returned. + * If the pad does not exist, NULL is returned. 
+ */ +/* + * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with + * two or three arguments. The purpose of the __v4l2_subdev_state_gen_call() + * macro is to come up with the name of the function or macro to call, using + * the last two arguments (_stream and _pad). The selected function or macro is + * then called using the arguments specified by the caller. The + * __v4l2_subdev_state_constify_ret() macro constifies the returned pointer + * when the state is const, allowing the state accessors to guarantee + * const-correctness in all cases. + * + * A similar arrangement is used for v4l2_subdev_state_crop(), + * v4l2_subdev_state_compose() and v4l2_subdev_state_get_interval() below. + */ +#define v4l2_subdev_state_get_format(state, pad, ...) \ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_format_pad(state, pad) \ + __v4l2_subdev_state_get_format(state, pad, 0) +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +/** + * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to crop rectangle for the given pad + stream in the + * subdev state. + * + * For stream-unaware drivers the crop rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_crop(state, pad, ...) 
\ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_crop_pad(state, pad) \ + __v4l2_subdev_state_get_crop(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream); + +/** + * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to compose rectangle for the given pad + stream in the + * subdev state. + * + * For stream-unaware drivers the compose rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_compose(state, pad, ...) \ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_compose_pad(state, pad) \ + __v4l2_subdev_state_get_compose(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +/** + * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) + * + * This returns a pointer to the frame interval for the given pad + stream in + * the subdev state. + * + * For stream-unaware drivers the frame interval for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. + */ +#define v4l2_subdev_state_get_interval(state, pad, ...) 
\ + __v4l2_subdev_state_constify_ret(state, \ + __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \ + ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__)) +#define __v4l2_subdev_state_get_interval_pad(state, pad) \ + __v4l2_subdev_state_get_interval(state, pad, 0) +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); + +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + +/** + * v4l2_subdev_get_fmt() - Fill format based on state + * @sd: subdevice + * @state: subdevice state + * @format: pointer to &struct v4l2_subdev_format + * + * Fill @format->format field based on the information in the @format struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_fmt if the subdev driver does not need to + * do anything special in their get_fmt op. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format); + +/** + * v4l2_subdev_get_frame_interval() - Fill frame interval based on state + * @sd: subdevice + * @state: subdevice state + * @fi: pointer to &struct v4l2_subdev_frame_interval + * + * Fill @fi->interval field based on the information in the @fi struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_frame_interval if the subdev driver does + * not need to do anything special in their get_frame_interval op. + * + * Returns 0 on success, error value otherwise. 
+ */ +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi); + +/** + * v4l2_subdev_set_routing() - Set given routing to subdev state + * @sd: The subdevice + * @state: The subdevice state + * @routing: Routing that will be copied to subdev state + * + * This will release old routing table (if any) from the state, allocate + * enough space for the given routing, and copy the routing. + * + * This can be used from the subdev driver's set_routing op, after validating + * the routing. + */ +int v4l2_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing); + +struct v4l2_subdev_route * +__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, + struct v4l2_subdev_route *route); + +/** + * for_each_active_route - iterate on all active routes of a routing table + * @routing: The routing table + * @route: The route iterator + */ +#define for_each_active_route(routing, route) \ + for ((route) = NULL; \ + ((route) = __v4l2_subdev_next_active_route((routing), (route)));) + +/** + * v4l2_subdev_set_routing_with_fmt() - Set given routing and format to subdev + * state + * @sd: The subdevice + * @state: The subdevice state + * @routing: Routing that will be copied to subdev state + * @fmt: Format used to initialize all the streams + * + * This is the same as v4l2_subdev_set_routing, but additionally initializes + * all the streams using the given format. 
+ */ +int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + const struct v4l2_subdev_krouting *routing, + const struct v4l2_mbus_framefmt *fmt); + +/** + * v4l2_subdev_routing_find_opposite_end() - Find the opposite stream + * @routing: routing used to find the opposite side + * @pad: pad id + * @stream: stream id + * @other_pad: pointer used to return the opposite pad + * @other_stream: pointer used to return the opposite stream + * + * This function uses the routing table to find the pad + stream which is + * opposite the given pad + stream. + * + * @other_pad and/or @other_stream can be NULL if the caller does not need the + * value. + * + * Returns 0 on success, or -EINVAL if no matching route is found. + */ +int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, + u32 pad, u32 stream, u32 *other_pad, + u32 *other_stream); + +/** + * v4l2_subdev_state_get_opposite_stream_format() - Get pointer to opposite + * stream format + * @state: subdevice state + * @pad: pad id + * @stream: stream id + * + * This returns a pointer to &struct v4l2_mbus_framefmt for the pad + stream + * that is opposite the given pad + stream in the subdev state. + * + * If the state does not contain the given pad + stream, NULL is returned. + */ +struct v4l2_mbus_framefmt * +v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, + u32 pad, u32 stream); + +/** + * v4l2_subdev_state_xlate_streams() - Translate streams from one pad to another + * + * @state: Subdevice state + * @pad0: The first pad + * @pad1: The second pad + * @streams: Streams bitmask on the first pad + * + * Streams on sink pads of a subdev are routed to source pads as expressed in + * the subdev state routing table. Stream numbers don't necessarily match on + * the sink and source side of a route. 
This function translates stream numbers + * on @pad0, expressed as a bitmask in @streams, to the corresponding streams + * on @pad1 using the routing table from the @state. It returns the stream mask + * on @pad1, and updates @streams with the streams that have been found in the + * routing table. + * + * @pad0 and @pad1 must be a sink and a source, in any order. + * + * Return: The bitmask of streams of @pad1 that are routed to @streams on @pad0. + */ +u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, + u32 pad0, u32 pad1, u64 *streams); + +/** + * enum v4l2_subdev_routing_restriction - Subdevice internal routing restrictions + * + * @V4L2_SUBDEV_ROUTING_NO_1_TO_N: + * an input stream shall not be routed to multiple output streams (stream + * duplication) + * @V4L2_SUBDEV_ROUTING_NO_N_TO_1: + * multiple input streams shall not be routed to the same output stream + * (stream merging) + * @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: + * all streams from a sink pad must be routed to a single source pad + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: + * all streams on a source pad must originate from a single sink pad + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: + * source pads shall not contain multiplexed streams + * @V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: + * sink pads shall not contain multiplexed streams + * @V4L2_SUBDEV_ROUTING_ONLY_1_TO_1: + * only non-overlapping 1-to-1 stream routing is allowed (a combination of + * @V4L2_SUBDEV_ROUTING_NO_1_TO_N and @V4L2_SUBDEV_ROUTING_NO_N_TO_1) + * @V4L2_SUBDEV_ROUTING_NO_STREAM_MIX: + * all streams from a sink pad must be routed to a single source pad, and + * that source pad shall not get routes from any other sink pad + * (a combination of @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX and + * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) + * @V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING: + * no multiplexed streams allowed on either source or sink sides. 
+ */ +enum v4l2_subdev_routing_restriction { + V4L2_SUBDEV_ROUTING_NO_1_TO_N = BIT(0), + V4L2_SUBDEV_ROUTING_NO_N_TO_1 = BIT(1), + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX = BIT(2), + V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX = BIT(3), + V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING = BIT(4), + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING = BIT(5), + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 = + V4L2_SUBDEV_ROUTING_NO_1_TO_N | + V4L2_SUBDEV_ROUTING_NO_N_TO_1, + V4L2_SUBDEV_ROUTING_NO_STREAM_MIX = + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX | + V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX, + V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING = + V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING | + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING, +}; + +/** + * v4l2_subdev_routing_validate() - Verify that routes comply with driver + * constraints + * @sd: The subdevice + * @routing: Routing to verify + * @disallow: Restrictions on routes + * + * This verifies that the given routing complies with the @disallow constraints. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, + const struct v4l2_subdev_krouting *routing, + enum v4l2_subdev_routing_restriction disallow); + +/** + * v4l2_subdev_enable_streams() - Enable streams on a pad + * @sd: The subdevice + * @pad: The pad + * @streams_mask: Bitmask of streams to enable + * + * This function enables streams on a source @pad of a subdevice. The pad is + * identified by its index, while the streams are identified by the + * @streams_mask bitmask. This allows enabling multiple streams on a pad at + * once. + * + * Enabling a stream that is already enabled isn't allowed. If @streams_mask + * contains an already enabled stream, this function returns -EALREADY without + * performing any operation. + * + * Per-stream enable is only available for subdevs that implement the + * .enable_streams() and .disable_streams() operations. 
For other subdevs, this + * function implements a best-effort compatibility by calling the .s_stream() + * operation, limited to subdevs that have a single source pad. + * + * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0). + * + * Return: + * * 0: Success + * * -EALREADY: One of the streams in streams_mask is already enabled + * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad + * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is + * impossible because the subdev has multiple source pads + */ +int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask); + +/** + * v4l2_subdev_disable_streams() - Disable streams on a pad + * @sd: The subdevice + * @pad: The pad + * @streams_mask: Bitmask of streams to disable + * + * This function disables streams on a source @pad of a subdevice. The pad is + * identified by its index, while the streams are identified by the + * @streams_mask bitmask. This allows disabling multiple streams on a pad at + * once. + * + * Disabling a streams that is not enabled isn't allowed. If @streams_mask + * contains a disabled stream, this function returns -EALREADY without + * performing any operation. + * + * Per-stream disable is only available for subdevs that implement the + * .enable_streams() and .disable_streams() operations. For other subdevs, this + * function implements a best-effort compatibility by calling the .s_stream() + * operation, limited to subdevs that have a single source pad. + * + * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0). 
+ * + * Return: + * * 0: Success + * * -EALREADY: One of the streams in streams_mask is not enabled + * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad + * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is + * impossible because the subdev has multiple source pads + */ +int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask); + +/** + * v4l2_subdev_s_stream_helper() - Helper to implement the subdev s_stream + * operation using enable_streams and disable_streams + * @sd: The subdevice + * @enable: Enable or disable streaming + * + * Subdevice drivers that implement the streams-aware + * &v4l2_subdev_pad_ops.enable_streams and &v4l2_subdev_pad_ops.disable_streams + * operations can use this helper to implement the legacy + * &v4l2_subdev_video_ops.s_stream operation. + * + * This helper can only be used by subdevs that have a single source pad. + * + * Return: 0 on success, or a negative error code otherwise. + */ +int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable); + +#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ + +#endif /* CONFIG_MEDIA_CONTROLLER */ + +/** + * v4l2_subdev_lock_state() - Locks the subdev state + * @state: The subdevice state + * + * Locks the given subdev state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state) +{ + mutex_lock(state->lock); +} + +/** + * v4l2_subdev_unlock_state() - Unlocks the subdev state + * @state: The subdevice state + * + * Unlocks the given subdev state. + */ +static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state) +{ + mutex_unlock(state->lock); +} + +/** + * v4l2_subdev_lock_states - Lock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Locks the state of two sub-devices. + * + * The states must be unlocked with v4l2_subdev_unlock_states() after use. 
+ * + * This differs from calling v4l2_subdev_lock_state() on both states so that if + * the states share the same lock, the lock is acquired only once (so no + * deadlock occurs). The caller is responsible for ensuring the locks will + * always be acquired in the same order. + */ +static inline void v4l2_subdev_lock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_lock(state1->lock); + if (state1->lock != state2->lock) + mutex_lock(state2->lock); +} + +/** + * v4l2_subdev_unlock_states() - Unlock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Unlocks the state of two sub-devices. + * + * This differs from calling v4l2_subdev_unlock_state() on both states so that + * if the states share the same lock, the lock is released only once. + */ +static inline void v4l2_subdev_unlock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_unlock(state1->lock); + if (state1->lock != state2->lock) + mutex_unlock(state2->lock); +} + +/** + * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state + * is unlocked and returns it + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls + * lockdep_assert_not_held() to issue a warning if the state is locked. + * + * This function is to be used e.g. when getting the active state for the sole + * purpose of passing it forward, without accessing the state fields. 
+ */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_not_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state + * is locked and returns it + * + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls lockdep_assert_held() + * to issue a warning if the state is not locked. + * + * This function is to be used when the caller knows that the active state is + * already locked. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev + * state for the subdevice + * @sd: The subdevice + * + * Returns the locked active state for the subdevice, or NULL if the subdev + * does not support active state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + v4l2_subdev_lock_state(sd->active_state); + return sd->active_state; +} + +/** + * v4l2_subdev_init - initializes the sub-device struct + * + * @sd: pointer to the &struct v4l2_subdev to be initialized + * @ops: pointer to &struct v4l2_subdev_ops. + */ +void v4l2_subdev_init(struct v4l2_subdev *sd, + const struct v4l2_subdev_ops *ops); + +extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; + +/** + * v4l2_subdev_call - call an operation of a v4l2_subdev. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. 
+ * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * Example: err = v4l2_subdev_call(sd, video, s_std, norm); + */ +#define v4l2_subdev_call(sd, o, f, args...) \ + ({ \ + struct v4l2_subdev *__sd = (sd); \ + int __result; \ + if (!__sd) \ + __result = -ENODEV; \ + else if (!(__sd->ops->o && __sd->ops->o->f)) \ + __result = -ENOIOCTLCMD; \ + else if (v4l2_subdev_call_wrappers.o && \ + v4l2_subdev_call_wrappers.o->f) \ + __result = v4l2_subdev_call_wrappers.o->f( \ + __sd, ##args); \ + else \ + __result = __sd->ops->o->f(__sd, ##args); \ + __result; \ + }) + +/** + * v4l2_subdev_call_state_active - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev its active state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call(), except that this version can only be + * used for ops that take a subdev state as a parameter. The macro will get the + * active state, lock it before calling the op and unlock it after the call. + */ +#define v4l2_subdev_call_state_active(sd, o, f, args...) 
\ + ({ \ + int __result; \ + struct v4l2_subdev_state *state; \ + state = v4l2_subdev_get_unlocked_active_state(sd); \ + if (state) \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + if (state) \ + v4l2_subdev_unlock_state(state); \ + __result; \ + }) + +/** + * v4l2_subdev_call_state_try - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev a newly allocated try state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call_state_active(), except that as this + * version allocates a new state, this is only usable for + * V4L2_SUBDEV_FORMAT_TRY use cases. + * + * Note: only legacy non-MC drivers may need this macro. + */ +#define v4l2_subdev_call_state_try(sd, o, f, args...) \ + ({ \ + int __result; \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":state->lock"; \ + struct v4l2_subdev_state *state = \ + __v4l2_subdev_state_alloc(sd, name, &__key); \ + if (IS_ERR(state)) { \ + __result = PTR_ERR(state); \ + } else { \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + v4l2_subdev_unlock_state(state); \ + __v4l2_subdev_state_free(state); \ + } \ + __result; \ + }) + +/** + * v4l2_subdev_has_op - Checks if a subdev defines a certain operation. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: The group of callback functions in &struct v4l2_subdev_ops + * which @f is a part of. + * @f: callback function to be checked for its existence. 
+ */ +#define v4l2_subdev_has_op(sd, o, f) \ + ((sd)->ops->o && (sd)->ops->o->f) + +/** + * v4l2_subdev_notify_event() - Delivers event notification for subdevice + * @sd: The subdev for which to deliver the event + * @ev: The event to deliver + * + * Will deliver the specified event to all userspace event listeners which are + * subscribed to the v42l subdev event queue as well as to the bridge driver + * using the notify callback. The notification type for the notify callback + * will be %V4L2_DEVICE_NOTIFY_EVENT. + */ +void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev); + +/** + * v4l2_subdev_is_streaming() - Returns if the subdevice is streaming + * @sd: The subdevice + * + * v4l2_subdev_is_streaming() tells if the subdevice is currently streaming. + * "Streaming" here means whether .s_stream() or .enable_streams() has been + * successfully called, and the streaming has not yet been disabled. + * + * If the subdevice implements .enable_streams() this function must be called + * while holding the active state lock. + */ +bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd); + +#endif /* _V4L2_SUBDEV_H */ diff --git a/6.18.0/include-overrides/media/v4l2-vp9.h b/6.18.0/include-overrides/media/v4l2-vp9.h new file mode 100644 index 0000000..05478ad --- /dev/null +++ b/6.18.0/include-overrides/media/v4l2-vp9.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Helper functions for vp9 codecs. + * + * Copyright (c) 2021 Collabora, Ltd. + * + * Author: Andrzej Pietrasiewicz + */ + +#ifndef _MEDIA_V4L2_VP9_H +#define _MEDIA_V4L2_VP9_H + +#include + +/** + * struct v4l2_vp9_frame_mv_context - motion vector-related probabilities + * + * @joint: motion vector joint probabilities. + * @sign: motion vector sign probabilities. + * @classes: motion vector class probabilities. + * @class0_bit: motion vector class0 bit probabilities. + * @bits: motion vector bits probabilities. 
+ * @class0_fr: motion vector class0 fractional bit probabilities. + * @fr: motion vector fractional bit probabilities. + * @class0_hp: motion vector class0 high precision fractional bit probabilities. + * @hp: motion vector high precision fractional bit probabilities. + * + * A member of v4l2_vp9_frame_context. + */ +struct v4l2_vp9_frame_mv_context { + u8 joint[3]; + u8 sign[2]; + u8 classes[2][10]; + u8 class0_bit[2]; + u8 bits[2][10]; + u8 class0_fr[2][2][3]; + u8 fr[2][3]; + u8 class0_hp[2]; + u8 hp[2]; +}; + +/** + * struct v4l2_vp9_frame_context - frame probabilities, including motion-vector related + * + * @tx8: TX 8x8 probabilities. + * @tx16: TX 16x16 probabilities. + * @tx32: TX 32x32 probabilities. + * @coef: coefficient probabilities. + * @skip: skip probabilities. + * @inter_mode: inter mode probabilities. + * @interp_filter: interpolation filter probabilities. + * @is_inter: is inter-block probabilities. + * @comp_mode: compound prediction mode probabilities. + * @single_ref: single ref probabilities. + * @comp_ref: compound ref probabilities. + * @y_mode: Y prediction mode probabilities. + * @uv_mode: UV prediction mode probabilities. + * @partition: partition probabilities. + * @mv: motion vector probabilities. + * + * Drivers which need to keep track of frame context(s) can use this struct. + * The members correspond to probability tables, which are specified only implicitly in the + * vp9 spec. Section 10.5 "Default probability tables" contains all the types of involved + * tables, i.e. the actual tables are of the same kind, and when they are reset (which is + * mandated by the spec sometimes) they are overwritten with values from the default tables. 
+ */ +struct v4l2_vp9_frame_context { + u8 tx8[2][1]; + u8 tx16[2][2]; + u8 tx32[2][3]; + u8 coef[4][2][2][6][6][3]; + u8 skip[3]; + u8 inter_mode[7][3]; + u8 interp_filter[4][2]; + u8 is_inter[4]; + u8 comp_mode[5]; + u8 single_ref[5][2]; + u8 comp_ref[5]; + u8 y_mode[4][9]; + u8 uv_mode[10][9]; + u8 partition[16][3]; + + struct v4l2_vp9_frame_mv_context mv; +}; + +/** + * struct v4l2_vp9_frame_symbol_counts - pointers to arrays of symbol counts + * + * @partition: partition counts. + * @skip: skip counts. + * @intra_inter: is inter-block counts. + * @tx32p: TX32 counts. + * @tx16p: TX16 counts. + * @tx8p: TX8 counts. + * @y_mode: Y prediction mode counts. + * @uv_mode: UV prediction mode counts. + * @comp: compound prediction mode counts. + * @comp_ref: compound ref counts. + * @single_ref: single ref counts. + * @mv_mode: inter mode counts. + * @filter: interpolation filter counts. + * @mv_joint: motion vector joint counts. + * @sign: motion vector sign counts. + * @classes: motion vector class counts. + * @class0: motion vector class0 bit counts. + * @bits: motion vector bits counts. + * @class0_fp: motion vector class0 fractional bit counts. + * @fp: motion vector fractional bit counts. + * @class0_hp: motion vector class0 high precision fractional bit counts. + * @hp: motion vector high precision fractional bit counts. + * @coeff: coefficient counts. + * @eob: eob counts + * + * The fields correspond to what is specified in section 8.3 "Clear counts process" of the spec. + * Different pieces of hardware can report the counts in different order, so we cannot rely on + * simply overlaying a struct on a relevant block of memory. Instead we provide pointers to + * arrays or array of pointers to arrays in case of coeff, or array of pointers for eob. 
+ */ +struct v4l2_vp9_frame_symbol_counts { + u32 (*partition)[16][4]; + u32 (*skip)[3][2]; + u32 (*intra_inter)[4][2]; + u32 (*tx32p)[2][4]; + u32 (*tx16p)[2][4]; + u32 (*tx8p)[2][2]; + u32 (*y_mode)[4][10]; + u32 (*uv_mode)[10][10]; + u32 (*comp)[5][2]; + u32 (*comp_ref)[5][2]; + u32 (*single_ref)[5][2][2]; + u32 (*mv_mode)[7][4]; + u32 (*filter)[4][3]; + u32 (*mv_joint)[4]; + u32 (*sign)[2][2]; + u32 (*classes)[2][11]; + u32 (*class0)[2][2]; + u32 (*bits)[2][10][2]; + u32 (*class0_fp)[2][2][4]; + u32 (*fp)[2][4]; + u32 (*class0_hp)[2][2]; + u32 (*hp)[2][2]; + u32 (*coeff[4][2][2][6][6])[3]; + u32 *eob[4][2][2][6][6][2]; +}; + +extern const u8 v4l2_vp9_kf_y_mode_prob[10][10][9]; /* Section 10.4 of the spec */ +extern const u8 v4l2_vp9_kf_partition_probs[16][3]; /* Section 10.4 of the spec */ +extern const u8 v4l2_vp9_kf_uv_mode_prob[10][9]; /* Section 10.4 of the spec */ +extern const struct v4l2_vp9_frame_context v4l2_vp9_default_probs; /* Section 10.5 of the spec */ + +/** + * v4l2_vp9_fw_update_probs() - Perform forward update of vp9 probabilities + * + * @probs: current probabilities values + * @deltas: delta values from compressed header + * @dec_params: vp9 frame decoding parameters + * + * This function performs forward updates of probabilities for the vp9 boolean decoder. + * The frame header can contain a directive to update the probabilities (deltas), if so, then + * the deltas are provided in the header, too. The userspace parses those and passes the said + * deltas struct to the kernel. + */ +void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs, + const struct v4l2_ctrl_vp9_compressed_hdr *deltas, + const struct v4l2_ctrl_vp9_frame *dec_params); + +/** + * v4l2_vp9_reset_frame_ctx() - Reset appropriate frame context + * + * @dec_params: vp9 frame decoding parameters + * @frame_context: array of the 4 frame contexts + * + * This function resets appropriate frame contexts, based on what's in dec_params. 
+ * + * Returns the frame context index after the update, which might be reset to zero if + * mandated by the spec. + */ +u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params, + struct v4l2_vp9_frame_context *frame_context); + +/** + * v4l2_vp9_adapt_coef_probs() - Perform backward update of vp9 coefficients probabilities + * + * @probs: current probabilities values + * @counts: values of symbol counts after the current frame has been decoded + * @use_128: flag to request that 128 is used as update factor if true, otherwise 112 is used + * @frame_is_intra: flag indicating that FrameIsIntra is true + * + * This function performs backward updates of coefficients probabilities for the vp9 boolean + * decoder. After a frame has been decoded the counts of how many times a given symbol has + * occurred are known and are used to update the probability of each symbol. + */ +void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + bool use_128, + bool frame_is_intra); + +/** + * v4l2_vp9_adapt_noncoef_probs() - Perform backward update of vp9 non-coefficients probabilities + * + * @probs: current probabilities values + * @counts: values of symbol counts after the current frame has been decoded + * @reference_mode: specifies the type of inter prediction to be used. See + * &v4l2_vp9_reference_mode for more details + * @interpolation_filter: specifies the filter selection used for performing inter prediction. + * See &v4l2_vp9_interpolation_filter for more details + * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details + * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags + * + * This function performs backward updates of non-coefficients probabilities for the vp9 boolean + * decoder. After a frame has been decoded the counts of how many times a given symbol has + * occurred are known and are used to update the probability of each symbol. 
+ */ +void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs, + struct v4l2_vp9_frame_symbol_counts *counts, + u8 reference_mode, u8 interpolation_filter, u8 tx_mode, + u32 flags); + +/** + * v4l2_vp9_seg_feat_enabled() - Check if a segmentation feature is enabled + * + * @feature_enabled: array of 8-bit flags (for all segments) + * @feature: id of the feature to check + * @segid: id of the segment to look up + * + * This function returns true if a given feature is active in a given segment. + */ +bool +v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled, + unsigned int feature, + unsigned int segid); + +#endif /* _MEDIA_V4L2_VP9_H */ diff --git a/6.18.0/include-overrides/media/videobuf2-core.h b/6.18.0/include-overrides/media/videobuf2-core.h new file mode 100644 index 0000000..9b02aeb --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-core.h @@ -0,0 +1,1348 @@ +/* + * videobuf2-core.h - Video Buffer 2 Core Framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_CORE_H +#define _MEDIA_VIDEOBUF2_CORE_H + +#include +#include +#include +#include +#include +#include +#include + +#define VB2_MAX_FRAME (32) +#define VB2_MAX_PLANES (8) + +/** + * enum vb2_memory - type of memory model used to make the buffers visible + * on userspace. + * + * @VB2_MEMORY_UNKNOWN: Buffer status is unknown or it is not used yet on + * userspace. + * @VB2_MEMORY_MMAP: The buffers are allocated by the Kernel and it is + * memory mapped via mmap() ioctl. This model is + * also used when the user is using the buffers via + * read() or write() system calls. + * @VB2_MEMORY_USERPTR: The buffers was allocated in userspace and it is + * memory mapped via mmap() ioctl. 
+ * @VB2_MEMORY_DMABUF: The buffers are passed to userspace via DMA buffer. + */ +enum vb2_memory { + VB2_MEMORY_UNKNOWN = 0, + VB2_MEMORY_MMAP = 1, + VB2_MEMORY_USERPTR = 2, + VB2_MEMORY_DMABUF = 4, +}; + +struct vb2_fileio_data; +struct vb2_threadio_data; +struct vb2_buffer; + +/** + * struct vb2_mem_ops - memory handling/memory allocator operations. + * @alloc: allocate video memory and, optionally, allocator private data, + * return ERR_PTR() on failure or a pointer to allocator private, + * per-buffer data on success; the returned private structure + * will then be passed as @buf_priv argument to other ops in this + * structure. The size argument to this function shall be + * *page aligned*. + * @put: inform the allocator that the buffer will no longer be used; + * usually will result in the allocator freeing the buffer (if + * no other users of this buffer are present); the @buf_priv + * argument is the allocator private per-buffer structure + * previously returned from the alloc callback. + * @get_dmabuf: acquire userspace memory for a hardware operation; used for + * DMABUF memory types. + * @get_userptr: acquire userspace memory for a hardware operation; used for + * USERPTR memory types; vaddr is the address passed to the + * videobuf2 layer when queuing a video buffer of USERPTR type; + * should return an allocator private per-buffer structure + * associated with the buffer on success, ERR_PTR() on failure; + * the returned private structure will then be passed as @buf_priv + * argument to other ops in this structure. + * @put_userptr: inform the allocator that a USERPTR buffer will no longer + * be used. + * @prepare: called every time the buffer is passed from userspace to the + * driver, useful for cache synchronisation, optional. + * @finish: called every time the buffer is passed back from the driver + * to the userspace, also optional. 
+ * @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation; + * used for DMABUF memory types; dev is the alloc device + * dbuf is the shared dma_buf; returns ERR_PTR() on failure; + * allocator private per-buffer structure on success; + * this needs to be used for further accesses to the buffer. + * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF + * buffer is no longer used; the @buf_priv argument is the + * allocator private per-buffer structure previously returned + * from the attach_dmabuf callback. + * @map_dmabuf: request for access to the dmabuf from allocator; the allocator + * of dmabuf is informed that this driver is going to use the + * dmabuf. + * @unmap_dmabuf: releases access control to the dmabuf - allocator is notified + * that this driver is done using the dmabuf for now. + * @vaddr: return a kernel virtual address to a given memory buffer + * associated with the passed private structure or NULL if no + * such mapping exists. + * @cookie: return allocator specific cookie for a given memory buffer + * associated with the passed private structure or NULL if not + * available. + * @num_users: return the current number of users of a memory buffer; + * return 1 if the videobuf2 layer (or actually the driver using + * it) is the only user. + * @mmap: setup a userspace mapping for a given memory buffer under + * the provided virtual memory region. + * + * Those operations are used by the videobuf2 core to implement the memory + * handling/memory allocators for each type of supported streaming I/O method. + * + * .. note:: + * #) Required ops for USERPTR types: get_userptr, put_userptr. + * + * #) Required ops for MMAP types: alloc, put, num_users, mmap. + * + * #) Required ops for read/write access types: alloc, put, num_users, vaddr. + * + * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, + * map_dmabuf, unmap_dmabuf. 
+ */ +struct vb2_mem_ops { + void *(*alloc)(struct vb2_buffer *vb, + struct device *dev, + unsigned long size); + void (*put)(void *buf_priv); + struct dma_buf *(*get_dmabuf)(struct vb2_buffer *vb, + void *buf_priv, + unsigned long flags); + + void *(*get_userptr)(struct vb2_buffer *vb, + struct device *dev, + unsigned long vaddr, + unsigned long size); + void (*put_userptr)(void *buf_priv); + + void (*prepare)(void *buf_priv); + void (*finish)(void *buf_priv); + + void *(*attach_dmabuf)(struct vb2_buffer *vb, + struct device *dev, + struct dma_buf *dbuf, + unsigned long size); + void (*detach_dmabuf)(void *buf_priv); + int (*map_dmabuf)(void *buf_priv); + void (*unmap_dmabuf)(void *buf_priv); + + void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv); + void *(*cookie)(struct vb2_buffer *vb, void *buf_priv); + + unsigned int (*num_users)(void *buf_priv); + + int (*mmap)(void *buf_priv, struct vm_area_struct *vma); +}; + +/** + * struct vb2_plane - plane information. + * @mem_priv: private data with this plane. + * @dbuf: dma_buf - shared buffer object. + * @dbuf_mapped: flag to show whether dbuf is mapped or not + * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a + * previous plane of the buffer. + * @bytesused: number of bytes occupied by data in the plane (payload). + * @length: size of this plane (NOT the payload) in bytes. The maximum + * valid size is MAX_UINT - PAGE_SIZE. + * @min_length: minimum required size of this plane (NOT the payload) in bytes. + * @length is always greater or equal to @min_length, and like + * @length, it is limited to MAX_UINT - PAGE_SIZE. + * @m: Union with memtype-specific data. + * @m.offset: when memory in the associated struct vb2_buffer is + * %VB2_MEMORY_MMAP, equals the offset from the start of + * the device memory for this plane (or is a "cookie" that + * should be passed to mmap() called on the video node). 
+ * @m.userptr: when memory is %VB2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane. + * @m.fd: when memory is %VB2_MEMORY_DMABUF, a userspace file + * descriptor associated with this plane. + * @data_offset: offset in the plane to the start of data; usually 0, + * unless there is a header in front of the data. + * + * Should contain enough information to be able to cover all the fields + * of &struct v4l2_plane at videodev2.h. + */ +struct vb2_plane { + void *mem_priv; + struct dma_buf *dbuf; + unsigned int dbuf_mapped; + bool dbuf_duplicated; + unsigned int bytesused; + unsigned int length; + unsigned int min_length; + union { + unsigned int offset; + unsigned long userptr; + int fd; + } m; + unsigned int data_offset; +}; + +/** + * enum vb2_io_modes - queue access methods. + * @VB2_MMAP: driver supports MMAP with streaming API. + * @VB2_USERPTR: driver supports USERPTR with streaming API. + * @VB2_READ: driver supports read() style access. + * @VB2_WRITE: driver supports write() style access. + * @VB2_DMABUF: driver supports DMABUF with streaming API. + */ +enum vb2_io_modes { + VB2_MMAP = BIT(0), + VB2_USERPTR = BIT(1), + VB2_READ = BIT(2), + VB2_WRITE = BIT(3), + VB2_DMABUF = BIT(4), +}; + +/** + * enum vb2_buffer_state - current video buffer state. + * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control. + * @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request. + * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf2. + * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf2, but not in driver. + * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used + * in a hardware operation. + * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf2, but + * not yet dequeued to userspace. + * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer + * has ended with an error, which will be reported + * to the userspace when it is dequeued. 
+ */ +enum vb2_buffer_state { + VB2_BUF_STATE_DEQUEUED, + VB2_BUF_STATE_IN_REQUEST, + VB2_BUF_STATE_PREPARING, + VB2_BUF_STATE_QUEUED, + VB2_BUF_STATE_ACTIVE, + VB2_BUF_STATE_DONE, + VB2_BUF_STATE_ERROR, +}; + +struct vb2_queue; + +/** + * struct vb2_buffer - represents a video buffer. + * @vb2_queue: pointer to &struct vb2_queue with the queue to + * which this driver belongs. + * @index: id number of the buffer. + * @type: buffer type. + * @memory: the method, in which the actual data is passed. + * @num_planes: number of planes in the buffer + * on an internal driver queue. + * @timestamp: frame timestamp in ns. + * @request: the request this buffer is associated with. + * @req_obj: used to bind this buffer to a request. This + * request object has a refcount. + */ +struct vb2_buffer { + struct vb2_queue *vb2_queue; + unsigned int index; + unsigned int type; + unsigned int memory; + unsigned int num_planes; + u64 timestamp; + struct media_request *request; + struct media_request_object req_obj; + + /* private: internal use only + * + * state: current buffer state; do not change + * synced: this buffer has been synced for DMA, i.e. the + * 'prepare' memop was called. It is cleared again + * after the 'finish' memop is called. + * prepared: this buffer has been prepared, i.e. the + * buf_prepare op was called. It is cleared again + * after the 'buf_finish' op is called. + * copied_timestamp: the timestamp of this capture buffer was copied + * from an output buffer. + * skip_cache_sync_on_prepare: when set buffer's ->prepare() function + * skips cache sync/invalidation. + * skip_cache_sync_on_finish: when set buffer's ->finish() function + * skips cache sync/invalidation. 
+ * planes: per-plane information; do not change + * queued_entry: entry on the queued buffers list, which holds + * all buffers queued from userspace + * done_entry: entry on the list that stores all buffers ready + * to be dequeued to userspace + */ + enum vb2_buffer_state state; + unsigned int synced:1; + unsigned int prepared:1; + unsigned int copied_timestamp:1; + unsigned int skip_cache_sync_on_prepare:1; + unsigned int skip_cache_sync_on_finish:1; + + struct vb2_plane planes[VB2_MAX_PLANES]; + struct list_head queued_entry; + struct list_head done_entry; +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Counters for how often these buffer-related ops are + * called. Used to check for unbalanced ops. + */ + u32 cnt_mem_alloc; + u32 cnt_mem_put; + u32 cnt_mem_get_dmabuf; + u32 cnt_mem_get_userptr; + u32 cnt_mem_put_userptr; + u32 cnt_mem_prepare; + u32 cnt_mem_finish; + u32 cnt_mem_attach_dmabuf; + u32 cnt_mem_detach_dmabuf; + u32 cnt_mem_map_dmabuf; + u32 cnt_mem_unmap_dmabuf; + u32 cnt_mem_vaddr; + u32 cnt_mem_cookie; + u32 cnt_mem_num_users; + u32 cnt_mem_mmap; + + u32 cnt_buf_out_validate; + u32 cnt_buf_init; + u32 cnt_buf_prepare; + u32 cnt_buf_finish; + u32 cnt_buf_cleanup; + u32 cnt_buf_queue; + u32 cnt_buf_request_complete; + + /* This counts the number of calls to vb2_buffer_done() */ + u32 cnt_buf_done; +#endif +}; + +/** + * struct vb2_ops - driver-specific callbacks. + * + * These operations are not called from interrupt context except where + * mentioned specifically. + * + * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS() + * handlers before memory allocation. It can be called + * twice: if the original number of requested buffers + * could not be allocated, then it will be called a + * second time with the actually allocated number of + * buffers to verify if that is OK. 
+ * The driver should return the required number of buffers + * in \*num_buffers, the required number of planes per + * buffer in \*num_planes, the size of each plane should be + * set in the sizes\[\] array and optional per-plane + * allocator specific device in the alloc_devs\[\] array. + * When called from VIDIOC_REQBUFS(), \*num_planes == 0, + * the driver has to use the currently configured format to + * determine the plane sizes and \*num_buffers is the total + * number of buffers that are being allocated. When called + * from VIDIOC_CREATE_BUFS(), \*num_planes != 0 and it + * describes the requested number of planes and sizes\[\] + * contains the requested plane sizes. In this case + * \*num_buffers are being allocated additionally to + * the buffers already allocated. If either \*num_planes + * or the requested sizes are invalid callback must return %-EINVAL. + * @wait_prepare: release any locks taken while calling vb2 functions; + * it is called before an ioctl needs to wait for a new + * buffer to arrive; required to avoid a deadlock in + * blocking access type. + * @wait_finish: reacquire all locks released in the previous callback; + * required to continue operation after sleeping while + * waiting for a new buffer to arrive. + * @buf_out_validate: called when the output buffer is prepared or queued + * to a request; drivers can use this to validate + * userspace-provided information; this is required only + * for OUTPUT queues. + * @buf_init: called once after allocating a buffer (in MMAP case) + * or after acquiring a new USERPTR buffer; drivers may + * perform additional buffer-related initialization; + * initialization failure (return != 0) will prevent + * queue setup from completing successfully; optional. 
+ * @buf_prepare: called every time the buffer is queued from userspace + * and from the VIDIOC_PREPARE_BUF() ioctl; drivers may + * perform any initialization required before each + * hardware operation in this callback; drivers can + * access/modify the buffer here as it is still synced for + * the CPU; drivers that support VIDIOC_CREATE_BUFS() must + * also validate the buffer size; if an error is returned, + * the buffer will not be queued in driver; optional. + * @buf_finish: called before every dequeue of the buffer back to + * userspace; the buffer is synced for the CPU, so drivers + * can access/modify the buffer contents; drivers may + * perform any operations required before userspace + * accesses the buffer; optional. The buffer state can be + * one of the following: %DONE and %ERROR occur while + * streaming is in progress, and the %PREPARED state occurs + * when the queue has been canceled and all pending + * buffers are being returned to their default %DEQUEUED + * state. Typically you only have to do something if the + * state is %VB2_BUF_STATE_DONE, since in all other cases + * the buffer contents will be ignored anyway. + * @buf_cleanup: called once before the buffer is freed; drivers may + * perform any additional cleanup; optional. + * @prepare_streaming: called once to prepare for 'streaming' state; this is + * where validation can be done to verify everything is + * okay and streaming resources can be claimed. It is + * called when the VIDIOC_STREAMON ioctl is called. The + * actual streaming starts when @start_streaming is called. + * Optional. 
+ * @start_streaming: called once to enter 'streaming' state; the driver may + * receive buffers with @buf_queue callback + * before @start_streaming is called; the driver gets the + * number of already queued buffers in count parameter; + * driver can return an error if hardware fails, in that + * case all buffers that have been already given by + * the @buf_queue callback are to be returned by the driver + * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED. + * If you need a minimum number of buffers before you can + * start streaming, then set + * &vb2_queue->min_queued_buffers. If that is non-zero + * then @start_streaming won't be called until at least + * that many buffers have been queued up by userspace. + * @stop_streaming: called when 'streaming' state must be disabled; driver + * should stop any DMA transactions or wait until they + * finish and give back all buffers it got from &buf_queue + * callback by calling vb2_buffer_done() with either + * %VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR; may use + * vb2_wait_for_all_buffers() function + * @unprepare_streaming:called as counterpart to @prepare_streaming; any claimed + * streaming resources can be released here. It is + * called when the VIDIOC_STREAMOFF ioctls is called or + * when the streaming filehandle is closed. Optional. + * @buf_queue: passes buffer vb to the driver; driver may start + * hardware operation on this buffer; driver should give + * the buffer back by calling vb2_buffer_done() function; + * it is always called after calling VIDIOC_STREAMON() + * ioctl; might be called before @start_streaming callback + * if user pre-queued buffers before calling + * VIDIOC_STREAMON(). + * @buf_request_complete: a buffer that was never queued to the driver but is + * associated with a queued request was canceled. + * The driver will have to mark associated objects in the + * request as completed; required if requests are + * supported. 
+ */ +struct vb2_ops { + int (*queue_setup)(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]); + + void (*wait_prepare)(struct vb2_queue *q); + void (*wait_finish)(struct vb2_queue *q); + + int (*buf_out_validate)(struct vb2_buffer *vb); + int (*buf_init)(struct vb2_buffer *vb); + int (*buf_prepare)(struct vb2_buffer *vb); + void (*buf_finish)(struct vb2_buffer *vb); + void (*buf_cleanup)(struct vb2_buffer *vb); + + int (*prepare_streaming)(struct vb2_queue *q); + int (*start_streaming)(struct vb2_queue *q, unsigned int count); + void (*stop_streaming)(struct vb2_queue *q); + void (*unprepare_streaming)(struct vb2_queue *q); + + void (*buf_queue)(struct vb2_buffer *vb); + + void (*buf_request_complete)(struct vb2_buffer *vb); +}; + +/** + * struct vb2_buf_ops - driver-specific callbacks. + * + * @verify_planes_array: Verify that a given user space structure contains + * enough planes for the buffer. This is called + * for each dequeued buffer. + * @init_buffer: given a &vb2_buffer initialize the extra data after + * struct vb2_buffer. + * For V4L2 this is a &struct vb2_v4l2_buffer. + * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure. + * For V4L2 this is a &struct v4l2_buffer. + * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer. + * If the userspace structure is invalid, then this op + * will return an error. + * @copy_timestamp: copy the timestamp from a userspace structure to + * the &struct vb2_buffer. + */ +struct vb2_buf_ops { + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); + void (*init_buffer)(struct vb2_buffer *vb); + void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); + int (*fill_vb2_buffer)(struct vb2_buffer *vb, struct vb2_plane *planes); + void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb); +}; + +/** + * struct vb2_queue - a videobuf2 queue. 
+ * + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the types defined on &enum v4l2_buf_type. + * @io_modes: supported io methods (see &enum vb2_io_modes). + * @dev: device to use for the default allocation context if the driver + * doesn't fill in the @alloc_devs array. + * @dma_attrs: DMA attributes to use for the DMA. + * @bidirectional: when this flag is set the DMA direction for the buffers of + * this queue will be overridden with %DMA_BIDIRECTIONAL direction. + * This is useful in cases where the hardware (firmware) writes to + * a buffer which is mapped as read (%DMA_TO_DEVICE), or reads from + * buffer which is mapped for write (%DMA_FROM_DEVICE) in order + * to satisfy some internal hardware restrictions or adds a padding + * needed by the processing algorithm. In case the DMA mapping is + * not bidirectional but the hardware (firmware) trying to access + * the buffer (in the opposite direction) this could lead to an + * IOMMU protection faults. + * @fileio_read_once: report EOF after reading the first buffer + * @fileio_write_immediately: queue buffer after each write() call + * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver + * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF + * has not been called. This is a vb1 idiom that has been adopted + * also by vb2. + * @supports_requests: this queue supports the Request API. + * @requires_requests: this queue requires the Request API. If this is set to 1, + * then supports_requests must be set to 1 as well. + * @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first + * time this is called. Set to 0 when the queue is canceled. + * If this is 1, then you cannot queue buffers from a request. + * @uses_requests: requests are used for this queue. Set to 1 the first time + * a request is queued. Set to 0 when the queue is canceled. 
+ * If this is 1, then you cannot queue buffers directly. + * @allow_cache_hints: when set user-space can pass cache management hints in + * order to skip cache flush/invalidation on ->prepare() or/and + * ->finish(). + * @non_coherent_mem: when set queue will attempt to allocate buffers using + * non-coherent memory. + * @lock: pointer to a mutex that protects the &struct vb2_queue. The + * driver can set this to a mutex to let the v4l2 core serialize + * the queuing ioctls. If the driver wants to handle locking + * itself, then this should be set to NULL. This lock is not used + * by the videobuf2 core API. + * @owner: The filehandle that 'owns' the buffers, i.e. the filehandle + * that called reqbufs, create_buffers or started fileio. + * This field is not used by the videobuf2 core API, but it allows + * drivers to easily associate an owner filehandle with the queue. + * @ops: driver-specific callbacks + * @mem_ops: memory allocator specific callbacks + * @buf_ops: callbacks to deliver buffer information. + * between user-space and kernel-space. + * @drv_priv: driver private data. + * @subsystem_flags: Flags specific to the subsystem (V4L2/DVB/etc.). Not used + * by the vb2 core. + * @buf_struct_size: size of the driver-specific buffer structure; + * "0" indicates the driver doesn't want to use a custom buffer + * structure type. In that case a subsystem-specific struct + * will be used (in the case of V4L2 that is + * ``sizeof(struct vb2_v4l2_buffer)``). The first field of the + * driver-specific buffer structure must be the subsystem-specific + * struct (vb2_v4l2_buffer in the case of V4L2). + * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and + * ``V4L2_BUF_FLAG_TSTAMP_SRC_*`` + * @gfp_flags: additional gfp flags used when allocating the buffers. + * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32 + * to force the buffer allocation to a specific memory zone. 
+ * @min_queued_buffers: the minimum number of queued buffers needed before + * @start_streaming can be called. Used when a DMA engine + * cannot be started unless at least this number of buffers + * have been queued into the driver. + * VIDIOC_REQBUFS will ensure at least @min_queued_buffers + 1 + * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not + * modify the requested buffer count. + * @min_reqbufs_allocation: the minimum number of buffers to be allocated when + * calling VIDIOC_REQBUFS. Note that VIDIOC_CREATE_BUFS will *not* + * modify the requested buffer count and does not use this field. + * Drivers can set this if there has to be a certain number of + * buffers available for the hardware to work effectively. + * This allows calling VIDIOC_REQBUFS with a buffer count of 1 and + * it will be automatically adjusted to a workable buffer count. + * If set, then @min_reqbufs_allocation must be larger than + * @min_queued_buffers + 1. + * If this field is > 3, then it is highly recommended that the + * driver implements the V4L2_CID_MIN_BUFFERS_FOR_CAPTURE/OUTPUT + * control. + * @alloc_devs: &struct device memory type/allocator-specific per-plane device + */ +/* + * Private elements (won't appear at the uAPI book): + * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped + * @memory: current memory type used + * @dma_dir: DMA mapping direction. + * @bufs: videobuf2 buffer structures. If it is non-NULL then + * bufs_bitmap is also non-NULL. + * @bufs_bitmap: bitmap tracking whether each bufs[] entry is used + * @max_num_buffers: upper limit of number of allocated/used buffers. + * If set to 0 v4l2 core will change it VB2_MAX_FRAME + * for backward compatibility. + * @queued_list: list of buffers currently queued from userspace + * @queued_count: number of buffers queued and ready for streaming. 
+ * @owned_by_drv_count: number of buffers owned by the driver + * @done_list: list of buffers ready to be dequeued to userspace + * @done_lock: lock to protect done_list list + * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued + * @streaming: current streaming state + * @start_streaming_called: @start_streaming was called successfully and we + * started streaming. + * @error: a fatal error occurred on the queue + * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for + * buffers. Only set for capture queues if qbuf has not yet been + * called since poll() needs to return %EPOLLERR in that situation. + * @waiting_in_dqbuf: set by the core for the duration of a blocking DQBUF, when + * it has to wait for a buffer to become available with vb2_queue->lock + * released. Used to prevent destroying the queue by other threads. + * @is_multiplanar: set if buffer type is multiplanar + * @is_output: set if buffer type is output + * @is_busy: set if at least one buffer has been allocated at some time. + * @copy_timestamp: set if vb2-core should set timestamps + * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the + * last decoded buffer was already dequeued. Set for capture queues + * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued. + * @fileio: file io emulator internal data, used only if emulator is active + * @threadio: thread io internal data, used only if thread is active + * @name: queue name, used for logging purpose. Initialized automatically + * if left empty by drivers. 
+ */ +struct vb2_queue { + unsigned int type; + unsigned int io_modes; + struct device *dev; + unsigned long dma_attrs; + unsigned int bidirectional:1; + unsigned int fileio_read_once:1; + unsigned int fileio_write_immediately:1; + unsigned int allow_zero_bytesused:1; + unsigned int quirk_poll_must_check_waiting_for_buffers:1; + unsigned int supports_requests:1; + unsigned int requires_requests:1; + unsigned int uses_qbuf:1; + unsigned int uses_requests:1; + unsigned int allow_cache_hints:1; + unsigned int non_coherent_mem:1; + + struct mutex *lock; + void *owner; + + const struct vb2_ops *ops; + const struct vb2_mem_ops *mem_ops; + const struct vb2_buf_ops *buf_ops; + + void *drv_priv; + u32 subsystem_flags; + unsigned int buf_struct_size; + u32 timestamp_flags; + gfp_t gfp_flags; + u32 min_queued_buffers; + u32 min_reqbufs_allocation; + + struct device *alloc_devs[VB2_MAX_PLANES]; + + /* private: internal use only */ + struct mutex mmap_lock; + unsigned int memory; + enum dma_data_direction dma_dir; + struct vb2_buffer **bufs; + unsigned long *bufs_bitmap; + unsigned int max_num_buffers; + + struct list_head queued_list; + unsigned int queued_count; + + atomic_t owned_by_drv_count; + struct list_head done_list; + spinlock_t done_lock; + wait_queue_head_t done_wq; + + unsigned int streaming:1; + unsigned int start_streaming_called:1; + unsigned int error:1; + unsigned int waiting_for_buffers:1; + unsigned int waiting_in_dqbuf:1; + unsigned int is_multiplanar:1; + unsigned int is_output:1; + unsigned int is_busy:1; + unsigned int copy_timestamp:1; + unsigned int last_buffer_dequeued:1; + + struct vb2_fileio_data *fileio; + struct vb2_threadio_data *threadio; + + char name[32]; + +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Counters for how often these queue-related ops are + * called. Used to check for unbalanced ops. 
+ */ + u32 cnt_queue_setup; + u32 cnt_wait_prepare; + u32 cnt_wait_finish; + u32 cnt_prepare_streaming; + u32 cnt_start_streaming; + u32 cnt_stop_streaming; + u32 cnt_unprepare_streaming; +#endif +}; + +/** + * vb2_queue_allows_cache_hints() - Return true if the queue allows cache + * and memory consistency hints. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + */ +static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q) +{ + return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP; +} + +/** + * vb2_plane_vaddr() - Return a kernel virtual address of a given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the address is to be returned. + * + * This function returns a kernel virtual address of a given plane if + * such a mapping exist, NULL otherwise. + */ +void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); + +/** + * vb2_plane_cookie() - Return allocator specific cookie for the given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the cookie is to be returned. + * + * This function returns an allocator specific cookie for a given plane if + * available, NULL otherwise. The allocator should provide some simple static + * inline function, which would convert this cookie to the allocator specific + * type that can be used directly by the driver to access the buffer. This can + * be for example physical address, pointer to scatter list or IOMMU mapping. + */ +void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); + +/** + * vb2_buffer_done() - inform videobuf2 that an operation on a buffer + * is finished. + * @vb: pointer to &struct vb2_buffer to be used. + * @state: state of the buffer, as defined by &enum vb2_buffer_state. 
+ * Either %VB2_BUF_STATE_DONE if the operation finished + * successfully, %VB2_BUF_STATE_ERROR if the operation finished + * with an error or %VB2_BUF_STATE_QUEUED. + * + * This function should be called by the driver after a hardware operation on + * a buffer is finished and the buffer may be returned to userspace. The driver + * cannot use this buffer anymore until it is queued back to it by videobuf + * by the means of &vb2_ops->buf_queue callback. Only buffers previously queued + * to the driver by &vb2_ops->buf_queue can be passed to this function. + * + * While streaming a buffer can only be returned in state DONE or ERROR. + * The &vb2_ops->start_streaming op can also return them in case the DMA engine + * cannot be started for some reason. In that case the buffers should be + * returned with state QUEUED to put them back into the queue. + */ +void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); + +/** + * vb2_discard_done() - discard all buffers marked as DONE. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function is intended to be used with suspend/resume operations. It + * discards all 'done' buffers as they would be too old to be requested after + * resume. + * + * Drivers must stop the hardware and synchronize with interrupt handlers and/or + * delayed works before calling this function to make sure no buffer will be + * touched by the driver and/or hardware. + */ +void vb2_discard_done(struct vb2_queue *q); + +/** + * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function will wait until all buffers that have been given to the driver + * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It + * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair. + * It is intended to be called with all locks taken, for example from + * &vb2_ops->stop_streaming callback. 
+ */ +int vb2_wait_for_all_buffers(struct vb2_queue *q); + +/** + * vb2_core_querybuf() - query video buffer information. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer struct passed from userspace. + * + * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * The passed buffer should have been verified. + * + * This function fills the relevant information for the userspace. + * + * Return: returns zero on success; an error code otherwise. + */ +void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); + +/** + * vb2_core_reqbufs() - Initiate streaming. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. Currently, the only + * used flag is %V4L2_MEMORY_FLAG_NON_COHERENT. + * @count: requested buffer count. + * + * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) verifies streaming parameters passed from the userspace; + * #) sets up the queue; + * #) negotiates number of buffers and planes per buffer with the driver + * to be used during streaming; + * #) allocates internal buffer structures (&struct vb2_buffer), according to + * the agreed parameters; + * #) for MMAP memory type, allocates actual video memory, using the + * memory handling/allocation routines provided during queue initialization. + * + * If req->count is 0, all the memory will be freed instead. + * + * If the queue has been allocated previously by a previous vb2_core_reqbufs() + * call and the queue is not busy, memory will be reallocated. + * + * Return: returns zero on success; an error code otherwise. 
+ */ +int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count); + +/** + * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. + * @count: requested buffer count. + * @requested_planes: number of planes requested. + * @requested_sizes: array with the size of the planes. + * @first_index: index of the first created buffer, all allocated buffers have + * indices in the range [first_index..first_index+count-1] + * + * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * This function: + * + * #) verifies parameter sanity; + * #) calls the &vb2_ops->queue_setup queue operation; + * #) performs any necessary memory allocations. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count, + unsigned int requested_planes, + const unsigned int requested_sizes[], + unsigned int *first_index); + +/** + * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace + * to the kernel. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + * + * Videobuf2 core helper to implement VIDIOC_PREPARE_BUF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * The passed buffer should have been verified. + * + * This function calls vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed. 
+ * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); + +/** + * vb2_core_remove_bufs() - + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @start: first index of the range of buffers to remove. + * @count: number of buffers to remove. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count); + +/** + * vb2_core_qbuf() - Queue a buffer from userspace + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vb: pointer to struct &vb2_buffer. + * @pb: buffer structure passed from userspace to + * v4l2_ioctl_ops->vidioc_qbuf handler in driver + * @req: pointer to &struct media_request, may be NULL. + * + * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) If @req is non-NULL, then the buffer will be bound to this + * media request and it returns. The buffer will be prepared and + * queued to the driver (i.e. the next two steps) when the request + * itself is queued. + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; + * #) if streaming is on, queues the buffer in driver by the means of + * &vb2_ops->buf_queue callback for processing. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb, + struct media_request *req); + +/** + * vb2_core_dqbuf() - Dequeue a buffer to the userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @pindex: pointer to the buffer index. May be NULL + * @pb: buffer structure passed from userspace to + * v4l2_ioctl_ops->vidioc_dqbuf handler in driver. 
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no + * buffers ready for dequeuing are present. Normally the driver + * would be passing (file->f_flags & O_NONBLOCK) here. + * + * Videobuf2 core helper to implement VIDIOC_DQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * This function: + * + * #) calls buf_finish callback in the driver (if provided), in which + * driver can perform any additional operations that may be required before + * returning the buffer to userspace, such as cache sync, + * #) the buffer struct members are filled with relevant information for + * the userspace. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, + bool nonblocking); + +/** + * vb2_core_streamon() - Implements VB2 stream ON logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMON() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_streamon(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_streamoff() - Implements VB2 stream OFF logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMOFF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_expbuf() - Export a buffer as a file descriptor. 
+ * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @fd: pointer to the file descriptor associated with DMABUF + * (set by driver). + * @type: buffer type. + * @vb: pointer to struct &vb2_buffer. + * @plane: index of the plane to be exported, 0 for single plane queues + * @flags: file flags for newly created file, as defined at + * include/uapi/asm-generic/fcntl.h. + * Currently, the only used flag is %O_CLOEXEC. + * is supported, refer to manual of open syscall for more details. + * + * + * Videobuf2 core helper to implement VIDIOC_EXPBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, + struct vb2_buffer *vb, unsigned int plane, unsigned int flags); + +/** + * vb2_core_queue_init() - initialize a videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * This structure should be allocated in driver + * + * The &vb2_queue structure should be allocated by the driver. The driver is + * responsible of clearing it's content and setting initial values for some + * required entries before calling this function. + * + * .. note:: + * + * The following fields at @q should be set before calling this function: + * &vb2_queue->ops, &vb2_queue->mem_ops, &vb2_queue->type. + */ +int vb2_core_queue_init(struct vb2_queue *q); + +/** + * vb2_core_queue_release() - stop streaming, release the queue and free memory + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function stops streaming and performs necessary clean ups, including + * freeing video buffer memory. The driver is responsible for freeing + * the &struct vb2_queue itself. + */ +void vb2_core_queue_release(struct vb2_queue *q); + +/** + * vb2_queue_error() - signal a fatal error on the queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * + * Flag that a fatal unrecoverable error has occurred and wake up all processes + * waiting on the queue. Polling will now set %EPOLLERR and queuing and dequeuing + * buffers will return %-EIO. + * + * The error flag will be cleared when canceling the queue, either from + * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this + * function before starting the stream, otherwise the error flag will remain set + * until the queue is released when closing the device node. + */ +void vb2_queue_error(struct vb2_queue *q); + +/** + * vb2_mmap() - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * Should be called from mmap file operation handler of a driver. + * This function maps one plane of one of the available video buffers to + * userspace. To map whole video memory allocated on reqbufs, this function + * has to be called once per each plane per each buffer previously allocated. + * + * When the userspace application calls mmap, it passes to it an offset returned + * to it earlier by the means of &v4l2_ioctl_ops->vidioc_querybuf handler. + * That offset acts as a "cookie", which is then used to identify the plane + * to be mapped. + * + * This function finds a plane with a matching offset and a mapping is performed + * by the means of a provided memory operation. + * + * The return values from this function are intended to be directly returned + * from the mmap handler in driver. + */ +int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +/** + * vb2_get_unmapped_area - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @addr: memory address. + * @len: buffer size. + * @pgoff: page offset. + * @flags: memory flags. 
+ * + * This function is used in noMMU platforms to propose address mapping + * for a given buffer. It's intended to be used as a handler for the + * &file_operations->get_unmapped_area operation. + * + * This is called by the mmap() syscall routines will call this + * to get a proposed address for the mapping, when ``!CONFIG_MMU``. + */ +unsigned long vb2_get_unmapped_area(struct vb2_queue *q, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#endif + +/** + * vb2_core_poll() - implements poll syscall() logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: &struct file argument passed to the poll + * file operation handler. + * @wait: &poll_table wait argument passed to the poll + * file operation handler. + * + * This function implements poll file operation handler for a driver. + * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will + * be informed that the file descriptor of a video device is available for + * reading. + * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor + * will be reported as available for writing. + * + * The return values from this function are intended to be directly returned + * from poll handler in driver. + */ +__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, + poll_table *wait); + +/** + * vb2_read() - implements read() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @data: pointed to target userspace buffer + * @count: number of bytes to read + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) + */ +size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblock); +/** + * vb2_write() - implements write() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * @data: pointed to target userspace buffer + * @count: number of bytes to write + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) + */ +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, + loff_t *ppos, int nonblock); + +/** + * typedef vb2_thread_fnc - callback function for use with vb2_thread. + * + * @vb: pointer to struct &vb2_buffer. + * @priv: pointer to a private data. + * + * This is called whenever a buffer is dequeued in the thread. + */ +typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); + +/** + * vb2_thread_start() - start a thread for the given queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @fnc: &vb2_thread_fnc callback function. + * @priv: priv pointer passed to the callback function. + * @thread_name:the name of the thread. This will be prefixed with "vb2-". + * + * This starts a thread that will queue and dequeue until an error occurs + * or vb2_thread_stop() is called. + * + * .. attention:: + * + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailing list first. + */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, + const char *thread_name); + +/** + * vb2_thread_stop() - stop the thread for the given queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +int vb2_thread_stop(struct vb2_queue *q); + +/** + * vb2_is_streaming() - return streaming status of the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline bool vb2_is_streaming(struct vb2_queue *q) +{ + return q->streaming; +} + +/** + * vb2_fileio_is_active() - return true if fileio is active. + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * + * This returns true if read() or write() is used to stream the data + * as opposed to stream I/O. This is almost never an important distinction, + * except in rare cases. One such case is that using read() or write() to + * stream a format using %V4L2_FIELD_ALTERNATE is not allowed since there + * is no way you can pass the field information of each buffer to/from + * userspace. A driver that supports this field format should check for + * this in the &vb2_ops->queue_setup op and reject it if this function returns + * true. + */ +static inline bool vb2_fileio_is_active(struct vb2_queue *q) +{ + return q->fileio; +} + +/** + * vb2_get_num_buffers() - get the number of buffer in a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q) +{ + if (q->bufs_bitmap) + return bitmap_weight(q->bufs_bitmap, q->max_num_buffers); + + return 0; +} + +/** + * vb2_is_busy() - return busy status of the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function checks if queue has any buffers allocated. + */ +static inline bool vb2_is_busy(struct vb2_queue *q) +{ + return !!q->is_busy; +} + +/** + * vb2_get_drv_priv() - return driver private data associated with the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline void *vb2_get_drv_priv(struct vb2_queue *q) +{ + return q->drv_priv; +} + +/** + * vb2_set_plane_payload() - set bytesused for the plane @plane_no. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + * @size: payload in bytes. + */ +static inline void vb2_set_plane_payload(struct vb2_buffer *vb, + unsigned int plane_no, unsigned long size) +{ + /* + * size must never be larger than the buffer length, so + * warn and clamp to the buffer length if that's the case. 
+ */ + if (plane_no < vb->num_planes) { + if (WARN_ON_ONCE(size > vb->planes[plane_no].length)) + size = vb->planes[plane_no].length; + vb->planes[plane_no].bytesused = size; + } +} + +/** + * vb2_get_plane_payload() - get bytesused for the plane plane_no + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + */ +static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, + unsigned int plane_no) +{ + if (plane_no < vb->num_planes) + return vb->planes[plane_no].bytesused; + return 0; +} + +/** + * vb2_plane_size() - return plane size in bytes. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which size should be returned. + */ +static inline unsigned long +vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) +{ + if (plane_no < vb->num_planes) + return vb->planes[plane_no].length; + return 0; +} + +/** + * vb2_start_streaming_called() - return streaming status of driver. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline bool vb2_start_streaming_called(struct vb2_queue *q) +{ + return q->start_streaming_called; +} + +/** + * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) +{ + q->last_buffer_dequeued = false; +} + +/** + * vb2_get_buffer() - get a buffer from a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @index: buffer index + * + * This function obtains a buffer from a queue, by its index. + * Keep in mind that there is no refcounting involved in this + * operation, so the buffer lifetime should be taken into + * consideration. 
+ */ +static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, + unsigned int index) +{ + if (!q->bufs) + return NULL; + + if (index >= q->max_num_buffers) + return NULL; + + if (test_bit(index, q->bufs_bitmap)) + return q->bufs[index]; + return NULL; +} + +/* + * The following functions are not part of the vb2 core API, but are useful + * functions for videobuf2-*. + */ + +/** + * vb2_buffer_in_use() - return true if the buffer is in use and + * the queue cannot be freed (by the means of VIDIOC_REQBUFS(0)) call. + * + * @vb: buffer for which plane size should be returned. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); + +/** + * vb2_verify_memory_type() - Check whether the memory type and buffer type + * passed to a buffer operation are compatible with the queue. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory model, as defined by enum &vb2_memory. + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the types defined on enum &v4l2_buf_type. + */ +int vb2_verify_memory_type(struct vb2_queue *q, + enum vb2_memory memory, unsigned int type); + +/** + * vb2_request_object_is_buffer() - return true if the object is a buffer + * + * @obj: the request object. + */ +bool vb2_request_object_is_buffer(struct media_request_object *obj); + +/** + * vb2_request_buffer_cnt() - return the number of buffers in the request + * + * @req: the request. 
+ */ +unsigned int vb2_request_buffer_cnt(struct media_request *req); + +#endif /* _MEDIA_VIDEOBUF2_CORE_H */ diff --git a/6.18.0/include-overrides/media/videobuf2-dma-contig.h b/6.18.0/include-overrides/media/videobuf2-dma-contig.h new file mode 100644 index 0000000..5be313c --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-dma-contig.h @@ -0,0 +1,32 @@ +/* + * videobuf2-dma-contig.h - DMA contig memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _MEDIA_VIDEOBUF2_DMA_CONTIG_H +#define _MEDIA_VIDEOBUF2_DMA_CONTIG_H + +#include +#include + +static inline dma_addr_t +vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) +{ + dma_addr_t *addr = vb2_plane_cookie(vb, plane_no); + + return *addr; +} + +int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size); +static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { } + +extern const struct vb2_mem_ops vb2_dma_contig_memops; + +#endif diff --git a/6.18.0/include-overrides/media/videobuf2-dma-sg.h b/6.18.0/include-overrides/media/videobuf2-dma-sg.h new file mode 100644 index 0000000..f28fcb0 --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-dma-sg.h @@ -0,0 +1,26 @@ +/* + * videobuf2-dma-sg.h - DMA scatter/gather memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _MEDIA_VIDEOBUF2_DMA_SG_H +#define _MEDIA_VIDEOBUF2_DMA_SG_H + +#include + +static inline struct sg_table *vb2_dma_sg_plane_desc( + struct vb2_buffer *vb, unsigned int plane_no) +{ + return (struct sg_table *)vb2_plane_cookie(vb, plane_no); +} + +extern const struct vb2_mem_ops vb2_dma_sg_memops; + +#endif diff --git a/6.18.0/include-overrides/media/videobuf2-dvb.h b/6.18.0/include-overrides/media/videobuf2-dvb.h new file mode 100644 index 0000000..2d577b9 --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-dvb.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VIDEOBUF2_DVB_H_ +#define _VIDEOBUF2_DVB_H_ + +#include +#include +#include +#include +#include +#include + +/* We don't actually need to include media-device.h here */ +struct media_device; + +/* + * TODO: This header file should be replaced with videobuf2-core.h + * Currently, vb2_thread is not a stuff of videobuf2-core, + * since vb2_thread has many dependencies on videobuf2-v4l2. + */ + +struct vb2_dvb { + /* filling that the job of the driver */ + char *name; + struct dvb_frontend *frontend; + struct vb2_queue dvbq; + + /* vb2-dvb state info */ + struct mutex lock; + int nfeeds; + + /* vb2_dvb_(un)register manages this */ + struct dvb_demux demux; + struct dmxdev dmxdev; + struct dmx_frontend fe_hw; + struct dmx_frontend fe_mem; + struct dvb_net net; +}; + +struct vb2_dvb_frontend { + struct list_head felist; + int id; + struct vb2_dvb dvb; +}; + +struct vb2_dvb_frontends { + struct list_head felist; + struct mutex lock; + struct dvb_adapter adapter; + int active_fe_id; /* Indicates which frontend in the felist is in use */ + int gate; /* Frontend with gate control 0=!MFE,1=fe0,2=fe1 etc */ +}; + +int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, + struct module *module, + void *adapter_priv, + struct device *device, + struct media_device *mdev, + short *adapter_nr, + int mfe_shared); + +void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f); + +struct 
vb2_dvb_frontend *vb2_dvb_alloc_frontend(struct vb2_dvb_frontends *f, int id); +void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f); + +struct vb2_dvb_frontend *vb2_dvb_get_frontend(struct vb2_dvb_frontends *f, int id); +int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f, struct dvb_frontend *p); + +#endif /* _VIDEOBUF2_DVB_H_ */ diff --git a/6.18.0/include-overrides/media/videobuf2-memops.h b/6.18.0/include-overrides/media/videobuf2-memops.h new file mode 100644 index 0000000..4b5b84f --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-memops.h @@ -0,0 +1,41 @@ +/* + * videobuf2-memops.h - generic memory handling routines for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _MEDIA_VIDEOBUF2_MEMOPS_H +#define _MEDIA_VIDEOBUF2_MEMOPS_H + +#include +#include +#include + +/** + * struct vb2_vmarea_handler - common vma refcount tracking handler. + * + * @refcount: pointer to &refcount_t entry in the buffer. + * @put: callback to function that decreases buffer refcount. + * @arg: argument for @put callback. 
+ */ +struct vb2_vmarea_handler { + refcount_t *refcount; + void (*put)(void *arg); + void *arg; +}; + +extern const struct vm_operations_struct vb2_common_vm_ops; + +struct frame_vector *vb2_create_framevec(unsigned long start, + unsigned long length, + bool write); +void vb2_destroy_framevec(struct frame_vector *vec); + +#endif diff --git a/6.18.0/include-overrides/media/videobuf2-v4l2.h b/6.18.0/include-overrides/media/videobuf2-v4l2.h new file mode 100644 index 0000000..77ce823 --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-v4l2.h @@ -0,0 +1,392 @@ +/* + * videobuf2-v4l2.h - V4L2 driver helper framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_V4L2_H +#define _MEDIA_VIDEOBUF2_V4L2_H + +#include +#include + +#if VB2_MAX_FRAME != VIDEO_MAX_FRAME +#error VB2_MAX_FRAME != VIDEO_MAX_FRAME +#endif + +#if VB2_MAX_PLANES != VIDEO_MAX_PLANES +#error VB2_MAX_PLANES != VIDEO_MAX_PLANES +#endif + +struct video_device; + +/** + * struct vb2_v4l2_buffer - video buffer information for v4l2. + * + * @vb2_buf: embedded struct &vb2_buffer. + * @flags: buffer informational flags. + * @field: field order of the image in the buffer, as defined by + * &enum v4l2_field. + * @timecode: frame timecode. + * @sequence: sequence count of this frame. + * @request_fd: the request_fd associated with this buffer + * @is_held: if true, then this capture buffer was held + * @planes: plane information (userptr/fd, length, bytesused, data_offset). + * + * Should contain enough information to be able to cover all the fields + * of &struct v4l2_buffer at ``videodev2.h``. 
+ */ +struct vb2_v4l2_buffer { + struct vb2_buffer vb2_buf; + + __u32 flags; + __u32 field; + struct v4l2_timecode timecode; + __u32 sequence; + __s32 request_fd; + bool is_held; + struct vb2_plane planes[VB2_MAX_PLANES]; +}; + +/* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */ +#define VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 0) + +/* + * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer * + */ +#define to_vb2_v4l2_buffer(vb) \ + container_of(vb, struct vb2_v4l2_buffer, vb2_buf) + +/** + * vb2_find_buffer() - Find a buffer with given timestamp + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @timestamp: the timestamp to find. + * + * Returns the buffer with the given @timestamp, or NULL if not found. + */ +struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp); + +int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); + +/** + * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies + * the memory and type values. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @req: &struct v4l2_requestbuffers passed from userspace to + * &v4l2_ioctl_ops->vidioc_reqbufs handler in driver. + */ +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); + +/** + * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies + * the memory and type values. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @create: creation parameters, passed from userspace to + * &v4l2_ioctl_ops->vidioc_create_bufs handler in driver + */ +int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); + +/** + * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @mdev: pointer to &struct media_device, may be NULL. 
+ * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_prepare_buf ioctl handler + * of a driver. + * + * This function: + * + * #) verifies the passed buffer, + * #) calls &vb2_ops->buf_prepare callback in the driver (if provided), + * in which driver-specific buffer initialization can be performed. + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, + * then bind the prepared buffer to the request. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + */ +int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); + +/** + * vb2_qbuf() - Queue a buffer from userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @mdev: pointer to &struct media_device, may be NULL. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_qbuf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_qbuf handler of a driver. + * + * This function: + * + * #) verifies the passed buffer; + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, + * then bind the buffer to the request. + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; + * #) if streaming is on, queues the buffer in driver by the means of + * &vb2_ops->buf_queue callback for processing. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver. + */ +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); + +/** + * vb2_expbuf() - Export a buffer as a file descriptor + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * @eb: export buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_expbuf handler in driver + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_expbuf handler in driver. + */ +int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); + +/** + * vb2_dqbuf() - Dequeue a buffer to the userspace + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver + * @nonblocking: if true, this call will not sleep waiting for a buffer if no + * buffers ready for dequeuing are present. Normally the driver + * would be passing (&file->f_flags & %O_NONBLOCK) here + * + * Should be called from &v4l2_ioctl_ops->vidioc_dqbuf ioctl handler + * of a driver. + * + * This function: + * + * #) verifies the passed buffer; + * #) calls &vb2_ops->buf_finish callback in the driver (if provided), in which + * driver can perform any additional operations that may be required before + * returning the buffer to userspace, such as cache sync; + * #) the buffer struct members are filled with relevant information for + * the userspace. + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_dqbuf handler in driver. + */ +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); + +/** + * vb2_streamon - start streaming + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamon handler, + * as defined by &enum v4l2_buf_type. + * + * Should be called from &v4l2_ioctl_ops->vidioc_streamon handler of a driver. 
+ * + * This function: + * + * 1) verifies current state + * 2) passes any previously queued buffers to the driver and starts streaming + * + * The return values from this function are intended to be directly returned + * from &v4l2_ioctl_ops->vidioc_streamon handler in the driver. + */ +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); + +/** + * vb2_streamoff - stop streaming + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamoff handler + * + * Should be called from vidioc_streamoff handler of a driver. + * + * This function: + * + * #) verifies current state, + * #) stop streaming and dequeues any queued buffers, including those previously + * passed to the driver (after waiting for the driver to finish). + * + * This call can be used for pausing playback. + * The return values from this function are intended to be directly returned + * from vidioc_streamoff handler in the driver + */ +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); + +/** + * vb2_queue_init() - initialize a videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * The vb2_queue structure should be allocated by the driver. The driver is + * responsible of clearing it's content and setting initial values for some + * required entries before calling this function. + * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer + * to the struct vb2_queue description in include/media/videobuf2-core.h + * for more information. + */ +int __must_check vb2_queue_init(struct vb2_queue *q); + +/** + * vb2_queue_init_name() - initialize a videobuf2 queue with a name + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @name: the queue name + * + * This function initializes the vb2_queue exactly like vb2_queue_init(), + * and additionally sets the queue name. 
The queue name is used for logging + * purpose, and should uniquely identify the queue within the context of the + * device it belongs to. This is useful to attribute kernel log messages to the + * right queue for m2m devices or other devices that handle multiple queues. + */ +int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name); + +/** + * vb2_queue_release() - stop streaming, release the queue and free memory + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * + * This function stops streaming and performs necessary clean ups, including + * freeing video buffer memory. The driver is responsible for freeing + * the vb2_queue structure itself. + */ +void vb2_queue_release(struct vb2_queue *q); + +/** + * vb2_queue_change_type() - change the type of an inactive vb2_queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*) + * + * This function changes the type of the vb2_queue. This is only possible + * if the queue is not busy (i.e. no buffers have been allocated). + * + * vb2_queue_change_type() can be used to support multiple buffer types using + * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and + * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type() + * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus + * "lock" the buffer type until the buffers have been released. + */ +int vb2_queue_change_type(struct vb2_queue *q, unsigned int type); + +/** + * vb2_poll() - implements poll userspace operation + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: file argument passed to the poll file operation handler + * @wait: wait argument passed to the poll file operation handler + * + * This function implements poll file operation handler for a driver. 
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will + * be informed that the file descriptor of a video device is available for + * reading. + * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor + * will be reported as available for writing. + * + * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any + * pending events. + * + * The return values from this function are intended to be directly returned + * from poll handler in driver. + */ +__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); + +/* + * The following functions are not part of the vb2 core API, but are simple + * helper functions that you can use in your struct v4l2_file_operations, + * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock + * or video_device->lock is set, and they will set and test the queue owner + * (vb2_queue->owner) to check if the calling filehandle is permitted to do the + * queuing operation. + */ + +/** + * vb2_queue_is_busy() - check if the queue is busy + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: file through which the vb2 queue access is performed + * + * The queue is considered busy if it has an owner and the owner is not the + * @file. + * + * Queue ownership is acquired and checked by some of the v4l2_ioctl_ops helpers + * below. Drivers can also use this function directly when they need to + * open-code ioctl handlers, for instance to add additional checks between the + * queue ownership test and the call to the corresponding vb2 operation. 
+ */ +static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file) +{ + return q->owner && q->owner != file->private_data; +} + +/* struct v4l2_ioctl_ops helpers */ + +int vb2_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p); +int vb2_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p); +int vb2_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *p); +int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *p); +int vb2_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *p); + +/* struct v4l2_file_operations helpers */ + +int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); +int vb2_fop_release(struct file *file); +int _vb2_fop_release(struct file *file, struct mutex *lock); +ssize_t vb2_fop_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); +ssize_t vb2_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); +__poll_t vb2_fop_poll(struct file *file, poll_table *wait); +#ifndef CONFIG_MMU +unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); +#endif + +/** + * vb2_video_unregister_device - unregister the video device and release queue + * + * @vdev: pointer to &struct video_device + * + * If the driver uses vb2_fop_release()/_vb2_fop_release(), then it should use + * vb2_video_unregister_device() instead of video_unregister_device(). 
+ * + * This function will call video_unregister_device() and then release the + * vb2_queue if streaming is in progress. This will stop streaming and + * this will simplify the unbind sequence since after this call all subdevs + * will have stopped streaming as well. + */ +void vb2_video_unregister_device(struct video_device *vdev); + +/** + * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue + * + * @vq: pointer to &struct vb2_queue + * + * ..note:: only use if vq->lock is non-NULL. + */ +void vb2_ops_wait_prepare(struct vb2_queue *vq); + +/** + * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue + * + * @vq: pointer to &struct vb2_queue + * + * ..note:: only use if vq->lock is non-NULL. + */ +void vb2_ops_wait_finish(struct vb2_queue *vq); + +struct media_request; +int vb2_request_validate(struct media_request *req); +void vb2_request_queue(struct media_request *req); + +#endif /* _MEDIA_VIDEOBUF2_V4L2_H */ diff --git a/6.18.0/include-overrides/media/videobuf2-vmalloc.h b/6.18.0/include-overrides/media/videobuf2-vmalloc.h new file mode 100644 index 0000000..a63fe66 --- /dev/null +++ b/6.18.0/include-overrides/media/videobuf2-vmalloc.h @@ -0,0 +1,20 @@ +/* + * videobuf2-vmalloc.h - vmalloc memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _MEDIA_VIDEOBUF2_VMALLOC_H +#define _MEDIA_VIDEOBUF2_VMALLOC_H + +#include + +extern const struct vb2_mem_ops vb2_vmalloc_memops; + +#endif diff --git a/6.18.0/include-overrides/media/vsp1.h b/6.18.0/include-overrides/media/vsp1.h new file mode 100644 index 0000000..d9b91ff --- /dev/null +++ b/6.18.0/include-overrides/media/vsp1.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * vsp1.h -- R-Car VSP1 API + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ +#ifndef __MEDIA_VSP1_H__ +#define __MEDIA_VSP1_H__ + +#include +#include +#include + +struct device; +struct vsp1_dl_list; + +/* ----------------------------------------------------------------------------- + * VSP1 DU interface + */ + +int vsp1_du_init(struct device *dev); + +#define VSP1_DU_STATUS_COMPLETE BIT(0) +#define VSP1_DU_STATUS_WRITEBACK BIT(1) + +/** + * struct vsp1_du_lif_config - VSP LIF configuration + * @width: output frame width + * @height: output frame height + * @interlaced: true for interlaced pipelines + * @callback: frame completion callback function (optional). When a callback + * is provided, the VSP driver guarantees that it will be called once + * and only once for each vsp1_du_atomic_flush() call. 
+ * @callback_data: data to be passed to the frame completion callback + */ +struct vsp1_du_lif_config { + unsigned int width; + unsigned int height; + bool interlaced; + + void (*callback)(void *data, unsigned int status, u32 crc); + void *callback_data; +}; + +int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_lif_config *cfg); + +/** + * struct vsp1_du_atomic_config - VSP atomic configuration parameters + * @pixelformat: plane pixel format (V4L2 4CC) + * @pitch: line pitch in bytes for the first plane + * @mem: DMA memory address for each plane of the frame buffer + * @src: source rectangle in the frame buffer (integer coordinates) + * @dst: destination rectangle on the display (integer coordinates) + * @alpha: alpha value (0: fully transparent, 255: fully opaque) + * @zpos: Z position of the plane (from 0 to number of planes minus 1) + * @premult: true for premultiplied alpha + * @color_encoding: color encoding (valid for YUV formats only) + * @color_range: color range (valid for YUV formats only) + */ +struct vsp1_du_atomic_config { + u32 pixelformat; + unsigned int pitch; + dma_addr_t mem[3]; + struct v4l2_rect src; + struct v4l2_rect dst; + unsigned int alpha; + unsigned int zpos; + bool premult; + enum v4l2_ycbcr_encoding color_encoding; + enum v4l2_quantization color_range; +}; + +/** + * enum vsp1_du_crc_source - Source used for CRC calculation + * @VSP1_DU_CRC_NONE: CRC calculation disabled + * @VSP1_DU_CRC_PLANE: Perform CRC calculation on an input plane + * @VSP1_DU_CRC_OUTPUT: Perform CRC calculation on the composed output + */ +enum vsp1_du_crc_source { + VSP1_DU_CRC_NONE, + VSP1_DU_CRC_PLANE, + VSP1_DU_CRC_OUTPUT, +}; + +/** + * struct vsp1_du_crc_config - VSP CRC computation configuration parameters + * @source: source for CRC calculation + * @index: index of the CRC source plane (when source is set to plane) + */ +struct vsp1_du_crc_config { + enum vsp1_du_crc_source source; + unsigned int index; +}; + +/** + 
* struct vsp1_du_writeback_config - VSP writeback configuration parameters + * @pixelformat: plane pixel format (V4L2 4CC) + * @pitch: line pitch in bytes for the first plane + * @mem: DMA memory address for each plane of the frame buffer + */ +struct vsp1_du_writeback_config { + u32 pixelformat; + unsigned int pitch; + dma_addr_t mem[3]; +}; + +/** + * struct vsp1_du_atomic_pipe_config - VSP atomic pipe configuration parameters + * @crc: CRC computation configuration + * @writeback: writeback configuration + */ +struct vsp1_du_atomic_pipe_config { + struct vsp1_du_crc_config crc; + struct vsp1_du_writeback_config writeback; +}; + +void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index); +int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, + unsigned int rpf, + const struct vsp1_du_atomic_config *cfg); +void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_atomic_pipe_config *cfg); +int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt); +void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt); + +/* ----------------------------------------------------------------------------- + * VSP1 ISP interface + */ + +/** + * struct vsp1_isp_buffer_desc - Describe a buffer allocated by VSPX + * @size: Byte size of the buffer allocated by VSPX + * @cpu_addr: CPU-mapped address of a buffer allocated by VSPX + * @dma_addr: bus address of a buffer allocated by VSPX + */ +struct vsp1_isp_buffer_desc { + size_t size; + void *cpu_addr; + dma_addr_t dma_addr; +}; + +/** + * struct vsp1_isp_job_desc - Describe a VSPX buffer transfer request + * @config: ConfigDMA buffer descriptor + * @config.pairs: number of reg-value pairs in the ConfigDMA buffer + * @config.mem: bus address of the ConfigDMA buffer + * @img: RAW image buffer descriptor + * @img.fmt: RAW image format + * @img.mem: bus address of the RAW image buffer + * @dl: pointer to the display list populated by the VSPX driver in the + * 
vsp1_isp_job_prepare() function + * + * Describe a transfer request for the VSPX to perform on behalf of the ISP. + * The job descriptor contains an optional ConfigDMA buffer and one RAW image + * buffer. Set config.pairs to 0 if no ConfigDMA buffer should be transferred. + * The minimum number of config.pairs that can be written using ConfigDMA is 17. + * A number of pairs < 16 corrupts the output image. A number of pairs == 16 + * freezes the VSPX operation. If the ISP driver has to write less than 17 pairs + * it shall pad the buffer with writes directed to registers that have no effect + * or avoid using ConfigDMA at all for such small write sequences. + * + * The ISP driver shall pass an instance this type to the vsp1_isp_job_prepare() + * function that will populate the display list pointer @dl using the @config + * and @img descriptors. When the job has to be run on the VSPX, the descriptor + * shall be passed to vsp1_isp_job_run() which consumes the display list. + * + * Job descriptors not yet run shall be released with a call to + * vsp1_isp_job_release() when stopping the streaming in order to properly + * release the resources acquired by vsp1_isp_job_prepare(). + */ +struct vsp1_isp_job_desc { + struct { + unsigned int pairs; + dma_addr_t mem; + } config; + struct { + struct v4l2_pix_format_mplane fmt; + dma_addr_t mem; + } img; + struct vsp1_dl_list *dl; +}; + +/** + * struct vsp1_vspx_frame_end - VSPX frame end callback data + * @vspx_frame_end: Frame end callback. Called after a transfer job has been + * completed. 
If the job includes both a ConfigDMA and a + * RAW image, the callback is called after both have been + * transferred + * @frame_end_data: Frame end callback data, passed to vspx_frame_end + */ +struct vsp1_vspx_frame_end { + void (*vspx_frame_end)(void *data); + void *frame_end_data; +}; + +int vsp1_isp_init(struct device *dev); +struct device *vsp1_isp_get_bus_master(struct device *dev); +int vsp1_isp_alloc_buffer(struct device *dev, size_t size, + struct vsp1_isp_buffer_desc *buffer_desc); +void vsp1_isp_free_buffer(struct device *dev, + struct vsp1_isp_buffer_desc *buffer_desc); +int vsp1_isp_start_streaming(struct device *dev, + struct vsp1_vspx_frame_end *frame_end); +void vsp1_isp_stop_streaming(struct device *dev); +int vsp1_isp_job_prepare(struct device *dev, + struct vsp1_isp_job_desc *job); +int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job); +void vsp1_isp_job_release(struct device *dev, struct vsp1_isp_job_desc *job); + +#endif /* __MEDIA_VSP1_H__ */ diff --git a/dkms.conf b/dkms.conf index e0101f7..c203227 100644 --- a/dkms.conf +++ b/dkms.conf @@ -1,6 +1,6 @@ PACKAGE_NAME="intel-mipi-gmsl-dkms" PACKAGE_VERSION="#MODULE_VERSION#" -BUILD_EXCLUSIVE_KERNEL="^(6\.(1[2])\.)" +BUILD_EXCLUSIVE_KERNEL="^(6\.(1[278])\.)" VERSION_SUFFIX="${PACKAGE_VERSION}" KBASE=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build @@ -24,7 +24,7 @@ MAKE="make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX} make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/" -MAKE_MATCH[1]="^(6.1[2])" +MAKE_MATCH[1]="^(6.1[27])" CLEAN="make V=1 
KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && \ make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean clean && \ @@ -89,6 +89,71 @@ PATCH[25]="0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" PATCH_MATCH[24]="^(6.1[2])" PATCH_MATCH[25]="^(6.1[2])" + +j=25 + +# Linux non-upstreamed patches for Linux 6.17 to 6.18 +PATCH[$((++j))]="0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0053-upstream-Use-module-parameter-to-set-isys-freq.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0054-media-pci-Enable-ISYS-reset.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0060-media-pci-Set-the-correct-SOF-for-different-stream.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0061-media-pci-support-imx390-for-6.11.0-rc3.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0062-i2c-media-fix-cov-issue.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0063-media-add-ipu-acpi-module-to-linux-drivers.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch" +PATCH_MATCH[$j]="^(6.1[78])" 
+PATCH[$((++j))]="0065-media-pci-add-missing-if-for-PDATA.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0066-media-pci-refine-PDATA-related-config.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0068-upstream-Use-module-parameter-to-set-psys-freq.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0069-media-pci-add-IPU6-PSYS-driver.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch" +PATCH_MATCH[$j]="^(6.1[78])" +# Make IPU V4L2-CORE out-of-tree +PATCH[$((++j))]="0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch" +PATCH_MATCH[$j]="^(6.1[78])" +# Make IPU ISYS+PSYS out-of-tree +PATCH[$((++j))]="0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch" +PATCH_MATCH[$j]="^(6.1[78])" +PATCH[$((++j))]="0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" +PATCH_MATCH[$j]="^(6.1[78])" + i=0 BUILT_MODULE_NAME[$i]="videodev" diff --git a/include/uapi/linux/ipu-psys.h b/include/uapi/linux/ipu-psys.h new file mode 100644 index 0000000..30538e7 --- /dev/null +++ b/include/uapi/linux/ipu-psys.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2013 - 2020 Intel Corporation */ + +#ifndef _UAPI_IPU_PSYS_H +#define _UAPI_IPU_PSYS_H + +#ifdef __KERNEL__ +#include +#else 
+#include +#endif + +struct ipu_psys_capability { + uint32_t version; + uint8_t driver[20]; + uint32_t pg_count; + uint8_t dev_model[32]; + uint32_t reserved[17]; +} __attribute__ ((packed)); + +struct ipu_psys_event { + uint32_t type; /* IPU_PSYS_EVENT_TYPE_ */ + uint64_t user_token; + uint64_t issue_id; + uint32_t buffer_idx; + uint32_t error; + int32_t reserved[2]; +} __attribute__ ((packed)); + +#define IPU_PSYS_EVENT_TYPE_CMD_COMPLETE 1 +#define IPU_PSYS_EVENT_TYPE_BUFFER_COMPLETE 2 + +/** + * struct ipu_psys_buffer - for input/output terminals + * @len: total allocated size @ base address + * @userptr: user pointer + * @fd: DMA-BUF handle + * @data_offset:offset to valid data + * @bytes_used: amount of valid data including offset + * @flags: flags + */ +struct ipu_psys_buffer { + uint64_t len; + union { + int fd; + void __user *userptr; + uint64_t reserved; + } base; + uint32_t data_offset; + uint32_t bytes_used; + uint32_t flags; + uint32_t reserved[2]; +} __attribute__ ((packed)); + +#define IPU_BUFFER_FLAG_INPUT (1 << 0) +#define IPU_BUFFER_FLAG_OUTPUT (1 << 1) +#define IPU_BUFFER_FLAG_MAPPED (1 << 2) +#define IPU_BUFFER_FLAG_NO_FLUSH (1 << 3) +#define IPU_BUFFER_FLAG_DMA_HANDLE (1 << 4) +#define IPU_BUFFER_FLAG_USERPTR (1 << 5) + +#define IPU_PSYS_CMD_PRIORITY_HIGH 0 +#define IPU_PSYS_CMD_PRIORITY_MED 1 +#define IPU_PSYS_CMD_PRIORITY_LOW 2 +#define IPU_PSYS_CMD_PRIORITY_NUM 3 + +/** + * struct ipu_psys_command - processing command + * @issue_id: unique id for the command set by user + * @user_token: token of the command + * @priority: priority of the command + * @pg_manifest: userspace pointer to program group manifest + * @buffers: userspace pointers to array of psys dma buf structs + * @pg: process group DMA-BUF handle + * @pg_manifest_size: size of program group manifest + * @bufcount: number of buffers in buffers array + * @min_psys_freq: minimum psys frequency in MHz used for this cmd + * @frame_counter: counter of current frame synced between isys 
and psys + * @kernel_enable_bitmap: enable bits for each individual kernel + * @terminal_enable_bitmap: enable bits for each individual terminals + * @routing_enable_bitmap: enable bits for each individual routing + * @rbm: enable bits for routing + * + * Specifies a processing command with input and output buffers. + */ +struct ipu_psys_command { + uint64_t issue_id; + uint64_t user_token; + uint32_t priority; + void __user *pg_manifest; + struct ipu_psys_buffer __user *buffers; + int pg; + uint32_t pg_manifest_size; + uint32_t bufcount; + uint32_t min_psys_freq; + uint32_t frame_counter; + uint32_t kernel_enable_bitmap[4]; + uint32_t terminal_enable_bitmap[4]; + uint32_t routing_enable_bitmap[4]; + uint32_t rbm[5]; + uint32_t reserved[2]; +} __attribute__ ((packed)); + +struct ipu_psys_manifest { + uint32_t index; + uint32_t size; + void __user *manifest; + uint32_t reserved[5]; +} __attribute__ ((packed)); + +#define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability) +#define IPU_IOC_MAPBUF _IOWR('A', 2, int) +#define IPU_IOC_UNMAPBUF _IOWR('A', 3, int) +#define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer) +#define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer) +#define IPU_IOC_QCMD _IOWR('A', 6, struct ipu_psys_command) +#define IPU_IOC_DQEVENT _IOWR('A', 7, struct ipu_psys_event) +#define IPU_IOC_CMD_CANCEL _IOWR('A', 8, struct ipu_psys_command) +#define IPU_IOC_GET_MANIFEST _IOWR('A', 9, struct ipu_psys_manifest) + +#endif /* _UAPI_IPU_PSYS_H */ diff --git a/patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch b/patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch new file mode 100644 index 0000000..c6db101 --- /dev/null +++ b/patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch @@ -0,0 +1,34 @@ +From 10b4d928dfd933ad2fac935a52b638cfe07835f6 Mon Sep 17 00:00:00 2001 +From: Laurent Pinchart +Date: Fri, 16 Jan 2026 00:07:52 -0700 +Subject: [PATCH 50/76] media: ipu6: isys: Use + 
v4l2_ctrl_subdev_subscribe_event() + +The ipu6-isys driver uses v4l2_ctrl_subscribe_event() to handle control +event subscription on a subdev. While this works, it is the wrong API. +Use the subdev-specific v4l2_ctrl_subdev_subscribe_event() helper +instead. + +Signed-off-by: Laurent Pinchart +Signed-off-by: Sakari Ailus +Signed-off-by: Hans Verkuil +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +index 6030bd2..d1fece6 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +@@ -109,7 +109,7 @@ static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + case V4L2_EVENT_FRAME_SYNC: + return v4l2_event_subscribe(fh, sub, 10, NULL); + case V4L2_EVENT_CTRL: +- return v4l2_ctrl_subscribe_event(fh, sub); ++ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub); + default: + return -EINVAL; + } +-- +2.43.0 + diff --git a/patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch b/patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch new file mode 100644 index 0000000..f9d7fb3 --- /dev/null +++ b/patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch @@ -0,0 +1,32 @@ +From 9bbab9bf77365736c8040d70eceb1453bd24b1fc Mon Sep 17 00:00:00 2001 +From: Laurent Pinchart +Date: Fri, 16 Jan 2026 00:05:37 -0700 +Subject: [PATCH 51/76] media: ipu6: isys: Don't set V4L2_FL_USES_V4L2_FH + manually + +The V4L2_FL_USES_V4L2_FH flag is set by v4l2_fh_init(). It is not meant +to be set manually by drivers. Drop it from the ipu6-isys driver. 
+ +Signed-off-by: Laurent Pinchart +Reviewed-by: Bingbu Cao +Reviewed-by: Sakari Ailus +Signed-off-by: Hans Verkuil +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index 24a2ef9..f3f3bc0 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -1306,7 +1306,6 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) + __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); + av->meta_fmt = format_meta.fmt.meta; + +- set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); + video_set_drvdata(&av->vdev, av); + + ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); +-- +2.43.0 + diff --git a/patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch b/patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch new file mode 100644 index 0000000..5c77e96 --- /dev/null +++ b/patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch @@ -0,0 +1,35 @@ +From 284432c3b2dcc1e637446b41c5cafb49c49ccf99 Mon Sep 17 00:00:00 2001 +From: Sakari Ailus +Date: Fri, 16 Jan 2026 00:09:51 -0700 +Subject: [PATCH 52/76] media: ipu6: isys: Set embedded data type correctly for + metadata formats + +The IPU6 ISYS driver supported metadata formats but was missing correct +embedded data type in the receiver configuration. Add it now. 
+ +Signed-off-by: Sakari Ailus +Signed-off-by: Hans Verkuil +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c +index 0a06de5..463a0ad 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c +@@ -81,6 +81,12 @@ unsigned int ipu6_isys_mbus_code_to_mipi(u32 code) + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + return MIPI_CSI2_DT_RAW8; ++ case MEDIA_BUS_FMT_META_8: ++ case MEDIA_BUS_FMT_META_10: ++ case MEDIA_BUS_FMT_META_12: ++ case MEDIA_BUS_FMT_META_16: ++ case MEDIA_BUS_FMT_META_24: ++ return MIPI_CSI2_DT_EMBEDDED_8B; + default: + /* return unavailable MIPI data type - 0x3f */ + WARN_ON(1); +-- +2.43.0 + diff --git a/patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch b/patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch new file mode 100644 index 0000000..ac53fe6 --- /dev/null +++ b/patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch @@ -0,0 +1,45 @@ +From ed2570ea754b2a12c4d771be5bc352d0d17cf21e Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Fri, 16 Jan 2026 00:36:33 -0700 +Subject: [PATCH 53/76] upstream: Use module parameter to set isys freq + +Signed-off-by: Hongju Wang +Signed-off-by: Dongcheng Yan +Signed-off-by: zouxiaoh +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 1f4f20b..0311fe7 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -33,6 +33,10 @@ + #include "ipu6-platform-isys-csi2-reg.h" + #include "ipu6-platform-regs.h" + ++static unsigned int isys_freq_override; ++module_param(isys_freq_override, uint, 
0660); ++MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); ++ + #define IPU6_PCI_BAR 0 + + struct ipu6_cell_program { +@@ -388,6 +392,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + pdata->base = base; + pdata->ipdata = ipdata; + ++ /* Override the isys freq */ ++ if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && ++ isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { ++ ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; ++ dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", ++ isys_freq_override); ++ } ++ + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_ISYS_NAME); + if (IS_ERR(isys_adev)) { +-- +2.43.0 + diff --git a/patches/0054-media-pci-Enable-ISYS-reset.patch b/patches/0054-media-pci-Enable-ISYS-reset.patch new file mode 100644 index 0000000..57cb8f4 --- /dev/null +++ b/patches/0054-media-pci-Enable-ISYS-reset.patch @@ -0,0 +1,626 @@ +From 3dff0732eab9d5fb91567c0f58b1807d4c9ba3b5 Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Fri, 16 Jan 2026 00:37:54 -0700 +Subject: [PATCH 54/76] media: pci: Enable ISYS reset + +--- + 6.18.0/drivers/media/pci/intel/ipu6/Kconfig | 9 + + .../media/pci/intel/ipu6/ipu6-fw-isys.h | 3 + + .../media/pci/intel/ipu6/ipu6-isys-queue.c | 251 ++++++++++++++++++ + .../media/pci/intel/ipu6/ipu6-isys-queue.h | 3 + + .../media/pci/intel/ipu6/ipu6-isys-video.c | 82 +++++- + .../media/pci/intel/ipu6/ipu6-isys-video.h | 8 + + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 10 + + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 5 + + 8 files changed, 370 insertions(+), 1 deletion(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Kconfig b/6.18.0/drivers/media/pci/intel/ipu6/Kconfig +index 1129e2b..fca2d78 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/Kconfig ++++ b/6.18.0/drivers/media/pci/intel/ipu6/Kconfig +@@ -16,3 +16,12 @@ config VIDEO_INTEL_IPU6 + + To compile this driver, say Y here! It contains 2 modules - + intel_ipu6 and intel_ipu6_isys. 
++ ++config VIDEO_INTEL_IPU6_ISYS_RESET ++ depends on VIDEO_INTEL_IPU6 ++ bool "Enable isys reset" ++ default n ++ help ++ Support ISYS reset in IPU6. ++ ++ To compile this driver, say Y here! +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +index b60f020..e6ee161 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +@@ -66,6 +66,9 @@ struct ipu6_isys; + #define IPU6_ISYS_OPEN_RETRY 2000 + #define IPU6_ISYS_CLOSE_RETRY 2000 + #define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#define IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET msecs_to_jiffies(200) ++#endif + + enum ipu6_fw_isys_resp_type { + IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +index aa2cf72..cfeea15 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +@@ -10,6 +10,9 @@ + #include + #include + #include ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#include ++#endif + + #include + #include +@@ -21,6 +24,9 @@ + #include "ipu6-fw-isys.h" + #include "ipu6-isys.h" + #include "ipu6-isys-video.h" ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#include "ipu6-cpd.h" ++#endif + + static int ipu6_isys_buf_init(struct vb2_buffer *vb) + { +@@ -218,6 +224,16 @@ static int buffer_list_get(struct ipu6_isys_stream *stream, + ib = list_last_entry(&aq->incoming, + struct ipu6_isys_buffer, head); + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); ++ ++ if (av->skipframe) { ++ atomic_set(&ib->skipframe_flag, 1); ++ av->skipframe--; ++ } else { ++ atomic_set(&ib->skipframe_flag, 0); ++ } ++#endif + dev_dbg(dev, "buffer: %s: buffer %u\n", + ipu6_isys_queue_to_video(aq)->vdev.name, + 
ipu6_isys_buffer_to_vb2_buffer(ib)->index); +@@ -372,6 +388,19 @@ static void buf_queue(struct vb2_buffer *vb) + list_add(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_lock(&av->isys->reset_mutex); ++ while (av->isys->in_reset) { ++ mutex_unlock(&av->isys->reset_mutex); ++ dev_dbg(dev, "buffer: %s: wait for reset\n", av->vdev.name); ++ usleep_range(10000, 11000); ++ mutex_lock(&av->isys->reset_mutex); ++ } ++ mutex_unlock(&av->isys->reset_mutex); ++ /* ip may be cleared in ipu reset */ ++ stream = av->stream; ++ ++#endif + if (!media_pipe || !vb->vb2_queue->start_streaming_called) { + dev_dbg(dev, "media pipeline is not ready for %s\n", + av->vdev.name); +@@ -603,6 +632,9 @@ static int start_streaming(struct vb2_queue *q, unsigned int count) + + out: + mutex_unlock(&stream->mutex); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ av->start_streaming = 1; ++#endif + + return 0; + +@@ -624,12 +656,210 @@ out_return_buffers: + return ret; + } + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++static void reset_stop_streaming(struct ipu6_isys_video *av) ++{ ++ struct ipu6_isys_queue *aq = &av->aq; ++ struct ipu6_isys_stream *stream = av->stream; ++ ++ mutex_lock(&stream->mutex); ++ ++ ipu6_isys_update_stream_watermark(av, false); ++ ++ mutex_lock(&av->isys->stream_mutex); ++ if (stream->nr_streaming == stream->nr_queues && stream->streaming) ++ ipu6_isys_video_set_streaming(av, 0, NULL); ++ mutex_unlock(&av->isys->stream_mutex); ++ ++ stream->nr_streaming--; ++ list_del(&aq->node); ++ stream->streaming = 0; ++ mutex_unlock(&stream->mutex); ++ ++ ipu6_isys_stream_cleanup(av); ++ ++ return_buffers(aq, VB2_BUF_STATE_ERROR); ++ ++ ipu6_isys_fw_close(av->isys); ++} ++ ++static int reset_start_streaming(struct ipu6_isys_video *av) ++{ ++ struct ipu6_isys_queue *aq = &av->aq; ++ struct device *dev = &av->isys->adev->auxdev.dev; ++ unsigned long flags; ++ int ret; ++ ++ dev_dbg(dev, "%s: start 
streaming\n", av->vdev.name); ++ ++ spin_lock_irqsave(&aq->lock, flags); ++ while (!list_empty(&aq->active)) { ++ struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, ++ struct ipu6_isys_buffer, head); ++ ++ list_del(&ib->head); ++ list_add_tail(&ib->head, &aq->incoming); ++ } ++ spin_unlock_irqrestore(&aq->lock, flags); ++ ++ av->skipframe = 1; ++ ret = start_streaming(&aq->vbq, 0); ++ if (ret) { ++ dev_dbg(dev, ++ "%s: start streaming failed in reset ! set av->start_streaming = 0.\n", ++ av->vdev.name); ++ av->start_streaming = 0; ++ } else ++ av->start_streaming = 1; ++ ++ return ret; ++} ++ ++static int ipu_isys_reset(struct ipu6_isys_video *self_av, ++ struct ipu6_isys_stream *self_stream) ++{ ++ struct ipu6_isys *isys = self_av->isys; ++ struct ipu6_bus_device *adev = isys->adev; ++ struct ipu6_device *isp = adev->isp; ++ struct ipu6_isys_video *av = NULL; ++ struct ipu6_isys_stream *stream = NULL; ++ struct device *dev = &adev->auxdev.dev; ++ int ret, i, j; ++ int has_streaming = 0; ++ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = ++ &isys->pdata->ipdata->csi2; ++ ++ ++ mutex_lock(&isys->reset_mutex); ++ if (isys->in_reset) { ++ mutex_unlock(&isys->reset_mutex); ++ return 0; ++ } ++ isys->in_reset = true; ++ ++ while (isys->in_stop_streaming) { ++ dev_dbg(dev, "isys reset: %s: wait for stop\n", ++ self_av->vdev.name); ++ mutex_unlock(&isys->reset_mutex); ++ usleep_range(10000, 11000); ++ mutex_lock(&isys->reset_mutex); ++ } ++ ++ mutex_unlock(&isys->reset_mutex); ++ ++ for (i = 0; i < csi2_pdata->nports; i++) { ++ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { ++ av = &isys->csi2[i].av[j]; ++ if (av == self_av) ++ continue; ++ ++ stream = av->stream; ++ if (!stream || stream == self_stream) ++ continue; ++ ++ if (!stream->streaming && !stream->nr_streaming) ++ continue; ++ ++ av->reset = true; ++ has_streaming = true; ++ reset_stop_streaming(av); ++ } ++ } ++ ++ if (!has_streaming) ++ goto end_of_reset; ++ ++ dev_dbg(dev, "ipu reset, power 
cycle\n"); ++ /* bus_pm_runtime_suspend() */ ++ /* isys_runtime_pm_suspend() */ ++ dev->bus->pm->runtime_suspend(dev); ++ ++ /* ipu_suspend */ ++ isp->pdev->driver->driver.pm->runtime_suspend(&isp->pdev->dev); ++ ++ /* ipu_runtime_resume */ ++ isp->pdev->driver->driver.pm->runtime_resume(&isp->pdev->dev); ++ ++ /* bus_pm_runtime_resume() */ ++ /* isys_runtime_pm_resume() */ ++ dev->bus->pm->runtime_resume(dev); ++ ++ ipu6_cleanup_fw_msg_bufs(isys); ++ ++ if (isys->fwcom) { ++ dev_err(dev, "Clearing old context\n"); ++ ipu6_fw_isys_cleanup(isys); ++ } ++ ++ ret = ipu6_fw_isys_init(av->isys, ++ isys->pdata->ipdata->num_parallel_streams); ++ if (ret < 0) ++ dev_err(dev, "ipu fw isys init failed\n"); ++ ++ dev_dbg(dev, "restart streams\n"); ++ ++ for (j = 0; j < csi2_pdata->nports; j++) { ++ for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { ++ av = &isys->csi2[j].av[i]; ++ if (!av->reset) ++ continue; ++ ++ av->reset = false; ++ reset_start_streaming(av); ++ } ++ } ++ ++end_of_reset: ++ mutex_lock(&isys->reset_mutex); ++ isys->in_reset = false; ++ mutex_unlock(&isys->reset_mutex); ++ dev_dbg(dev, "reset done\n"); ++ ++ return 0; ++} ++ ++#endif + static void stop_streaming(struct vb2_queue *q) + { + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); + struct ipu6_isys_stream *stream = av->stream; + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ struct device *dev = &av->isys->adev->auxdev.dev; ++ ++ dev_dbg(dev, "stop: %s: enter\n", av->vdev.name); ++ ++ mutex_lock(&av->isys->reset_mutex); ++ while (av->isys->in_reset || av->isys->in_stop_streaming) { ++ mutex_unlock(&av->isys->reset_mutex); ++ dev_dbg(dev, "stop: %s: wait for in_reset = %d\n", ++ av->vdev.name, av->isys->in_reset); ++ dev_dbg(dev, "stop: %s: wait for in_stop = %d\n", ++ av->vdev.name, av->isys->in_stop_streaming); ++ usleep_range(10000, 11000); ++ mutex_lock(&av->isys->reset_mutex); ++ } ++ ++ if (!av->start_streaming) { ++ 
mutex_unlock(&av->isys->reset_mutex); ++ return; ++ } ++ ++ av->isys->in_stop_streaming = true; ++ mutex_unlock(&av->isys->reset_mutex); ++ ++ stream = av->stream; ++ if (!stream) { ++ dev_err(dev, "stop: %s: ip cleard!\n", av->vdev.name); ++ return_buffers(aq, VB2_BUF_STATE_ERROR); ++ mutex_lock(&av->isys->reset_mutex); ++ av->isys->in_stop_streaming = false; ++ mutex_unlock(&av->isys->reset_mutex); ++ return; ++ } ++#endif ++ + mutex_lock(&stream->mutex); + + ipu6_isys_update_stream_watermark(av, false); +@@ -649,6 +879,22 @@ static void stop_streaming(struct vb2_queue *q) + return_buffers(aq, VB2_BUF_STATE_ERROR); + + ipu6_isys_fw_close(av->isys); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ ++ av->start_streaming = 0; ++ mutex_lock(&av->isys->reset_mutex); ++ av->isys->in_stop_streaming = false; ++ mutex_unlock(&av->isys->reset_mutex); ++ ++ if (av->isys->need_reset) { ++ if (!stream->nr_streaming) ++ ipu_isys_reset(av, stream); ++ else ++ av->isys->need_reset = 0; ++ } ++ ++ dev_dbg(dev, "stop: %s: exit\n", av->vdev.name); ++#endif + } + + static unsigned int +@@ -729,6 +975,11 @@ static void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) + * to the userspace when it is de-queued + */ + atomic_set(&ib->str2mmio_flag, 0); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ } else if (atomic_read(&ib->skipframe_flag)) { ++ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); ++ atomic_set(&ib->skipframe_flag, 0); ++#endif + } else { + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h +index 844dfda..183bb51 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h +@@ -29,6 +29,9 @@ struct ipu6_isys_queue { + struct ipu6_isys_buffer { + struct list_head head; + atomic_t str2mmio_flag; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ atomic_t skipframe_flag; ++#endif + }; + + struct 
ipu6_isys_video_buffer { +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index f3f3bc0..a864614 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -17,6 +17,9 @@ + #include + #include + #include ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++#include ++#endif + + #include + #include +@@ -112,6 +115,26 @@ static int video_open(struct file *file) + return v4l2_fh_open(file); + } + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++static int video_release(struct file *file) ++{ ++ struct ipu6_isys_video *av = video_drvdata(file); ++ ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "release: %s: enter\n", av->vdev.name); ++ mutex_lock(&av->isys->reset_mutex); ++ while (av->isys->in_reset) { ++ mutex_unlock(&av->isys->reset_mutex); ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "release: %s: wait for reset\n", av->vdev.name); ++ usleep_range(10000, 11000); ++ mutex_lock(&av->isys->reset_mutex); ++ } ++ mutex_unlock(&av->isys->reset_mutex); ++ return vb2_fop_release(file); ++} ++ ++#endif + const struct ipu6_isys_pixelformat * + ipu6_isys_get_isys_format(u32 pixelformat, u32 type) + { +@@ -595,7 +618,11 @@ static int start_stream_firmware(struct ipu6_isys_video *av, + } + + reinit_completion(&stream->stream_start_completion); +- ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; ++ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, ++ send_type); ++#else + if (bl) { + send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + ipu6_fw_isys_dump_frame_buff_set(dev, buf, +@@ -608,6 +635,7 @@ static int start_stream_firmware(struct ipu6_isys_video *av, + ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, + send_type); + } ++#endif + + if (ret < 0) { + dev_err(dev, "can't start streaming (%d)\n", ret); +@@ -626,7 +654,25 @@ static int start_stream_firmware(struct 
ipu6_isys_video *av, + ret = -EIO; + goto out_stream_close; + } ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ if (bl) { ++ dev_dbg(dev, "start stream: capture\n"); ++ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; ++ ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); ++ ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, ++ buf, msg->dma_addr, ++ sizeof(*buf), ++ send_type); ++ ++ if (ret < 0) { ++ dev_err(dev, "can't queue buffers (%d)\n", ret); ++ goto out_stream_close; ++ } ++ } ++ ++#else + dev_dbg(dev, "start stream: complete\n"); ++#endif + + return 0; + +@@ -673,7 +719,11 @@ static void stop_streaming_firmware(struct ipu6_isys_video *av) + } + + tout = wait_for_completion_timeout(&stream->stream_stop_completion, ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); ++#else + IPU6_FW_CALL_TIMEOUT_JIFFIES); ++#endif + if (!tout) + dev_warn(dev, "stream stop time out\n"); + else if (stream->error) +@@ -698,7 +748,11 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) + } + + tout = wait_for_completion_timeout(&stream->stream_close_completion, ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); ++#else + IPU6_FW_CALL_TIMEOUT_JIFFIES); ++#endif + if (!tout) + dev_warn(dev, "stream close time out\n"); + else if (stream->error) +@@ -706,6 +760,12 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) + else + dev_dbg(dev, "close stream: complete\n"); + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ stream->last_sequence = atomic_read(&stream->sequence); ++ dev_dbg(dev, "IPU_ISYS_RESET: ip->last_sequence = %d\n", ++ stream->last_sequence); ++ ++#endif + put_stream_opened(av); + } + +@@ -720,7 +780,18 @@ int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, + return -EINVAL; + + stream->nr_queues = nr_queues; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ if (av->isys->in_reset) { ++ atomic_set(&stream->sequence, 
stream->last_sequence); ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "atomic_set : stream->last_sequence = %d\n", ++ stream->last_sequence); ++ } else { ++ atomic_set(&stream->sequence, 0); ++ } ++# else + atomic_set(&stream->sequence, 0); ++#endif + + stream->seq_index = 0; + memset(stream->seq, 0, sizeof(stream->seq)); +@@ -1088,7 +1159,11 @@ static const struct v4l2_file_operations isys_fops = { + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .open = video_open, ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ .release = video_release, ++#else + .release = vb2_fop_release, ++#endif + }; + + int ipu6_isys_fw_open(struct ipu6_isys *isys) +@@ -1305,6 +1380,11 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) + av->pix_fmt = format.fmt.pix; + __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); + av->meta_fmt = format_meta.fmt.meta; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ av->reset = false; ++ av->skipframe = 0; ++ av->start_streaming = 0; ++#endif + + video_set_drvdata(&av->vdev, av); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h +index 1dd36f2..3d234ae 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h +@@ -45,6 +45,9 @@ struct ipu6_isys_stream { + struct mutex mutex; + struct media_entity *source_entity; + atomic_t sequence; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ int last_sequence; ++#endif + unsigned int seq_index; + struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; + int stream_source; +@@ -95,6 +98,11 @@ struct ipu6_isys_video { + u32 source_stream; + u8 vc; + u8 dt; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ unsigned int reset; ++ unsigned int skipframe; ++ unsigned int start_streaming; ++#endif + }; + + #define ipu6_isys_queue_to_video(__aq) \ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 
fc0ec0a..3a7ffca 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1084,6 +1084,10 @@ static int isys_probe(struct auxiliary_device *auxdev, + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_init(&isys->reset_mutex); ++ isys->in_reset = false; ++#endif + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); +@@ -1125,6 +1129,9 @@ static int isys_probe(struct auxiliary_device *auxdev, + if (ret) + goto free_fw_msg_bufs; + ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_destroy(&isys->reset_mutex); ++#endif + ipu6_mmu_hw_cleanup(adev->mmu); + + return 0; +@@ -1179,6 +1186,9 @@ static void isys_remove(struct auxiliary_device *auxdev) + isys_iwake_watermark_cleanup(isys); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ mutex_destroy(&isys->reset_mutex); ++#endif + } + + struct fwmsg { +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 0e2c8b7..d55d804 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -163,6 +163,11 @@ struct ipu6_isys { + struct list_head framebuflist_fw; + struct v4l2_async_notifier notifier; + struct isys_iwake_watermark iwake_watermark; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ struct mutex reset_mutex; ++ bool in_reset; ++ bool in_stop_streaming; ++#endif + }; + + struct isys_fw_msgs { +-- +2.43.0 + diff --git a/patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch b/patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch new file mode 100644 index 0000000..b9b0348 --- /dev/null +++ b/patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch @@ -0,0 +1,41 @@ +From e3f271c417e5b2dedf209f3f3c4f8e0329afc849 Mon Sep 17 00:00:00 2001 +From: Chen Meng J +Date: 
Fri, 16 Jan 2026 00:39:01 -0700 +Subject: [PATCH 55/76] media: intel-ipu6: use vc1 dma for MTL and ARL + +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h | 4 ++-- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +index e6ee161..545180c 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +@@ -54,8 +54,8 @@ struct ipu6_isys; + /* Max number of planes for frame formats supported by the FW */ + #define IPU6_PIN_PLANES_MAX 4 + +-#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 +-#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 ++#define IPU6_FW_ISYS_SENSOR_TYPE_START 1 ++#define IPU6_FW_ISYS_SENSOR_TYPE_END 10 + #define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 + #define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 + /* +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index a864614..148c790 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -529,7 +529,7 @@ static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, + output_pin->csi_be_soc_pixel_remapping = + CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; + +- output_pin->snoopable = true; ++ output_pin->snoopable = false; + output_pin->error_handling_enable = false; + output_pin->sensor_type = isys->sensor_type++; + if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) +-- +2.43.0 + diff --git a/patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch b/patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch new file mode 100644 index 0000000..8b66f0e --- /dev/null +++ b/patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch @@ -0,0 +1,42 @@ +From 
b7fd94a334f08f78630b3a0c2f809cd6eaf028b7 Mon Sep 17 00:00:00 2001 +From: linya14x +Date: Fri, 16 Jan 2026 00:40:06 -0700 +Subject: [PATCH 56/76] media: ipu: Dma sync at buffer_prepare callback as DMA + is non-coherent + +Test Platform: +MTL ARL LT6911UXE + +Signed-off-by: linya14x +Signed-off-by: Bingbu Cao +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +index cfeea15..aaad622 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +@@ -84,7 +84,9 @@ static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, + static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) + { + struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); ++ struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); + struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); ++ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); + struct device *dev = &av->isys->adev->auxdev.dev; + u32 bytesperline = ipu6_isys_get_bytes_per_line(av); + u32 height = ipu6_isys_get_frame_height(av); +@@ -98,6 +100,9 @@ static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) + + vb2_set_plane_payload(vb, 0, bytesperline * height); + ++ /* assume IPU is not DMA coherent */ ++ ipu6_dma_sync_sgtable(isys->adev, sg); ++ + return 0; + } + +-- +2.43.0 + diff --git a/patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch b/patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch new file mode 100644 index 0000000..dc147b1 --- /dev/null +++ b/patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch @@ -0,0 +1,1432 @@ +From 6f9b0a17e32276defa3e9099f9fee25da6532695 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Fri, 16 Jan 2026 00:42:06 -0700 +Subject: [PATCH 57/76] media: intel-ipu6: 
Support IPU6 ISYS FW trace dump for + upstream driver + +[media]pci]Support IPU6 ISYS FW trace dump for upstream drive +Signed-off-by: hepengpx +--- + 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 3 +- + .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 2 + + .../media/pci/intel/ipu6/ipu6-fw-com.c | 12 +- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 58 +- + .../media/pci/intel/ipu6/ipu6-platform-regs.h | 40 + + .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 896 ++++++++++++++++++ + .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 147 +++ + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 48 + + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 5 + + 9 files changed, 1206 insertions(+), 5 deletions(-) + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +index a821b0a..e1e123b 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +@@ -6,7 +6,8 @@ intel-ipu6-y := ipu6.o \ + ipu6-mmu.o \ + ipu6-buttress.o \ + ipu6-cpd.o \ +- ipu6-fw-com.o ++ ipu6-fw-com.o \ ++ ipu6-trace.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +index a08c546..16be932 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +@@ -16,6 +16,7 @@ struct firmware; + struct pci_dev; + + struct ipu6_buttress_ctrl; ++struct ipu_subsystem_trace_config; + + struct ipu6_bus_device { + struct auxiliary_device auxdev; +@@ -25,6 +26,7 @@ struct ipu6_bus_device { + void *pdata; + struct ipu6_mmu *mmu; + struct ipu6_device *isp; ++ struct ipu_subsystem_trace_config *trace_cfg; + const struct ipu6_buttress_ctrl *ctrl; + const struct firmware *fw; + struct sg_table fw_sgt; +diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +index 40d8ce1..9f530e0 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +@@ -14,6 +14,7 @@ + #include "ipu6-bus.h" + #include "ipu6-dma.h" + #include "ipu6-fw-com.h" ++#include "ipu6-trace.h" + + /* + * FWCOM layer is a shared resource between FW and driver. It consist +@@ -265,8 +266,15 @@ EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, "INTEL_IPU6"); + + int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) + { +- /* write magic pattern to disable the tunit trace */ +- writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); ++ dma_addr_t trace_buff = TUNIT_MAGIC_PATTERN; ++ ++ /* ++ * Write trace buff start addr to tunit cfg reg. ++ * This feature is used to enable tunit trace in secure mode. ++ */ ++ ipu_trace_buffer_dma_handle(&ctx->adev->auxdev.dev, &trace_buff); ++ writel(trace_buff, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); ++ + /* Check if SP is in valid state */ + if (!ctx->cell_ready(ctx->adev)) + return -EIO; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 3a7ffca..1ab8dd8 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -41,6 +41,7 @@ + #include "ipu6-platform-buttress-regs.h" + #include "ipu6-platform-isys-csi2-reg.h" + #include "ipu6-platform-regs.h" ++#include "ipu6-trace.h" + + #define IPU6_BUTTRESS_FABIC_CONTROL 0x68 + #define GDA_ENABLE_IWAKE_INDEX 2 +@@ -348,9 +349,11 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) + u32 status_sw, status_csi; + u32 ctrl0_status, ctrl0_clear; + ++ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); ++ dev_dbg(&adev->auxdev.dev, "%s exit, no power", __func__); + return IRQ_NONE; + } + +@@ -399,6 +402,7 
@@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) + + spin_unlock(&isys->power_lock); + ++ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); + return IRQ_HANDLED; + } + +@@ -1040,6 +1044,46 @@ void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data) + spin_unlock_irqrestore(&isys->listlock, flags); + } + ++struct ipu_trace_block isys_trace_blocks[] = { ++ { ++ .offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE, ++ .type = IPU_TRACE_BLOCK_TUN, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_SP_EVQ_BASE, ++ .type = IPU_TRACE_BLOCK_TM, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_SP_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_ISL_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_MMU_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ /* Note! this covers all 8 blocks */ ++ .offset = IPU_TRACE_REG_CSI2_TM_BASE(0), ++ .type = IPU_TRACE_CSI2, ++ }, ++ { ++ /* Note! this covers all 11 blocks */ ++ .offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0), ++ .type = IPU_TRACE_SIG2CIOS, ++ }, ++ { ++ .offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, ++ .type = IPU_TRACE_TIMER_RST, ++ }, ++ { ++ .type = IPU_TRACE_BLOCK_END, ++ } ++}; ++ + static int isys_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) + { +@@ -1110,6 +1154,8 @@ static int isys_probe(struct auxiliary_device *auxdev, + goto remove_shared_buffer; + } + ++ ipu_trace_init(adev->isp, isys->pdata->base, &auxdev->dev, ++ isys_trace_blocks); + cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + ret = alloc_fw_msg_bufs(isys, 20); +@@ -1139,6 +1185,7 @@ static int isys_probe(struct auxiliary_device *auxdev, + free_fw_msg_bufs: + free_fw_msg_bufs(isys); + out_remove_pkg_dir_shared_buffer: ++ ipu_trace_uninit(&auxdev->dev); + cpu_latency_qos_remove_request(&isys->pm_qos); + if (!isp->secure_mode) + ipu6_cpd_free_pkg_dir(adev); +@@ -1184,6 +1231,7 @@ static void isys_remove(struct auxiliary_device 
*auxdev) + mutex_destroy(&isys->streams[i].mutex); + + isys_iwake_watermark_cleanup(isys); ++ ipu_trace_uninit(&auxdev->dev); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); + #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET +@@ -1236,12 +1284,17 @@ static int isys_isr_one(struct ipu6_bus_device *adev) + u32 index; + u64 ts; + +- if (!isys->fwcom) ++ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); ++ if (!isys->fwcom) { ++ dev_dbg(&adev->auxdev.dev, "%s exit, fwcom is null", __func__); + return 1; ++ } + + resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); +- if (!resp) ++ if (!resp) { ++ dev_dbg(&adev->auxdev.dev, "%s exit, resp is null", __func__); + return 1; ++ } + + ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; + +@@ -1349,6 +1402,7 @@ static int isys_isr_one(struct ipu6_bus_device *adev) + ipu6_isys_put_stream(stream); + leave: + ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); ++ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); + return 0; + } + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h +index b883385..a1744d2 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h +@@ -176,4 +176,44 @@ enum nci_ab_access_mode { + #define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) + #define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) + ++/* Trace unit related register definitions */ ++#define TRACE_REG_MAX_ISYS_OFFSET 0xfffff ++#define TRACE_REG_MAX_PSYS_OFFSET 0xfffff ++#define IPU_ISYS_OFFSET IPU6_ISYS_DMEM_OFFSET ++#define IPU_PSYS_OFFSET IPU6_PSYS_DMEM_OFFSET ++/* ISYS trace unit registers */ ++/* Trace unit base offset */ ++#define IPU_TRACE_REG_IS_TRACE_UNIT_BASE 0x27d000 ++/* Trace monitors */ ++#define IPU_TRACE_REG_IS_SP_EVQ_BASE 0x211000 ++/* GPC blocks */ ++#define IPU_TRACE_REG_IS_SP_GPC_BASE 0x210800 ++#define IPU_TRACE_REG_IS_ISL_GPC_BASE 
0x2b0a00 ++#define IPU_TRACE_REG_IS_MMU_GPC_BASE 0x2e0f00 ++/* each CSI2 port has a dedicated trace monitor, index 0..7 */ ++#define IPU_TRACE_REG_CSI2_TM_BASE(port) (0x220400 + 0x1000 * (port)) ++ ++/* Trace timers */ ++#define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x27c410 ++#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) ++ ++/* SIG2CIO */ ++#define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port) \ ++ (0x220e00 + (port) * 0x1000) ++ ++/* PSYS trace unit registers */ ++/* Trace unit base offset */ ++#define IPU_TRACE_REG_PS_TRACE_UNIT_BASE 0x1b4000 ++/* Trace monitors */ ++#define IPU_TRACE_REG_PS_SPC_EVQ_BASE 0x119000 ++#define IPU_TRACE_REG_PS_SPP0_EVQ_BASE 0x139000 ++ ++/* GPC blocks */ ++#define IPU_TRACE_REG_PS_SPC_GPC_BASE 0x118800 ++#define IPU_TRACE_REG_PS_SPP0_GPC_BASE 0x138800 ++#define IPU_TRACE_REG_PS_MMU_GPC_BASE 0x1b1b00 ++ ++/* Trace timers */ ++#define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x1aa714 ++ + #endif /* IPU6_PLATFORM_REGS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +new file mode 100644 +index 0000000..adcee20 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +@@ -0,0 +1,896 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2014 - 2025 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ipu6.h" ++#include "ipu6-bus.h" ++#include "ipu6-platform-regs.h" ++#include "ipu6-dma.h" ++#include "ipu6-trace.h" ++ ++/* ++ * enabling ipu trace need a 96 MB buffer. ++ */ ++static bool ipu_trace_enable; ++module_param(ipu_trace_enable, bool, 0660); ++MODULE_PARM_DESC(ipu_trace_enable, "IPU trace enable"); ++ ++struct trace_register_range { ++ u32 start; ++ u32 end; ++}; ++ ++#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 96) ++#define TRACE_MESSAGE_SIZE 16 ++/* ++ * It looks that the trace unit sometimes writes outside the given buffer. 
++ * To avoid memory corruption one extra page is reserved at the end ++ * of the buffer. Read also the extra area since it may contain valid data. ++ */ ++#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE ++#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD ++#define MAX_TRACE_REGISTERS 200 ++#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) ++#define TRACE_CONF_DATA_MAX_LEN (1024 * 4) ++#define WPT_TRACE_CONF_DATA_MAX_LEN (1024 * 64) ++ ++struct config_value { ++ u32 reg; ++ u32 value; ++}; ++ ++struct ipu_trace_buffer { ++ dma_addr_t dma_handle; ++ void *memory_buffer; ++}; ++ ++struct ipu_subsystem_wptrace_config { ++ bool open; ++ char *conf_dump_buffer; ++ int size_conf_dump; ++ unsigned int fill_level; ++ struct config_value config[MAX_TRACE_REGISTERS]; ++}; ++ ++struct ipu_subsystem_trace_config { ++ u32 offset; ++ void __iomem *base; ++ struct ipu_trace_buffer memory; /* ring buffer */ ++ struct device *dev; ++ struct ipu_trace_block *blocks; ++ unsigned int fill_level; /* Nbr of regs in config table below */ ++ bool running; ++ /* Cached register values */ ++ struct config_value config[MAX_TRACE_REGISTERS]; ++ /* watchpoint trace info */ ++ struct ipu_subsystem_wptrace_config wpt; ++}; ++ ++struct ipu_trace { ++ struct mutex lock; /* Protect ipu trace operations */ ++ bool open; ++ char *conf_dump_buffer; ++ int size_conf_dump; ++ ++ struct ipu_subsystem_trace_config isys; ++ struct ipu_subsystem_trace_config psys; ++}; ++ ++static void __ipu_trace_restore(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu6_device *isp = adev->isp; ++ struct ipu_trace *trace = isp->trace; ++ struct config_value *config; ++ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; ++ struct ipu_trace_block *blocks; ++ u32 mapped_trace_buffer; ++ void __iomem *addr = NULL; ++ int i; ++ ++ if (trace->open) { ++ dev_info(dev, "Trace control file open. 
Skipping update\n"); ++ return; ++ } ++ ++ if (!sys) ++ return; ++ ++ /* leave if no trace configuration for this subsystem */ ++ if (sys->fill_level == 0) ++ return; ++ ++ /* Find trace unit base address */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_TUN) { ++ addr = sys->base + blocks->offset; ++ break; ++ } ++ blocks++; ++ } ++ if (!addr) ++ return; ++ ++ if (!sys->memory.memory_buffer) { ++ sys->memory.memory_buffer = ++ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, ++ &sys->memory.dma_handle, ++ GFP_KERNEL, 0); ++ } ++ ++ if (!sys->memory.memory_buffer) { ++ dev_err(dev, "No memory for tracing. Trace unit disabled\n"); ++ return; ++ } ++ ++ config = sys->config; ++ mapped_trace_buffer = sys->memory.dma_handle; ++ ++ /* ring buffer base */ ++ writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); ++ ++ /* ring buffer end */ ++ writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - ++ TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); ++ ++ /* Infobits for ddr trace */ ++ writel(IPU6_INFO_REQUEST_DESTINATION_PRIMARY, ++ addr + TRACE_REG_TUN_DDR_INFO_VAL); ++ ++ /* Find trace timer reset address */ ++ addr = NULL; ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_TIMER_RST) { ++ addr = sys->base + blocks->offset; ++ break; ++ } ++ blocks++; ++ } ++ if (!addr) { ++ dev_err(dev, "No trace reset addr\n"); ++ return; ++ } ++ ++ /* Remove reset from trace timers */ ++ writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); ++ ++ /* Register config received from userspace */ ++ for (i = 0; i < sys->fill_level; i++) { ++ dev_dbg(dev, ++ "Trace restore: reg 0x%08x, value 0x%08x\n", ++ config[i].reg, config[i].value); ++ writel(config[i].value, isp->base + config[i].reg); ++ } ++ ++ /* Register wpt config received from userspace, and only psys has wpt */ ++ config = sys->wpt.config; ++ for (i = 0; i < 
sys->wpt.fill_level; i++) { ++ dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n", ++ config[i].reg, config[i].value); ++ writel(config[i].value, isp->base + config[i].reg); ++ } ++ sys->running = true; ++} ++ ++void ipu_trace_restore(struct device *dev) ++{ ++ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; ++ ++ if (!trace) ++ return; ++ ++ mutex_lock(&trace->lock); ++ __ipu_trace_restore(dev); ++ mutex_unlock(&trace->lock); ++} ++EXPORT_SYMBOL_GPL(ipu_trace_restore); ++ ++static void __ipu_trace_stop(struct device *dev) ++{ ++ struct ipu_subsystem_trace_config *sys = ++ to_ipu6_bus_device(dev)->trace_cfg; ++ struct ipu_trace_block *blocks; ++ ++ if (!sys) ++ return; ++ ++ if (!sys->running) ++ return; ++ sys->running = false; ++ ++ /* Turn off all the gpc blocks */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_GPC) { ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_GPC_OVERALL_ENABLE); ++ } ++ blocks++; ++ } ++ ++ /* Turn off all the trace monitors */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_TM) { ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TM_TRACE_ENABLE_NPK); ++ ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TM_TRACE_ENABLE_DDR); ++ } ++ blocks++; ++ } ++ ++ /* Turn off trace units */ ++ blocks = sys->blocks; ++ while (blocks->type != IPU_TRACE_BLOCK_END) { ++ if (blocks->type == IPU_TRACE_BLOCK_TUN) { ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TUN_DDR_ENABLE); ++ writel(0, sys->base + blocks->offset + ++ TRACE_REG_TUN_NPK_ENABLE); ++ } ++ blocks++; ++ } ++} ++ ++void ipu_trace_stop(struct device *dev) ++{ ++ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; ++ ++ if (!trace) ++ return; ++ ++ mutex_lock(&trace->lock); ++ __ipu_trace_stop(dev); ++ mutex_unlock(&trace->lock); ++} ++EXPORT_SYMBOL_GPL(ipu_trace_stop); ++ ++static int 
update_register_cache(struct ipu6_device *isp, u32 reg, u32 value) ++{ ++ struct ipu_trace *dctrl = isp->trace; ++ struct ipu_subsystem_trace_config *sys; ++ int rval = -EINVAL; ++ ++ if (dctrl->isys.offset == dctrl->psys.offset) { ++ /* For the IPU with uniform address space */ ++ if (reg >= IPU_ISYS_OFFSET && ++ reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) ++ sys = &dctrl->isys; ++ else if (reg >= IPU_PSYS_OFFSET && ++ reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) ++ sys = &dctrl->psys; ++ else ++ goto error; ++ } else { ++ if (dctrl->isys.offset && ++ reg >= dctrl->isys.offset && ++ reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) ++ sys = &dctrl->isys; ++ else if (dctrl->psys.offset && ++ reg >= dctrl->psys.offset && ++ reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) ++ sys = &dctrl->psys; ++ else ++ goto error; ++ } ++ ++ if (sys->fill_level < MAX_TRACE_REGISTERS) { ++ dev_dbg(sys->dev, ++ "Trace reg addr 0x%08x value 0x%08x\n", reg, value); ++ sys->config[sys->fill_level].reg = reg; ++ sys->config[sys->fill_level].value = value; ++ sys->fill_level++; ++ } else { ++ rval = -ENOMEM; ++ goto error; ++ } ++ return 0; ++error: ++ dev_info(&isp->pdev->dev, ++ "Trace register address 0x%08x ignored as invalid register\n", ++ reg); ++ return rval; ++} ++ ++static void traceconf_dump(struct ipu6_device *isp) ++{ ++ struct ipu_subsystem_trace_config *sys[2] = { ++ &isp->trace->isys, ++ &isp->trace->psys ++ }; ++ int i, j, rem_size; ++ char *out; ++ ++ isp->trace->size_conf_dump = 0; ++ out = isp->trace->conf_dump_buffer; ++ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; ++ ++ for (j = 0; j < ARRAY_SIZE(sys); j++) { ++ for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { ++ int bytes_print; ++ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", ++ sys[j]->config[i].reg, ++ sys[j]->config[i].value); ++ ++ bytes_print = min(n, rem_size - 1); ++ rem_size -= bytes_print; ++ out += bytes_print; ++ } ++ } ++ isp->trace->size_conf_dump = out - 
isp->trace->conf_dump_buffer; ++} ++ ++static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) ++{ ++ if (!sys || !sys->memory.memory_buffer) ++ return; ++ ++ memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_OVERREAD); ++ ++ dma_sync_single_for_device(sys->dev, ++ sys->memory.dma_handle, ++ MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); ++} ++ ++static int traceconf_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ struct ipu6_device *isp; ++ ++ if (!inode->i_private) ++ return -EACCES; ++ ++ isp = inode->i_private; ++ ++ ret = mutex_trylock(&isp->trace->lock); ++ if (!ret) ++ return -EBUSY; ++ ++ if (isp->trace->open) { ++ mutex_unlock(&isp->trace->lock); ++ return -EBUSY; ++ } ++ ++ file->private_data = isp; ++ isp->trace->open = 1; ++ if (file->f_mode & FMODE_WRITE) { ++ /* TBD: Allocate temp buffer for processing. ++ * Push validated buffer to active config ++ */ ++ ++ /* Forget old config if opened for write */ ++ isp->trace->isys.fill_level = 0; ++ isp->trace->psys.fill_level = 0; ++ isp->trace->psys.wpt.fill_level = 0; ++ } ++ ++ if (file->f_mode & FMODE_READ) { ++ isp->trace->conf_dump_buffer = ++ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); ++ if (!isp->trace->conf_dump_buffer) { ++ isp->trace->open = 0; ++ mutex_unlock(&isp->trace->lock); ++ return -ENOMEM; ++ } ++ traceconf_dump(isp); ++ } ++ mutex_unlock(&isp->trace->lock); ++ return 0; ++} ++ ++static ssize_t traceconf_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu6_device *isp = file->private_data; ++ ++ return simple_read_from_buffer(buf, len, ppos, ++ isp->trace->conf_dump_buffer, ++ isp->trace->size_conf_dump); ++} ++ ++static ssize_t traceconf_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ int i; ++ struct ipu6_device *isp = file->private_data; ++ ssize_t bytes = 0; ++ char *ipu_trace_buffer = NULL; ++ size_t buffer_size = 0; ++ u32 
ipu_trace_number = 0; ++ struct config_value *cfg_buffer = NULL; ++ ++ if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) || ++ (len < sizeof(ipu_trace_number))) { ++ dev_info(&isp->pdev->dev, ++ "length is error, len:%ld, loff:%lld\n", ++ len, *ppos); ++ return -EINVAL; ++ } ++ ++ ipu_trace_buffer = vzalloc(len); ++ if (!ipu_trace_buffer) ++ return -ENOMEM; ++ ++ bytes = copy_from_user(ipu_trace_buffer, buf, len); ++ if (bytes != 0) { ++ vfree(ipu_trace_buffer); ++ return -EFAULT; ++ } ++ ++ memcpy(&ipu_trace_number, ipu_trace_buffer, sizeof(u32)); ++ buffer_size = ipu_trace_number * sizeof(struct config_value); ++ if ((buffer_size + sizeof(ipu_trace_number)) != len) { ++ dev_info(&isp->pdev->dev, ++ "File size is not right, len:%ld, buffer_size:%zu\n", ++ len, buffer_size); ++ vfree(ipu_trace_buffer); ++ return -EFAULT; ++ } ++ ++ mutex_lock(&isp->trace->lock); ++ cfg_buffer = (struct config_value *)(ipu_trace_buffer + sizeof(u32)); ++ for (i = 0; i < ipu_trace_number; i++) { ++ update_register_cache(isp, cfg_buffer[i].reg, ++ cfg_buffer[i].value); ++ } ++ mutex_unlock(&isp->trace->lock); ++ vfree(ipu_trace_buffer); ++ ++ return len; ++} ++ ++static int traceconf_release(struct inode *inode, struct file *file) ++{ ++ struct ipu6_device *isp = file->private_data; ++ struct device *psys_dev = isp->psys ? &isp->psys->auxdev.dev : NULL; ++ struct device *isys_dev = isp->isys ? &isp->isys->auxdev.dev : NULL; ++ int isys_pm_rval = -EINVAL; ++ int psys_pm_rval = -EINVAL; ++ ++ /* ++ * Turn devices on outside trace->lock mutex. PM transition may ++ * cause call to function which tries to take the same lock. ++ * Also do this before trace->open is set back to 0 to avoid ++ * double restore (one here and one in pm transition). We can't ++ * rely purely on the restore done by pm call backs since trace ++ * configuration can occur in any phase compared to other activity. 
++ */ ++ ++ if (file->f_mode & FMODE_WRITE) { ++ if (isys_dev) ++ isys_pm_rval = pm_runtime_resume_and_get(isys_dev); ++ if (isys_pm_rval >= 0 && psys_dev) ++ psys_pm_rval = pm_runtime_resume_and_get(psys_dev); ++ } ++ ++ mutex_lock(&isp->trace->lock); ++ isp->trace->open = 0; ++ vfree(isp->trace->conf_dump_buffer); ++ isp->trace->conf_dump_buffer = NULL; ++ ++ /* Update new cfg to HW */ ++ if (isys_pm_rval >= 0) { ++ __ipu_trace_stop(isys_dev); ++ clear_trace_buffer(isp->isys->trace_cfg); ++ __ipu_trace_restore(isys_dev); ++ } ++ ++ if (psys_pm_rval >= 0) { ++ __ipu_trace_stop(psys_dev); ++ clear_trace_buffer(isp->psys->trace_cfg); ++ __ipu_trace_restore(psys_dev); ++ } ++ mutex_unlock(&isp->trace->lock); ++ ++ if (psys_pm_rval >= 0) ++ pm_runtime_put(psys_dev); ++ if (isys_pm_rval >= 0) ++ pm_runtime_put(isys_dev); ++ ++ return 0; ++} ++ ++static const struct file_operations ipu_traceconf_fops = { ++ .owner = THIS_MODULE, ++ .open = traceconf_open, ++ .release = traceconf_release, ++ .read = traceconf_read, ++ .write = traceconf_write, ++ .llseek = noop_llseek, ++}; ++ ++static void wptraceconf_dump(struct ipu6_device *isp) ++{ ++ struct ipu_subsystem_wptrace_config *sys = &isp->trace->psys.wpt; ++ int i, rem_size; ++ char *out; ++ ++ sys->size_conf_dump = 0; ++ out = sys->conf_dump_buffer; ++ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; ++ ++ for (i = 0; i < sys->fill_level && rem_size > 0; i++) { ++ int bytes_print; ++ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", ++ sys->config[i].reg, ++ sys->config[i].value); ++ ++ bytes_print = min(n, rem_size - 1); ++ rem_size -= bytes_print; ++ out += bytes_print; ++ } ++ sys->size_conf_dump = out - sys->conf_dump_buffer; ++} ++ ++static int wptraceconf_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ struct ipu6_device *isp; ++ ++ if (!inode->i_private) ++ return -EACCES; ++ ++ isp = inode->i_private; ++ ret = mutex_trylock(&isp->trace->lock); ++ if (!ret) ++ return -EBUSY; ++ ++ if 
(isp->trace->psys.wpt.open) { ++ mutex_unlock(&isp->trace->lock); ++ return -EBUSY; ++ } ++ ++ file->private_data = isp; ++ if (file->f_mode & FMODE_WRITE) { ++ /* TBD: Allocate temp buffer for processing. ++ * Push validated buffer to active config ++ */ ++ /* Forget old config if opened for write */ ++ isp->trace->psys.wpt.fill_level = 0; ++ } ++ ++ if (file->f_mode & FMODE_READ) { ++ isp->trace->psys.wpt.conf_dump_buffer = ++ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); ++ if (!isp->trace->psys.wpt.conf_dump_buffer) { ++ mutex_unlock(&isp->trace->lock); ++ return -ENOMEM; ++ } ++ wptraceconf_dump(isp); ++ } ++ mutex_unlock(&isp->trace->lock); ++ return 0; ++} ++ ++static ssize_t wptraceconf_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu6_device *isp = file->private_data; ++ ++ return simple_read_from_buffer(buf, len, ppos, ++ isp->trace->psys.wpt.conf_dump_buffer, ++ isp->trace->psys.wpt.size_conf_dump); ++} ++ ++static ssize_t wptraceconf_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ int i; ++ struct ipu6_device *isp = file->private_data; ++ ssize_t bytes = 0; ++ char *wpt_info_buffer = NULL; ++ size_t buffer_size = 0; ++ u32 wp_node_number = 0; ++ struct config_value *wpt_buffer = NULL; ++ struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt; ++ ++ if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) || ++ (len < sizeof(wp_node_number))) { ++ dev_info(&isp->pdev->dev, ++ "length is error, len:%ld, loff:%lld\n", ++ len, *ppos); ++ return -EINVAL; ++ } ++ ++ wpt_info_buffer = vzalloc(len); ++ if (!wpt_info_buffer) ++ return -ENOMEM; ++ ++ bytes = copy_from_user(wpt_info_buffer, buf, len); ++ if (bytes != 0) { ++ vfree(wpt_info_buffer); ++ return -EFAULT; ++ } ++ ++ memcpy(&wp_node_number, wpt_info_buffer, sizeof(u32)); ++ buffer_size = wp_node_number * sizeof(struct config_value); ++ if ((buffer_size + sizeof(wp_node_number)) != len) { ++ dev_info(&isp->pdev->dev, ++ "File 
size is not right, len:%ld, buffer_size:%zu\n", ++ len, buffer_size); ++ vfree(wpt_info_buffer); ++ return -EFAULT; ++ } ++ ++ mutex_lock(&isp->trace->lock); ++ wpt_buffer = (struct config_value *)(wpt_info_buffer + sizeof(u32)); ++ for (i = 0; i < wp_node_number; i++) { ++ if (wpt->fill_level < MAX_TRACE_REGISTERS) { ++ wpt->config[wpt->fill_level].reg = wpt_buffer[i].reg; ++ wpt->config[wpt->fill_level].value = ++ wpt_buffer[i].value; ++ wpt->fill_level++; ++ } else { ++ dev_info(&isp->pdev->dev, ++ "Address 0x%08x ignored as invalid register\n", ++ wpt_buffer[i].reg); ++ break; ++ } ++ } ++ mutex_unlock(&isp->trace->lock); ++ vfree(wpt_info_buffer); ++ ++ return len; ++} ++ ++static int wptraceconf_release(struct inode *inode, struct file *file) ++{ ++ struct ipu6_device *isp = file->private_data; ++ ++ mutex_lock(&isp->trace->lock); ++ isp->trace->open = 0; ++ vfree(isp->trace->psys.wpt.conf_dump_buffer); ++ isp->trace->psys.wpt.conf_dump_buffer = NULL; ++ mutex_unlock(&isp->trace->lock); ++ ++ return 0; ++} ++ ++static const struct file_operations ipu_wptraceconf_fops = { ++ .owner = THIS_MODULE, ++ .open = wptraceconf_open, ++ .release = wptraceconf_release, ++ .read = wptraceconf_read, ++ .write = wptraceconf_write, ++ .llseek = noop_llseek, ++}; ++ ++static int gettrace_open(struct inode *inode, struct file *file) ++{ ++ struct ipu_subsystem_trace_config *sys = inode->i_private; ++ ++ if (!sys) ++ return -EACCES; ++ ++ if (!sys->memory.memory_buffer) ++ return -EACCES; ++ ++ dma_sync_single_for_cpu(sys->dev, ++ sys->memory.dma_handle, ++ MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); ++ ++ file->private_data = sys; ++ return 0; ++}; ++ ++static ssize_t gettrace_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu_subsystem_trace_config *sys = file->private_data; ++ ++ return simple_read_from_buffer(buf, len, ppos, ++ sys->memory.memory_buffer, ++ MEMORY_RING_BUFFER_SIZE + ++ 
MEMORY_RING_BUFFER_OVERREAD); ++} ++ ++static ssize_t gettrace_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ struct ipu_subsystem_trace_config *sys = file->private_data; ++ static const char str[] = "clear"; ++ char buffer[sizeof(str)] = { 0 }; ++ ssize_t ret; ++ ++ ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); ++ if (ret < 0) ++ return ret; ++ ++ if (ret < sizeof(str) - 1) ++ return -EINVAL; ++ ++ if (!strncmp(str, buffer, sizeof(str) - 1)) { ++ clear_trace_buffer(sys); ++ return len; ++ } ++ ++ return -EINVAL; ++} ++ ++static int gettrace_release(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static const struct file_operations ipu_gettrace_fops = { ++ .owner = THIS_MODULE, ++ .open = gettrace_open, ++ .release = gettrace_release, ++ .read = gettrace_read, ++ .write = gettrace_write, ++ .llseek = noop_llseek, ++}; ++ ++int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, ++ struct device *dev, struct ipu_trace_block *blocks) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_trace *trace = isp->trace; ++ struct ipu_subsystem_trace_config *sys; ++ int ret = 0; ++ ++ if (!isp->trace) ++ return 0; ++ ++ mutex_lock(&isp->trace->lock); ++ ++ if (dev == &isp->isys->auxdev.dev) { ++ sys = &trace->isys; ++ } else if (dev == &isp->psys->auxdev.dev) { ++ sys = &trace->psys; ++ } else { ++ ret = -EINVAL; ++ goto leave; ++ } ++ ++ adev->trace_cfg = sys; ++ sys->dev = dev; ++ sys->offset = base - isp->base; /* sub system offset */ ++ sys->base = base; ++ sys->blocks = blocks; ++ ++ sys->memory.memory_buffer = ++ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + ++ MEMORY_RING_BUFFER_GUARD, ++ &sys->memory.dma_handle, ++ GFP_KERNEL, 0); ++ ++ if (!sys->memory.memory_buffer) ++ dev_err(dev, "failed alloc memory for tracing.\n"); ++ ++leave: ++ mutex_unlock(&isp->trace->lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(ipu_trace_init); ++ ++void 
ipu_trace_uninit(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu6_device *isp = adev->isp; ++ struct ipu_trace *trace = isp->trace; ++ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; ++ ++ if (!trace || !sys) ++ return; ++ ++ mutex_lock(&trace->lock); ++ ++ if (sys->memory.memory_buffer) ++ ipu6_dma_free( ++ adev, ++ MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, ++ sys->memory.memory_buffer, sys->memory.dma_handle, 0); ++ ++ sys->dev = NULL; ++ sys->memory.memory_buffer = NULL; ++ ++ mutex_unlock(&trace->lock); ++} ++EXPORT_SYMBOL_GPL(ipu_trace_uninit); ++ ++int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir) ++{ ++ struct dentry *files[4]; ++ int i = 0; ++ ++ if (!ipu_trace_enable) ++ return 0; ++ ++ files[i] = debugfs_create_file("traceconf", 0644, ++ dir, isp, &ipu_traceconf_fops); ++ if (!files[i]) ++ return -ENOMEM; ++ i++; ++ ++ files[i] = debugfs_create_file("wptraceconf", 0644, ++ dir, isp, &ipu_wptraceconf_fops); ++ if (!files[i]) ++ goto error; ++ i++; ++ ++ files[i] = debugfs_create_file("getisystrace", 0444, ++ dir, ++ &isp->trace->isys, &ipu_gettrace_fops); ++ ++ if (!files[i]) ++ goto error; ++ i++; ++ ++ files[i] = debugfs_create_file("getpsystrace", 0444, ++ dir, ++ &isp->trace->psys, &ipu_gettrace_fops); ++ if (!files[i]) ++ goto error; ++ ++ return 0; ++ ++error: ++ for (; i > 0; i--) ++ debugfs_remove(files[i - 1]); ++ return -ENOMEM; ++} ++ ++int ipu_trace_add(struct ipu6_device *isp) ++{ ++ if (!ipu_trace_enable) ++ return 0; ++ ++ isp->trace = devm_kzalloc(&isp->pdev->dev, ++ sizeof(struct ipu_trace), GFP_KERNEL); ++ if (!isp->trace) ++ return -ENOMEM; ++ ++ mutex_init(&isp->trace->lock); ++ ++ dev_dbg(&isp->pdev->dev, "ipu trace enabled!"); ++ ++ return 0; ++} ++ ++void ipu_trace_release(struct ipu6_device *isp) ++{ ++ if (!isp->trace) ++ return; ++ mutex_destroy(&isp->trace->lock); ++} ++ ++int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t 
*dma_handle) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; ++ ++ if (!ipu_trace_enable) ++ return -EACCES; ++ ++ if (!sys || !sys->memory.memory_buffer) ++ return -EACCES; ++ ++ *dma_handle = sys->memory.dma_handle; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); ++ ++MODULE_AUTHOR("Samu Onkalo "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Intel ipu trace support"); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +new file mode 100644 +index 0000000..f66d889 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +@@ -0,0 +1,147 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2014 - 2025 Intel Corporation */ ++ ++#ifndef IPU_TRACE_H ++#define IPU_TRACE_H ++#include ++ ++/* Trace unit register offsets */ ++#define TRACE_REG_TUN_DDR_ENABLE 0x000 ++#define TRACE_REG_TUN_NPK_ENABLE 0x004 ++#define TRACE_REG_TUN_DDR_INFO_VAL 0x008 ++#define TRACE_REG_TUN_NPK_ADDR 0x00C ++#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 ++#define TRACE_REG_TUN_DRAM_END_ADDR 0x014 ++#define TRACE_REG_TUN_LOCAL_TIMER0 0x018 ++#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C ++#define TRACE_REG_TUN_WR_PTR 0x020 ++#define TRACE_REG_TUN_RD_PTR 0x024 ++ ++/* ++ * Following registers are left out on purpose: ++ * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR ++ * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR ++ */ ++ ++/* Trace monitor register offsets */ ++#define TRACE_REG_TM_TRACE_ADDR_A 0x0900 ++#define TRACE_REG_TM_TRACE_ADDR_B 0x0904 ++#define TRACE_REG_TM_TRACE_ADDR_C 0x0908 ++#define TRACE_REG_TM_TRACE_ADDR_D 0x090c ++#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 ++#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 ++#define TRACE_REG_TM_TRACE_PER_PC 0x0918 ++#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c ++#define TRACE_REG_TM_TRACE_HEADER 0x0920 ++#define TRACE_REG_TM_TRACE_CFG 0x0924 ++#define 
TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 ++#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c ++#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 ++#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 ++#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 ++#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c ++#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 ++#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 ++#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 ++#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c ++#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 ++#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 ++#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 ++#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c ++#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 ++#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 ++#define TRACE_REG_TM_FWTRACE_LAST 0x0A08 ++ ++/* ++ * Following exists only in (I)SP address space: ++ * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST ++ */ ++ ++#define TRACE_REG_GPC_RESET 0x000 ++#define TRACE_REG_GPC_OVERALL_ENABLE 0x004 ++#define TRACE_REG_GPC_TRACE_HEADER 0x008 ++#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C ++#define TRACE_REG_GPC_TRACE_NPK_EN 0x010 ++#define TRACE_REG_GPC_TRACE_DDR_EN 0x014 ++#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 ++#define TRACE_REG_GPC_TRACE_LPKT 0x01C ++ ++#define TRACE_REG_GPC_ENABLE_ID0 0x020 ++#define TRACE_REG_GPC_ENABLE_ID1 0x024 ++#define TRACE_REG_GPC_ENABLE_ID2 0x028 ++#define TRACE_REG_GPC_ENABLE_ID3 0x02c ++ ++#define TRACE_REG_GPC_VALUE_ID0 0x030 ++#define TRACE_REG_GPC_VALUE_ID1 0x034 ++#define TRACE_REG_GPC_VALUE_ID2 0x038 ++#define TRACE_REG_GPC_VALUE_ID3 0x03c ++ ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 ++#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c ++ ++#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 ++#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 ++#define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 ++#define 
TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c ++ ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 ++#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c ++ ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 ++#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c ++ ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 ++#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c ++ ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 ++#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c ++ ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 ++#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac ++ ++#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 ++#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 ++#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 ++#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc ++ ++struct ipu_trace; ++struct ipu_subsystem_trace_config; ++ ++enum ipu_trace_block_type { ++ IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ ++ IPU_TRACE_BLOCK_TM, /* Trace monitor */ ++ IPU_TRACE_BLOCK_GPC, /* General purpose control */ ++ IPU_TRACE_CSI2, /* CSI2 legacy receiver */ ++ IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ ++ IPU_TRACE_SIG2CIOS, ++ IPU_TRACE_TIMER_RST, /* Trace reset control timer */ ++ IPU_TRACE_BLOCK_END /* End of list */ ++}; ++ ++struct ipu_trace_block { ++ u32 offset; /* Offset to block inside subsystem */ ++ enum ipu_trace_block_type type; ++}; ++ ++int ipu_trace_add(struct ipu6_device *isp); ++int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir); ++void 
ipu_trace_release(struct ipu6_device *isp); ++int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, ++ struct device *dev, struct ipu_trace_block *blocks); ++void ipu_trace_restore(struct device *dev); ++void ipu_trace_uninit(struct device *dev); ++void ipu_trace_stop(struct device *dev); ++int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); ++#endif +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 0311fe7..3a00c9b 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -32,6 +32,7 @@ + #include "ipu6-platform-buttress-regs.h" + #include "ipu6-platform-isys-csi2-reg.h" + #include "ipu6-platform-regs.h" ++#include "ipu6-trace.h" + + static unsigned int isys_freq_override; + module_param(isys_freq_override, uint, 0660); +@@ -490,6 +491,37 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) + return 0; + } + ++#ifdef CONFIG_DEBUG_FS ++static int ipu_init_debugfs(struct ipu6_device *isp) ++{ ++ struct dentry *dir; ++ ++ dir = debugfs_create_dir(IPU6_NAME, NULL); ++ if (!dir) ++ return -ENOMEM; ++ ++ if (ipu_trace_debugfs_add(isp, dir)) ++ goto err; ++ ++ isp->ipu_dir = dir; ++ ++ return 0; ++err: ++ debugfs_remove_recursive(dir); ++ return -ENOMEM; ++} ++ ++static void ipu_remove_debugfs(struct ipu6_device *isp) ++{ ++ /* ++ * Since isys and psys debugfs dir will be created under ipu root dir, ++ * mark its dentry to NULL to avoid duplicate removal. 
++ */ ++ debugfs_remove_recursive(isp->ipu_dir); ++ isp->ipu_dir = NULL; ++} ++#endif /* CONFIG_DEBUG_FS */ ++ + static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + { + u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); +@@ -598,6 +630,10 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto buttress_exit; + } + ++ ret = ipu_trace_add(isp); ++ if (ret) ++ dev_err(&isp->pdev->dev, "Trace support not available\n"); ++ + ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (ret) { +@@ -677,6 +713,13 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + ipu6_mmu_hw_cleanup(isp->psys->mmu); + pm_runtime_put(&isp->psys->auxdev.dev); + ++#ifdef CONFIG_DEBUG_FS ++ ret = ipu_init_debugfs(isp); ++ if (ret) ++ dev_err(&isp->pdev->dev, "Failed to initialize debugfs"); ++ ++#endif ++ + /* Configure the arbitration mechanisms for VC requests */ + ipu6_configure_vc_mechanism(isp); + +@@ -718,6 +761,11 @@ static void ipu6_pci_remove(struct pci_dev *pdev) + struct ipu6_mmu *isys_mmu = isp->isys->mmu; + struct ipu6_mmu *psys_mmu = isp->psys->mmu; + ++#ifdef CONFIG_DEBUG_FS ++ ipu_remove_debugfs(isp); ++#endif ++ ipu_trace_release(isp); ++ + devm_free_irq(&pdev->dev, pdev->irq, isp); + ipu6_cpd_free_pkg_dir(isp->psys); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +index 92e3c34..d91a78e 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -82,10 +82,15 @@ struct ipu6_device { + u32 cpd_metadata_cmpnt_size; + + void __iomem *base; ++#ifdef CONFIG_DEBUG_FS ++ struct dentry *ipu_dir; ++#endif + bool need_ipc_reset; + bool secure_mode; + u8 hw_ver; + bool bus_ready_to_probe; ++ ++ struct ipu_trace *trace; + }; + + #define IPU6_ISYS_NAME "isys" +-- +2.43.0 + diff --git a/patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch 
b/patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch new file mode 100644 index 0000000..332535e --- /dev/null +++ b/patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch @@ -0,0 +1,100 @@ +From 24fc44aeb136b4f57622c613efec534ec67e3013 Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Fri, 16 Jan 2026 00:44:19 -0700 +Subject: [PATCH 58/76] media: pci: The order of return buffers should be FIFO. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Change Description: +return_buffers : use "list_first_entry", will get these logs in hal: +“CamHAL[ERR] DeviceBase: dequeueBuffer, CamBuf index isn't same with +index used by kernel +CamHAL[ERR] CaptureUnit: Device:Generic grab frame failed:-22” + +The index order is changed from sequential to reverse after return_buffers. +The reason why the normal start&stop does not expose the problem is that +every Hal start will start the buffer index from 0 instead of continuing +to use the buffer index returned last stop. + +So need return_buffers from the list of tail, +and need use the "list_last_entry". + +Signed-off-by: linya14x +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-queue.c | 31 ++++++++++--------- + 1 file changed, 16 insertions(+), 15 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +index aaad622..4839277 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +@@ -519,39 +519,40 @@ static void return_buffers(struct ipu6_isys_queue *aq, + unsigned long flags; + + spin_lock_irqsave(&aq->lock, flags); +- while (!list_empty(&aq->incoming)) { ++ ++ /* ++ * Something went wrong (FW crash / HW hang / not all buffers ++ * returned from isys) if there are still buffers queued in active ++ * queue. We have to clean up places a bit. 
++ */ ++ while (!list_empty(&aq->active)) { + struct vb2_buffer *vb; + +- ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, +- head); ++ ib = list_last_entry(&aq->active, struct ipu6_isys_buffer, ++ head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); ++ + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); ++ need_reset = true; + } + +- /* +- * Something went wrong (FW crash / HW hang / not all buffers +- * returned from isys) if there are still buffers queued in active +- * queue. We have to clean up places a bit. +- */ +- while (!list_empty(&aq->active)) { ++ while (!list_empty(&aq->incoming)) { + struct vb2_buffer *vb; + +- ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, +- head); ++ ib = list_last_entry(&aq->incoming, struct ipu6_isys_buffer, ++ head); + vb = ipu6_isys_buffer_to_vb2_buffer(ib); +- + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + spin_lock_irqsave(&aq->lock, flags); +- need_reset = true; + } + + spin_unlock_irqrestore(&aq->lock, flags); +@@ -699,8 +700,8 @@ static int reset_start_streaming(struct ipu6_isys_video *av) + + spin_lock_irqsave(&aq->lock, flags); + while (!list_empty(&aq->active)) { +- struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, +- struct ipu6_isys_buffer, head); ++ struct ipu6_isys_buffer *ib = list_last_entry(&aq->active, ++ struct ipu6_isys_buffer, head); + + list_del(&ib->head); + list_add_tail(&ib->head, &aq->incoming); +-- +2.43.0 + diff --git a/patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch b/patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch new file mode 100644 index 0000000..aae618c --- /dev/null +++ b/patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch @@ -0,0 +1,338 @@ +From 9da221ff3a32069ba04dd0df4785ce8a0a4002bb Mon Sep 17 00:00:00 2001 +From: Hongju Wang +Date: Fri, 16 Jan 2026 00:47:04 -0700 
+Subject: [PATCH 59/76] media: pci: Modify enble/disable stream in CSI2 + +Signed-off-by: Hongju Wang +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 105 ++++++++++++------ + .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 7 +- + .../media/pci/intel/ipu6/ipu6-isys-video.c | 78 +++++++------ + 3 files changed, 118 insertions(+), 72 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +index d1fece6..1e4e70b 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +@@ -339,6 +339,12 @@ static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, + return ret; + } + ++/* ++ * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set ++ * of streams. ++ */ ++#define CSI2_SUBDEV_MAX_STREAM_ID 63 ++ + static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +@@ -346,54 +352,81 @@ static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, + struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); + struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); + struct ipu6_isys_csi2_timing timing = { }; +- struct v4l2_subdev *remote_sd; +- struct media_pad *remote_pad; +- u64 sink_streams; +- int ret; +- +- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); +- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); ++ u32 sink_pad, sink_stream; ++ struct v4l2_subdev *r_sd; ++ struct media_pad *r_pad; ++ int ret, i; ++ ++ if (!csi2->stream_count) { ++ ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); ++ if (ret) ++ return ret; ++ ++ ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, ++ true); ++ if (ret) ++ return ret; ++ } + +- sink_streams = +- v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, +- &streams_mask); ++ for (i = 0; i <= 
CSI2_SUBDEV_MAX_STREAM_ID; i++) { ++ if (streams_mask & BIT_ULL(i)) ++ break; ++ } + +- ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); ++ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, ++ &sink_pad, &sink_stream); + if (ret) + return ret; + +- ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); +- if (ret) +- return ret; ++ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); ++ r_sd = media_entity_to_v4l2_subdev(r_pad->entity); + +- ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, +- sink_streams); +- if (ret) { +- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); +- return ret; ++ ret = v4l2_subdev_enable_streams(r_sd, r_pad->index, ++ BIT_ULL(sink_stream)); ++ if (!ret) { ++ csi2->stream_count++; ++ return 0; + } + +- return 0; ++ if (!csi2->stream_count) ++ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); ++ ++ return ret; + } + + static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) + { +- struct v4l2_subdev *remote_sd; +- struct media_pad *remote_pad; +- u64 sink_streams; ++ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); ++ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); ++ u32 sink_pad, sink_stream; ++ struct v4l2_subdev *r_sd; ++ struct media_pad *r_pad; ++ int ret, i; + +- sink_streams = +- v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, +- &streams_mask); ++ for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) { ++ if (streams_mask & BIT_ULL(i)) ++ break; ++ } + +- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); +- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); ++ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, ++ &sink_pad, &sink_stream); ++ if (ret) ++ return ret; + +- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); ++ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); ++ r_sd = 
media_entity_to_v4l2_subdev(r_pad->entity); + +- v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); ++ v4l2_subdev_disable_streams(r_sd, r_pad->index, BIT_ULL(sink_stream)); ++ ++ if (--csi2->stream_count) ++ return 0; ++ ++ dev_dbg(&csi2->isys->adev->auxdev.dev, ++ "stream off CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes); ++ ++ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); + + return 0; + } +@@ -592,7 +625,8 @@ void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) + int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, +- struct v4l2_mbus_frame_desc_entry *entry) ++ struct v4l2_mbus_frame_desc_entry *entry, ++ int *nr_queues) + { + struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; + struct device *dev = &csi2->isys->adev->auxdev.dev; +@@ -639,5 +673,14 @@ int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + + *entry = *desc_entry; + ++ for (i = 0; i < desc.num_entries; i++) { ++ if (desc_entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc) ++ (*nr_queues)++; ++ } ++ ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ csi2->is_multiple = true; ++ dev_dbg(dev, "set csi2->is_multiple is true.\n"); ++#endif + return 0; + } +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h +index ce8eed9..7d751e5 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h +@@ -43,6 +43,10 @@ struct ipu6_isys_csi2 { + u32 receiver_errors; + unsigned int nlanes; + unsigned int port; ++ unsigned int stream_count; ++#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET ++ bool is_multiple; ++#endif + }; + + struct ipu6_isys_csi2_timing { +@@ -73,6 +77,7 @@ void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); + int ipu6_isys_csi2_get_remote_desc(u32 source_stream, + struct ipu6_isys_csi2 *csi2, + struct media_entity *source_entity, +- struct 
v4l2_mbus_frame_desc_entry *entry); ++ struct v4l2_mbus_frame_desc_entry *entry, ++ int *nr_queues); + + #endif /* IPU6_ISYS_CSI2_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +index 148c790..b8cbfe2 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +@@ -1035,35 +1035,40 @@ ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) + return stream; + } + +-static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av) ++static u32 get_remote_pad_stream(struct media_pad *r_pad) + { +- struct media_pipeline *pipeline = +- media_entity_pipeline(&__av->vdev.entity); ++ struct v4l2_subdev_state *state; ++ struct v4l2_subdev *sd; ++ u32 stream_id = 0; + unsigned int i; +- u64 stream_mask = 0; + +- for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { +- struct ipu6_isys_video *av = &__av->csi2->av[i]; ++ sd = media_entity_to_v4l2_subdev(r_pad->entity); ++ state = v4l2_subdev_lock_and_get_active_state(sd); ++ if (!state) ++ return 0; + +- if (pipeline == media_entity_pipeline(&av->vdev.entity)) +- stream_mask |= BIT_ULL(av->source_stream); ++ for (i = 0; i < state->stream_configs.num_configs; i++) { ++ struct v4l2_subdev_stream_config *cfg = ++ &state->stream_configs.configs[i]; ++ if (cfg->pad == r_pad->index) { ++ stream_id = cfg->stream; ++ break; ++ } + } + +- return stream_mask; ++ v4l2_subdev_unlock_state(state); ++ ++ return stream_id; + } + + int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + struct ipu6_isys_buffer_list *bl) + { +- struct v4l2_subdev_krouting *routing; + struct ipu6_isys_stream *stream = av->stream; +- struct v4l2_subdev_state *subdev_state; + struct device *dev = &av->isys->adev->auxdev.dev; + struct v4l2_subdev *sd; + struct media_pad *r_pad; +- u32 sink_pad, sink_stream; +- u64 r_stream; +- u64 stream_mask = 0; ++ u32 r_stream = 0; + int ret = 0; + 
+ dev_dbg(dev, "set stream: %d\n", state); +@@ -1073,31 +1078,22 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + + sd = &stream->asd->sd; + r_pad = media_pad_remote_pad_first(&av->pad); +- r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); +- +- subdev_state = v4l2_subdev_lock_and_get_active_state(sd); +- routing = &subdev_state->routing; +- ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, +- r_stream, &sink_pad, +- &sink_stream); +- v4l2_subdev_unlock_state(subdev_state); +- if (ret) +- return ret; +- +- stream_mask = get_stream_mask_by_pipeline(av); ++ r_stream = get_remote_pad_stream(r_pad); + if (!state) { + stop_streaming_firmware(av); + + /* stop sub-device which connects with video */ +- dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", +- sd->name, r_pad->index, stream_mask); ++ dev_dbg(dev, "disable streams %s pad:%d mask:0x%llx for %s\n", ++ sd->name, r_pad->index, BIT_ULL(r_stream), ++ stream->source_entity->name); + ret = v4l2_subdev_disable_streams(sd, r_pad->index, +- stream_mask); ++ BIT_ULL(r_stream)); + if (ret) { +- dev_err(dev, "stream off %s failed with %d\n", sd->name, +- ret); ++ dev_err(dev, "disable streams %s failed with %d\n", ++ sd->name, ret); + return ret; + } ++ + close_streaming_firmware(av); + } else { + ret = start_stream_firmware(av, bl); +@@ -1107,12 +1103,14 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, + } + + /* start sub-device which connects with video */ +- dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, +- r_pad->index, stream_mask); +- ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); ++ dev_dbg(dev, "enable streams %s pad: %d mask:0x%llx for %s\n", ++ sd->name, r_pad->index, BIT_ULL(r_stream), ++ stream->source_entity->name); ++ ret = v4l2_subdev_enable_streams(sd, r_pad->index, ++ BIT_ULL(r_stream)); + if (ret) { +- dev_err(dev, "stream on %s failed with %d\n", sd->name, +- ret); ++ dev_err(dev, 
"enable streams %s failed with %d\n", ++ sd->name, ret); + goto out_media_entity_stop_streaming_firmware; + } + } +@@ -1276,8 +1274,6 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, + /* Find the root */ + state = v4l2_subdev_lock_and_get_active_state(remote_sd); + for_each_active_route(&state->routing, r) { +- (*nr_queues)++; +- + if (r->source_pad == remote_pad->index) + route = r; + } +@@ -1292,11 +1288,13 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, + + ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, + to_ipu6_isys_csi2(asd), +- *source_entity, &entry); ++ *source_entity, &entry, ++ nr_queues); + if (ret == -ENOIOCTLCMD) { + av->vc = 0; + av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); +- } else if (!ret) { ++ *nr_queues = 1; ++ } else if (*nr_queues && !ret) { + dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", + entry.stream, entry.length, entry.bus.csi2.vc, + entry.bus.csi2.dt); +-- +2.43.0 + diff --git a/patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch b/patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch new file mode 100644 index 0000000..efcf7ab --- /dev/null +++ b/patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch @@ -0,0 +1,29 @@ +From d60e72405ffdf29cf617e988700735ee9f5c5a24 Mon Sep 17 00:00:00 2001 +From: Hongju Wang +Date: Fri, 16 Jan 2026 00:49:26 -0700 +Subject: [PATCH 60/76] media: pci: Set the correct SOF for different stream + +[Changes] +Distinguish SOF according to VC for streams. 
+ +Signed-off-by: Hongju Wang +Signed-off-by: zouxiaoh +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +index 1e4e70b..70a144e 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +@@ -605,6 +605,7 @@ void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) + .type = V4L2_EVENT_FRAME_SYNC, + }; + ++ ev.id = stream->vc; + ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); + v4l2_event_queue(vdev, &ev); + +-- +2.43.0 + diff --git a/patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch b/patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch new file mode 100644 index 0000000..e3dff66 --- /dev/null +++ b/patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch @@ -0,0 +1,981 @@ +From 970cdb3c90921cec75d25a9e8433cb511e3cab1f Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Fri, 16 Jan 2026 00:56:23 -0700 +Subject: [PATCH 61/76] media: pci: support imx390 for 6.11.0-rc3. 
+ +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-buttress.c | 42 +++ + .../media/pci/intel/ipu6/ipu6-buttress.h | 1 + + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 77 +++++ + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 324 +++++++++++++++++- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 85 +++++ + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 123 ++++++- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 15 +- + 7 files changed, 656 insertions(+), 11 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index 103386c..9523cc6 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + + #include "ipu6.h" + #include "ipu6-bus.h" +@@ -774,6 +775,47 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + } + EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); + ++/* ++ * The dev_id was hard code in platform data, as i2c bus number ++ * may change dynamiclly, we need to update this bus id ++ * accordingly. 
++ * ++ * @adapter_id: hardware i2c adapter id, this was fixed in platform data ++ * return: i2c bus id registered in system ++ */ ++int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) ++{ ++ struct i2c_adapter *adapter; ++ char name[32]; ++ int i = 0; ++ ++ if (adapter_bdf) { ++ while ((adapter = i2c_get_adapter(i)) != NULL) { ++ struct device *parent = adapter->dev.parent; ++ struct device *pp = parent->parent; ++ ++ if (pp && !strncmp(adapter_bdf, dev_name(pp), bdf_len)) ++ return i; ++ i++; ++ } ++ } ++ ++ i = 0; ++ snprintf(name, sizeof(name), "i2c_designware.%d", adapter_id); ++ while ((adapter = i2c_get_adapter(i)) != NULL) { ++ struct device *parent = adapter->dev.parent; ++ ++ if (parent && !strncmp(name, dev_name(parent), sizeof(name))) ++ return i; ++ i++; ++ } ++ ++ /* Not found, should never happen! */ ++ WARN_ON_ONCE(1); ++ return -1; ++} ++EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, "INTEL_IPU6"); ++ + void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) + { + u32 tsc_hi_1, tsc_hi_2, tsc_lo; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 51e5ad4..6fed522 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -82,4 +82,5 @@ void ipu6_buttress_exit(struct ipu6_device *isp); + void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); + void ipu6_buttress_restore(struct ipu6_device *isp); ++int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); + #endif /* IPU6_BUTTRESS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index 71aa500..f18e6fa 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -567,6 +567,27 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys 
*isys, u8 id) + return ret; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++{ ++ unsigned int phy_id; ++ void __iomem *phy_base; ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct ipu6_device *isp = adev->isp; ++ void __iomem *isp_base = isp->base; ++ unsigned int i; ++ ++ phy_id = cfg->port / 4; ++ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); ++ ++ for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { ++ writel(common_init_regs[i].val, ++ phy_base + common_init_regs[i].reg); ++ } ++ ++ return 0; ++} ++#else + static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) + { + struct ipu6_bus_device *adev = isys->adev; +@@ -588,6 +609,7 @@ static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) + phy_base + common_init_regs[i].reg); + } + } ++#endif + + static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + { +@@ -619,6 +641,55 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + return ret; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++{ ++ unsigned int phy_port, phy_id; ++ void __iomem *phy_base; ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct ipu6_device *isp = adev->isp; ++ void __iomem *isp_base = isp->base; ++ const struct phy_reg **phy_config_regs; ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ int i; ++ ++ if (!spdata) { ++ dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); ++ return -EINVAL; ++ } ++ ++ phy_id = cfg->port / 4; ++ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); ++ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { ++ sd_info = *subdevs; ++ if 
(!sd_info->csi2) ++ continue; ++ ++ phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); ++ if (phy_port < 0) { ++ dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", ++ cfg->port, cfg->nlanes); ++ return -ENXIO; ++ } ++ ++ if ((sd_info->csi2->port / 4) != phy_id) ++ continue; ++ ++ dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", ++ phy_port, phy_id, cfg->nlanes); ++ ++ phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; ++ ++ for (i = 0; phy_config_regs[phy_port][i].reg; i++) { ++ writel(phy_config_regs[phy_port][i].val, ++ phy_base + phy_config_regs[phy_port][i].reg); ++ } ++ } ++ ++ return 0; ++} ++#else + static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) + { + struct device *dev = &isys->adev->auxdev.dev; +@@ -658,6 +729,7 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) + + return 0; + } ++#endif + + #define CSI_MCD_PHY_NUM 2 + static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; +@@ -698,9 +770,14 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 0); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ ipu6_isys_mcd_phy_common_init(isys, cfg); ++ ret = ipu6_isys_mcd_phy_config(isys, cfg); ++#else + ipu6_isys_mcd_phy_common_init(isys); + + ret = ipu6_isys_mcd_phy_config(isys); ++#endif + if (ret) + return ret; + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 1ab8dd8..b3ae1f1 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -28,9 +28,14 @@ + #include + #include + #include +-#include +-#include ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #include ++#include ++#include ++#include ++#include ++#include ++#endif + + #include "ipu6-bus.h" + #include "ipu6-cpd.h" +@@ -100,6 +105,57 @@ enum ltr_did_type { + }; + + #define ISYS_PM_QOS_VALUE 300 ++#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++/* ++ * The param was passed from module to indicate if port ++ * could be optimized. ++ */ ++static bool csi2_port_optimized = true; ++module_param(csi2_port_optimized, bool, 0660); ++MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); ++#endif ++ ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++struct isys_i2c_test { ++ u8 bus_nr; ++ u16 addr; ++ struct i2c_client *client; ++}; ++ ++static int isys_i2c_test(struct device *dev, void *priv) ++{ ++ struct i2c_client *client = i2c_verify_client(dev); ++ struct isys_i2c_test *test = priv; ++ ++ if (!client) ++ return 0; ++ ++ if (i2c_adapter_id(client->adapter) != test->bus_nr || ++ client->addr != test->addr) ++ return 0; ++ ++ test->client = client; ++ ++ return 0; ++} ++ ++static struct ++i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, ++ struct ipu6_isys_subdev_info *sd_info) ++{ ++ struct i2c_board_info *info = &sd_info->i2c.board_info; ++ struct isys_i2c_test test = { ++ .bus_nr = i2c_adapter_id(adapter), ++ .addr = info->addr, ++ }; ++ int rval; ++ ++ rval = i2c_for_each_dev(&test, isys_i2c_test); ++ if (rval || !test.client) ++ return NULL; ++ return test.client; ++} ++#endif + + static int isys_isr_one(struct ipu6_bus_device *adev); + +@@ -142,6 +198,153 @@ unregister_subdev: + return ret; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++static int isys_register_ext_subdev(struct ipu6_isys *isys, ++ struct ipu6_isys_subdev_info *sd_info) ++{ ++ struct i2c_adapter *adapter; ++ struct v4l2_subdev *sd; ++ struct i2c_client *client; ++ int rval; ++ int bus; ++ ++ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf, ++ sizeof(sd_info->i2c.i2c_adapter_bdf)); ++ if (bus < 0) { ++ dev_err(&isys->adev->auxdev.dev, ++ "getting i2c bus id for adapter %d (bdf %s) failed", ++ sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf); ++ return -ENOENT; ++ } ++ 
dev_info(&isys->adev->auxdev.dev, ++ "got i2c bus id %d for adapter %d (bdf %s)", bus, ++ sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf); ++ adapter = i2c_get_adapter(bus); ++ if (!adapter) { ++ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); ++ return -ENOENT; ++ } ++ ++ dev_info(&isys->adev->auxdev.dev, ++ "creating new i2c subdev for %s (address %2.2x, bus %d)", ++ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, ++ bus); ++ ++ if (sd_info->csi2) { ++ dev_info(&isys->adev->auxdev.dev, "sensor device on CSI port: %d\n", ++ sd_info->csi2->port); ++ if (sd_info->csi2->port >= isys->pdata->ipdata->csi2.nports || ++ !isys->csi2[sd_info->csi2->port].isys) { ++ dev_warn(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", ++ sd_info->csi2->port); ++ rval = -EINVAL; ++ goto skip_put_adapter; ++ } ++ } else { ++ dev_info(&isys->adev->auxdev.dev, "non camera subdevice\n"); ++ } ++ ++ client = isys_find_i2c_subdev(adapter, sd_info); ++ if (client) { ++ dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); ++ rval = 0; ++ goto skip_put_adapter; ++ } ++ ++ sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, ++ &sd_info->i2c.board_info, NULL); ++ if (!sd) { ++ dev_warn(&isys->adev->auxdev.dev, "can't create new i2c subdev\n"); ++ rval = -EINVAL; ++ goto skip_put_adapter; ++ } ++ ++ if (!sd_info->csi2) ++ return 0; ++ ++ return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); ++ ++skip_put_adapter: ++ i2c_put_adapter(adapter); ++ ++ return rval; ++} ++ ++static int isys_unregister_ext_subdev(struct ipu6_isys *isys, ++ struct ipu6_isys_subdev_info *sd_info) ++{ ++ struct i2c_adapter *adapter; ++ struct i2c_client *client; ++ int bus; ++ ++ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf, ++ sizeof(sd_info->i2c.i2c_adapter_bdf)); ++ if (bus < 0) { ++ dev_err(&isys->adev->auxdev.dev, ++ "getting i2c bus id for adapter %d (bdf %s) failed\n", ++ sd_info->i2c.i2c_adapter_id, ++ 
sd_info->i2c.i2c_adapter_bdf); ++ return -ENOENT; ++ } ++ dev_dbg(&isys->adev->auxdev.dev, ++ "got i2c bus id %d for adapter %d (bdf %s)\n", bus, ++ sd_info->i2c.i2c_adapter_id, ++ sd_info->i2c.i2c_adapter_bdf); ++ adapter = i2c_get_adapter(bus); ++ if (!adapter) { ++ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); ++ return -ENOENT; ++ } ++ ++ dev_dbg(&isys->adev->auxdev.dev, ++ "unregister i2c subdev for %s (address %2.2x, bus %d)\n", ++ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, ++ bus); ++ ++ client = isys_find_i2c_subdev(adapter, sd_info); ++ if (!client) { ++ dev_dbg(&isys->adev->auxdev.dev, "Device not exists\n"); ++ goto skip_put_adapter; ++ } ++ ++ i2c_unregister_device(client); ++ ++skip_put_adapter: ++ i2c_put_adapter(adapter); ++ ++ return 0; ++} ++ ++static void isys_register_ext_subdevs(struct ipu6_isys *isys) ++{ ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ ++ if (!spdata) { ++ dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); ++ return; ++ } ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) ++ isys_register_ext_subdev(isys, *sd_info); ++} ++ ++static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) ++{ ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ ++ if (!spdata) ++ return; ++ ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) ++ isys_unregister_ext_subdev(isys, *sd_info); ++} ++#endif ++ + static void isys_stream_init(struct ipu6_isys *isys) + { + u32 i; +@@ -173,10 +376,43 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ DECLARE_BITMAP(csi2_enable, 32); ++#endif + unsigned int i; + 
int ret; + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ /* ++ * Here is somewhat a workaround, let each platform decide ++ * if csi2 port can be optimized, which means only registered ++ * port from pdata would be enabled. ++ */ ++ if (csi2_port_optimized && spdata) { ++ bitmap_zero(csi2_enable, 32); ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { ++ if ((*sd_info)->csi2) { ++ i = (*sd_info)->csi2->port; ++ if (i >= csi2_pdata->nports) { ++ dev_warn(&isys->adev->auxdev.dev, ++ "invalid csi2 port %u\n", i); ++ continue; ++ } ++ bitmap_set(csi2_enable, i, 1); ++ } ++ } ++ } else { ++ bitmap_fill(csi2_enable, 32); ++ } ++#endif ++ + for (i = 0; i < csi2_pdata->nports; i++) { ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ if (!test_bit(i, csi2_enable)) ++ continue; ++#endif + ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, + isys->pdata->base + + CSI_REG_PORT_BASE(i), i); +@@ -200,10 +436,43 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ DECLARE_BITMAP(csi2_enable, 32); ++#endif + unsigned int i, j; + int ret; + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ /* ++ * Here is somewhat a workaround, let each platform decide ++ * if csi2 port can be optimized, which means only registered ++ * port from pdata would be enabled. 
++ */ ++ if (csi2_port_optimized && spdata) { ++ bitmap_zero(csi2_enable, 32); ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { ++ if ((*sd_info)->csi2) { ++ i = (*sd_info)->csi2->port; ++ if (i >= csi2_pdata->nports) { ++ dev_warn(&isys->adev->auxdev.dev, ++ "invalid csi2 port %u\n", i); ++ continue; ++ } ++ bitmap_set(csi2_enable, i, 1); ++ } ++ } ++ } else { ++ bitmap_fill(csi2_enable, 32); ++ } ++#endif ++ + for (i = 0; i < csi2_pdata->nports; i++) { ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ if (!test_bit(i, csi2_enable)) ++ continue; ++#endif + struct media_entity *sd = &isys->csi2[i].asd.sd.entity; + + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { +@@ -238,10 +507,43 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu6_isys_subdev_info **sd_info; ++ DECLARE_BITMAP(csi2_enable, 32); ++#endif + unsigned int i, j; + int ret; + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ /* ++ * Here is somewhat a workaround, let each platform decide ++ * if csi2 port can be optimized, which means only registered ++ * port from pdata would be enabled. 
++ */ ++ if (csi2_port_optimized && spdata) { ++ bitmap_zero(csi2_enable, 32); ++ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { ++ if ((*sd_info)->csi2) { ++ i = (*sd_info)->csi2->port; ++ if (i >= csi2_pdata->nports) { ++ dev_warn(&isys->adev->auxdev.dev, ++ "invalid csi2 port %u\n", i); ++ continue; ++ } ++ bitmap_set(csi2_enable, i, 1); ++ } ++ } ++ } else { ++ bitmap_fill(csi2_enable, 32); ++ } ++#endif ++ + for (i = 0; i < csi2_pdata->nports; i++) { ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ if (!test_bit(i, csi2_enable)) ++ continue; ++#endif + for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { + struct ipu6_isys_video *av = &isys->csi2[i].av[j]; + +@@ -672,6 +974,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) + mutex_destroy(&iwake_watermark->mutex); + } + ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, +@@ -783,6 +1086,7 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) + v4l2_async_nf_unregister(&isys->notifier); + v4l2_async_nf_cleanup(&isys->notifier); + } ++#endif + + static int isys_register_devices(struct ipu6_isys *isys) + { +@@ -820,12 +1124,23 @@ static int isys_register_devices(struct ipu6_isys *isys) + if (ret) + goto out_isys_unregister_subdevices; + ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + ret = isys_notifier_init(isys); + if (ret) + goto out_isys_unregister_subdevices; ++#else ++ isys_register_ext_subdevs(isys); ++#endif ++ ++ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); ++ if (ret) ++ goto out_isys_unregister_ext_subdevs; + + return 0; + ++out_isys_unregister_ext_subdevs: ++ isys_unregister_ext_subdevs(isys); ++ + out_isys_unregister_subdevices: + isys_csi2_unregister_subdevices(isys); + +@@ -848,6 +1163,9 @@ static void isys_unregister_devices(struct ipu6_isys *isys) + { + 
isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ isys_unregister_ext_subdevs(isys); ++#endif + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); + media_device_cleanup(&isys->media_dev); +@@ -1217,7 +1535,9 @@ static void isys_remove(struct auxiliary_device *auxdev) + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + isys_notifier_cleanup(isys); ++#endif + + cpu_latency_qos_remove_request(&isys->pm_qos); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index d55d804..5e58d81 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -60,6 +61,15 @@ struct ipu6_bus_device; + #define IPU6EP_MTL_LTR_VALUE 1023 + #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc + ++#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ ++ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++#define IPU6_SPDATA_NAME_LEN 20 ++#define IPU6_SPDATA_BDF_LEN 32 ++#define IPU6_SPDATA_GPIO_NUM 4 ++#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#endif ++ + struct ltr_did { + union { + u32 value; +@@ -161,7 +171,9 @@ struct ipu6_isys { + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; ++#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + struct v4l2_async_notifier notifier; ++#endif + struct isys_iwake_watermark iwake_watermark; + #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET + struct mutex reset_mutex; +@@ -180,6 +192,79 @@ struct isys_fw_msgs { + dma_addr_t dma_addr; + }; + ++struct ipu6_isys_subdev_i2c_info { ++ struct i2c_board_info board_info; ++ int i2c_adapter_id; ++ char 
i2c_adapter_bdf[32]; ++}; ++ ++#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ ++ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++#define IPU6_SPDATA_NAME_LEN 20 ++#define IPU6_SPDATA_BDF_LEN 32 ++#define IPU6_SPDATA_GPIO_NUM 4 ++#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#endif ++ ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++/** ++ * struct ipu6_spdata_rep - override subdev platform data ++ * ++ * @name: i2c_board_info.type ++ * @i2c_adapter_bdf_o: old i2c adapter bdf ++ * @slave_addr_o: old i2c slave address ++ * @i2c_adapter_bdf_n: new i2c adapter bdf ++ * @slave_addr_n: new i2c slave address ++ * ++ * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and ++ * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n ++ */ ++struct ipu6_spdata_rep { ++ /* i2c old information */ ++ char name[IPU6_SPDATA_NAME_LEN]; ++ unsigned int port_o; ++ char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; ++ uint32_t slave_addr_o; ++ ++ /* i2c new information */ ++ unsigned int port_n; ++ char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; ++ uint32_t slave_addr_n; ++ ++ /* sensor_platform */ ++ unsigned int lanes; ++ int gpios[IPU6_SPDATA_GPIO_NUM]; ++ int irq_pin; ++ unsigned int irq_pin_flags; ++ char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; ++ char suffix; ++}; ++#endif ++ ++struct ipu6_isys_subdev_info { ++ struct ipu6_isys_csi2_config *csi2; ++ struct ipu6_isys_subdev_i2c_info i2c; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ void (*fixup_spdata)(const void *spdata_rep, void *spdata); ++#endif ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ char *acpi_hid; ++#endif ++}; ++ ++struct ipu6_isys_clk_mapping { ++ struct clk_lookup clkdev_data; ++ char *platform_clock_name; ++}; ++ ++struct ipu6_isys_subdev_pdata { ++ struct 
ipu6_isys_subdev_info **subdevs; ++ struct ipu6_isys_clk_mapping *clk_map; ++}; ++ + struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); + void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data); + void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 3a00c9b..c740c22 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -34,6 +34,19 @@ + #include "ipu6-platform-regs.h" + #include "ipu6-trace.h" + ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++#include ++#endif ++ ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++static int isys_init_acpi_add_device(struct device *dev, void *priv, ++ struct ipu6_isys_csi2_config *csi2, ++ bool reprobe) ++{ ++ return 0; ++} ++#endif ++ + static unsigned int isys_freq_override; + module_param(isys_freq_override, uint, 0660); + MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); +@@ -373,18 +386,25 @@ static void ipu6_internal_pdata_init(struct ipu6_device *isp) + static struct ipu6_bus_device * + ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, +- const struct ipu6_isys_internal_pdata *ipdata) ++ const struct ipu6_isys_internal_pdata *ipdata, ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata ++#endif ++ ) + { + struct device *dev = &pdev->dev; + struct ipu6_bus_device *isys_adev; + struct ipu6_isys_pdata *pdata; ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ struct ipu6_isys_subdev_pdata *acpi_pdata; ++#endif + int ret; + +- ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); +- if (ret) { +- dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); +- return ERR_PTR(ret); +- } ++ // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); ++ // if (ret) { ++ // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); ++ // return 
ERR_PTR(ret); ++ // } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) +@@ -392,7 +412,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + + pdata->base = base; + pdata->ipdata = ipdata; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ pdata->spdata = spdata; ++#endif + /* Override the isys freq */ + if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && + isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { +@@ -409,6 +431,19 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + "ipu6_bus_initialize_device isys failed\n"); + } + ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ if (!spdata) { ++ dev_dbg(&pdev->dev, "No subdevice info provided"); ++ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, ++ isys_init_acpi_add_device); ++ pdata->spdata = acpi_pdata; ++ } else { ++ dev_dbg(&pdev->dev, "Subdevice info found"); ++ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, ++ isys_init_acpi_add_device); ++ } ++#endif ++ + isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(isys_adev->mmu)) { +@@ -539,6 +574,59 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++static inline int match_spdata(struct ipu6_isys_subdev_info *sd, ++ const struct ipu6_spdata_rep *rep) ++{ ++ if (strcmp(sd->i2c.board_info.type, rep->name)) ++ return 0; ++ ++ if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) ++ return 0; ++ ++ if (sd->i2c.board_info.addr != rep->slave_addr_o) ++ return 0; ++ ++ if (sd->csi2->port != rep->port_o) ++ return 0; ++ ++ return 1; ++} ++ ++static void fixup_spdata(const void *spdata_rep, ++ struct ipu6_isys_subdev_pdata *spdata) ++{ ++ const struct ipu6_spdata_rep *rep = spdata_rep; ++ struct ipu6_isys_subdev_info **subdevs, 
*sd_info; ++ ++ if (!spdata) ++ return; ++ ++ for (; rep->name[0]; rep++) { ++ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { ++ sd_info = *subdevs; ++ ++ if (!sd_info->csi2) ++ continue; ++ ++ if (match_spdata(sd_info, rep)) { ++ strcpy(sd_info->i2c.i2c_adapter_bdf, ++ rep->i2c_adapter_bdf_n); ++ sd_info->i2c.board_info.addr = ++ rep->slave_addr_n; ++ sd_info->csi2->port = rep->port_n; ++ ++ if (sd_info->fixup_spdata) ++ sd_info->fixup_spdata(rep, ++ sd_info->i2c.board_info.platform_data); ++ } ++ } ++ } ++} ++#endif ++#endif ++ + static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; +@@ -642,6 +730,16 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto out_ipu6_bus_del_devices; + } + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); ++ if (rval) ++ dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); ++ else ++ fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); ++#endif ++#endif ++ + isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, + sizeof(isys_buttress_ctrl), GFP_KERNEL); + if (!isys_ctrl) { +@@ -650,7 +748,11 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + } + + isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, +- &isys_ipdata); ++ &isys_ipdata, ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ pdev->dev.platform_data ++#endif ++ ); + if (IS_ERR(isp->isys)) { + ret = PTR_ERR(isp->isys); + goto out_ipu6_bus_del_devices; +@@ -749,6 +851,11 @@ out_ipu6_bus_del_devices: + ipu6_mmu_cleanup(isp->isys->mmu); + ipu6_bus_del_devices(pdev); + release_firmware(isp->cpd_fw); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ 
release_firmware(isp->spdata_fw); ++#endif ++#endif + buttress_exit: + ipu6_buttress_exit(isp); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +index d91a78e..71d8cdf 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -23,6 +23,12 @@ struct ipu6_bus_device; + #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" + #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ ++ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++/* array of struct ipu6_spdata_rep terminated by NULL */ ++#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" ++#endif ++ + enum ipu6_version { + IPU6_VER_INVALID = 0, + IPU6_VER_6 = 1, +@@ -80,7 +86,11 @@ struct ipu6_device { + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u32 cpd_metadata_cmpnt_size; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) ++ const struct firmware *spdata_fw; ++#endif ++#endif + void __iomem *base; + #ifdef CONFIG_DEBUG_FS + struct dentry *ipu_dir; +@@ -328,6 +338,9 @@ struct ipu6_isys_internal_pdata { + struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++ struct ipu6_isys_subdev_pdata *spdata; ++#endif + }; + + struct ipu6_psys_internal_pdata { +-- +2.43.0 + diff --git a/patches/0062-i2c-media-fix-cov-issue.patch b/patches/0062-i2c-media-fix-cov-issue.patch new file mode 100644 index 0000000..340eea3 --- /dev/null +++ b/patches/0062-i2c-media-fix-cov-issue.patch @@ -0,0 +1,31 @@ +From d48e001d4952b4c75f29375d0d122925804579ba Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Fri, 16 Jan 2026 00:58:48 -0700 +Subject: [PATCH 62/76] i2c: media: fix cov issue + +check return value of v4l2_ctrl_handler_init; +change the type of phy_port to 
int; + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index f18e6fa..ff29130 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -644,7 +644,8 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) + { +- unsigned int phy_port, phy_id; ++ unsigned int phy_id; ++ int phy_port; + void __iomem *phy_base; + struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); + struct ipu6_device *isp = adev->isp; +-- +2.43.0 + diff --git a/patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch b/patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch new file mode 100644 index 0000000..be2cd47 --- /dev/null +++ b/patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch @@ -0,0 +1,252 @@ +From b1aaf42e3a0cf80f26a45502385469b674b17d66 Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Fri, 16 Jan 2026 01:01:26 -0700 +Subject: [PATCH 63/76] media: add ipu-acpi module to linux-drivers + +The ipu-acpi module is responsible for obtaining ACPI device platform +data (pdata) and is universally applicable to both IPU6/7 and +upstream/non-upstream. To better align with its purpose and usage, the +ipu-acpi module is being moved out of the linux_kernel and into the +linux-drivers[android/platform/main]. + +Specific changes include: + +- Struct ipu_isys_subdev_pdata and its internal structs + ipu_isys_subdev_info/ipu_isys_clk_mapping: + solely related to ACPI pdata, not tied to any specific IPU. 
+- IPU_SPDATA_NAME_LEN and related macros: + used exclusively for ACPI device definition and configuration. + +Signed-off-by: Dongcheng Yan +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 4 +-- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 26 +++++++++---------- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 26 +++++++++---------- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 10 +++---- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 2 +- + 5 files changed, 34 insertions(+), 34 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index ff29130..7ab02b7 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -651,8 +651,8 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; + const struct phy_reg **phy_config_regs; +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **subdevs, *sd_info; + int i; + + if (!spdata) { +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index b3ae1f1..709c582 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -141,7 +141,7 @@ static int isys_i2c_test(struct device *dev, void *priv) + + static struct + i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, +- struct ipu6_isys_subdev_info *sd_info) ++ struct ipu_isys_subdev_info *sd_info) + { + struct i2c_board_info *info = &sd_info->i2c.board_info; + struct isys_i2c_test test = { +@@ -200,7 +200,7 @@ unregister_subdev: + + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + static 
int isys_register_ext_subdev(struct ipu6_isys *isys, +- struct ipu6_isys_subdev_info *sd_info) ++ struct ipu_isys_subdev_info *sd_info) + { + struct i2c_adapter *adapter; + struct v4l2_subdev *sd; +@@ -274,7 +274,7 @@ skip_put_adapter: + } + + static int isys_unregister_ext_subdev(struct ipu6_isys *isys, +- struct ipu6_isys_subdev_info *sd_info) ++ struct ipu_isys_subdev_info *sd_info) + { + struct i2c_adapter *adapter; + struct i2c_client *client; +@@ -321,8 +321,8 @@ skip_put_adapter: + + static void isys_register_ext_subdevs(struct ipu6_isys *isys) + { +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + + if (!spdata) { + dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); +@@ -334,8 +334,8 @@ static void isys_register_ext_subdevs(struct ipu6_isys *isys) + + static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) + { +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + + if (!spdata) + return; +@@ -377,8 +377,8 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + #endif + unsigned int i; +@@ -437,8 +437,8 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct 
ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + #endif + unsigned int i, j; +@@ -508,8 +508,8 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu6_isys_subdev_info **sd_info; ++ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; ++ struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + #endif + unsigned int i, j; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 5e58d81..7d091da 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -64,10 +64,10 @@ struct ipu6_bus_device; + #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ + && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ + || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU6_SPDATA_NAME_LEN 20 +-#define IPU6_SPDATA_BDF_LEN 32 +-#define IPU6_SPDATA_GPIO_NUM 4 +-#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#define IPU_SPDATA_NAME_LEN 20 ++#define IPU_SPDATA_BDF_LEN 32 ++#define IPU_SPDATA_GPIO_NUM 4 ++#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif + + struct ltr_did { +@@ -201,10 +201,10 @@ struct ipu6_isys_subdev_i2c_info { + #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ + && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ + || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU6_SPDATA_NAME_LEN 20 +-#define IPU6_SPDATA_BDF_LEN 32 +-#define IPU6_SPDATA_GPIO_NUM 4 +-#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 ++#define IPU_SPDATA_NAME_LEN 20 ++#define IPU_SPDATA_BDF_LEN 32 ++#define 
IPU_SPDATA_GPIO_NUM 4 ++#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif + + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +@@ -243,7 +243,7 @@ struct ipu6_spdata_rep { + }; + #endif + +-struct ipu6_isys_subdev_info { ++struct ipu_isys_subdev_info { + struct ipu6_isys_csi2_config *csi2; + struct ipu6_isys_subdev_i2c_info i2c; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +@@ -255,14 +255,14 @@ struct ipu6_isys_subdev_info { + #endif + }; + +-struct ipu6_isys_clk_mapping { ++struct ipu_isys_clk_mapping { + struct clk_lookup clkdev_data; + char *platform_clock_name; + }; + +-struct ipu6_isys_subdev_pdata { +- struct ipu6_isys_subdev_info **subdevs; +- struct ipu6_isys_clk_mapping *clk_map; ++struct ipu_isys_subdev_pdata { ++ struct ipu_isys_subdev_info **subdevs; ++ struct ipu_isys_clk_mapping *clk_map; + }; + + struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index c740c22..8ae3612 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -388,7 +388,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_isys_internal_pdata *ipdata, + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata ++ struct ipu_isys_subdev_pdata *spdata + #endif + ) + { +@@ -396,7 +396,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_bus_device *isys_adev; + struct ipu6_isys_pdata *pdata; + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- struct ipu6_isys_subdev_pdata *acpi_pdata; ++ struct ipu_isys_subdev_pdata *acpi_pdata; + #endif + int ret; + +@@ -576,7 +576,7 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-static inline int match_spdata(struct ipu6_isys_subdev_info *sd, ++static inline int match_spdata(struct ipu_isys_subdev_info *sd, + const struct ipu6_spdata_rep *rep) + { + if (strcmp(sd->i2c.board_info.type, rep->name)) +@@ -595,10 +595,10 @@ static inline int match_spdata(struct ipu6_isys_subdev_info *sd, + } + + static void fixup_spdata(const void *spdata_rep, +- struct ipu6_isys_subdev_pdata *spdata) ++ struct ipu_isys_subdev_pdata *spdata) + { + const struct ipu6_spdata_rep *rep = spdata_rep; +- struct ipu6_isys_subdev_info **subdevs, *sd_info; ++ struct ipu_isys_subdev_info **subdevs, *sd_info; + + if (!spdata) + return; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +index 71d8cdf..786494d 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -339,7 +339,7 @@ struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; + #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +- struct ipu6_isys_subdev_pdata *spdata; ++ struct ipu_isys_subdev_pdata *spdata; + #endif + }; + +-- +2.43.0 + diff --git a/patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch b/patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch new file mode 100644 index 0000000..4c40916 --- /dev/null +++ b/patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch @@ -0,0 +1,56 @@ +From 9e6c9f97ca9a758f980bbe551abce94be050ac04 Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Fri, 16 Jan 2026 01:03:24 -0700 +Subject: [PATCH 64/76] media: pci: unregister i2c device to complete + ext_subdev_register + +Signed-off-by: Dongcheng Yan +Signed-off-by: zouxiaoh +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 4 ++-- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 7 ++++--- + 2 files changed, 6 insertions(+), 5 deletions(-) + +diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 709c582..df5de9a 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -250,8 +250,8 @@ static int isys_register_ext_subdev(struct ipu6_isys *isys, + client = isys_find_i2c_subdev(adapter, sd_info); + if (client) { + dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); +- rval = 0; +- goto skip_put_adapter; ++ i2c_unregister_device(client); ++ dev_dbg(&isys->adev->auxdev.dev, "Unregister device"); + } + + sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 8ae3612..e183f9c 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -434,16 +434,17 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!spdata) { + dev_dbg(&pdev->dev, "No subdevice info provided"); +- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, + isys_init_acpi_add_device); + pdata->spdata = acpi_pdata; + } else { + dev_dbg(&pdev->dev, "Subdevice info found"); +- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, + isys_init_acpi_add_device); + } ++ if (ret) ++ return ERR_PTR(ret); + #endif +- + isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, + &ipdata->hw_variant); + if (IS_ERR(isys_adev->mmu)) { +-- +2.43.0 + diff --git a/patches/0065-media-pci-add-missing-if-for-PDATA.patch b/patches/0065-media-pci-add-missing-if-for-PDATA.patch new file mode 100644 index 0000000..df7dd85 --- /dev/null +++ b/patches/0065-media-pci-add-missing-if-for-PDATA.patch @@ -0,0 +1,114 @@ +From 
0dc67e20a257bc43749e93d9533e963074f4e682 Mon Sep 17 00:00:00 2001 +From: "Signed-off-by: zouxiaoh" +Date: Fri, 16 Jan 2026 01:26:58 -0700 +Subject: [PATCH 65/76] media: pci: add missing #if for PDATA + +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c | 5 ++++- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 2 ++ + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 8 ++++---- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 7 ++++--- + 4 files changed, 14 insertions(+), 8 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index 9523cc6..a3bd109 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -21,8 +21,9 @@ + #include + #include + #include ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #include +- ++#endif + #include "ipu6.h" + #include "ipu6-bus.h" + #include "ipu6-dma.h" +@@ -775,6 +776,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + } + EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); + ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + /* + * The dev_id was hard code in platform data, as i2c bus number + * may change dynamiclly, we need to update this bus id +@@ -816,6 +818,7 @@ int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) + } + EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, "INTEL_IPU6"); + ++#endif + void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) + { + u32 tsc_hi_1, tsc_hi_2, tsc_lo; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 6fed522..d6884c7 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -82,5 +82,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); + void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 
legacy, u32 combo); + void ipu6_buttress_restore(struct ipu6_device *isp); ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); ++#endif + #endif /* IPU6_BUTTRESS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index df5de9a..4f2c86f 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1131,16 +1131,16 @@ static int isys_register_devices(struct ipu6_isys *isys) + #else + isys_register_ext_subdevs(isys); + #endif +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); + if (ret) + goto out_isys_unregister_ext_subdevs; +- ++#endif + return 0; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + out_isys_unregister_ext_subdevs: + isys_unregister_ext_subdevs(isys); +- ++#endif + out_isys_unregister_subdevices: + isys_csi2_unregister_subdevices(isys); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 7d091da..76a452f 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -10,8 +10,9 @@ + #include + #include + #include ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + #include +- ++#endif + #include + #include + #include +@@ -191,13 +192,13 @@ struct isys_fw_msgs { + struct list_head head; + dma_addr_t dma_addr; + }; +- ++#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + struct ipu6_isys_subdev_i2c_info { + struct i2c_board_info board_info; + int i2c_adapter_id; + char i2c_adapter_bdf[32]; + }; +- ++#endif + #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ + && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ + || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-- +2.43.0 + diff --git 
a/patches/0066-media-pci-refine-PDATA-related-config.patch b/patches/0066-media-pci-refine-PDATA-related-config.patch new file mode 100644 index 0000000..9161e80 --- /dev/null +++ b/patches/0066-media-pci-refine-PDATA-related-config.patch @@ -0,0 +1,527 @@ +From ea2ce6bd736dcb0311811b92d3c3ff30dbb07743 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Fri, 16 Jan 2026 01:28:11 -0700 +Subject: [PATCH 66/76] media: pci: refine PDATA related config + +remove dynamic pdata related code; +replace CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA +with CONFIG_INTEL_IPU6_ACPI; + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-buttress.c | 5 +- + .../media/pci/intel/ipu6/ipu6-buttress.h | 2 +- + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 6 +- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 38 +++++----- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 60 ++------------- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 74 +------------------ + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 13 +--- + 7 files changed, 34 insertions(+), 164 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index a3bd109..c243142 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -21,7 +21,7 @@ + #include + #include + #include +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #endif + #include "ipu6.h" +@@ -775,8 +775,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + return -ETIMEDOUT; + } + EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); +- +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * The dev_id was hard code in platform data, as i2c bus number + * may change dynamiclly, we need to update this bus id +diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index d6884c7..8bc5eeb 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -82,7 +82,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); + void ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); + void ipu6_buttress_restore(struct ipu6_device *isp); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); + #endif + #endif /* IPU6_BUTTRESS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +index 7ab02b7..1cf33b5 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -567,7 +567,7 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) + return ret; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) + { + unsigned int phy_id; +@@ -641,7 +641,7 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + return ret; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) + { + unsigned int phy_id; +@@ -771,7 +771,7 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, + return ret; + + ipu6_isys_mcd_phy_reset(isys, phy_id, 0); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + ipu6_isys_mcd_phy_common_init(isys, cfg); + ret = ipu6_isys_mcd_phy_config(isys, cfg); + #else +diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 4f2c86f..25d01ea 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -28,7 +28,7 @@ + #include + #include + #include +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #include + #include +@@ -105,7 +105,7 @@ enum ltr_did_type { + }; + + #define ISYS_PM_QOS_VALUE 300 +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * The param was passed from module to indicate if port + * could be optimized. +@@ -113,9 +113,7 @@ enum ltr_did_type { + static bool csi2_port_optimized = true; + module_param(csi2_port_optimized, bool, 0660); + MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); +-#endif + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) + struct isys_i2c_test { + u8 bus_nr; + u16 addr; +@@ -198,7 +196,7 @@ unregister_subdev: + return ret; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int isys_register_ext_subdev(struct ipu6_isys *isys, + struct ipu_isys_subdev_info *sd_info) + { +@@ -376,7 +374,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); +@@ -384,7 +382,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + unsigned int i; + int ret; + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be 
optimized, which means only registered +@@ -409,7 +407,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) + #endif + + for (i = 0; i < csi2_pdata->nports; i++) { +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!test_bit(i, csi2_enable)) + continue; + #endif +@@ -436,7 +434,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; + struct device *dev = &isys->adev->auxdev.dev; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); +@@ -444,7 +442,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + unsigned int i, j; + int ret; + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered +@@ -469,7 +467,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) + #endif + + for (i = 0; i < csi2_pdata->nports; i++) { +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!test_bit(i, csi2_enable)) + continue; + #endif +@@ -507,7 +505,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + { + const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = + &isys->pdata->ipdata->csi2; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); +@@ -515,7 +513,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + unsigned int i, j; + int ret; + +-#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered +@@ -540,7 +538,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) + #endif + + for (i = 0; i < csi2_pdata->nports; i++) { +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!test_bit(i, csi2_enable)) + continue; + #endif +@@ -974,7 +972,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) + mutex_destroy(&iwake_watermark->mutex); + } + +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, +@@ -1124,20 +1122,20 @@ static int isys_register_devices(struct ipu6_isys *isys) + if (ret) + goto out_isys_unregister_subdevices; + +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + ret = isys_notifier_init(isys); + if (ret) + goto out_isys_unregister_subdevices; + #else + isys_register_ext_subdevs(isys); + #endif +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); + if (ret) + goto out_isys_unregister_ext_subdevs; + #endif + return 0; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + out_isys_unregister_ext_subdevs: + isys_unregister_ext_subdevs(isys); + #endif +@@ -1163,7 +1161,7 @@ static void isys_unregister_devices(struct ipu6_isys *isys) + { + isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + isys_unregister_ext_subdevs(isys); + 
#endif + v4l2_device_unregister(&isys->v4l2_dev); +@@ -1535,7 +1533,7 @@ static void isys_remove(struct auxiliary_device *auxdev) + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + isys_notifier_cleanup(isys); + #endif + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 76a452f..5c96f5b 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -10,7 +10,7 @@ + #include + #include + #include +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #endif + #include +@@ -62,11 +62,7 @@ struct ipu6_bus_device; + #define IPU6EP_MTL_LTR_VALUE 1023 + #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc + +-#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ +- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU_SPDATA_NAME_LEN 20 +-#define IPU_SPDATA_BDF_LEN 32 ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #define IPU_SPDATA_GPIO_NUM 4 + #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif +@@ -172,7 +168,7 @@ struct ipu6_isys { + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +-#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct v4l2_async_notifier notifier; + #endif + struct isys_iwake_watermark iwake_watermark; +@@ -192,66 +188,22 @@ struct isys_fw_msgs { + struct list_head head; + dma_addr_t dma_addr; + }; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu6_isys_subdev_i2c_info { + struct i2c_board_info board_info; + int i2c_adapter_id; + char i2c_adapter_bdf[32]; + }; + #endif +-#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- 
&& IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ +- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#define IPU_SPDATA_NAME_LEN 20 +-#define IPU_SPDATA_BDF_LEN 32 ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #define IPU_SPDATA_GPIO_NUM 4 + #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-/** +- * struct ipu6_spdata_rep - override subdev platform data +- * +- * @name: i2c_board_info.type +- * @i2c_adapter_bdf_o: old i2c adapter bdf +- * @slave_addr_o: old i2c slave address +- * @i2c_adapter_bdf_n: new i2c adapter bdf +- * @slave_addr_n: new i2c slave address +- * +- * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and +- * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n +- */ +-struct ipu6_spdata_rep { +- /* i2c old information */ +- char name[IPU6_SPDATA_NAME_LEN]; +- unsigned int port_o; +- char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; +- uint32_t slave_addr_o; +- +- /* i2c new information */ +- unsigned int port_n; +- char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; +- uint32_t slave_addr_n; +- +- /* sensor_platform */ +- unsigned int lanes; +- int gpios[IPU6_SPDATA_GPIO_NUM]; +- int irq_pin; +- unsigned int irq_pin_flags; +- char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; +- char suffix; +-}; +-#endif +- + struct ipu_isys_subdev_info { + struct ipu6_isys_csi2_config *csi2; +- struct ipu6_isys_subdev_i2c_info i2c; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- void (*fixup_spdata)(const void *spdata_rep, void *spdata); +-#endif + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ struct ipu6_isys_subdev_i2c_info i2c; + char *acpi_hid; + #endif + }; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index e183f9c..7a0443a 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -387,7 +387,7 @@ static struct ipu6_bus_device * + ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + struct ipu6_buttress_ctrl *ctrl, void __iomem *base, + const struct ipu6_isys_internal_pdata *ipdata, +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata + #endif + ) +@@ -412,7 +412,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + + pdata->base = base; + pdata->ipdata = ipdata; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + pdata->spdata = spdata; + #endif + /* Override the isys freq */ +@@ -575,59 +575,6 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-static inline int match_spdata(struct ipu_isys_subdev_info *sd, +- const struct ipu6_spdata_rep *rep) +-{ +- if (strcmp(sd->i2c.board_info.type, rep->name)) +- return 0; +- +- if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) +- return 0; +- +- if (sd->i2c.board_info.addr != rep->slave_addr_o) +- return 0; +- +- if (sd->csi2->port != rep->port_o) +- return 0; +- +- return 1; +-} +- +-static void fixup_spdata(const void *spdata_rep, +- struct ipu_isys_subdev_pdata *spdata) +-{ +- const struct ipu6_spdata_rep *rep = spdata_rep; +- struct ipu_isys_subdev_info **subdevs, *sd_info; +- +- if (!spdata) +- return; +- +- for (; rep->name[0]; rep++) { +- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { +- sd_info = *subdevs; +- +- if (!sd_info->csi2) +- continue; +- +- if (match_spdata(sd_info, rep)) { +- strcpy(sd_info->i2c.i2c_adapter_bdf, +- rep->i2c_adapter_bdf_n); +- sd_info->i2c.board_info.addr = +- rep->slave_addr_n; +- sd_info->csi2->port = rep->port_n; +- +- if (sd_info->fixup_spdata) +- 
sd_info->fixup_spdata(rep, +- sd_info->i2c.board_info.platform_data); +- } +- } +- } +-} +-#endif +-#endif +- + static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; +@@ -731,16 +678,6 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto out_ipu6_bus_del_devices; + } + +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); +- if (rval) +- dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); +- else +- fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); +-#endif +-#endif +- + isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, + sizeof(isys_buttress_ctrl), GFP_KERNEL); + if (!isys_ctrl) { +@@ -750,7 +687,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, + &isys_ipdata, +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + pdev->dev.platform_data + #endif + ); +@@ -852,11 +789,6 @@ out_ipu6_bus_del_devices: + ipu6_mmu_cleanup(isp->isys->mmu); + ipu6_bus_del_devices(pdev); + release_firmware(isp->cpd_fw); +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- release_firmware(isp->spdata_fw); +-#endif +-#endif + buttress_exit: + ipu6_buttress_exit(isp); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +index 786494d..f464a7b 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +@@ -23,12 +23,6 @@ struct ipu6_bus_device; + #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" + #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" + +-#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ +- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +-/* array of struct ipu6_spdata_rep terminated by NULL */ +-#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" +-#endif +- + enum ipu6_version { + IPU6_VER_INVALID = 0, + IPU6_VER_6 = 1, +@@ -86,11 +80,6 @@ struct ipu6_device { + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u32 cpd_metadata_cmpnt_size; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) +- const struct firmware *spdata_fw; +-#endif +-#endif + void __iomem *base; + #ifdef CONFIG_DEBUG_FS + struct dentry *ipu_dir; +@@ -338,7 +327,7 @@ struct ipu6_isys_internal_pdata { + struct ipu6_isys_pdata { + void __iomem *base; + const struct ipu6_isys_internal_pdata *ipdata; +-#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct ipu_isys_subdev_pdata *spdata; + #endif + }; +-- +2.43.0 + diff --git a/patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch b/patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch new file mode 100644 index 0000000..e1d5209 --- /dev/null +++ b/patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch @@ -0,0 +1,382 @@ +From fde4535bc9282dde7886f0fe24375acc5b35bf09 Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Fri, 16 Jan 2026 01:31:22 -0700 +Subject: [PATCH 67/76] kernel: align ACPI PDATA and ACPI fwnode build for ECG + +Signed-off-by: hepengpx +Signed-off-by: zouxiaoh +--- + .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 170 ++++++++---------- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 29 ++- + .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 4 - + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 14 +- + 4 files changed, 96 insertions(+), 121 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c 
+index 1cf33b5..1be7d75 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +@@ -567,49 +567,46 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) + return ret; + } + +-#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++static void ipu6_isys_write_mcd_phy_common_init_regs(struct ipu6_isys *isys, ++ u32 port) + { +- unsigned int phy_id; +- void __iomem *phy_base; +- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct ipu6_bus_device *adev = isys->adev; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; ++ void __iomem *phy_base; + unsigned int i; ++ u8 phy_id; + +- phy_id = cfg->port / 4; ++ phy_id = port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); + +- for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { ++ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) + writel(common_init_regs[i].val, +- phy_base + common_init_regs[i].reg); +- } +- +- return 0; ++ phy_base + common_init_regs[i].reg); + } +-#else +-static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) ++ ++static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ , struct ipu6_isys_csi2_config *csi2_cfg ++#endif ++) + { +- struct ipu6_bus_device *adev = isys->adev; +- struct ipu6_device *isp = adev->isp; +- void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct v4l2_async_connection *asc; +- void __iomem *phy_base; +- unsigned int i; +- u8 phy_id; + +- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { +- s_asd = container_of(asc, struct sensor_async_sd, asc); +- phy_id = s_asd->csi2.port / 4; +- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); +- +- for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) +- writel(common_init_regs[i].val, +- 
phy_base + common_init_regs[i].reg); ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ if (isys->pdata->spdata) { ++ ipu6_isys_write_mcd_phy_common_init_regs(isys, csi2_cfg->port); ++ } else { ++#endif ++ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { ++ s_asd = container_of(asc, struct sensor_async_sd, asc); ++ ipu6_isys_write_mcd_phy_common_init_regs(isys, s_asd->csi2.port); ++ } ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + } +-} + #endif ++} + + static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + { +@@ -641,96 +638,79 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) + return ret; + } + +-#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) ++static int ipu6_isys_write_mcd_phy_config_regs(struct ipu6_isys *isys, ++ struct ipu6_isys_csi2_config *cfg) + { +- unsigned int phy_id; +- int phy_port; +- void __iomem *phy_base; +- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); ++ struct device *dev = &isys->adev->auxdev.dev; ++ struct ipu6_bus_device *adev = isys->adev; ++ const struct phy_reg **phy_config_regs; + struct ipu6_device *isp = adev->isp; + void __iomem *isp_base = isp->base; +- const struct phy_reg **phy_config_regs; +- struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; +- struct ipu_isys_subdev_info **subdevs, *sd_info; +- int i; ++ void __iomem *phy_base; ++ int phy_port, phy_id; ++ unsigned int i; + +- if (!spdata) { +- dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); +- return -EINVAL; ++ phy_port = ipu6_isys_driver_port_to_phy_port(cfg); ++ if (phy_port < 0) { ++ dev_err(dev, "invalid port %d for lane %d", cfg->port, ++ cfg->nlanes); ++ return -ENXIO; + } + + phy_id = cfg->port / 4; + phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); +- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { +- sd_info = *subdevs; +- if (!sd_info->csi2) +- 
continue; +- +- phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); +- if (phy_port < 0) { +- dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", +- cfg->port, cfg->nlanes); +- return -ENXIO; +- } +- +- if ((sd_info->csi2->port / 4) != phy_id) +- continue; +- +- dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", +- phy_port, phy_id, cfg->nlanes); ++ dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg->port, phy_id, cfg->nlanes); + +- phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; +- +- for (i = 0; phy_config_regs[phy_port][i].reg; i++) { +- writel(phy_config_regs[phy_port][i].val, +- phy_base + phy_config_regs[phy_port][i].reg); +- } +- } ++ phy_config_regs = config_regs[cfg->nlanes / 2]; ++ for (i = 0; phy_config_regs[phy_port][i].reg; i++) ++ writel(phy_config_regs[phy_port][i].val, ++ phy_base + phy_config_regs[phy_port][i].reg); + + return 0; + } +-#else +-static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) ++ ++static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ , struct ipu6_isys_csi2_config *csi2_cfg ++#endif ++) + { +- struct device *dev = &isys->adev->auxdev.dev; +- struct ipu6_bus_device *adev = isys->adev; +- const struct phy_reg **phy_config_regs; +- struct ipu6_device *isp = adev->isp; +- void __iomem *isp_base = isp->base; + struct sensor_async_sd *s_asd; + struct ipu6_isys_csi2_config cfg; + struct v4l2_async_connection *asc; +- int phy_port, phy_id; +- unsigned int i; +- void __iomem *phy_base; ++ int ret = 0; + +- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { +- s_asd = container_of(asc, struct sensor_async_sd, asc); +- cfg.port = s_asd->csi2.port; +- cfg.nlanes = s_asd->csi2.nlanes; +- phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); +- if (phy_port < 0) { +- dev_err(dev, "invalid port %d for lane %d", cfg.port, +- cfg.nlanes); +- return -ENXIO; +- } ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ struct ipu_isys_subdev_pdata *spdata = 
isys->pdata->spdata; ++ struct ipu_isys_subdev_info **subdevs, *sd_info; + +- phy_id = cfg.port / 4; +- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); +- dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, +- cfg.nlanes); ++ if (spdata) { ++ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { ++ sd_info = *subdevs; ++ if (!sd_info->csi2 || ++ (sd_info->csi2->port / 4) != (csi2_cfg->port / 4)) ++ continue; + +- phy_config_regs = config_regs[cfg.nlanes / 2]; +- cfg.port = phy_port; +- for (i = 0; phy_config_regs[cfg.port][i].reg; i++) +- writel(phy_config_regs[cfg.port][i].val, +- phy_base + phy_config_regs[cfg.port][i].reg); ++ ret = ipu6_isys_write_mcd_phy_config_regs(isys, sd_info->csi2); ++ if (ret) ++ return ret; ++ } ++ } else { ++#endif ++ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { ++ s_asd = container_of(asc, struct sensor_async_sd, asc); ++ cfg.port = s_asd->csi2.port; ++ cfg.nlanes = s_asd->csi2.nlanes; ++ ret = ipu6_isys_write_mcd_phy_config_regs(isys, &cfg); ++ if (ret) ++ return ret; ++ } ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + } ++#endif + + return 0; + } +-#endif + + #define CSI_MCD_PHY_NUM 2 + static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 25d01ea..7d10583 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -28,14 +28,12 @@ + #include + #include + #include +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #include + #include + #include + #include + #include + #include +-#endif + + #include "ipu6-bus.h" + #include "ipu6-cpd.h" +@@ -972,7 +970,6 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) + mutex_destroy(&iwake_watermark->mutex); + } + +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier 
*notifier, + struct v4l2_subdev *sd, +@@ -1084,7 +1081,6 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) + v4l2_async_nf_unregister(&isys->notifier); + v4l2_async_nf_cleanup(&isys->notifier); + } +-#endif + + static int isys_register_devices(struct ipu6_isys *isys) + { +@@ -1122,17 +1118,19 @@ static int isys_register_devices(struct ipu6_isys *isys) + if (ret) + goto out_isys_unregister_subdevices; + +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- ret = isys_notifier_init(isys); +- if (ret) +- goto out_isys_unregister_subdevices; +-#else +- isys_register_ext_subdevs(isys); ++#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) ++ if (!isys->pdata->spdata) { + #endif ++ ret = isys_notifier_init(isys); ++ if (ret) ++ goto out_isys_unregister_subdevices; + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); +- if (ret) +- goto out_isys_unregister_ext_subdevs; ++ } else { ++ isys_register_ext_subdevs(isys); ++ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); ++ if (ret) ++ goto out_isys_unregister_ext_subdevs; ++ } + #endif + return 0; + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +@@ -1162,7 +1160,8 @@ static void isys_unregister_devices(struct ipu6_isys *isys) + isys_unregister_video_devices(isys); + isys_csi2_unregister_subdevices(isys); + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +- isys_unregister_ext_subdevs(isys); ++ if (isys->pdata->spdata) ++ isys_unregister_ext_subdevs(isys); + #endif + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); +@@ -1533,9 +1532,7 @@ static void isys_remove(struct auxiliary_device *auxdev) + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + isys_notifier_cleanup(isys); +-#endif + + cpu_latency_qos_remove_request(&isys->pm_qos); + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 5c96f5b..3768c48 100644 +--- 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -168,9 +168,7 @@ struct ipu6_isys { + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +-#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + struct v4l2_async_notifier notifier; +-#endif + struct isys_iwake_watermark iwake_watermark; + #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET + struct mutex reset_mutex; +@@ -194,8 +192,6 @@ struct ipu6_isys_subdev_i2c_info { + int i2c_adapter_id; + char i2c_adapter_bdf[32]; + }; +-#endif +-#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + #define IPU_SPDATA_GPIO_NUM 4 + #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + #endif +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 7a0443a..e469f47 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -400,11 +400,11 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + #endif + int ret; + +- // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); +- // if (ret) { +- // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); +- // return ERR_PTR(ret); +- // } ++ ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); ++ if (ret) { ++ dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); ++ return ERR_PTR(ret); ++ } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) +@@ -436,7 +436,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + dev_dbg(&pdev->dev, "No subdevice info provided"); + ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, + isys_init_acpi_add_device); +- pdata->spdata = acpi_pdata; ++ if (acpi_pdata && (*acpi_pdata->subdevs)) { ++ pdata->spdata = acpi_pdata; ++ } + } else { + dev_dbg(&pdev->dev, "Subdevice info found"); + ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, +-- +2.43.0 + diff --git 
a/patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch b/patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch new file mode 100644 index 0000000..95476db --- /dev/null +++ b/patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch @@ -0,0 +1,43 @@ +From 28c69edaca0d33d2617677592d8bc864fc607223 Mon Sep 17 00:00:00 2001 +From: Dongcheng Yan +Date: Fri, 16 Jan 2026 01:32:54 -0700 +Subject: [PATCH 68/76] upstream: Use module parameter to set psys freq + +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index e469f47..291b745 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -51,6 +51,10 @@ static unsigned int isys_freq_override; + module_param(isys_freq_override, uint, 0660); + MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); + ++static unsigned int psys_freq_override; ++module_param(psys_freq_override, uint, 0660); ++MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); ++ + #define IPU6_PCI_BAR 0 + + struct ipu6_cell_program { +@@ -483,6 +487,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, + pdata->base = base; + pdata->ipdata = ipdata; + ++ /* Override the isys freq */ ++ if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && ++ psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { ++ ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; ++ ctrl->qos_floor = psys_freq_override; ++ dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", ++ psys_freq_override); ++ } ++ + psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, + IPU6_PSYS_NAME); + if (IS_ERR(psys_adev)) { +-- +2.43.0 + diff --git a/patches/0069-media-pci-add-IPU6-PSYS-driver.patch b/patches/0069-media-pci-add-IPU6-PSYS-driver.patch new file mode 100644 index 0000000..1adcde1 --- /dev/null +++ 
b/patches/0069-media-pci-add-IPU6-PSYS-driver.patch @@ -0,0 +1,8335 @@ +From e4e0e82bb08efc972738b2cadea52b74595ae9f5 Mon Sep 17 00:00:00 2001 +From: zouxiaoh +Date: Fri, 16 Jan 2026 01:40:47 -0700 +Subject: [PATCH 69/77] media: pci: add IPU6 PSYS driver + +* media: intel/ipu6: Remove ipu6_buttress_ctrl started + +Signed-off-by: zouxiaoh +Signed-off-by: Florent Pirou +--- + 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 1 + + .../media/pci/intel/ipu6/psys/Makefile | 26 + + .../media/pci/intel/ipu6/psys/ipu-fw-psys.c | 433 +++++ + .../media/pci/intel/ipu6/psys/ipu-fw-psys.h | 382 ++++ + .../pci/intel/ipu6/psys/ipu-fw-resources.c | 103 ++ + .../pci/intel/ipu6/psys/ipu-platform-psys.h | 78 + + .../intel/ipu6/psys/ipu-platform-resources.h | 103 ++ + .../pci/intel/ipu6/psys/ipu-psys-compat32.c | 222 +++ + .../media/pci/intel/ipu6/psys/ipu-psys.c | 1599 +++++++++++++++++ + .../media/pci/intel/ipu6/psys/ipu-psys.h | 324 ++++ + .../media/pci/intel/ipu6/psys/ipu-resources.c | 861 +++++++++ + .../pci/intel/ipu6/psys/ipu6-fw-resources.c | 607 +++++++ + .../pci/intel/ipu6/psys/ipu6-l-scheduler.c | 621 +++++++ + .../intel/ipu6/psys/ipu6-platform-resources.h | 194 ++ + .../media/pci/intel/ipu6/psys/ipu6-ppg.c | 556 ++++++ + .../media/pci/intel/ipu6/psys/ipu6-ppg.h | 38 + + .../media/pci/intel/ipu6/psys/ipu6-psys-gpc.c | 209 +++ + .../media/pci/intel/ipu6/psys/ipu6-psys.c | 1054 +++++++++++ + .../pci/intel/ipu6/psys/ipu6ep-fw-resources.c | 393 ++++ + .../ipu6/psys/ipu6ep-platform-resources.h | 42 + + .../pci/intel/ipu6/psys/ipu6se-fw-resources.c | 194 ++ + .../ipu6/psys/ipu6se-platform-resources.h | 103 ++ + 22 files changed, 8143 insertions(+) + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c + create mode 100644 
6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c + create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +index e1e123b..67c13c9 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +@@ -22,3 +22,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-dwc-phy.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o ++obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile +new file mode 100644 +index 0000000..299cb04 +--- /dev/null ++++ 
b/6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile +@@ -0,0 +1,26 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ++ifeq ($(is_kernel_lt_6_10), 1) ++ifneq ($(EXTERNAL_BUILD), 1) ++src := $(srctree)/$(src) ++endif ++endif ++ ++intel-ipu6-psys-objs += ipu-psys.o \ ++ ipu-fw-psys.o \ ++ ipu-fw-resources.o \ ++ ipu-resources.o \ ++ ipu6-psys.o \ ++ ipu6-ppg.o \ ++ ipu6-l-scheduler.o \ ++ ipu6-fw-resources.o \ ++ ipu6se-fw-resources.o \ ++ ipu6ep-fw-resources.o ++ ++obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-psys.o ++ ++ifeq ($(is_kernel_lt_6_10), 1) ++ccflags-y += -I$(src)/../ipu6/ ++endif ++ccflags-y += -I$(src)/../ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c +new file mode 100644 +index 0000000..c89cd0c +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c +@@ -0,0 +1,433 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2016 - 2024 Intel Corporation ++ ++#include ++ ++#include ++ ++#include ++#include "ipu6-fw-com.h" ++#include "ipu-fw-psys.h" ++#include "ipu-psys.h" ++ ++int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) ++{ ++ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED; ++ return 0; ++} ++ ++int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_fw_psys_cmd *psys_cmd; ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ int ret = 0; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); ++ ++out: ++ return ret; ++} ++ ++int 
ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ int ret = 0; ++ ++ /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 1); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 1); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ int ret = 0; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ int ret = 0; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ ret = -ENODATA; ++ goto out; ++ } ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); ++ ++out: ++ return ret; ++} ++ ++int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) ++{ ++ kcmd->kpg->pg->state = 
IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; ++ return 0; ++} ++ ++int ipu_fw_psys_rcv_event(struct ipu_psys *psys, ++ struct ipu_fw_psys_event *event) ++{ ++ void *rcv; ++ ++ rcv = ipu6_recv_get_token(psys->fwcom, 0); ++ if (!rcv) ++ return 0; ++ ++ memcpy(event, rcv, sizeof(*event)); ++ ipu6_recv_put_token(psys->fwcom, 0); ++ return 1; ++} ++ ++int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, ++ struct ipu_psys_kcmd *kcmd, ++ u32 buffer, unsigned int size) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ u32 type; ++ u32 buffer_state; ++ ++ type = terminal->terminal_type; ++ ++ switch (type) { ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: ++ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: ++ buffer_state = IPU_FW_PSYS_BUFFER_FULL; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: ++ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; ++ break; ++ default: ++ dev_err(dev, "unknown terminal type: 0x%x\n", type); ++ return -EAGAIN; ++ } ++ ++ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || ++ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { ++ struct ipu_fw_psys_data_terminal *dterminal = ++ (struct ipu_fw_psys_data_terminal *)terminal; ++ dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY; ++ dterminal->frame.data_bytes = size; ++ if (!ipu_fw_psys_pg_get_protocol(kcmd)) ++ dterminal->frame.data = buffer; ++ else ++ dterminal->frame.data_index = terminal_idx; ++ 
dterminal->frame.buffer_state = buffer_state; ++ } else { ++ struct ipu_fw_psys_param_terminal *pterminal = ++ (struct ipu_fw_psys_param_terminal *)terminal; ++ if (!ipu_fw_psys_pg_get_protocol(kcmd)) ++ pterminal->param_payload.buffer = buffer; ++ else ++ pterminal->param_payload.terminal_index = terminal_idx; ++ } ++ return 0; ++} ++ ++void ipu_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note) ++{ ++ ipu6_fw_psys_pg_dump(psys, kcmd, note); ++} ++ ++int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->ID; ++} ++ ++int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->terminal_count; ++} ++ ++int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->size; ++} ++ ++int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, ++ dma_addr_t vaddress) ++{ ++ kcmd->kpg->pg->ipu_virtual_address = vaddress; ++ return 0; ++} ++ ++struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd ++ *kcmd, int index) ++{ ++ struct ipu_fw_psys_terminal *terminal; ++ u16 *terminal_offset_table; ++ ++ terminal_offset_table = ++ (uint16_t *)((char *)kcmd->kpg->pg + ++ kcmd->kpg->pg->terminals_offset); ++ terminal = (struct ipu_fw_psys_terminal *) ++ ((char *)kcmd->kpg->pg + terminal_offset_table[index]); ++ return terminal; ++} ++ ++void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) ++{ ++ kcmd->kpg->pg->token = (u64)token; ++} ++ ++u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->token; ++} ++ ++int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->protocol_version; ++} ++ ++int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, ++ struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, u32 buffer) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set; 
++ u32 buffer_state; ++ u32 *buffer_ptr; ++ u32 type; ++ ++ type = terminal->terminal_type; ++ ++ switch (type) { ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: ++ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: ++ buffer_state = IPU_FW_PSYS_BUFFER_FULL; ++ break; ++ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: ++ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: ++ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; ++ break; ++ default: ++ dev_err(dev, "unknown terminal type: 0x%x\n", type); ++ return -EAGAIN; ++ } ++ ++ buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) + ++ terminal_idx * sizeof(*buffer_ptr)); ++ ++ *buffer_ptr = buffer; ++ ++ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || ++ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { ++ struct ipu_fw_psys_data_terminal *dterminal = ++ (struct ipu_fw_psys_data_terminal *)terminal; ++ dterminal->frame.buffer_state = buffer_state; ++ } ++ ++ return 0; ++} ++ ++size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd) ++{ ++ return (sizeof(struct ipu_fw_psys_buffer_set) + ++ kcmd->kpg->pg->terminal_count * sizeof(u32)); ++} ++ ++int ++ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 vaddress) ++{ ++ buf_set->ipu_virtual_address = vaddress; ++ return 0; ++} ++ ++int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 *kernel_enable_bitmap) ++{ ++ memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap, ++ sizeof(buf_set->kernel_enable_bitmap)); ++ 
return 0; ++} ++ ++struct ipu_fw_psys_buffer_set * ++ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, ++ void *kaddr, u32 frame_counter) ++{ ++ struct ipu_fw_psys_buffer_set *buffer_set = NULL; ++ unsigned int i; ++ ++ buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr; ++ ++ /* ++ * Set base struct members ++ */ ++ buffer_set->ipu_virtual_address = 0; ++ buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address; ++ buffer_set->frame_counter = frame_counter; ++ buffer_set->terminal_count = kcmd->kpg->pg->terminal_count; ++ ++ /* ++ * Initialize adjacent buffer addresses ++ */ ++ for (i = 0; i < buffer_set->terminal_count; i++) { ++ u32 *buffer = ++ (u32 *)((char *)buffer_set + ++ sizeof(*buffer_set) + sizeof(u32) * i); ++ ++ *buffer = 0; ++ } ++ ++ return buffer_set; ++} ++ ++int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; ++ struct ipu_fw_psys_cmd *psys_cmd; ++ unsigned int queue_id; ++ int ret = 0; ++ unsigned int size; ++ ++ if (ipu_ver == IPU6_VER_6SE) ++ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ else ++ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ queue_id = kcmd->kpg->pg->base_queue_id; ++ ++ if (queue_id >= size) ++ return -EINVAL; ++ ++ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, queue_id); ++ if (!psys_cmd) { ++ dev_err(dev, "%s failed to get token!\n", __func__); ++ kcmd->pg_user = NULL; ++ return -ENODATA; ++ } ++ ++ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN; ++ psys_cmd->msg = 0; ++ psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address; ++ ++ ipu6_send_put_token(kcmd->fh->psys->fwcom, queue_id); ++ ++ return ret; ++} ++ ++u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd) ++{ ++ return kcmd->kpg->pg->base_queue_id; ++} ++ ++void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id) ++{ ++ kcmd->kpg->pg->base_queue_id = queue_id; ++} ++ ++int ipu_fw_psys_open(struct ipu_psys 
*psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int retry = IPU_PSYS_OPEN_RETRY, retval; ++ ++ retval = ipu6_fw_com_open(psys->fwcom); ++ if (retval) { ++ dev_err(dev, "fw com open failed.\n"); ++ return retval; ++ } ++ ++ do { ++ usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, ++ IPU_PSYS_OPEN_TIMEOUT_US + 10); ++ retval = ipu6_fw_com_ready(psys->fwcom); ++ if (retval) { ++ dev_dbg(dev, "psys port open ready!\n"); ++ break; ++ } ++ retry--; ++ } while (retry > 0); ++ ++ if (!retry) { ++ dev_err(dev, "psys port open ready failed\n"); ++ ipu6_fw_com_close(psys->fwcom); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++int ipu_fw_psys_close(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int retval; ++ ++ retval = ipu6_fw_com_close(psys->fwcom); ++ if (retval) { ++ dev_err(dev, "fw com close failed.\n"); ++ return retval; ++ } ++ return retval; ++} +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h +new file mode 100644 +index 0000000..492a9ea +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h +@@ -0,0 +1,382 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2016 - 2024 Intel Corporation */ ++ ++#ifndef IPU_FW_PSYS_H ++#define IPU_FW_PSYS_H ++ ++#include "ipu6-platform-resources.h" ++#include "ipu6se-platform-resources.h" ++#include "ipu6ep-platform-resources.h" ++ ++#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20 ++#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40 ++ ++#define IPU_FW_PSYS_CMD_BITS 64 ++#define IPU_FW_PSYS_EVENT_BITS 128 ++ ++enum { ++ IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0, ++ IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, ++ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, ++ 
IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12, ++ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13 ++}; ++ ++enum { ++ IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID, ++ IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID ++}; ++ ++enum { ++ IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0, ++ IPU_FW_PSYS_PROCESS_GROUP_CREATED, ++ IPU_FW_PSYS_PROCESS_GROUP_READY, ++ IPU_FW_PSYS_PROCESS_GROUP_BLOCKED, ++ IPU_FW_PSYS_PROCESS_GROUP_STARTED, ++ IPU_FW_PSYS_PROCESS_GROUP_RUNNING, ++ IPU_FW_PSYS_PROCESS_GROUP_STALLED, ++ IPU_FW_PSYS_PROCESS_GROUP_STOPPED, ++ IPU_FW_PSYS_N_PROCESS_GROUP_STATES ++}; ++ ++enum { ++ IPU_FW_PSYS_CONNECTION_MEMORY = 0, ++ IPU_FW_PSYS_CONNECTION_MEMORY_STREAM, ++ IPU_FW_PSYS_CONNECTION_STREAM, ++ IPU_FW_PSYS_N_CONNECTION_TYPES ++}; ++ ++enum { ++ IPU_FW_PSYS_BUFFER_NULL = 0, ++ IPU_FW_PSYS_BUFFER_UNDEFINED, ++ IPU_FW_PSYS_BUFFER_EMPTY, ++ IPU_FW_PSYS_BUFFER_NONEMPTY, ++ IPU_FW_PSYS_BUFFER_FULL, ++ IPU_FW_PSYS_N_BUFFER_STATES ++}; ++ ++enum { ++ IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0, ++ IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN, ++ IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT, ++ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM, ++ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, ++ IPU_FW_PSYS_N_TERMINAL_TYPES ++}; ++ ++enum { ++ IPU_FW_PSYS_COL_DIMENSION = 0, ++ IPU_FW_PSYS_ROW_DIMENSION = 1, ++ IPU_FW_PSYS_N_DATA_DIMENSION = 2 ++}; ++ ++enum { ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0, ++ 
IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_START, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, ++ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, ++ IPU_FW_PSYS_N_PROCESS_GROUP_CMDS ++}; ++ ++enum { ++ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, ++ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG, ++ IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS ++}; ++ ++struct __packed ipu_fw_psys_process_group { ++ u64 token; ++ u64 private_token; ++ u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; ++ u32 kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS]; ++ u32 size; ++ u32 psys_server_init_cycles; ++ u32 pg_load_start_ts; ++ u32 pg_load_cycles; ++ u32 pg_init_cycles; ++ u32 pg_processing_cycles; ++ u32 pg_next_frame_init_cycles; ++ u32 pg_complete_cycles; ++ u32 ID; ++ u32 state; ++ u32 ipu_virtual_address; ++ u32 resource_bitmap; ++ u16 fragment_count; ++ u16 fragment_state; ++ u16 fragment_limit; ++ u16 processes_offset; ++ u16 terminals_offset; ++ u8 process_count; ++ u8 terminal_count; ++ u8 subgraph_count; ++ u8 protocol_version; ++ u8 base_queue_id; ++ u8 num_queues; ++ u8 mask_irq; ++ u8 error_handling_enable; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; ++}; ++ ++struct ipu_fw_psys_srv_init { ++ void *host_ddr_pkg_dir; ++ u32 ddr_pkg_dir_address; ++ u32 pkg_dir_size; ++ ++ u32 icache_prefetch_sp; ++ u32 icache_prefetch_isp; ++}; ++ ++struct __packed ipu_fw_psys_cmd { ++ u16 command; ++ u16 msg; ++ u32 context_handle; ++}; ++ ++struct __packed ipu_fw_psys_event { ++ u16 status; ++ u16 command; ++ u32 context_handle; ++ u64 token; ++}; ++ ++struct ipu_fw_psys_terminal { ++ u32 terminal_type; ++ s16 parent_offset; ++ u16 size; ++ u16 tm_index; ++ u8 ID; ++ u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; ++}; ++ ++struct ipu_fw_psys_param_payload { ++ u64 host_buffer; ++ u32 buffer; ++ u32 terminal_index; ++}; ++ ++struct ipu_fw_psys_param_terminal { ++ struct ipu_fw_psys_terminal base; ++ struct ipu_fw_psys_param_payload param_payload; ++ u16 param_section_desc_offset; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; ++}; ++ ++struct ipu_fw_psys_frame { ++ u32 buffer_state; ++ u32 access_type; ++ u32 pointer_state; ++ u32 access_scope; ++ u32 data; ++ u32 data_index; ++ u32 data_bytes; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; ++}; ++ ++struct ipu_fw_psys_frame_descriptor { ++ u32 frame_format_type; ++ u32 plane_count; ++ u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; ++ u32 stride[1]; ++ u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; ++ u16 dimension[2]; ++ u16 size; ++ u8 bpp; ++ u8 bpe; ++ u8 is_compressed; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; ++}; ++ ++struct ipu_fw_psys_stream { ++ u64 dummy; ++}; ++ ++struct ipu_fw_psys_data_terminal { ++ struct ipu_fw_psys_terminal base; ++ struct ipu_fw_psys_frame_descriptor frame_descriptor; ++ struct ipu_fw_psys_frame frame; ++ struct ipu_fw_psys_stream stream; ++ u32 reserved; ++ u32 connection_type; ++ u16 fragment_descriptor_offset; ++ u8 kernel_id; ++ u8 subgraph_id; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; ++}; ++ ++struct ipu_fw_psys_buffer_set { ++ u64 token; ++ u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS]; ++ u32 ipu_virtual_address; ++ u32 process_group_handle; ++ u16 terminal_count; ++ u8 frame_counter; ++ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; ++}; ++ ++struct ipu_fw_psys_pgm { ++ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ u32 ID; ++ u16 
program_manifest_offset; ++ u16 terminal_manifest_offset; ++ u16 private_data_offset; ++ u16 rbm_manifest_offset; ++ u16 size; ++ u8 alignment; ++ u8 kernel_count; ++ u8 program_count; ++ u8 terminal_count; ++ u8 subgraph_count; ++ u8 reserved[5]; ++}; ++ ++struct ipu_fw_generic_program_manifest { ++ u16 *dev_chn_size; ++ u16 *dev_chn_offset; ++ u16 *ext_mem_size; ++ u16 *ext_mem_offset; ++ u8 cell_id; ++ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; ++ u8 cell_type_id; ++ u8 *is_dfm_relocatable; ++ u32 *dfm_port_bitmap; ++ u32 *dfm_active_port_bitmap; ++}; ++ ++struct ipu_fw_resource_definitions { ++ u32 num_cells; ++ u32 num_cells_type; ++ const u8 *cells; ++ u32 num_dev_channels; ++ const u16 *dev_channels; ++ ++ u32 num_ext_mem_types; ++ u32 num_ext_mem_ids; ++ const u16 *ext_mem_ids; ++ ++ u32 num_dfm_ids; ++ const u16 *dfms; ++ ++ u32 cell_mem_row; ++ const u8 *cell_mem; ++}; ++ ++struct ipu6_psys_hw_res_variant { ++ unsigned int queue_num; ++ unsigned int cell_num; ++ int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value); ++ int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, u32 active_bitmap); ++ int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset); ++ int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process); ++}; ++ ++struct ipu_psys_kcmd; ++struct ipu_psys; ++int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); ++int 
ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_rcv_event(struct ipu_psys *psys, ++ struct ipu_fw_psys_event *event); ++int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, ++ struct ipu_psys_kcmd *kcmd, ++ u32 buffer, unsigned int size); ++void ipu_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note); ++int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, ++ dma_addr_t vaddress); ++struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd ++ *kcmd, int index); ++void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); ++u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, ++ struct ipu_fw_psys_terminal *terminal, ++ int terminal_idx, u32 buffer); ++size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd); ++int ++ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 vaddress); ++int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, ++ u32 *kernel_enable_bitmap); ++struct ipu_fw_psys_buffer_set * ++ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, ++ void *kaddr, u32 frame_counter); ++int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd); ++u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd); ++void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id); ++int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); ++int ipu_fw_psys_open(struct 
ipu_psys *psys); ++int ipu_fw_psys_close(struct ipu_psys *psys); ++ ++/* common resource interface for both abi and api mode */ ++int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, ++ u8 value); ++u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); ++int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); ++int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value); ++int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset); ++int ++ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process); ++int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value); ++int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap); ++int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset); ++int ++ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process); ++void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note); ++void ipu6_psys_hw_res_variant_init(void); ++#endif /* IPU_FW_PSYS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c +new file mode 100644 +index 0000000..b6af0b7 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c +@@ -0,0 +1,103 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6-platform-resources.h" ++#include "ipu6se-platform-resources.h" ++ ++/********** Generic resource 
handling **********/ ++ ++/* ++ * Extension library gives byte offsets to its internal structures. ++ * use those offsets to update fields. Without extension lib access ++ * structures directly. ++ */ ++static const struct ipu6_psys_hw_res_variant *var = &hw_var; ++ ++int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, ++ u8 value) ++{ ++ struct ipu_fw_psys_process_group *parent = ++ (struct ipu_fw_psys_process_group *)((char *)ptr + ++ ptr->parent_offset); ++ ++ ptr->cells[index] = value; ++ parent->resource_bitmap |= 1 << value; ++ ++ return 0; ++} ++ ++u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) ++{ ++ return ptr->cells[index]; ++} ++ ++int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) ++{ ++ struct ipu_fw_psys_process_group *parent; ++ u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); ++ int retval = -1; ++ u8 value; ++ ++ parent = (struct ipu_fw_psys_process_group *)((char *)ptr + ++ ptr->parent_offset); ++ ++ value = var->cell_num; ++ if ((1 << cell_id) != 0 && ++ ((1 << cell_id) & parent->resource_bitmap)) { ++ ipu_fw_psys_set_process_cell_id(ptr, 0, value); ++ parent->resource_bitmap &= ~(1 << cell_id); ++ retval = 0; ++ } ++ ++ return retval; ++} ++ ++int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value) ++{ ++ if (var->set_proc_dev_chn) ++ return var->set_proc_dev_chn(ptr, offset, value); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ ++int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap) ++{ ++ if (var->set_proc_dfm_bitmap) ++ return var->set_proc_dfm_bitmap(ptr, id, bitmap, ++ active_bitmap); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ ++int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset) ++{ ++ if (var->set_proc_ext_mem) ++ return 
var->set_proc_ext_mem(ptr, type_id, mem_id, offset); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ ++int ++ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process) ++{ ++ if (var->get_pgm_by_proc) ++ return var->get_pgm_by_proc(gen_pm, pg_manifest, process); ++ ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ return 0; ++} ++ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h +new file mode 100644 +index 0000000..dc2cae3 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h +@@ -0,0 +1,78 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2020 - 2024 Intel Corporation */ ++ ++#ifndef IPU_PLATFORM_PSYS_H ++#define IPU_PLATFORM_PSYS_H ++ ++#include "ipu-psys.h" ++#include ++ ++#define IPU_PSYS_BUF_SET_POOL_SIZE 8 ++#define IPU_PSYS_BUF_SET_MAX_SIZE 1024 ++ ++struct ipu_fw_psys_buffer_set; ++ ++enum ipu_psys_cmd_state { ++ KCMD_STATE_PPG_NEW, ++ KCMD_STATE_PPG_START, ++ KCMD_STATE_PPG_ENQUEUE, ++ KCMD_STATE_PPG_STOP, ++ KCMD_STATE_PPG_COMPLETE ++}; ++ ++struct ipu_psys_scheduler { ++ struct list_head ppgs; ++ struct mutex bs_mutex; /* Protects buf_set field */ ++ struct list_head buf_sets; ++}; ++ ++enum ipu_psys_ppg_state { ++ PPG_STATE_START = (1 << 0), ++ PPG_STATE_STARTING = (1 << 1), ++ PPG_STATE_STARTED = (1 << 2), ++ PPG_STATE_RUNNING = (1 << 3), ++ PPG_STATE_SUSPEND = (1 << 4), ++ PPG_STATE_SUSPENDING = (1 << 5), ++ PPG_STATE_SUSPENDED = (1 << 6), ++ PPG_STATE_RESUME = (1 << 7), ++ PPG_STATE_RESUMING = (1 << 8), ++ PPG_STATE_RESUMED = (1 << 9), ++ PPG_STATE_STOP = (1 << 10), ++ PPG_STATE_STOPPING = (1 << 11), ++ PPG_STATE_STOPPED = (1 << 12), ++}; ++ ++struct ipu_psys_ppg { ++ struct ipu_psys_pg *kpg; ++ struct ipu_psys_fh *fh; ++ struct list_head list; ++ struct list_head 
sched_list; ++ u64 token; ++ void *manifest; ++ struct mutex mutex; /* Protects kcmd and ppg state field */ ++ struct list_head kcmds_new_list; ++ struct list_head kcmds_processing_list; ++ struct list_head kcmds_finished_list; ++ enum ipu_psys_ppg_state state; ++ u32 pri_base; ++ int pri_dynamic; ++}; ++ ++struct ipu_psys_buffer_set { ++ struct list_head list; ++ struct ipu_fw_psys_buffer_set *buf_set; ++ size_t size; ++ size_t buf_set_size; ++ dma_addr_t dma_addr; ++ void *kaddr; ++ struct ipu_psys_kcmd *kcmd; ++}; ++ ++int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd); ++void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, ++ struct ipu_psys_kcmd *kcmd, ++ int error); ++int ipu_psys_fh_init(struct ipu_psys_fh *fh); ++int ipu_psys_fh_deinit(struct ipu_psys_fh *fh); ++ ++#endif /* IPU_PLATFORM_PSYS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h +new file mode 100644 +index 0000000..1f064dc +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2018 - 2024 Intel Corporation */ ++ ++#ifndef IPU_PLATFORM_RESOURCES_COMMON_H ++#define IPU_PLATFORM_RESOURCES_COMMON_H ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 0 ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 2 ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT 2 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 ++#define IPU_FW_PSYS_N_FRAME_PLANES 6 ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 ++ ++#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 1 ++ 
++#define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES 4 ++#define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES 4 ++ ++#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 ++#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 4 ++#define IPU_FW_PSYS_RBM_NOF_ELEMS 5 ++#define IPU_FW_PSYS_KBM_NOF_ELEMS 4 ++ ++struct ipu_fw_psys_process { ++ s16 parent_offset; ++ u8 size; ++ u8 cell_dependencies_offset; ++ u8 terminal_dependencies_offset; ++ u8 process_extension_offset; ++ u8 ID; ++ u8 program_idx; ++ u8 state; ++ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; ++ u8 cell_dependency_count; ++ u8 terminal_dependency_count; ++}; ++ ++struct ipu_fw_psys_program_manifest { ++ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; ++ s16 parent_offset; ++ u8 program_dependency_offset; ++ u8 terminal_dependency_offset; ++ u8 size; ++ u8 program_extension_offset; ++ u8 program_type; ++ u8 ID; ++ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; ++ u8 cell_type_id; ++ u8 program_dependency_count; ++ u8 terminal_dependency_count; ++}; ++ ++/* platform specific resource interface */ ++struct ipu_psys_resource_pool; ++struct ipu_psys_resource_alloc; ++struct ipu_fw_psys_process_group; ++int ipu_psys_allocate_resources(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *pool); ++int ipu_psys_move_resources(const struct device *dev, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *source_pool, ++ struct ipu_psys_resource_pool *target_pool); ++ ++void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, ++ struct ipu_psys_resource_pool *dest); ++ ++int ipu_psys_try_allocate_resources(struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_pool *pool); ++ ++void ipu_psys_reset_process_cell(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ int process_count); ++void ipu_psys_free_resources(struct 
ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *pool); ++ ++int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap); ++ ++int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool); ++void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, ++ u8 queue_id); ++ ++extern const struct ipu_fw_resource_definitions *ipu6_res_defs; ++extern const struct ipu_fw_resource_definitions *ipu6se_res_defs; ++extern const struct ipu_fw_resource_definitions *ipu6ep_res_defs; ++extern struct ipu6_psys_hw_res_variant hw_var; ++#endif /* IPU_PLATFORM_RESOURCES_COMMON_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c +new file mode 100644 +index 0000000..32350ca +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c +@@ -0,0 +1,222 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2013 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "ipu-psys.h" ++ ++static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ long ret = -ENOTTY; ++ ++ if (file->f_op->unlocked_ioctl) ++ ret = file->f_op->unlocked_ioctl(file, cmd, arg); ++ ++ return ret; ++} ++ ++struct ipu_psys_buffer32 { ++ u64 len; ++ union { ++ int fd; ++ compat_uptr_t userptr; ++ u64 reserved; ++ } base; ++ u32 data_offset; ++ u32 bytes_used; ++ u32 flags; ++ u32 reserved[2]; ++} __packed; ++ ++struct ipu_psys_command32 { ++ u64 issue_id; ++ u64 user_token; ++ u32 priority; ++ compat_uptr_t pg_manifest; ++ compat_uptr_t buffers; ++ int pg; ++ u32 pg_manifest_size; ++ u32 bufcount; ++ u32 min_psys_freq; ++ u32 frame_counter; ++ u32 reserved[2]; ++} __packed; ++ ++struct ipu_psys_manifest32 { ++ u32 index; ++ u32 size; ++ compat_uptr_t manifest; ++ u32 reserved[5]; ++} __packed; ++ ++static int ++get_ipu_psys_command32(struct 
ipu_psys_command *kp, ++ struct ipu_psys_command32 __user *up) ++{ ++ compat_uptr_t pgm, bufs; ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_command32)); ++ if (!access_ok || get_user(kp->issue_id, &up->issue_id) || ++ get_user(kp->user_token, &up->user_token) || ++ get_user(kp->priority, &up->priority) || ++ get_user(pgm, &up->pg_manifest) || ++ get_user(bufs, &up->buffers) || ++ get_user(kp->pg, &up->pg) || ++ get_user(kp->pg_manifest_size, &up->pg_manifest_size) || ++ get_user(kp->bufcount, &up->bufcount) || ++ get_user(kp->min_psys_freq, &up->min_psys_freq) || ++ get_user(kp->frame_counter, &up->frame_counter) ++ ) ++ return -EFAULT; ++ ++ kp->pg_manifest = compat_ptr(pgm); ++ kp->buffers = compat_ptr(bufs); ++ ++ return 0; ++} ++ ++static int ++get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, ++ struct ipu_psys_buffer32 __user *up) ++{ ++ compat_uptr_t ptr; ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); ++ if (!access_ok || get_user(kp->len, &up->len) || ++ get_user(ptr, &up->base.userptr) || ++ get_user(kp->data_offset, &up->data_offset) || ++ get_user(kp->bytes_used, &up->bytes_used) || ++ get_user(kp->flags, &up->flags)) ++ return -EFAULT; ++ ++ kp->base.userptr = compat_ptr(ptr); ++ ++ return 0; ++} ++ ++static int ++put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, ++ struct ipu_psys_buffer32 __user *up) ++{ ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); ++ if (!access_ok || put_user(kp->len, &up->len) || ++ put_user(kp->base.fd, &up->base.fd) || ++ put_user(kp->data_offset, &up->data_offset) || ++ put_user(kp->bytes_used, &up->bytes_used) || ++ put_user(kp->flags, &up->flags)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int ++get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, ++ struct ipu_psys_manifest32 __user *up) ++{ ++ compat_uptr_t ptr; ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); ++ if 
(!access_ok || get_user(kp->index, &up->index) || ++ get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) ++ return -EFAULT; ++ ++ kp->manifest = compat_ptr(ptr); ++ ++ return 0; ++} ++ ++static int ++put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, ++ struct ipu_psys_manifest32 __user *up) ++{ ++ compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); ++ bool access_ok; ++ ++ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); ++ if (!access_ok || put_user(kp->index, &up->index) || ++ put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) ++#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) ++#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) ++#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) ++#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) ++ ++long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ union { ++ struct ipu_psys_buffer buf; ++ struct ipu_psys_command cmd; ++ struct ipu_psys_event ev; ++ struct ipu_psys_manifest m; ++ } karg; ++ int compatible_arg = 1; ++ int err = 0; ++ void __user *up = compat_ptr(arg); ++ ++ switch (cmd) { ++ case IPU_IOC_GETBUF32: ++ cmd = IPU_IOC_GETBUF; ++ break; ++ case IPU_IOC_PUTBUF32: ++ cmd = IPU_IOC_PUTBUF; ++ break; ++ case IPU_IOC_QCMD32: ++ cmd = IPU_IOC_QCMD; ++ break; ++ case IPU_IOC_GET_MANIFEST32: ++ cmd = IPU_IOC_GET_MANIFEST; ++ break; ++ } ++ ++ switch (cmd) { ++ case IPU_IOC_GETBUF: ++ case IPU_IOC_PUTBUF: ++ err = get_ipu_psys_buffer32(&karg.buf, up); ++ compatible_arg = 0; ++ break; ++ case IPU_IOC_QCMD: ++ err = get_ipu_psys_command32(&karg.cmd, up); ++ compatible_arg = 0; ++ break; ++ case IPU_IOC_GET_MANIFEST: ++ err = get_ipu_psys_manifest32(&karg.m, up); ++ compatible_arg = 0; ++ break; ++ } ++ if (err) ++ return err; ++ ++ if (compatible_arg) { ++ err 
= native_ioctl(file, cmd, (unsigned long)up); ++ } else { ++ err = native_ioctl(file, cmd, (unsigned long)&karg); ++ } ++ ++ if (err) ++ return err; ++ ++ switch (cmd) { ++ case IPU_IOC_GETBUF: ++ err = put_ipu_psys_buffer32(&karg.buf, up); ++ break; ++ case IPU_IOC_GET_MANIFEST: ++ err = put_ipu_psys_manifest32(&karg.m, up); ++ break; ++ } ++ return err; ++} +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +new file mode 100644 +index 0000000..dcd0dce +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -0,0 +1,1599 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2013 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ipu6.h" ++#include "ipu6-mmu.h" ++#include "ipu6-bus.h" ++#include "ipu6-buttress.h" ++#include "ipu6-cpd.h" ++#include "ipu-fw-psys.h" ++#include "ipu-psys.h" ++#include "ipu6-platform-regs.h" ++#include "ipu6-fw-com.h" ++ ++static bool async_fw_init; ++module_param(async_fw_init, bool, 0664); ++MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); ++ ++#define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7 ++ ++#define IPU_PSYS_NUM_DEVICES 4 ++ ++#define IPU_PSYS_MAX_NUM_DESCS 1024 ++#define IPU_PSYS_MAX_NUM_BUFS 1024 ++#define IPU_PSYS_MAX_NUM_BUFS_LRU 12 ++ ++static int psys_runtime_pm_resume(struct device *dev); ++static int psys_runtime_pm_suspend(struct device *dev); ++ ++static dev_t ipu_psys_dev_t; ++static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); ++static DEFINE_MUTEX(ipu_psys_mutex); ++ ++static struct fw_init_task { ++ struct delayed_work work; ++ struct ipu_psys *psys; ++} fw_init_task; ++ ++static void ipu6_psys_remove(struct auxiliary_device *auxdev); ++ ++static const struct bus_type 
ipu_psys_bus = { ++ .name = "intel-ipu6-psys", ++}; ++#define PKG_DIR_ENT_LEN_FOR_PSYS 2 ++#define PKG_DIR_SIZE_MASK_FOR_PSYS GENMASK(23, 0) ++ ++enum ipu6_version ipu_ver; ++ ++static u32 ipu6_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) ++{ ++ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS]; ++} ++ ++static u32 ipu6_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) ++{ ++ return pkg_dir[1]; ++} ++ ++static u32 ipu6_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) ++{ ++ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] & ++ PKG_DIR_SIZE_MASK_FOR_PSYS; ++} ++ ++#define PKG_DIR_ID_SHIFT 48 ++#define PKG_DIR_ID_MASK 0x7f ++ ++static u32 ipu6_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) ++{ ++ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] >> ++ PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; ++} ++ ++/* ++ * These are some trivial wrappers that save us from open-coding some ++ * common patterns and also that's were we have some checking (for the ++ * time being) ++ */ ++static void ipu_desc_add(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) ++{ ++ fh->num_descs++; ++ ++ WARN_ON_ONCE(fh->num_descs >= IPU_PSYS_MAX_NUM_DESCS); ++ list_add(&desc->list, &fh->descs_list); ++} ++ ++static void ipu_desc_del(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) ++{ ++ fh->num_descs--; ++ list_del_init(&desc->list); ++} ++ ++static void ipu_buffer_add(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs++; ++ ++ WARN_ON_ONCE(fh->num_bufs >= IPU_PSYS_MAX_NUM_BUFS); ++ list_add(&kbuf->list, &fh->bufs_list); ++} ++ ++static void ipu_buffer_del(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs--; ++ list_del_init(&kbuf->list); ++} ++ ++static void ipu_buffer_lru_add(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs_lru++; ++ list_add_tail(&kbuf->list, &fh->bufs_lru); ++} ++ ++static void ipu_buffer_lru_del(struct ipu_psys_fh *fh, ++ struct 
ipu_psys_kbuffer *kbuf) ++{ ++ fh->num_bufs_lru--; ++ list_del_init(&kbuf->list); ++} ++ ++static struct ipu_psys_kbuffer *ipu_psys_kbuffer_alloc(void) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); ++ if (!kbuf) ++ return NULL; ++ ++ atomic_set(&kbuf->map_count, 0); ++ INIT_LIST_HEAD(&kbuf->list); ++ return kbuf; ++} ++ ++static struct ipu_psys_desc *ipu_psys_desc_alloc(int fd) ++{ ++ struct ipu_psys_desc *desc; ++ ++ desc = kzalloc(sizeof(*desc), GFP_KERNEL); ++ if (!desc) ++ return NULL; ++ ++ desc->fd = fd; ++ INIT_LIST_HEAD(&desc->list); ++ return desc; ++} ++ ++struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) ++{ ++ struct ipu_psys_pg *kpg; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ list_for_each_entry(kpg, &psys->pgs, list) { ++ if (!kpg->pg_size && kpg->size >= pg_size) { ++ kpg->pg_size = pg_size; ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ return kpg; ++ } ++ } ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ /* no big enough buffer available, allocate new one */ ++ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); ++ if (!kpg) ++ return NULL; ++ ++ kpg->pg = ipu6_dma_alloc(psys->adev, pg_size, &kpg->pg_dma_addr, ++ GFP_KERNEL, 0); ++ if (!kpg->pg) { ++ kfree(kpg); ++ return NULL; ++ } ++ ++ kpg->pg_size = pg_size; ++ kpg->size = pg_size; ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ list_add(&kpg->list, &psys->pgs); ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ ++ return kpg; ++} ++ ++static struct ipu_psys_desc *psys_desc_lookup(struct ipu_psys_fh *fh, int fd) ++{ ++ struct ipu_psys_desc *desc; ++ ++ list_for_each_entry(desc, &fh->descs_list, list) { ++ if (desc->fd == fd) ++ return desc; ++ } ++ ++ return NULL; ++} ++ ++static bool dmabuf_cmp(struct dma_buf *lb, struct dma_buf *rb) ++{ ++ return lb == rb && lb->size == rb->size; ++} ++ ++static struct ipu_psys_kbuffer *psys_buf_lookup(struct ipu_psys_fh *fh, int fd) ++{ ++ struct 
ipu_psys_kbuffer *kbuf; ++ struct dma_buf *dma_buf; ++ ++ dma_buf = dma_buf_get(fd); ++ if (IS_ERR(dma_buf)) ++ return NULL; ++ ++ /* ++ * First lookup so-called `active` list, that is the list of ++ * referenced buffers ++ */ ++ list_for_each_entry(kbuf, &fh->bufs_list, list) { ++ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { ++ dma_buf_put(dma_buf); ++ return kbuf; ++ } ++ } ++ ++ /* ++ * We didn't find anything on the `active` list, try the LRU list ++ * (list of unreferenced buffers) and possibly resurrect a buffer ++ */ ++ list_for_each_entry(kbuf, &fh->bufs_lru, list) { ++ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { ++ dma_buf_put(dma_buf); ++ ipu_buffer_lru_del(fh, kbuf); ++ ipu_buffer_add(fh, kbuf); ++ return kbuf; ++ } ++ } ++ ++ dma_buf_put(dma_buf); ++ return NULL; ++} ++ ++struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) ++{ ++ struct ipu_psys_desc *desc; ++ ++ desc = psys_desc_lookup(fh, fd); ++ if (!desc) ++ return NULL; ++ ++ return desc->kbuf; ++} ++ ++struct ipu_psys_kbuffer * ++ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) ++{ ++ struct ipu_psys_kbuffer *kbuffer; ++ ++ list_for_each_entry(kbuffer, &fh->bufs_list, list) { ++ if (kbuffer->kaddr == kaddr) ++ return kbuffer; ++ } ++ ++ return NULL; ++} ++ ++static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) ++{ ++ struct vm_area_struct *vma; ++ unsigned long start, end; ++ int npages, array_size; ++ struct page **pages; ++ struct sg_table *sgt; ++ int ret = -ENOMEM; ++ int nr = 0; ++ u32 flags; ++ ++ start = attach->userptr; ++ end = PAGE_ALIGN(start + attach->len); ++ npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; ++ array_size = npages * sizeof(struct page *); ++ ++ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); ++ if (!sgt) ++ return -ENOMEM; ++ ++ WARN_ON_ONCE(attach->npages); ++ ++ pages = kvzalloc(array_size, GFP_KERNEL); ++ if (!pages) ++ goto free_sgt; ++ ++ mmap_read_lock(current->mm); ++ vma = vma_lookup(current->mm, start); ++ if 
(unlikely(!vma)) { ++ ret = -EFAULT; ++ goto error_up_read; ++ } ++ mmap_read_unlock(current->mm); ++ ++ flags = FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM; ++ nr = pin_user_pages_fast(start & PAGE_MASK, npages, ++ flags, pages); ++ if (nr < npages) ++ goto error; ++ ++ attach->pages = pages; ++ attach->npages = npages; ++ ++ ret = sg_alloc_table_from_pages(sgt, pages, npages, ++ start & ~PAGE_MASK, attach->len, ++ GFP_KERNEL); ++ if (ret < 0) ++ goto error; ++ ++ attach->sgt = sgt; ++ ++ return 0; ++ ++error_up_read: ++ mmap_read_unlock(current->mm); ++error: ++ if (nr) ++ unpin_user_pages(pages, nr); ++ kvfree(pages); ++free_sgt: ++ kfree(sgt); ++ ++ pr_err("failed to get userpages:%d\n", ret); ++ ++ return ret; ++} ++ ++static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) ++{ ++ if (!attach || !attach->userptr || !attach->sgt) ++ return; ++ ++ unpin_user_pages(attach->pages, attach->npages); ++ kvfree(attach->pages); ++ ++ sg_free_table(attach->sgt); ++ kfree(attach->sgt); ++ attach->sgt = NULL; ++} ++ ++static int ipu_dma_buf_attach(struct dma_buf *dbuf, ++ struct dma_buf_attachment *attach) ++{ ++ struct ipu_psys_kbuffer *kbuf = dbuf->priv; ++ struct ipu_dma_buf_attach *ipu_attach; ++ int ret; ++ ++ ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); ++ if (!ipu_attach) ++ return -ENOMEM; ++ ++ ipu_attach->len = kbuf->len; ++ ipu_attach->userptr = kbuf->userptr; ++ ++ ret = ipu_psys_get_userpages(ipu_attach); ++ if (ret) { ++ kfree(ipu_attach); ++ return ret; ++ } ++ ++ attach->priv = ipu_attach; ++ return 0; ++} ++ ++static void ipu_dma_buf_detach(struct dma_buf *dbuf, ++ struct dma_buf_attachment *attach) ++{ ++ struct ipu_dma_buf_attach *ipu_attach = attach->priv; ++ ++ ipu_psys_put_userpages(ipu_attach); ++ kfree(ipu_attach); ++ attach->priv = NULL; ++} ++ ++static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, ++ enum dma_data_direction dir) ++{ ++ struct ipu_dma_buf_attach *ipu_attach = attach->priv; ++ struct 
pci_dev *pdev = to_pci_dev(attach->dev); ++ struct ipu6_device *isp = pci_get_drvdata(pdev); ++ struct ipu6_bus_device *adev = isp->psys; ++ unsigned long attrs; ++ int ret; ++ ++ attrs = DMA_ATTR_SKIP_CPU_SYNC; ++ ret = dma_map_sgtable(&pdev->dev, ipu_attach->sgt, dir, attrs); ++ if (ret) { ++ dev_err(attach->dev, "pci buf map failed\n"); ++ return ERR_PTR(-EIO); ++ } ++ ++ dma_sync_sgtable_for_device(&pdev->dev, ipu_attach->sgt, dir); ++ ++ ret = ipu6_dma_map_sgtable(adev, ipu_attach->sgt, dir, 0); ++ if (ret) { ++ dev_err(attach->dev, "ipu6 buf map failed\n"); ++ return ERR_PTR(-EIO); ++ } ++ ++ ipu6_dma_sync_sgtable(adev, ipu_attach->sgt); ++ ++ return ipu_attach->sgt; ++} ++ ++static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, ++ struct sg_table *sgt, enum dma_data_direction dir) ++{ ++ struct pci_dev *pdev = to_pci_dev(attach->dev); ++ struct ipu6_device *isp = pci_get_drvdata(pdev); ++ struct ipu6_bus_device *adev = isp->psys; ++ ++ ipu6_dma_unmap_sgtable(adev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); ++ dma_unmap_sgtable(&pdev->dev, sgt, dir, 0); ++} ++ ++static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) ++{ ++ return -ENOTTY; ++} ++ ++static void ipu_dma_buf_release(struct dma_buf *buf) ++{ ++ struct ipu_psys_kbuffer *kbuf = buf->priv; ++ ++ if (!kbuf) ++ return; ++ ++ if (kbuf->db_attach) ++ ipu_psys_put_userpages(kbuf->db_attach->priv); ++ ++ kfree(kbuf); ++} ++ ++static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, ++ enum dma_data_direction dir) ++{ ++ return -ENOTTY; ++} ++ ++static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) ++{ ++ struct dma_buf_attachment *attach; ++ struct ipu_dma_buf_attach *ipu_attach; ++ ++ if (list_empty(&dmabuf->attachments)) ++ return -EINVAL; ++ ++ attach = list_last_entry(&dmabuf->attachments, ++ struct dma_buf_attachment, node); ++ ipu_attach = attach->priv; ++ ++ if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) ++ return -EINVAL; ++ ++ 
map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); ++ map->is_iomem = false; ++ if (!map->vaddr) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) ++{ ++ struct dma_buf_attachment *attach; ++ struct ipu_dma_buf_attach *ipu_attach; ++ ++ if (WARN_ON(list_empty(&dmabuf->attachments))) ++ return; ++ ++ attach = list_last_entry(&dmabuf->attachments, ++ struct dma_buf_attachment, node); ++ ipu_attach = attach->priv; ++ ++ if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) ++ return; ++ ++ vm_unmap_ram(map->vaddr, ipu_attach->npages); ++} ++ ++static const struct dma_buf_ops ipu_dma_buf_ops = { ++ .attach = ipu_dma_buf_attach, ++ .detach = ipu_dma_buf_detach, ++ .map_dma_buf = ipu_dma_buf_map, ++ .unmap_dma_buf = ipu_dma_buf_unmap, ++ .release = ipu_dma_buf_release, ++ .begin_cpu_access = ipu_dma_buf_begin_cpu_access, ++ .mmap = ipu_dma_buf_mmap, ++ .vmap = ipu_dma_buf_vmap, ++ .vunmap = ipu_dma_buf_vunmap, ++}; ++ ++static int ipu_psys_open(struct inode *inode, struct file *file) ++{ ++ struct ipu_psys *psys = inode_to_ipu_psys(inode); ++ struct ipu_psys_fh *fh; ++ int rval; ++ ++ fh = kzalloc(sizeof(*fh), GFP_KERNEL); ++ if (!fh) ++ return -ENOMEM; ++ ++ fh->psys = psys; ++ ++ file->private_data = fh; ++ ++ mutex_init(&fh->mutex); ++ INIT_LIST_HEAD(&fh->bufs_list); ++ INIT_LIST_HEAD(&fh->descs_list); ++ INIT_LIST_HEAD(&fh->bufs_lru); ++ init_waitqueue_head(&fh->wait); ++ ++ rval = ipu_psys_fh_init(fh); ++ if (rval) ++ goto open_failed; ++ ++ mutex_lock(&psys->mutex); ++ list_add_tail(&fh->list, &psys->fhs); ++ mutex_unlock(&psys->mutex); ++ ++ return 0; ++ ++open_failed: ++ mutex_destroy(&fh->mutex); ++ kfree(fh); ++ return rval; ++} ++ ++static inline void ipu_psys_kbuf_unmap(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ if (!kbuf) ++ return; ++ ++ kbuf->valid = false; ++ if (kbuf->kaddr) { ++ struct iosys_map dmap; ++ ++ 
iosys_map_set_vaddr(&dmap, kbuf->kaddr); ++ dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); ++ } ++ ++ if (!kbuf->userptr) ++ ipu6_dma_unmap_sgtable(fh->psys->adev, kbuf->sgt, ++ DMA_BIDIRECTIONAL, 0); ++ ++ if (!IS_ERR_OR_NULL(kbuf->sgt)) ++ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, ++ kbuf->sgt, ++ DMA_BIDIRECTIONAL); ++ if (!IS_ERR_OR_NULL(kbuf->db_attach)) ++ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); ++ dma_buf_put(kbuf->dbuf); ++ ++ kbuf->db_attach = NULL; ++ kbuf->dbuf = NULL; ++ kbuf->sgt = NULL; ++} ++ ++static void __ipu_psys_unmapbuf(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ /* From now on it is not safe to use this kbuffer */ ++ ipu_psys_kbuf_unmap(fh, kbuf); ++ ipu_buffer_del(fh, kbuf); ++ if (!kbuf->userptr) ++ kfree(kbuf); ++} ++ ++static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kbuffer *kbuf; ++ struct ipu_psys_desc *desc; ++ ++ desc = psys_desc_lookup(fh, fd); ++ if (WARN_ON_ONCE(!desc)) { ++ dev_err(dev, "descriptor not found: %d\n", fd); ++ return -EINVAL; ++ } ++ ++ kbuf = desc->kbuf; ++ /* descriptor is gone now */ ++ ipu_desc_del(fh, desc); ++ kfree(desc); ++ ++ if (WARN_ON_ONCE(!kbuf || !kbuf->dbuf)) { ++ dev_err(dev, ++ "descriptor with no buffer: %d\n", fd); ++ return -EINVAL; ++ } ++ ++ /* Wait for final UNMAP */ ++ if (!atomic_dec_and_test(&kbuf->map_count)) ++ return 0; ++ ++ __ipu_psys_unmapbuf(fh, kbuf); ++ ++ return 0; ++} ++ ++static int ipu_psys_release(struct inode *inode, struct file *file) ++{ ++ struct ipu_psys *psys = inode_to_ipu_psys(inode); ++ struct ipu_psys_fh *fh = file->private_data; ++ ++ mutex_lock(&fh->mutex); ++ while (!list_empty(&fh->descs_list)) { ++ struct ipu_psys_desc *desc; ++ ++ desc = list_first_entry(&fh->descs_list, ++ struct ipu_psys_desc, ++ list); ++ ++ ipu_desc_del(fh, desc); ++ kfree(desc); ++ } ++ ++ while (!list_empty(&fh->bufs_lru)) { ++ 
struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = list_first_entry(&fh->bufs_lru, ++ struct ipu_psys_kbuffer, ++ list); ++ ++ ipu_buffer_lru_del(fh, kbuf); ++ __ipu_psys_unmapbuf(fh, kbuf); ++ } ++ ++ while (!list_empty(&fh->bufs_list)) { ++ struct dma_buf_attachment *db_attach; ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = list_first_entry(&fh->bufs_list, ++ struct ipu_psys_kbuffer, ++ list); ++ ++ ipu_buffer_del(fh, kbuf); ++ db_attach = kbuf->db_attach; ++ ++ /* Unmap and release buffers */ ++ if (kbuf->dbuf && db_attach) { ++ ipu_psys_kbuf_unmap(fh, kbuf); ++ } else { ++ if (db_attach) ++ ipu_psys_put_userpages(db_attach->priv); ++ kfree(kbuf); ++ } ++ } ++ mutex_unlock(&fh->mutex); ++ ++ mutex_lock(&psys->mutex); ++ list_del(&fh->list); ++ ++ mutex_unlock(&psys->mutex); ++ ipu_psys_fh_deinit(fh); ++ ++ mutex_lock(&psys->mutex); ++ if (list_empty(&psys->fhs)) ++ psys->power_gating = 0; ++ mutex_unlock(&psys->mutex); ++ mutex_destroy(&fh->mutex); ++ kfree(fh); ++ ++ return 0; ++} ++ ++static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_desc *desc; ++ DEFINE_DMA_BUF_EXPORT_INFO(exp_info); ++ struct dma_buf *dbuf; ++ int ret; ++ ++ if (!buf->base.userptr) { ++ dev_err(dev, "Buffer allocation not supported\n"); ++ return -EINVAL; ++ } ++ ++ kbuf = ipu_psys_kbuffer_alloc(); ++ if (!kbuf) ++ return -ENOMEM; ++ ++ kbuf->len = buf->len; ++ kbuf->userptr = (unsigned long)buf->base.userptr; ++ kbuf->flags = buf->flags; ++ ++ exp_info.ops = &ipu_dma_buf_ops; ++ exp_info.size = kbuf->len; ++ exp_info.flags = O_RDWR; ++ exp_info.priv = kbuf; ++ ++ dbuf = dma_buf_export(&exp_info); ++ if (IS_ERR(dbuf)) { ++ kfree(kbuf); ++ return PTR_ERR(dbuf); ++ } ++ ++ ret = dma_buf_fd(dbuf, 0); ++ if (ret < 0) { ++ dma_buf_put(dbuf); ++ return ret; ++ } ++ ++ buf->base.fd = ret; ++ buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; ++ 
buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; ++ kbuf->flags = buf->flags; ++ ++ desc = ipu_psys_desc_alloc(ret); ++ if (!desc) { ++ dma_buf_put(dbuf); ++ return -ENOMEM; ++ } ++ ++ kbuf->dbuf = dbuf; ++ ++ mutex_lock(&fh->mutex); ++ ipu_desc_add(fh, desc); ++ ipu_buffer_add(fh, kbuf); ++ mutex_unlock(&fh->mutex); ++ ++ dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", ++ buf->base.userptr, buf->len, buf->base.fd); ++ ++ return 0; ++} ++ ++static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) ++{ ++ return 0; ++} ++ ++static void ipu_psys_kbuffer_lru(struct ipu_psys_fh *fh, ++ struct ipu_psys_kbuffer *kbuf) ++{ ++ ipu_buffer_del(fh, kbuf); ++ ipu_buffer_lru_add(fh, kbuf); ++ ++ while (fh->num_bufs_lru > IPU_PSYS_MAX_NUM_BUFS_LRU) { ++ kbuf = list_first_entry(&fh->bufs_lru, ++ struct ipu_psys_kbuffer, ++ list); ++ ++ ipu_buffer_lru_del(fh, kbuf); ++ __ipu_psys_unmapbuf(fh, kbuf); ++ } ++} ++ ++struct ipu_psys_kbuffer *ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->isp->pdev->dev; ++ int ret; ++ struct ipu_psys_kbuffer *kbuf; ++ struct ipu_psys_desc *desc; ++ struct dma_buf *dbuf; ++ struct iosys_map dmap; ++ ++ dbuf = dma_buf_get(fd); ++ if (IS_ERR(dbuf)) ++ return NULL; ++ ++ desc = psys_desc_lookup(fh, fd); ++ if (!desc) { ++ desc = ipu_psys_desc_alloc(fd); ++ if (!desc) ++ goto desc_alloc_fail; ++ ipu_desc_add(fh, desc); ++ } ++ ++ kbuf = psys_buf_lookup(fh, fd); ++ if (!kbuf) { ++ kbuf = ipu_psys_kbuffer_alloc(); ++ if (!kbuf) ++ goto buf_alloc_fail; ++ ipu_buffer_add(fh, kbuf); ++ } ++ ++ /* If this descriptor references no buffer or new buffer */ ++ if (desc->kbuf != kbuf) { ++ if (desc->kbuf) { ++ /* ++ * Un-reference old buffer and possibly put it on ++ * the LRU list ++ */ ++ if (atomic_dec_and_test(&desc->kbuf->map_count)) ++ ipu_psys_kbuffer_lru(fh, desc->kbuf); ++ } ++ ++ /* Grab reference of the new buffer */ ++ 
atomic_inc(&kbuf->map_count); ++ } ++ ++ desc->kbuf = kbuf; ++ ++ if (kbuf->sgt) { ++ dev_dbg(dev, "fd %d has been mapped!\n", fd); ++ dma_buf_put(dbuf); ++ goto mapbuf_end; ++ } ++ ++ kbuf->dbuf = dbuf; ++ ++ if (kbuf->len == 0) ++ kbuf->len = kbuf->dbuf->size; ++ ++ kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); ++ if (IS_ERR(kbuf->db_attach)) { ++ dev_dbg(dev, "dma buf attach failed\n"); ++ goto attach_fail; ++ } ++ ++ kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, ++ DMA_BIDIRECTIONAL); ++ if (IS_ERR_OR_NULL(kbuf->sgt)) { ++ kbuf->sgt = NULL; ++ dev_dbg(dev, "dma buf map attachment failed\n"); ++ goto kbuf_map_fail; ++ } ++ ++ if (!kbuf->userptr) { ++ ret = ipu6_dma_map_sgtable(psys->adev, kbuf->sgt, ++ DMA_BIDIRECTIONAL, 0); ++ if (ret) { ++ dev_dbg(dev, "ipu6 buf map failed\n"); ++ goto kbuf_map_fail; ++ } ++ } ++ ++ kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); ++ ++ dmap.is_iomem = false; ++ if (dma_buf_vmap_unlocked(kbuf->dbuf, &dmap)) { ++ dev_dbg(dev, "dma buf vmap failed\n"); ++ goto kbuf_map_fail; ++ } ++ kbuf->kaddr = dmap.vaddr; ++ ++mapbuf_end: ++ dev_dbg(dev, "%s %s kbuf %p fd %d with len %llu mapped\n", ++ __func__, kbuf->userptr ? 
"private" : "imported", kbuf, fd, ++ kbuf->len); ++ ++ kbuf->valid = true; ++ return kbuf; ++ ++kbuf_map_fail: ++ ipu_buffer_del(fh, kbuf); ++ if (!IS_ERR_OR_NULL(kbuf->sgt)) { ++ if (!kbuf->userptr) ++ ipu6_dma_unmap_sgtable(psys->adev, kbuf->sgt, ++ DMA_BIDIRECTIONAL, 0); ++ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, ++ DMA_BIDIRECTIONAL); ++ } ++ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); ++attach_fail: ++ ipu_buffer_del(fh, kbuf); ++ dbuf = ERR_PTR(-EINVAL); ++ if (!kbuf->userptr) ++ kfree(kbuf); ++ ++buf_alloc_fail: ++ ipu_desc_del(fh, desc); ++ kfree(desc); ++ ++desc_alloc_fail: ++ if (!IS_ERR(dbuf)) ++ dma_buf_put(dbuf); ++ return NULL; ++} ++ ++static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys_kbuffer *kbuf; ++ ++ dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF\n"); ++ ++ mutex_lock(&fh->mutex); ++ kbuf = ipu_psys_mapbuf_locked(fd, fh); ++ mutex_unlock(&fh->mutex); ++ ++ return kbuf ? 0 : -EINVAL; ++} ++ ++static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) ++{ ++ struct device *dev = &fh->psys->adev->auxdev.dev; ++ long ret; ++ ++ dev_dbg(dev, "IOC_UNMAPBUF\n"); ++ ++ mutex_lock(&fh->mutex); ++ ret = ipu_psys_unmapbuf_locked(fd, fh); ++ mutex_unlock(&fh->mutex); ++ ++ return ret; ++} ++ ++static __poll_t ipu_psys_poll(struct file *file, ++ struct poll_table_struct *wait) ++{ ++ struct ipu_psys_fh *fh = file->private_data; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ __poll_t ret = 0; ++ ++ dev_dbg(dev, "ipu psys poll\n"); ++ ++ poll_wait(file, &fh->wait, wait); ++ ++ if (ipu_get_completed_kcmd(fh)) ++ ret = POLLIN; ++ ++ dev_dbg(dev, "ipu psys poll ret %u\n", ret); ++ ++ return ret; ++} ++ ++static long ipu_get_manifest(struct ipu_psys_manifest *manifest, ++ struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu6_bus_device *adev = psys->adev; ++ struct ipu6_device *isp = 
adev->isp; ++ struct ipu6_cpd_client_pkg_hdr *client_pkg; ++ u32 entries; ++ void *host_fw_data; ++ dma_addr_t dma_fw_data; ++ u32 client_pkg_offset; ++ ++ host_fw_data = (void *)isp->cpd_fw->data; ++ dma_fw_data = sg_dma_address(adev->fw_sgt.sgl); ++ entries = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); ++ if (!manifest || manifest->index > entries - 1) { ++ dev_err(dev, "invalid argument\n"); ++ return -EINVAL; ++ } ++ ++ if (!ipu6_cpd_pkg_dir_get_size(adev->pkg_dir, manifest->index) || ++ ipu6_cpd_pkg_dir_get_type(adev->pkg_dir, manifest->index) < ++ IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE) { ++ dev_dbg(dev, "invalid pkg dir entry\n"); ++ return -ENOENT; ++ } ++ ++ client_pkg_offset = ipu6_cpd_pkg_dir_get_address(adev->pkg_dir, ++ manifest->index); ++ client_pkg_offset -= dma_fw_data; ++ ++ client_pkg = host_fw_data + client_pkg_offset; ++ manifest->size = client_pkg->pg_manifest_size; ++ ++ if (!manifest->manifest) ++ return 0; ++ ++ if (copy_to_user(manifest->manifest, ++ (uint8_t *)client_pkg + client_pkg->pg_manifest_offs, ++ manifest->size)) { ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static long ipu_psys_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ union { ++ struct ipu_psys_buffer buf; ++ struct ipu_psys_command cmd; ++ struct ipu_psys_event ev; ++ struct ipu_psys_capability caps; ++ struct ipu_psys_manifest m; ++ } karg; ++ struct ipu_psys_fh *fh = file->private_data; ++ long err = 0; ++ void __user *up = (void __user *)arg; ++ bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); ++ ++ if (copy) { ++ if (_IOC_SIZE(cmd) > sizeof(karg)) ++ return -ENOTTY; ++ ++ if (_IOC_DIR(cmd) & _IOC_WRITE) { ++ err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); ++ if (err) ++ return -EFAULT; ++ } ++ } ++ ++ switch (cmd) { ++ case IPU_IOC_MAPBUF: ++ err = ipu_psys_mapbuf(arg, fh); ++ break; ++ case IPU_IOC_UNMAPBUF: ++ err = ipu_psys_unmapbuf(arg, fh); ++ break; ++ case IPU_IOC_QUERYCAP: ++ karg.caps = fh->psys->caps; ++ break; ++ 
case IPU_IOC_GETBUF: ++ err = ipu_psys_getbuf(&karg.buf, fh); ++ break; ++ case IPU_IOC_PUTBUF: ++ err = ipu_psys_putbuf(&karg.buf, fh); ++ break; ++ case IPU_IOC_QCMD: ++ err = ipu_psys_kcmd_new(&karg.cmd, fh); ++ break; ++ case IPU_IOC_DQEVENT: ++ err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); ++ break; ++ case IPU_IOC_GET_MANIFEST: ++ err = ipu_get_manifest(&karg.m, fh); ++ break; ++ default: ++ err = -ENOTTY; ++ break; ++ } ++ ++ if (err) ++ return err; ++ ++ if (copy && _IOC_DIR(cmd) & _IOC_READ) ++ if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static const struct file_operations ipu_psys_fops = { ++ .open = ipu_psys_open, ++ .release = ipu_psys_release, ++ .unlocked_ioctl = ipu_psys_ioctl, ++ .poll = ipu_psys_poll, ++ .owner = THIS_MODULE, ++}; ++ ++static void ipu_psys_dev_release(struct device *dev) ++{ ++} ++ ++static int psys_runtime_pm_resume(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ unsigned long flags; ++ int retval; ++ ++ if (!psys) ++ return 0; ++ ++ spin_lock_irqsave(&psys->ready_lock, flags); ++ if (psys->ready) { ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ return 0; ++ } ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ ++ retval = ipu6_mmu_hw_init(adev->mmu); ++ if (retval) ++ return retval; ++ ++ if (async_fw_init && !psys->fwcom) { ++ dev_err(dev, "async firmware init not finished, skipping\n"); ++ return 0; ++ } ++ ++ if (!ipu6_buttress_auth_done(adev->isp)) { ++ dev_dbg(dev, "fw not yet authenticated, skipping\n"); ++ return 0; ++ } ++ ++ ipu_psys_setup_hw(psys); ++ ++ ipu_psys_subdomains_power(psys, 1); ++ ipu6_configure_spc(adev->isp, ++ &psys->pdata->ipdata->hw_variant, ++ IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX, ++ psys->pdata->base, adev->pkg_dir, ++ adev->pkg_dir_dma_addr); ++ ++ retval = ipu_fw_psys_open(psys); ++ if (retval) { ++ dev_err(dev, "Failed to open abi.\n"); ++ return 
retval; ++ } ++ ++ spin_lock_irqsave(&psys->ready_lock, flags); ++ psys->ready = 1; ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ ++ return 0; ++} ++ ++static int psys_runtime_pm_suspend(struct device *dev) ++{ ++ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ unsigned long flags; ++ int rval; ++ ++ if (!psys) ++ return 0; ++ ++ if (!psys->ready) ++ return 0; ++ ++ spin_lock_irqsave(&psys->ready_lock, flags); ++ psys->ready = 0; ++ spin_unlock_irqrestore(&psys->ready_lock, flags); ++ ++ /* ++ * We can trace failure but better to not return an error. ++ * At suspend we are progressing towards psys power gated state. ++ * Any hang / failure inside psys will be forgotten soon. ++ */ ++ rval = ipu_fw_psys_close(psys); ++ if (rval) ++ dev_err(dev, "Device close failure: %d\n", rval); ++ ++ ipu_psys_subdomains_power(psys, 0); ++ ++ ipu6_mmu_hw_cleanup(adev->mmu); ++ ++ return 0; ++} ++ ++/* The following PM callbacks are needed to enable runtime PM in IPU PCI ++ * device resume, otherwise, runtime PM can't work in PCI resume from ++ * S3 state. 
++ */ ++static int psys_resume(struct device *dev) ++{ ++ return 0; ++} ++ ++static int psys_suspend(struct device *dev) ++{ ++ return 0; ++} ++ ++static const struct dev_pm_ops psys_pm_ops = { ++ .runtime_suspend = psys_runtime_pm_suspend, ++ .runtime_resume = psys_runtime_pm_resume, ++ .suspend = psys_suspend, ++ .resume = psys_resume, ++}; ++ ++static int ipu_psys_sched_cmd(void *ptr) ++{ ++ struct ipu_psys *psys = ptr; ++ size_t pending = 0; ++ ++ while (1) { ++ wait_event_interruptible(psys->sched_cmd_wq, ++ (kthread_should_stop() || ++ (pending = ++ atomic_read(&psys->wakeup_count)))); ++ ++ if (kthread_should_stop()) ++ break; ++ ++ if (pending == 0) ++ continue; ++ ++ mutex_lock(&psys->mutex); ++ atomic_set(&psys->wakeup_count, 0); ++ ipu_psys_run_next(psys); ++ mutex_unlock(&psys->mutex); ++ } ++ ++ return 0; ++} ++ ++static void start_sp(struct ipu6_bus_device *adev) ++{ ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ void __iomem *spc_regs_base = psys->pdata->base + ++ psys->pdata->ipdata->hw_variant.spc_offset; ++ u32 val = 0; ++ ++ val |= IPU6_PSYS_SPC_STATUS_START | ++ IPU6_PSYS_SPC_STATUS_RUN | ++ IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; ++ val |= psys->icache_prefetch_sp ? 
++ IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; ++ writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); ++} ++ ++static int query_sp(struct ipu6_bus_device *adev) ++{ ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ void __iomem *spc_regs_base = psys->pdata->base + ++ psys->pdata->ipdata->hw_variant.spc_offset; ++ u32 val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); ++ ++ /* return true when READY == 1, START == 0 */ ++ val &= IPU6_PSYS_SPC_STATUS_READY | IPU6_PSYS_SPC_STATUS_START; ++ ++ return val == IPU6_PSYS_SPC_STATUS_READY; ++} ++ ++static int ipu_psys_fw_init(struct ipu_psys *psys) ++{ ++ struct ipu6_fw_syscom_queue_config *queue_cfg; ++ struct device *dev = &psys->adev->auxdev.dev; ++ unsigned int size; ++ struct ipu6_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { ++ { ++ IPU_FW_PSYS_EVENT_QUEUE_SIZE, ++ sizeof(struct ipu_fw_psys_event) ++ } ++ }; ++ struct ipu_fw_psys_srv_init server_init = { ++ .ddr_pkg_dir_address = 0, ++ .host_ddr_pkg_dir = NULL, ++ .pkg_dir_size = 0, ++ .icache_prefetch_sp = psys->icache_prefetch_sp, ++ .icache_prefetch_isp = psys->icache_prefetch_isp, ++ }; ++ struct ipu6_fw_com_cfg fwcom = { ++ .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, ++ .output = fw_psys_event_queue_cfg, ++ .specific_addr = &server_init, ++ .specific_size = sizeof(server_init), ++ .cell_start = start_sp, ++ .cell_ready = query_sp, ++ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET, ++ }; ++ int i; ++ ++ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || ++ ipu_ver == IPU6_VER_6EP_MTL) ++ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ ++ queue_cfg = devm_kzalloc(dev, sizeof(*queue_cfg) * size, ++ GFP_KERNEL); ++ if (!queue_cfg) ++ return -ENOMEM; ++ ++ for (i = 0; i < size; i++) { ++ queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE; ++ queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd); ++ } ++ ++ fwcom.input = queue_cfg; ++ fwcom.num_input_queues = 
size; ++ fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; ++ ++ psys->fwcom = ipu6_fw_com_prepare(&fwcom, psys->adev, ++ psys->pdata->base); ++ if (!psys->fwcom) { ++ dev_err(dev, "psys fw com prepare failed\n"); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static void run_fw_init_work(struct work_struct *work) ++{ ++ struct fw_init_task *task = (struct fw_init_task *)work; ++ struct ipu_psys *psys = task->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ int rval; ++ ++ rval = ipu_psys_fw_init(psys); ++ ++ if (rval) { ++ dev_err(dev, "FW init failed(%d)\n", rval); ++ ipu6_psys_remove(&psys->adev->auxdev); ++ } else { ++ dev_info(dev, "FW init done\n"); ++ } ++} ++ ++static int ipu6_psys_probe(struct auxiliary_device *auxdev, ++ const struct auxiliary_device_id *auxdev_id) ++{ ++ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); ++ struct device *dev = &auxdev->dev; ++ struct ipu_psys_pg *kpg, *kpg0; ++ struct ipu_psys *psys; ++ unsigned int minor; ++ int i, rval = -E2BIG; ++ ++ if (!adev->isp->bus_ready_to_probe) ++ return -EPROBE_DEFER; ++ ++ if (!adev->pkg_dir) ++ return -EPROBE_DEFER; ++ ++ ipu_ver = adev->isp->hw_ver; ++ ++ rval = ipu6_mmu_hw_init(adev->mmu); ++ if (rval) ++ return rval; ++ ++ mutex_lock(&ipu_psys_mutex); ++ ++ minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); ++ if (minor == IPU_PSYS_NUM_DEVICES) { ++ dev_err(dev, "too many devices\n"); ++ goto out_unlock; ++ } ++ ++ psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); ++ if (!psys) { ++ rval = -ENOMEM; ++ goto out_unlock; ++ } ++ ++ adev->auxdrv_data = ++ (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; ++ adev->auxdrv = to_auxiliary_drv(dev->driver); ++ ++ psys->adev = adev; ++ psys->pdata = adev->pdata; ++ psys->icache_prefetch_sp = 0; ++ ++ psys->power_gating = 0; ++ ++ spin_lock_init(&psys->ready_lock); ++ spin_lock_init(&psys->pgs_lock); ++ psys->ready = 0; ++ psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; ++ ++ 
mutex_init(&psys->mutex); ++ INIT_LIST_HEAD(&psys->fhs); ++ INIT_LIST_HEAD(&psys->pgs); ++ INIT_LIST_HEAD(&psys->started_kcmds_list); ++ ++ init_waitqueue_head(&psys->sched_cmd_wq); ++ atomic_set(&psys->wakeup_count, 0); ++ /* ++ * Create a thread to schedule commands sent to IPU firmware. ++ * The thread reduces the coupling between the command scheduler ++ * and queueing commands from the user to driver. ++ */ ++ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, ++ "psys_sched_cmd"); ++ ++ if (IS_ERR(psys->sched_cmd_thread)) { ++ psys->sched_cmd_thread = NULL; ++ mutex_destroy(&psys->mutex); ++ goto out_unlock; ++ } ++ ++ dev_set_drvdata(dev, psys); ++ ++ rval = ipu_psys_res_pool_init(&psys->res_pool_running); ++ if (rval < 0) { ++ dev_err(&psys->dev, ++ "unable to alloc process group resources\n"); ++ goto out_mutex_destroy; ++ } ++ ++ ipu6_psys_hw_res_variant_init(); ++ ++ /* allocate and map memory for process groups */ ++ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { ++ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); ++ if (!kpg) ++ goto out_free_pgs; ++ kpg->pg = ipu6_dma_alloc(adev, IPU_PSYS_PG_MAX_SIZE, ++ &kpg->pg_dma_addr, ++ GFP_KERNEL, 0); ++ if (!kpg->pg) { ++ kfree(kpg); ++ goto out_free_pgs; ++ } ++ kpg->size = IPU_PSYS_PG_MAX_SIZE; ++ list_add(&kpg->list, &psys->pgs); ++ } ++ ++ psys->caps.pg_count = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); ++ ++ dev_info(dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); ++ if (async_fw_init) { ++ INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, ++ run_fw_init_work); ++ fw_init_task.psys = psys; ++ schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); ++ } else { ++ rval = ipu_psys_fw_init(psys); ++ } ++ ++ if (rval) { ++ dev_err(dev, "FW init failed(%d)\n", rval); ++ goto out_free_pgs; ++ } ++ ++ psys->dev.bus = &ipu_psys_bus; ++ psys->dev.parent = dev; ++ psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); ++ psys->dev.release = ipu_psys_dev_release; ++ 
dev_set_name(&psys->dev, "ipu-psys%d", minor); ++ device_initialize(&psys->dev); ++ ++ cdev_init(&psys->cdev, &ipu_psys_fops); ++ psys->cdev.owner = ipu_psys_fops.owner; ++ ++ rval = cdev_device_add(&psys->cdev, &psys->dev); ++ if (rval < 0) { ++ dev_err(dev, "psys device_register failed\n"); ++ goto out_release_fw_com; ++ } ++ ++ set_bit(minor, ipu_psys_devices); ++ ++ /* Add the hw stepping information to caps */ ++ strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, ++ sizeof(psys->caps.dev_model)); ++ ++ mutex_unlock(&ipu_psys_mutex); ++ ++ dev_info(dev, "psys probe minor: %d\n", minor); ++ ++ ipu6_mmu_hw_cleanup(adev->mmu); ++ ++ return 0; ++ ++out_release_fw_com: ++ ipu6_fw_com_release(psys->fwcom, 1); ++out_free_pgs: ++ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { ++ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); ++ kfree(kpg); ++ } ++ ++ ipu_psys_res_pool_cleanup(&psys->res_pool_running); ++out_mutex_destroy: ++ mutex_destroy(&psys->mutex); ++ if (psys->sched_cmd_thread) { ++ kthread_stop(psys->sched_cmd_thread); ++ psys->sched_cmd_thread = NULL; ++ } ++out_unlock: ++ /* Safe to call even if the init is not called */ ++ mutex_unlock(&ipu_psys_mutex); ++ ipu6_mmu_hw_cleanup(adev->mmu); ++ return rval; ++} ++ ++static void ipu6_psys_remove(struct auxiliary_device *auxdev) ++{ ++ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); ++ struct device *dev = &auxdev->dev; ++ struct ipu_psys *psys = dev_get_drvdata(&auxdev->dev); ++ struct ipu_psys_pg *kpg, *kpg0; ++ ++ if (psys->sched_cmd_thread) { ++ kthread_stop(psys->sched_cmd_thread); ++ psys->sched_cmd_thread = NULL; ++ } ++ ++ mutex_lock(&ipu_psys_mutex); ++ ++ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { ++ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); ++ kfree(kpg); ++ } ++ ++ if (psys->fwcom && ipu6_fw_com_release(psys->fwcom, 1)) ++ dev_err(dev, "fw com release failed.\n"); ++ ++ kfree(psys->server_init); ++ kfree(psys->syscom_config); ++ ++ 
ipu_psys_res_pool_cleanup(&psys->res_pool_running); ++ ++ cdev_device_del(&psys->cdev, &psys->dev); ++ ++ clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); ++ ++ mutex_unlock(&ipu_psys_mutex); ++ ++ mutex_destroy(&psys->mutex); ++ ++ dev_info(dev, "removed\n"); ++} ++ ++static irqreturn_t psys_isr_threaded(struct ipu6_bus_device *adev) ++{ ++ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); ++ struct device *dev = &psys->adev->auxdev.dev; ++ void __iomem *base = psys->pdata->base; ++ u32 status; ++ int r; ++ ++ mutex_lock(&psys->mutex); ++ r = pm_runtime_get_if_in_use(dev); ++ if (!r || WARN_ON_ONCE(r < 0)) { ++ mutex_unlock(&psys->mutex); ++ return IRQ_NONE; ++ } ++ ++ status = readl(base + IPU6_REG_PSYS_GPDEV_IRQ_STATUS); ++ writel(status, base + IPU6_REG_PSYS_GPDEV_IRQ_CLEAR); ++ ++ if (status & IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU6_PSYS_GPDEV_FWIRQ0)) { ++ writel(0, base + IPU6_REG_PSYS_GPDEV_FWIRQ(0)); ++ ipu_psys_handle_events(psys); ++ } ++ ++ pm_runtime_put(dev); ++ mutex_unlock(&psys->mutex); ++ ++ return status ? 
IRQ_HANDLED : IRQ_NONE; ++} ++ ++static const struct ipu6_auxdrv_data ipu6_psys_auxdrv_data = { ++ .isr_threaded = psys_isr_threaded, ++ .wake_isr_thread = true, ++}; ++ ++static const struct auxiliary_device_id ipu6_psys_id_table[] = { ++ { ++ .name = "intel_ipu6.psys", ++ .driver_data = (kernel_ulong_t)&ipu6_psys_auxdrv_data, ++ }, ++ { } ++}; ++MODULE_DEVICE_TABLE(auxiliary, ipu6_psys_id_table); ++ ++static struct auxiliary_driver ipu6_psys_aux_driver = { ++ .name = IPU6_PSYS_NAME, ++ .probe = ipu6_psys_probe, ++ .remove = ipu6_psys_remove, ++ .id_table = ipu6_psys_id_table, ++ .driver = { ++ .pm = &psys_pm_ops, ++ }, ++}; ++ ++static int __init ipu_psys_init(void) ++{ ++ int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, ++ IPU_PSYS_NUM_DEVICES, ipu_psys_bus.name); ++ if (rval) { ++ pr_err("can't alloc psys chrdev region (%d)\n", rval); ++ return rval; ++ } ++ ++ rval = bus_register(&ipu_psys_bus); ++ if (rval) { ++ pr_err("can't register psys bus (%d)\n", rval); ++ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); ++ return rval; ++ } ++ ++ auxiliary_driver_register(&ipu6_psys_aux_driver); ++ return 0; ++} ++module_init(ipu_psys_init); ++ ++static void __exit ipu_psys_exit(void) ++{ ++ auxiliary_driver_unregister(&ipu6_psys_aux_driver); ++ bus_unregister(&ipu_psys_bus); ++ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); ++} ++module_exit(ipu_psys_exit); ++ ++MODULE_AUTHOR("Bingbu Cao "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Intel IPU6 processing system driver"); ++MODULE_IMPORT_NS("DMA_BUF"); ++MODULE_IMPORT_NS("INTEL_IPU6"); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h +new file mode 100644 +index 0000000..8af6551 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h +@@ -0,0 +1,324 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2013 - 2024 Intel Corporation */ ++ ++#ifndef IPU_PSYS_H ++#define IPU_PSYS_H ++ 
++#include ++#include ++ ++#include ++#include "ipu6.h" ++#include "ipu6-bus.h" ++#include "ipu6-dma.h" ++#include "ipu-fw-psys.h" ++#include "ipu-platform-psys.h" ++ ++/* PSYS Info bits*/ ++#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2c + ((a) * 12)) ++#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5c + ((a) * 12)) ++ ++#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 ++#define IPU_PSYS_REG_SPC_START_PC 0x4 ++#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 ++#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 ++ ++#define IPU_PSYS_SPC_STATUS_START BIT(1) ++#define IPU_PSYS_SPC_STATUS_RUN BIT(3) ++#define IPU_PSYS_SPC_STATUS_READY BIT(5) ++#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) ++#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) ++ ++#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 ++ ++#define IPU_INFO_ENABLE_SNOOP BIT(0) ++#define IPU_INFO_DEC_FORCE_FLUSH BIT(1) ++#define IPU_INFO_DEC_PASS_THRU BIT(2) ++#define IPU_INFO_ZLW BIT(3) ++#define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1f) << 4) ++#define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9) ++#define IPU_INFO_IMR_BASE BIT(10) ++#define IPU_INFO_IMR_DESTINED BIT(11) ++ ++#define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF ++ ++#define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 ++#define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 ++#define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) ++#define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) ++#define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) ++ ++enum ipu_device_ab_group1_target_id { ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, ++ 
IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, ++ IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, ++}; ++ ++/* IRQ-related registers in PSYS */ ++#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 ++#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 ++#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 ++#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c ++#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 ++#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 ++/* There are 8 FW interrupts, n = 0..7 */ ++#define IPU_PSYS_GPDEV_FWIRQ0 5 ++#define IPU_PSYS_GPDEV_FWIRQ1 6 ++#define IPU_PSYS_GPDEV_FWIRQ2 7 ++#define IPU_PSYS_GPDEV_FWIRQ3 8 ++#define IPU_PSYS_GPDEV_FWIRQ4 9 ++#define IPU_PSYS_GPDEV_FWIRQ5 10 ++#define IPU_PSYS_GPDEV_FWIRQ6 11 ++#define IPU_PSYS_GPDEV_FWIRQ7 12 ++#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n)) ++#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) ++ ++/* ++ * psys subdomains power request regs ++ */ ++enum ipu_device_buttress_psys_domain_pos { ++ IPU_PSYS_SUBDOMAIN_ISA = 0, ++ IPU_PSYS_SUBDOMAIN_PSA = 1, ++ IPU_PSYS_SUBDOMAIN_BB = 2, ++ IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */ ++ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */ ++}; ++ ++#define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \ ++ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \ ++ BIT(IPU_PSYS_SUBDOMAIN_BB)) ++ ++#define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0 ++#define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4 ++ ++#define IPU_PSYS_CMD_TIMEOUT_MS 2000 ++#define IPU_PSYS_OPEN_TIMEOUT_US 50 ++#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) ++ ++#define IPU_PSYS_PG_POOL_SIZE 16 
++#define IPU_PSYS_PG_MAX_SIZE 8192 ++#define IPU_MAX_PSYS_CMD_BUFFERS 32 ++#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS ++#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS ++#define IPU_PSYS_CLOSE_TIMEOUT_US 50 ++#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US) ++#define IPU_MAX_RESOURCES 128 ++ ++extern enum ipu6_version ipu_ver; ++ ++/* Opaque structure. Do not access fields. */ ++struct ipu_resource { ++ u32 id; ++ int elements; /* Number of elements available to allocation */ ++ unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ ++}; ++ ++enum ipu_resource_type { ++ IPU_RESOURCE_DEV_CHN = 0, ++ IPU_RESOURCE_EXT_MEM, ++ IPU_RESOURCE_DFM ++}; ++ ++/* Allocation of resource(s) */ ++/* Opaque structure. Do not access fields. */ ++struct ipu_resource_alloc { ++ enum ipu_resource_type type; ++ struct ipu_resource *resource; ++ int elements; ++ int pos; ++}; ++ ++/* ++ * This struct represents all of the currently allocated ++ * resources from IPU model. It is used also for allocating ++ * resources for the next set of PGs to be run on IPU ++ * (ie. those PGs which are not yet being run and which don't ++ * yet reserve real IPU resources). ++ * Use larger array to cover existing resource quantity ++ */ ++ ++/* resource size may need expand for new resource model */ ++struct ipu_psys_resource_pool { ++ u32 cells; /* Bitmask of cells allocated */ ++ struct ipu_resource dev_channels[16]; ++ struct ipu_resource ext_memory[32]; ++ struct ipu_resource dfms[16]; ++ DECLARE_BITMAP(cmd_queues, 32); ++ /* Protects cmd_queues bitmap */ ++ spinlock_t queues_lock; ++}; ++ ++/* ++ * This struct keeps book of the resources allocated for a specific PG. ++ * It is used for freeing up resources from struct ipu_psys_resources ++ * when the PG is released from IPU (or model of IPU). 
++ */ ++struct ipu_psys_resource_alloc { ++ u32 cells; /* Bitmask of cells needed */ ++ struct ipu_resource_alloc ++ resource_alloc[IPU_MAX_RESOURCES]; ++ int resources; ++}; ++ ++struct task_struct; ++struct ipu_psys { ++ struct ipu_psys_capability caps; ++ struct cdev cdev; ++ struct device dev; ++ ++ struct mutex mutex; /* Psys various */ ++ int ready; /* psys fw status */ ++ bool icache_prefetch_sp; ++ bool icache_prefetch_isp; ++ spinlock_t ready_lock; /* protect psys firmware state */ ++ spinlock_t pgs_lock; /* Protect pgs list access */ ++ struct list_head fhs; ++ struct list_head pgs; ++ struct list_head started_kcmds_list; ++ struct ipu6_psys_pdata *pdata; ++ struct ipu6_bus_device *adev; ++ struct ia_css_syscom_context *dev_ctx; ++ struct ia_css_syscom_config *syscom_config; ++ struct ia_css_psys_server_init *server_init; ++ struct task_struct *sched_cmd_thread; ++ wait_queue_head_t sched_cmd_wq; ++ atomic_t wakeup_count; /* Psys schedule thread wakeup count */ ++ ++ /* Resources needed to be managed for process groups */ ++ struct ipu_psys_resource_pool res_pool_running; ++ ++ const struct firmware *fw; ++ struct sg_table fw_sgt; ++ u64 *pkg_dir; ++ dma_addr_t pkg_dir_dma_addr; ++ unsigned int pkg_dir_size; ++ unsigned long timeout; ++ ++ int active_kcmds, started_kcmds; ++ void *fwcom; ++ ++ int power_gating; ++}; ++ ++struct ipu_psys_fh { ++ struct ipu_psys *psys; ++ struct mutex mutex;/* Protects bufs_list & kcmds fields */ ++ struct list_head list; ++ /* Holds all buffers that this fh owns */ ++ struct list_head bufs_list; ++ /* Holds all descriptors (fd:kbuffer associations) */ ++ struct list_head descs_list; ++ struct list_head bufs_lru; ++ wait_queue_head_t wait; ++ struct ipu_psys_scheduler sched; ++ ++ u32 num_bufs; ++ u32 num_descs; ++ u32 num_bufs_lru; ++}; ++ ++struct ipu_psys_pg { ++ struct ipu_fw_psys_process_group *pg; ++ size_t size; ++ size_t pg_size; ++ dma_addr_t pg_dma_addr; ++ struct list_head list; ++ struct ipu_psys_resource_alloc 
resource_alloc; ++}; ++ ++struct ipu6_psys_constraint { ++ struct list_head list; ++ unsigned int min_freq; ++}; ++ ++struct ipu_psys_kcmd { ++ struct ipu_psys_fh *fh; ++ struct list_head list; ++ struct ipu_psys_buffer_set *kbuf_set; ++ enum ipu_psys_cmd_state state; ++ void *pg_manifest; ++ size_t pg_manifest_size; ++ struct ipu_psys_kbuffer **kbufs; ++ struct ipu_psys_buffer *buffers; ++ size_t nbuffers; ++ struct ipu_fw_psys_process_group *pg_user; ++ struct ipu_psys_pg *kpg; ++ u64 user_token; ++ u64 issue_id; ++ u32 priority; ++ u32 kernel_enable_bitmap[4]; ++ u32 terminal_enable_bitmap[4]; ++ u32 routing_enable_bitmap[4]; ++ u32 rbm[5]; ++ struct ipu6_psys_constraint constraint; ++ struct ipu_psys_event ev; ++ struct timer_list watchdog; ++}; ++ ++struct ipu_dma_buf_attach { ++ struct device *dev; ++ u64 len; ++ unsigned long userptr; ++ struct sg_table *sgt; ++ struct page **pages; ++ size_t npages; ++}; ++ ++struct ipu_psys_kbuffer { ++ u64 len; ++ unsigned long userptr; ++ void *kaddr; ++ struct list_head list; ++ dma_addr_t dma_addr; ++ struct sg_table *sgt; ++ struct dma_buf_attachment *db_attach; ++ struct dma_buf *dbuf; ++ u32 flags; ++ atomic_t map_count; /* The number of times this buffer is mapped */ ++ bool valid; /* True when buffer is usable */ ++}; ++ ++struct ipu_psys_desc { ++ struct ipu_psys_kbuffer *kbuf; ++ struct list_head list; ++ u32 fd; ++}; ++ ++#define inode_to_ipu_psys(inode) \ ++ container_of((inode)->i_cdev, struct ipu_psys, cdev) ++ ++void ipu_psys_setup_hw(struct ipu_psys *psys); ++void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on); ++void ipu_psys_handle_events(struct ipu_psys *psys); ++int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); ++void ipu_psys_run_next(struct ipu_psys *psys); ++struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); ++struct ipu_psys_kbuffer * ++ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); ++struct ipu_psys_kbuffer * 
++ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh); ++struct ipu_psys_kbuffer * ++ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); ++int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool); ++void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool); ++struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh); ++long ipu_ioctl_dqevent(struct ipu_psys_event *event, ++ struct ipu_psys_fh *fh, unsigned int f_flags); ++ ++#endif /* IPU_PSYS_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c +new file mode 100644 +index 0000000..4068e34 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c +@@ -0,0 +1,861 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ipu-fw-psys.h" ++#include "ipu-psys.h" ++ ++struct ipu6_psys_hw_res_variant hw_var; ++void ipu6_psys_hw_res_variant_init(void) ++{ ++ if (ipu_ver == IPU6_VER_6SE) { ++ hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID; ++ } else if (ipu_ver == IPU6_VER_6) { ++ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID; ++ } else if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) { ++ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID; ++ } else { ++ WARN(1, "ipu6 psys res var is not initialised correctly."); ++ } ++ ++ hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn; ++ hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap; ++ hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem; ++ hw_var.get_pgm_by_proc = ipu6_fw_psys_get_pgm_by_process; ++} ++ ++static const struct ipu_fw_resource_definitions *get_res(void) ++{ ++ if (ipu_ver == IPU6_VER_6SE) ++ return 
ipu6se_res_defs; ++ ++ if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) ++ return ipu6ep_res_defs; ++ ++ return ipu6_res_defs; ++} ++ ++static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) ++{ ++ if (elements <= 0) { ++ res->bitmap = NULL; ++ return 0; ++ } ++ ++ res->bitmap = bitmap_zalloc(elements, GFP_KERNEL); ++ if (!res->bitmap) ++ return -ENOMEM; ++ res->elements = elements; ++ res->id = id; ++ return 0; ++} ++ ++static unsigned long ++ipu_resource_alloc_with_pos(struct ipu_resource *res, int n, ++ int pos, ++ struct ipu_resource_alloc *alloc, ++ enum ipu_resource_type type) ++{ ++ unsigned long p; ++ ++ if (n <= 0) { ++ alloc->elements = 0; ++ return 0; ++ } ++ ++ if (!res->bitmap || pos >= res->elements) ++ return (unsigned long)(-ENOSPC); ++ ++ p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0); ++ alloc->resource = NULL; ++ ++ if (p != pos) ++ return (unsigned long)(-ENOSPC); ++ bitmap_set(res->bitmap, p, n); ++ alloc->resource = res; ++ alloc->elements = n; ++ alloc->pos = p; ++ alloc->type = type; ++ ++ return pos; ++} ++ ++static unsigned long ++ipu_resource_alloc(struct ipu_resource *res, int n, ++ struct ipu_resource_alloc *alloc, ++ enum ipu_resource_type type) ++{ ++ unsigned long p; ++ ++ if (n <= 0) { ++ alloc->elements = 0; ++ return 0; ++ } ++ ++ if (!res->bitmap) ++ return (unsigned long)(-ENOSPC); ++ ++ p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); ++ alloc->resource = NULL; ++ ++ if (p >= res->elements) ++ return (unsigned long)(-ENOSPC); ++ bitmap_set(res->bitmap, p, n); ++ alloc->resource = res; ++ alloc->elements = n; ++ alloc->pos = p; ++ alloc->type = type; ++ ++ return p; ++} ++ ++static void ipu_resource_free(struct ipu_resource_alloc *alloc) ++{ ++ if (alloc->elements <= 0) ++ return; ++ ++ if (alloc->type == IPU_RESOURCE_DFM) ++ *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); ++ else ++ bitmap_clear(alloc->resource->bitmap, alloc->pos, ++ 
alloc->elements); ++ alloc->resource = NULL; ++} ++ ++static void ipu_resource_cleanup(struct ipu_resource *res) ++{ ++ bitmap_free(res->bitmap); ++ res->bitmap = NULL; ++} ++ ++/********** IPU PSYS-specific resource handling **********/ ++int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool) ++{ ++ int i, j, k, ret; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ res_defs = get_res(); ++ ++ spin_lock_init(&pool->queues_lock); ++ pool->cells = 0; ++ ++ for (i = 0; i < res_defs->num_dev_channels; i++) { ++ ret = ipu_resource_init(&pool->dev_channels[i], i, ++ res_defs->dev_channels[i]); ++ if (ret) ++ goto error; ++ } ++ ++ for (j = 0; j < res_defs->num_ext_mem_ids; j++) { ++ ret = ipu_resource_init(&pool->ext_memory[j], j, ++ res_defs->ext_mem_ids[j]); ++ if (ret) ++ goto memory_error; ++ } ++ ++ for (k = 0; k < res_defs->num_dfm_ids; k++) { ++ ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); ++ if (ret) ++ goto dfm_error; ++ } ++ ++ spin_lock(&pool->queues_lock); ++ if (ipu_ver == IPU6_VER_6SE) ++ bitmap_zero(pool->cmd_queues, ++ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID); ++ else ++ bitmap_zero(pool->cmd_queues, ++ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID); ++ spin_unlock(&pool->queues_lock); ++ ++ return 0; ++ ++dfm_error: ++ for (k--; k >= 0; k--) ++ ipu_resource_cleanup(&pool->dfms[k]); ++ ++memory_error: ++ for (j--; j >= 0; j--) ++ ipu_resource_cleanup(&pool->ext_memory[j]); ++ ++error: ++ for (i--; i >= 0; i--) ++ ipu_resource_cleanup(&pool->dev_channels[i]); ++ return ret; ++} ++ ++void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, ++ struct ipu_psys_resource_pool *dest) ++{ ++ int i; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ res_defs = get_res(); ++ ++ dest->cells = src->cells; ++ for (i = 0; i < res_defs->num_dev_channels; i++) ++ *dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap; ++ ++ for (i = 0; i < res_defs->num_ext_mem_ids; i++) ++ *dest->ext_memory[i].bitmap = 
*src->ext_memory[i].bitmap; ++ ++ for (i = 0; i < res_defs->num_dfm_ids; i++) ++ *dest->dfms[i].bitmap = *src->dfms[i].bitmap; ++} ++ ++void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool) ++{ ++ u32 i; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ res_defs = get_res(); ++ for (i = 0; i < res_defs->num_dev_channels; i++) ++ ipu_resource_cleanup(&pool->dev_channels[i]); ++ ++ for (i = 0; i < res_defs->num_ext_mem_ids; i++) ++ ipu_resource_cleanup(&pool->ext_memory[i]); ++ ++ for (i = 0; i < res_defs->num_dfm_ids; i++) ++ ipu_resource_cleanup(&pool->dfms[i]); ++} ++ ++static int __alloc_one_resrc(const struct device *dev, ++ struct ipu_fw_psys_process *process, ++ struct ipu_resource *resource, ++ struct ipu_fw_generic_program_manifest *pm, ++ u32 resource_id, ++ struct ipu_psys_resource_alloc *alloc) ++{ ++ const u16 resource_req = pm->dev_chn_size[resource_id]; ++ const u16 resource_offset_req = pm->dev_chn_offset[resource_id]; ++ unsigned long retl; ++ ++ if (!resource_req) ++ return -ENXIO; ++ ++ if (alloc->resources >= IPU_MAX_RESOURCES) { ++ dev_err(dev, "out of resource handles\n"); ++ return -ENOSPC; ++ } ++ if (resource_offset_req != (u16)(-1)) ++ retl = ipu_resource_alloc_with_pos ++ (resource, ++ resource_req, ++ resource_offset_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_DEV_CHN); ++ else ++ retl = ipu_resource_alloc ++ (resource, resource_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_DEV_CHN); ++ if (IS_ERR_VALUE(retl)) { ++ dev_dbg(dev, "out of device channel resources\n"); ++ return (int)retl; ++ } ++ alloc->resources++; ++ ++ return 0; ++} ++ ++static int ipu_psys_allocate_one_dfm(const struct device *dev, ++ struct ipu_fw_psys_process *process, ++ struct ipu_resource *resource, ++ struct ipu_fw_generic_program_manifest *pm, ++ u32 resource_id, ++ struct ipu_psys_resource_alloc *alloc) ++{ ++ u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id]; ++ u32 active_dfm_bitmap_req = 
pm->dfm_active_port_bitmap[resource_id]; ++ const u8 is_relocatable = pm->is_dfm_relocatable[resource_id]; ++ struct ipu_resource_alloc *alloc_resource; ++ unsigned long p = 0; ++ ++ if (!dfm_bitmap_req) ++ return -ENXIO; ++ ++ if (alloc->resources >= IPU_MAX_RESOURCES) { ++ dev_err(dev, "out of resource handles\n"); ++ return -ENOSPC; ++ } ++ ++ if (!resource->bitmap) ++ return -ENOSPC; ++ ++ if (!is_relocatable) { ++ if (*resource->bitmap & dfm_bitmap_req) { ++ dev_warn(dev, ++ "out of dfm resources, req 0x%x, get 0x%lx\n", ++ dfm_bitmap_req, *resource->bitmap); ++ return -ENOSPC; ++ } ++ *resource->bitmap |= dfm_bitmap_req; ++ } else { ++ unsigned int n = hweight32(dfm_bitmap_req); ++ ++ p = bitmap_find_next_zero_area(resource->bitmap, ++ resource->elements, 0, n, 0); ++ ++ if (p >= resource->elements) ++ return -ENOSPC; ++ ++ bitmap_set(resource->bitmap, p, n); ++ dfm_bitmap_req = dfm_bitmap_req << p; ++ active_dfm_bitmap_req = active_dfm_bitmap_req << p; ++ } ++ ++ alloc_resource = &alloc->resource_alloc[alloc->resources]; ++ alloc_resource->resource = resource; ++ /* Using elements to indicate the bitmap */ ++ alloc_resource->elements = dfm_bitmap_req; ++ alloc_resource->pos = p; ++ alloc_resource->type = IPU_RESOURCE_DFM; ++ ++ alloc->resources++; ++ ++ return 0; ++} ++ ++/* ++ * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM) ++ * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.) 
++ */ ++static int __alloc_mem_resrc(const struct device *dev, ++ struct ipu_fw_psys_process *process, ++ struct ipu_resource *resource, ++ struct ipu_fw_generic_program_manifest *pm, ++ u32 ext_mem_type_id, u32 ext_mem_bank_id, ++ struct ipu_psys_resource_alloc *alloc) ++{ ++ const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id]; ++ const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id]; ++ ++ unsigned long retl; ++ ++ if (!memory_resource_req) ++ return -ENXIO; ++ ++ if (alloc->resources >= IPU_MAX_RESOURCES) { ++ dev_err(dev, "out of resource handles\n"); ++ return -ENOSPC; ++ } ++ if (memory_offset_req != (u16)(-1)) ++ retl = ipu_resource_alloc_with_pos ++ (resource, ++ memory_resource_req, memory_offset_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_EXT_MEM); ++ else ++ retl = ipu_resource_alloc ++ (resource, memory_resource_req, ++ &alloc->resource_alloc[alloc->resources], ++ IPU_RESOURCE_EXT_MEM); ++ if (IS_ERR_VALUE(retl)) { ++ dev_dbg(dev, "out of memory resources\n"); ++ return (int)retl; ++ } ++ ++ alloc->resources++; ++ ++ return 0; ++} ++ ++int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool) ++{ ++ unsigned long p; ++ int size, start; ++ ++ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; ++ ++ if (ipu_ver == IPU6_VER_6SE) { ++ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; ++ start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; ++ } ++ ++ spin_lock(&pool->queues_lock); ++ /* find available cmd queue from ppg0_cmd_id */ ++ p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0); ++ ++ if (p >= size) { ++ spin_unlock(&pool->queues_lock); ++ return -ENOSPC; ++ } ++ ++ bitmap_set(pool->cmd_queues, p, 1); ++ spin_unlock(&pool->queues_lock); ++ ++ return p; ++} ++ ++void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, ++ u8 queue_id) ++{ ++ spin_lock(&pool->queues_lock); ++ bitmap_clear(pool->cmd_queues, queue_id, 1); ++ 
spin_unlock(&pool->queues_lock); ++} ++ ++int ipu_psys_try_allocate_resources(struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_pool *pool) ++{ ++ u32 id, idx; ++ u32 mem_type_id; ++ int ret, i; ++ u16 *process_offset_table; ++ u8 processes; ++ u32 cells = 0; ++ struct ipu_psys_resource_alloc *alloc; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ if (!pg) ++ return -EINVAL; ++ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); ++ processes = pg->process_count; ++ ++ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); ++ if (!alloc) ++ return -ENOMEM; ++ ++ res_defs = get_res(); ++ for (i = 0; i < processes; i++) { ++ u32 cell; ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[i]); ++ struct ipu_fw_generic_program_manifest pm; ++ ++ memset(&pm, 0, sizeof(pm)); ++ ++ if (!process) { ++ dev_err(dev, "can not get process\n"); ++ ret = -ENOENT; ++ goto free_out; ++ } ++ ++ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); ++ if (ret < 0) { ++ dev_err(dev, "can not get manifest\n"); ++ goto free_out; ++ } ++ ++ if (pm.cell_id == res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type) { ++ cell = res_defs->num_cells; ++ } else if ((pm.cell_id != res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type)) { ++ cell = pm.cell_id; ++ } else { ++ /* Find a free cell of desired type */ ++ u32 type = pm.cell_type_id; ++ ++ for (cell = 0; cell < res_defs->num_cells; cell++) ++ if (res_defs->cells[cell] == type && ++ ((pool->cells | cells) & (1 << cell)) == 0) ++ break; ++ if (cell >= res_defs->num_cells) { ++ dev_dbg(dev, "no free cells of right type\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ } ++ if (cell < res_defs->num_cells) ++ cells |= 1 << cell; ++ if (pool->cells & cells) { ++ dev_dbg(dev, "out of cell resources\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ ++ if 
(pm.dev_chn_size) { ++ for (id = 0; id < res_defs->num_dev_channels; id++) { ++ ret = __alloc_one_resrc(dev, process, ++ &pool->dev_channels[id], ++ &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.dfm_port_bitmap) { ++ for (id = 0; id < res_defs->num_dfm_ids; id++) { ++ ret = ipu_psys_allocate_one_dfm ++ (dev, process, ++ &pool->dfms[id], &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.ext_mem_size) { ++ for (mem_type_id = 0; ++ mem_type_id < res_defs->num_ext_mem_types; ++ mem_type_id++) { ++ u32 bank = res_defs->num_ext_mem_ids; ++ ++ if (cell != res_defs->num_cells) { ++ idx = res_defs->cell_mem_row * cell + ++ mem_type_id; ++ bank = res_defs->cell_mem[idx]; ++ } ++ ++ if (bank == res_defs->num_ext_mem_ids) ++ continue; ++ ++ ret = __alloc_mem_resrc(dev, process, ++ &pool->ext_memory[bank], ++ &pm, mem_type_id, bank, ++ alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ } ++ } ++ } ++ ++ pool->cells |= cells; ++ ++ kfree(alloc); ++ return 0; ++ ++free_out: ++ dev_dbg(dev, "failed to try_allocate resource\n"); ++ kfree(alloc); ++ return ret; ++} ++ ++/* ++ * Allocate resources for pg from `pool'. Mark the allocated ++ * resources into `alloc'. Returns 0 on success, -ENOSPC ++ * if there are no enough resources, in which cases resources ++ * are not allocated at all, or some other error on other conditions. 
++ */ ++int ipu_psys_allocate_resources(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool *pool) ++{ ++ u32 id; ++ u32 mem_type_id; ++ int ret, i; ++ u16 *process_offset_table; ++ u8 processes; ++ u32 cells = 0; ++ int p, idx; ++ u32 bmp, a_bmp; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ if (!pg) ++ return -EINVAL; ++ ++ res_defs = get_res(); ++ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); ++ processes = pg->process_count; ++ ++ for (i = 0; i < processes; i++) { ++ u32 cell; ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[i]); ++ struct ipu_fw_generic_program_manifest pm; ++ ++ memset(&pm, 0, sizeof(pm)); ++ if (!process) { ++ dev_err(dev, "can not get process\n"); ++ ret = -ENOENT; ++ goto free_out; ++ } ++ ++ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); ++ if (ret < 0) { ++ dev_err(dev, "can not get manifest\n"); ++ goto free_out; ++ } ++ ++ if (pm.cell_id == res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type) { ++ cell = res_defs->num_cells; ++ } else if ((pm.cell_id != res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type)) { ++ cell = pm.cell_id; ++ } else { ++ /* Find a free cell of desired type */ ++ u32 type = pm.cell_type_id; ++ ++ for (cell = 0; cell < res_defs->num_cells; cell++) ++ if (res_defs->cells[cell] == type && ++ ((pool->cells | cells) & (1 << cell)) == 0) ++ break; ++ if (cell >= res_defs->num_cells) { ++ dev_dbg(dev, "no free cells of right type\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); ++ if (ret) ++ goto free_out; ++ } ++ if (cell < res_defs->num_cells) ++ cells |= 1 << cell; ++ if (pool->cells & cells) { ++ dev_dbg(dev, "out of cell resources\n"); ++ ret = -ENOSPC; ++ goto free_out; ++ } ++ ++ if 
(pm.dev_chn_size) { ++ for (id = 0; id < res_defs->num_dev_channels; id++) { ++ ret = __alloc_one_resrc(dev, process, ++ &pool->dev_channels[id], ++ &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ ++ idx = alloc->resources - 1; ++ p = alloc->resource_alloc[idx].pos; ++ ret = ipu_fw_psys_set_proc_dev_chn(process, id, ++ p); ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.dfm_port_bitmap) { ++ for (id = 0; id < res_defs->num_dfm_ids; id++) { ++ ret = ipu_psys_allocate_one_dfm(dev, process, ++ &pool->dfms[id], ++ &pm, id, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ ++ idx = alloc->resources - 1; ++ p = alloc->resource_alloc[idx].pos; ++ bmp = pm.dfm_port_bitmap[id]; ++ bmp = bmp << p; ++ a_bmp = pm.dfm_active_port_bitmap[id]; ++ a_bmp = a_bmp << p; ++ ret = ipu_fw_psys_set_proc_dfm_bitmap(process, ++ id, bmp, ++ a_bmp); ++ if (ret) ++ goto free_out; ++ } ++ } ++ ++ if (pm.ext_mem_size) { ++ for (mem_type_id = 0; ++ mem_type_id < res_defs->num_ext_mem_types; ++ mem_type_id++) { ++ u32 bank = res_defs->num_ext_mem_ids; ++ ++ if (cell != res_defs->num_cells) { ++ idx = res_defs->cell_mem_row * cell + ++ mem_type_id; ++ bank = res_defs->cell_mem[idx]; ++ } ++ if (bank == res_defs->num_ext_mem_ids) ++ continue; ++ ++ ret = __alloc_mem_resrc(dev, process, ++ &pool->ext_memory[bank], ++ &pm, mem_type_id, ++ bank, alloc); ++ if (ret == -ENXIO) ++ continue; ++ ++ if (ret) ++ goto free_out; ++ ++ /* no return value check here because fw api ++ * will do some checks, and would return ++ * non-zero except mem_type_id == 0. ++ * This maybe caused by that above flow of ++ * allocating mem_bank_id is improper. 
++ */ ++ idx = alloc->resources - 1; ++ p = alloc->resource_alloc[idx].pos; ++ ipu_fw_psys_set_process_ext_mem(process, ++ mem_type_id, ++ bank, p); ++ } ++ } ++ } ++ alloc->cells |= cells; ++ pool->cells |= cells; ++ return 0; ++ ++free_out: ++ dev_err(dev, "failed to allocate resources, ret %d\n", ret); ++ ipu_psys_reset_process_cell(dev, pg, pg_manifest, i + 1); ++ ipu_psys_free_resources(alloc, pool); ++ return ret; ++} ++ ++int ipu_psys_move_resources(const struct device *dev, ++ struct ipu_psys_resource_alloc *alloc, ++ struct ipu_psys_resource_pool ++ *source_pool, struct ipu_psys_resource_pool ++ *target_pool) ++{ ++ int i; ++ ++ if (target_pool->cells & alloc->cells) { ++ dev_dbg(dev, "out of cell resources\n"); ++ return -ENOSPC; ++ } ++ ++ for (i = 0; i < alloc->resources; i++) { ++ unsigned long bitmap = 0; ++ unsigned int id = alloc->resource_alloc[i].resource->id; ++ unsigned long fbit, end; ++ ++ switch (alloc->resource_alloc[i].type) { ++ case IPU_RESOURCE_DEV_CHN: ++ bitmap_set(&bitmap, alloc->resource_alloc[i].pos, ++ alloc->resource_alloc[i].elements); ++ if (*target_pool->dev_channels[id].bitmap & bitmap) ++ return -ENOSPC; ++ break; ++ case IPU_RESOURCE_EXT_MEM: ++ end = alloc->resource_alloc[i].elements + ++ alloc->resource_alloc[i].pos; ++ ++ fbit = find_next_bit(target_pool->ext_memory[id].bitmap, ++ end, alloc->resource_alloc[i].pos); ++ /* if find_next_bit returns "end" it didn't find 1bit */ ++ if (end != fbit) ++ return -ENOSPC; ++ break; ++ case IPU_RESOURCE_DFM: ++ bitmap = alloc->resource_alloc[i].elements; ++ if (*target_pool->dfms[id].bitmap & bitmap) ++ return -ENOSPC; ++ break; ++ default: ++ dev_err(dev, "Illegal resource type\n"); ++ return -EINVAL; ++ } ++ } ++ ++ for (i = 0; i < alloc->resources; i++) { ++ u32 id = alloc->resource_alloc[i].resource->id; ++ ++ switch (alloc->resource_alloc[i].type) { ++ case IPU_RESOURCE_DEV_CHN: ++ bitmap_set(target_pool->dev_channels[id].bitmap, ++ alloc->resource_alloc[i].pos, ++ 
alloc->resource_alloc[i].elements); ++ ipu_resource_free(&alloc->resource_alloc[i]); ++ alloc->resource_alloc[i].resource = ++ &target_pool->dev_channels[id]; ++ break; ++ case IPU_RESOURCE_EXT_MEM: ++ bitmap_set(target_pool->ext_memory[id].bitmap, ++ alloc->resource_alloc[i].pos, ++ alloc->resource_alloc[i].elements); ++ ipu_resource_free(&alloc->resource_alloc[i]); ++ alloc->resource_alloc[i].resource = ++ &target_pool->ext_memory[id]; ++ break; ++ case IPU_RESOURCE_DFM: ++ *target_pool->dfms[id].bitmap |= ++ alloc->resource_alloc[i].elements; ++ *alloc->resource_alloc[i].resource->bitmap &= ++ ~(alloc->resource_alloc[i].elements); ++ alloc->resource_alloc[i].resource = ++ &target_pool->dfms[id]; ++ break; ++ default: ++ /* ++ * Just keep compiler happy. This case failed already ++ * in above loop. ++ */ ++ break; ++ } ++ } ++ ++ target_pool->cells |= alloc->cells; ++ source_pool->cells &= ~alloc->cells; ++ ++ return 0; ++} ++ ++void ipu_psys_reset_process_cell(const struct device *dev, ++ struct ipu_fw_psys_process_group *pg, ++ void *pg_manifest, ++ int process_count) ++{ ++ int i; ++ u16 *process_offset_table; ++ const struct ipu_fw_resource_definitions *res_defs; ++ ++ if (!pg) ++ return; ++ ++ res_defs = get_res(); ++ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); ++ for (i = 0; i < process_count; i++) { ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[i]); ++ struct ipu_fw_generic_program_manifest pm; ++ int ret; ++ ++ if (!process) ++ break; ++ ++ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); ++ if (ret < 0) { ++ dev_err(dev, "can not get manifest\n"); ++ break; ++ } ++ if ((pm.cell_id != res_defs->num_cells && ++ pm.cell_type_id == res_defs->num_cells_type)) ++ continue; ++ /* no return value check here because if finding free cell ++ * failed, process cell would not set then calling clear_cell ++ * will return non-zero. 
++ */ ++ ipu_fw_psys_clear_process_cell(process); ++ } ++} ++ ++/* Free resources marked in `alloc' from `resources' */ ++void ipu_psys_free_resources(struct ipu_psys_resource_alloc ++ *alloc, struct ipu_psys_resource_pool *pool) ++{ ++ unsigned int i; ++ ++ pool->cells &= ~alloc->cells; ++ alloc->cells = 0; ++ for (i = 0; i < alloc->resources; i++) ++ ipu_resource_free(&alloc->resource_alloc[i]); ++ alloc->resources = 0; ++} +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c +new file mode 100644 +index 0000000..fe65a0a +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c +@@ -0,0 +1,607 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6-platform-resources.h" ++ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++static const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = { ++ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, ++ IPU6_FW_PSYS_VP_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* 
ICA_MEDIUM */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ ++ IPU6_FW_PSYS_GDC_TYPE_ID, ++ IPU6_FW_PSYS_TNR_TYPE_ID, ++}; ++ ++static const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, ++}; ++ ++static const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { ++ IPU6_FW_PSYS_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, ++ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM2_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM3_MAX_SIZE, ++ IPU6_FW_PSYS_PMEM0_MAX_SIZE ++}; ++ ++static const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { ++ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, ++}; ++ ++static const u8 ++ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { ++ { ++ /* IPU6_FW_PSYS_SP0_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM0_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_SP1_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_VP0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_DMEM3_ID, ++ 
IPU6_FW_PSYS_VMEM0_ID, ++ IPU6_FW_PSYS_BAMEM0_ID, ++ IPU6_FW_PSYS_PMEM0_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC1_ID BNLM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC2_ID DM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC3_ID ACM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC9_ID GLTM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC10_ID XNR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_ICA_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_LSC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_DPC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, 
++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AWB_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AE_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_DOL_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_PAF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ } ++}; ++ ++static const struct ipu_fw_resource_definitions ipu6_defs = { ++ .cells = ipu6_fw_psys_cell_types, ++ .num_cells = IPU6_FW_PSYS_N_CELL_ID, ++ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, ++ ++ .dev_channels = ipu6_fw_num_dev_channels, ++ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, ++ ++ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, ++ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, ++ .ext_mem_ids = ipu6_fw_psys_mem_size, ++ ++ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, ++ ++ .dfms = ipu6_fw_psys_dfms, ++ ++ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, ++ .cell_mem = &ipu6_fw_psys_c_mem[0][0], ++}; ++ ++const struct ipu_fw_resource_definitions 
*ipu6_res_defs = &ipu6_defs; ++ ++/********** Generic resource handling **********/ ++ ++int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, ++ u16 value) ++{ ++ struct ipu6_fw_psys_process_ext *pm_ext; ++ u8 ps_ext_offset; ++ ++ ps_ext_offset = ptr->process_extension_offset; ++ if (!ps_ext_offset) ++ return -EINVAL; ++ ++ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); ++ ++ pm_ext->dev_chn_offset[offset] = value; ++ ++ return 0; ++} ++ ++int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, ++ u16 id, u32 bitmap, ++ u32 active_bitmap) ++{ ++ struct ipu6_fw_psys_process_ext *pm_ext; ++ u8 ps_ext_offset; ++ ++ ps_ext_offset = ptr->process_extension_offset; ++ if (!ps_ext_offset) ++ return -EINVAL; ++ ++ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); ++ ++ pm_ext->dfm_port_bitmap[id] = bitmap; ++ pm_ext->dfm_active_port_bitmap[id] = active_bitmap; ++ ++ return 0; ++} ++ ++int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, ++ u16 type_id, u16 mem_id, u16 offset) ++{ ++ struct ipu6_fw_psys_process_ext *pm_ext; ++ u8 ps_ext_offset; ++ ++ ps_ext_offset = ptr->process_extension_offset; ++ if (!ps_ext_offset) ++ return -EINVAL; ++ ++ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); ++ ++ pm_ext->ext_mem_offset[type_id] = offset; ++ pm_ext->ext_mem_id[type_id] = mem_id; ++ ++ return 0; ++} ++ ++static struct ipu_fw_psys_program_manifest * ++get_program_manifest(const struct ipu_fw_psys_pgm *manifest, ++ const unsigned int program_index) ++{ ++ struct ipu_fw_psys_program_manifest *prg_manifest_base; ++ u8 *program_manifest = NULL; ++ u8 program_count; ++ unsigned int i; ++ ++ program_count = manifest->program_count; ++ ++ prg_manifest_base = (struct ipu_fw_psys_program_manifest *) ++ ((char *)manifest + manifest->program_manifest_offset); ++ if (program_index < program_count) { ++ program_manifest = (u8 *)prg_manifest_base; ++ for (i = 0; 
i < program_index; i++) ++ program_manifest += ++ ((struct ipu_fw_psys_program_manifest *) ++ program_manifest)->size; ++ } ++ ++ return (struct ipu_fw_psys_program_manifest *)program_manifest; ++} ++ ++int ++ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, ++ const struct ipu_fw_psys_pgm *pg_manifest, ++ struct ipu_fw_psys_process *process) ++{ ++ u32 program_id = process->program_idx; ++ struct ipu_fw_psys_program_manifest *pm; ++ struct ipu6_fw_psys_program_manifest_ext *pm_ext; ++ ++ pm = get_program_manifest(pg_manifest, program_id); ++ ++ if (!pm) ++ return -ENOENT; ++ ++ if (pm->program_extension_offset) { ++ pm_ext = (struct ipu6_fw_psys_program_manifest_ext *) ++ ((u8 *)pm + pm->program_extension_offset); ++ ++ gen_pm->dev_chn_size = pm_ext->dev_chn_size; ++ gen_pm->dev_chn_offset = pm_ext->dev_chn_offset; ++ gen_pm->ext_mem_size = pm_ext->ext_mem_size; ++ gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset; ++ gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable; ++ gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap; ++ gen_pm->dfm_active_port_bitmap = ++ pm_ext->dfm_active_port_bitmap; ++ } ++ ++ memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells)); ++ gen_pm->cell_id = pm->cells[0]; ++ gen_pm->cell_type_id = pm->cell_type_id; ++ ++ return 0; ++} ++ ++#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \ ++ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) ++void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note) ++{ ++ struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; ++ struct device *dev = &psys->adev->auxdev.dev; ++ u32 pgid = pg->ID; ++ u8 processes = pg->process_count; ++ u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); ++ unsigned int p, chn, mem, mem_id; ++ unsigned int mem_type, max_mem_id, dev_chn; ++ ++ if (ipu_ver == IPU6_VER_6SE) { ++ mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID; ++ max_mem_id = 
IPU6SE_FW_PSYS_N_MEM_ID; ++ dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID; ++ } else if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || ++ ipu_ver == IPU6_VER_6EP_MTL) { ++ mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID; ++ max_mem_id = IPU6_FW_PSYS_N_MEM_ID; ++ dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID; ++ } else { ++ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); ++ return; ++ } ++ ++ dev_dbg(dev, "%s %s pgid %i has %i processes:\n", ++ __func__, note, pgid, processes); ++ ++ for (p = 0; p < processes; p++) { ++ struct ipu_fw_psys_process *process = ++ (struct ipu_fw_psys_process *) ++ ((char *)pg + process_offset_table[p]); ++ struct ipu6_fw_psys_process_ext *pm_ext = ++ (struct ipu6_fw_psys_process_ext *)((u8 *)process ++ + process->process_extension_offset); ++ dev_dbg(dev, "\t process %i size=%u", p, process->size); ++ if (!process->process_extension_offset) ++ continue; ++ ++ for (mem = 0; mem < mem_type; mem++) { ++ mem_id = pm_ext->ext_mem_id[mem]; ++ if (mem_id != max_mem_id) ++ dev_dbg(dev, "\t mem type %u id %d offset=0x%x", ++ mem, mem_id, ++ pm_ext->ext_mem_offset[mem]); ++ } ++ for (chn = 0; chn < dev_chn; chn++) { ++ if (pm_ext->dev_chn_offset[chn] != (u16)(-1)) ++ dev_dbg(dev, "\t dev_chn[%u]=0x%x\n", ++ chn, pm_ext->dev_chn_offset[chn]); ++ } ++ } ++} ++#else ++void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd, const char *note) ++{ ++ if (ipu_ver == IPU6_VER_6SE || ipu_ver == IPU6_VER_6 || ++ ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) ++ return; ++ ++ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); ++} ++#endif +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c +new file mode 100644 +index 0000000..5d95843 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c +@@ -0,0 +1,621 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ 
++#include "ipu-psys.h" ++#include "ipu6-ppg.h" ++ ++extern bool enable_power_gating; ++ ++struct sched_list { ++ struct list_head list; ++ /* to protect the list */ ++ struct mutex lock; ++}; ++ ++static struct sched_list start_list = { ++ .list = LIST_HEAD_INIT(start_list.list), ++ .lock = __MUTEX_INITIALIZER(start_list.lock), ++}; ++ ++static struct sched_list stop_list = { ++ .list = LIST_HEAD_INIT(stop_list.list), ++ .lock = __MUTEX_INITIALIZER(stop_list.lock), ++}; ++ ++static struct sched_list *get_sc_list(enum SCHED_LIST type) ++{ ++ /* for debug purposes */ ++ WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST); ++ ++ if (type == SCHED_START_LIST) ++ return &start_list; ++ return &stop_list; ++} ++ ++static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head) ++{ ++ struct ipu_psys_ppg *tmp; ++ ++ list_for_each_entry(tmp, head, sched_list) { ++ if (kppg == tmp) ++ return true; ++ } ++ ++ return false; ++} ++ ++void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type) ++{ ++ struct sched_list *sc_list = get_sc_list(type); ++ struct ipu_psys_ppg *tmp0, *tmp1; ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ ++ mutex_lock(&sc_list->lock); ++ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { ++ if (tmp0 == kppg) { ++ dev_dbg(dev, "remove from %s list, kppg(%d 0x%p) state %d\n", ++ type == SCHED_START_LIST ? 
"start" : "stop", ++ kppg->kpg->pg->ID, kppg, kppg->state); ++ list_del_init(&kppg->sched_list); ++ } ++ } ++ mutex_unlock(&sc_list->lock); ++} ++ ++void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type) ++{ ++ int cur_pri = kppg->pri_base + kppg->pri_dynamic; ++ struct sched_list *sc_list = get_sc_list(type); ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *tmp0, *tmp1; ++ ++ dev_dbg(dev, ++ "add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n", ++ type == SCHED_START_LIST ? "start" : "stop", ++ kppg->kpg->pg->ID, kppg, kppg->state, ++ kppg->pri_base, kppg->pri_dynamic, kppg->fh); ++ ++ mutex_lock(&sc_list->lock); ++ if (list_empty(&sc_list->list)) { ++ list_add(&kppg->sched_list, &sc_list->list); ++ goto out; ++ } ++ ++ if (is_kppg_in_list(kppg, &sc_list->list)) { ++ dev_dbg(dev, "kppg already in list\n"); ++ goto out; ++ } ++ ++ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { ++ int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic; ++ ++ dev_dbg(dev, ++ "found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n", ++ tmp0->kpg->pg->ID, tmp0, tmp0->state, ++ tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh); ++ ++ if (type == SCHED_START_LIST && tmp_pri > cur_pri) { ++ list_add(&kppg->sched_list, tmp0->sched_list.prev); ++ goto out; ++ } else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) { ++ list_add(&kppg->sched_list, tmp0->sched_list.prev); ++ goto out; ++ } ++ } ++ ++ list_add_tail(&kppg->sched_list, &sc_list->list); ++out: ++ mutex_unlock(&sc_list->lock); ++} ++ ++static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_resource_pool *try_res_pool; ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ int ret = 0; ++ int state; ++ ++ try_res_pool = kzalloc(sizeof(*try_res_pool), GFP_KERNEL); ++ if (IS_ERR_OR_NULL(try_res_pool)) ++ return -ENOMEM; ++ ++ 
mutex_lock(&kppg->mutex); ++ state = kppg->state; ++ mutex_unlock(&kppg->mutex); ++ if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING || ++ state == PPG_STATE_RESUMED) ++ goto exit; ++ ++ ret = ipu_psys_res_pool_init(try_res_pool); ++ if (ret < 0) { ++ dev_err(dev, "unable to alloc pg resources\n"); ++ WARN_ON(1); ++ goto exit; ++ } ++ ++ ipu_psys_resource_copy(&psys->res_pool_running, try_res_pool); ++ ret = ipu_psys_try_allocate_resources(dev, kppg->kpg->pg, ++ kppg->manifest, ++ try_res_pool); ++ ++ ipu_psys_res_pool_cleanup(try_res_pool); ++exit: ++ kfree(try_res_pool); ++ ++ return ret; ++} ++ ++static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping) ++{ ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_START || ++ kppg->state == PPG_STATE_RESUME) { ++ ipu_psys_scheduler_add_kppg(kppg, ++ SCHED_START_LIST); ++ } else if (kppg->state == PPG_STATE_RUNNING) { ++ ipu_psys_scheduler_add_kppg(kppg, ++ SCHED_STOP_LIST); ++ } else if (kppg->state == PPG_STATE_SUSPENDING || ++ kppg->state == PPG_STATE_STOPPING) { ++ /* there are some suspending/stopping ppgs */ ++ *stopping = true; ++ } else if (kppg->state == PPG_STATE_RESUMING || ++ kppg->state == PPG_STATE_STARTING) { ++ /* how about kppg are resuming/starting? 
*/ ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} ++ ++static void ipu_psys_scheduler_update_start_ppg_priority(void) ++{ ++ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); ++ struct ipu_psys_ppg *kppg, *tmp; ++ ++ mutex_lock(&sc_list->lock); ++ if (!list_empty(&sc_list->list)) ++ list_for_each_entry_safe(kppg, tmp, &sc_list->list, sched_list) ++ kppg->pri_dynamic--; ++ mutex_unlock(&sc_list->lock); ++} ++ ++static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys) ++{ ++ struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST); ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg; ++ bool resched = false; ++ ++ mutex_lock(&sc_list->lock); ++ if (list_empty(&sc_list->list)) { ++ /* some ppgs are RESUMING/STARTING */ ++ dev_dbg(dev, "no candidated stop ppg\n"); ++ mutex_unlock(&sc_list->lock); ++ return false; ++ } ++ kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg, ++ sched_list); ++ mutex_unlock(&sc_list->lock); ++ ++ mutex_lock(&kppg->mutex); ++ if (!(kppg->state & PPG_STATE_STOP)) { ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, kppg->state, PPG_STATE_SUSPEND); ++ kppg->state = PPG_STATE_SUSPEND; ++ resched = true; ++ } ++ mutex_unlock(&kppg->mutex); ++ ++ return resched; ++} ++ ++/* ++ * search all kppgs and sort them into start_list and stop_list, always start ++ * first kppg(high priority) in start_list; ++ * if there is resource contention, it would switch kppgs in stop_list ++ * to suspend state one by one ++ */ ++static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys) ++{ ++ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg, *kppg0; ++ bool stopping_existed = false; ++ int ret; ++ ++ ipu_psys_scheduler_ppg_sort(psys, &stopping_existed); ++ ++ mutex_lock(&sc_list->lock); ++ if (list_empty(&sc_list->list)) { ++ dev_dbg(dev, "no ppg to start\n"); ++ 
mutex_unlock(&sc_list->lock); ++ return false; ++ } ++ ++ list_for_each_entry_safe(kppg, kppg0, ++ &sc_list->list, sched_list) { ++ mutex_unlock(&sc_list->lock); ++ ++ ret = ipu_psys_detect_resource_contention(kppg); ++ if (ret < 0) { ++ dev_dbg(dev, "ppg %d resource detect failed(%d)\n", ++ kppg->kpg->pg->ID, ret); ++ /* ++ * switch out other ppg in 2 cases: ++ * 1. resource contention ++ * 2. no suspending/stopping ppg ++ */ ++ if (ret == -ENOSPC) { ++ if (!stopping_existed && ++ ipu_psys_scheduler_switch_ppg(psys)) { ++ return true; ++ } ++ dev_dbg(dev, "ppg is suspending/stopping\n"); ++ } else { ++ dev_err(dev, "detect resource error %d\n", ret); ++ } ++ } else { ++ kppg->pri_dynamic = 0; ++ ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_START) ++ ipu_psys_ppg_start(kppg); ++ else ++ ipu_psys_ppg_resume(kppg); ++ mutex_unlock(&kppg->mutex); ++ ++ ipu_psys_scheduler_remove_kppg(kppg, ++ SCHED_START_LIST); ++ ipu_psys_scheduler_update_start_ppg_priority(); ++ } ++ mutex_lock(&sc_list->lock); ++ } ++ mutex_unlock(&sc_list->lock); ++ ++ return false; ++} ++ ++static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_fh *fh; ++ bool resched = false; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry(kppg, &sched->ppgs, list) { ++ if (ipu_psys_ppg_enqueue_bufsets(kppg)) ++ resched = true; ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return resched; ++} ++ ++/* ++ * This function will check all kppgs within fhs, and if kppg state ++ * is STOP or SUSPEND, l-scheduler will call ppg function to stop ++ * or suspend it and update stop list ++ */ ++ ++static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct 
ipu_psys_fh *fh; ++ bool stopping_exit = false; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state & PPG_STATE_STOP) { ++ ipu_psys_ppg_stop(kppg); ++ ipu_psys_scheduler_remove_kppg(kppg, ++ SCHED_STOP_LIST); ++ } else if (kppg->state == PPG_STATE_SUSPEND && ++ list_empty(&kppg->kcmds_processing_list)) { ++ ipu_psys_ppg_suspend(kppg); ++ ipu_psys_scheduler_remove_kppg(kppg, ++ SCHED_STOP_LIST); ++ } else if (kppg->state == PPG_STATE_SUSPENDING || ++ kppg->state == PPG_STATE_STOPPING) { ++ stopping_exit = true; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ return stopping_exit; ++} ++ ++static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys, ++ struct ipu_psys_ppg *kppg, ++ struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int old_ppg_state = kppg->state; ++ ++ /* ++ * Respond kcmd when ppg is in stable state: ++ * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED ++ */ ++ if (kppg->state == PPG_STATE_STARTED || ++ kppg->state == PPG_STATE_RESUMED || ++ kppg->state == PPG_STATE_RUNNING) { ++ if (kcmd->state == KCMD_STATE_PPG_START) ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ else if (kcmd->state == KCMD_STATE_PPG_STOP) ++ kppg->state = PPG_STATE_STOP; ++ } else if (kppg->state == PPG_STATE_SUSPENDED) { ++ if (kcmd->state == KCMD_STATE_PPG_START) ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ else if (kcmd->state == KCMD_STATE_PPG_STOP) ++ /* ++ * Record the previous state ++ * because here need resume at first ++ */ ++ kppg->state |= PPG_STATE_STOP; ++ else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) ++ kppg->state = PPG_STATE_RESUME; ++ } else if (kppg->state == PPG_STATE_STOPPED) { ++ if (kcmd->state == KCMD_STATE_PPG_START) { ++ kppg->state = 
PPG_STATE_START; ++ } else if (kcmd->state == KCMD_STATE_PPG_STOP) { ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ } else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) { ++ dev_err(dev, "ppg %p stopped!\n", kppg); ++ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); ++ } ++ } ++ ++ if (old_ppg_state != kppg->state) ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, old_ppg_state, kppg->state); ++} ++ ++static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys) ++{ ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (list_empty(&kppg->kcmds_new_list)) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ }; ++ ++ kcmd = list_first_entry(&kppg->kcmds_new_list, ++ struct ipu_psys_kcmd, list); ++ ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd); ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} ++ ++static bool is_ready_to_enter_power_gating(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (!list_empty(&kppg->kcmds_new_list) || ++ !list_empty(&kppg->kcmds_processing_list)) { ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return false; ++ } ++ if (!(kppg->state == PPG_STATE_RUNNING || ++ kppg->state == PPG_STATE_STOPPED || ++ kppg->state == PPG_STATE_SUSPENDED)) { ++ mutex_unlock(&kppg->mutex); ++ 
mutex_unlock(&fh->mutex); ++ return false; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return true; ++} ++ ++static bool has_pending_kcmd(struct ipu_psys *psys) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (!list_empty(&kppg->kcmds_new_list) || ++ !list_empty(&kppg->kcmds_processing_list)) { ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return true; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return false; ++} ++ ++static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ ++ /* Assume power gating process can be aborted directly during START */ ++ if (psys->power_gating == PSYS_POWER_GATED) { ++ dev_dbg(dev, "powergating: exit ---\n"); ++ ipu_psys_exit_power_gating(psys); ++ } ++ psys->power_gating = PSYS_POWER_NORMAL; ++ return false; ++} ++ ++static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ if (!enable_power_gating) ++ return false; ++ ++ if (psys->power_gating == PSYS_POWER_NORMAL && ++ is_ready_to_enter_power_gating(psys)) { ++ /* Enter power gating */ ++ dev_dbg(dev, "powergating: enter +++\n"); ++ psys->power_gating = PSYS_POWER_GATING; ++ } ++ ++ if (psys->power_gating != PSYS_POWER_GATING) ++ return false; ++ ++ /* Suspend ppgs one by one */ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ 
mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_RUNNING) { ++ kppg->state = PPG_STATE_SUSPEND; ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return true; ++ } ++ ++ if (kppg->state != PPG_STATE_SUSPENDED && ++ kppg->state != PPG_STATE_STOPPED) { ++ /* Can't enter power gating */ ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ /* Need re-run l-scheduler to suspend ppg? */ ++ return (kppg->state & PPG_STATE_STOP || ++ kppg->state == PPG_STATE_SUSPEND); ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ psys->power_gating = PSYS_POWER_GATED; ++ ipu_psys_enter_power_gating(psys); ++ ++ return false; ++} ++ ++void ipu_psys_run_next(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ /* Wake up scheduler due to unfinished work */ ++ bool need_trigger = false; ++ /* Wait FW callback if there are stopping/suspending/running ppg */ ++ bool wait_fw_finish = false; ++ /* ++ * Code below will crash if fhs is empty. Normally this ++ * shouldn't happen. 
++ */ ++ if (list_empty(&psys->fhs)) { ++ WARN_ON(1); ++ return; ++ } ++ ++ /* Abort power gating process */ ++ if (psys->power_gating != PSYS_POWER_NORMAL && ++ has_pending_kcmd(psys)) ++ need_trigger = ipu_psys_scheduler_exit_power_gating(psys); ++ ++ /* Handle kcmd and related ppg switch */ ++ if (psys->power_gating == PSYS_POWER_NORMAL) { ++ ipu_psys_scheduler_kcmd_set(psys); ++ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); ++ need_trigger |= ipu_psys_scheduler_ppg_start(psys); ++ need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys); ++ } ++ if (!(need_trigger || wait_fw_finish)) { ++ /* Nothing to do, enter power gating */ ++ need_trigger = ipu_psys_scheduler_enter_power_gating(psys); ++ if (psys->power_gating == PSYS_POWER_GATING) ++ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); ++ } ++ ++ if (need_trigger && !wait_fw_finish) { ++ dev_dbg(dev, "scheduler: wake up\n"); ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ } ++} +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h +new file mode 100644 +index 0000000..1b67956 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h +@@ -0,0 +1,194 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2018 - 2024 Intel Corporation */ ++ ++#ifndef IPU6_PLATFORM_RESOURCES_H ++#define IPU6_PLATFORM_RESOURCES_H ++ ++#include "ipu-platform-resources.h" ++ ++#define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 0 ++ ++enum { ++ IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, ++ IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID, ++ 
IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID, ++ IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID, ++ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID, ++ IPU6_FW_PSYS_LB_VMEM_TYPE_ID, ++ IPU6_FW_PSYS_DMEM_TYPE_ID, ++ IPU6_FW_PSYS_VMEM_TYPE_ID, ++ IPU6_FW_PSYS_BAMEM_TYPE_ID, ++ IPU6_FW_PSYS_PMEM_TYPE_ID, ++ IPU6_FW_PSYS_N_MEM_TYPE_ID ++}; ++ ++enum ipu6_mem_id { ++ IPU6_FW_PSYS_VMEM0_ID = 0, /* ISP0 VMEM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, /* TRANSFER VMEM 0 */ ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, /* TRANSFER VMEM 1 */ ++ IPU6_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ ++ IPU6_FW_PSYS_BAMEM0_ID, /* ISP0 BAMEM */ ++ IPU6_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ ++ IPU6_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ ++ IPU6_FW_PSYS_DMEM2_ID, /* SPP1 Dmem */ ++ IPU6_FW_PSYS_DMEM3_ID, /* ISP0 Dmem */ ++ IPU6_FW_PSYS_PMEM0_ID, /* ISP0 PMEM */ ++ IPU6_FW_PSYS_N_MEM_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, ++ 
IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, ++ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID, ++ IPU6_FW_PSYS_N_DEV_CHN_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0, ++ IPU6_FW_PSYS_SP_SERVER_TYPE_ID, ++ IPU6_FW_PSYS_VP_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_GDC_TYPE_ID, ++ IPU6_FW_PSYS_TNR_TYPE_ID, ++ IPU6_FW_PSYS_N_CELL_TYPE_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_SP0_ID = 0, ++ IPU6_FW_PSYS_VP0_ID, ++ IPU6_FW_PSYS_PSA_ACC_BNLM_ID, ++ IPU6_FW_PSYS_PSA_ACC_DM_ID, ++ IPU6_FW_PSYS_PSA_ACC_ACM_ID, ++ IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID, ++ IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, ++ IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, ++ IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, ++ IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID, ++ IPU6_FW_PSYS_PSA_ACC_GLTM_ID, ++ IPU6_FW_PSYS_PSA_ACC_XNR_ID, ++ IPU6_FW_PSYS_PSA_VCSC_ID, /* VCSC */ ++ IPU6_FW_PSYS_ISA_ICA_ID, ++ IPU6_FW_PSYS_ISA_LSC_ID, ++ IPU6_FW_PSYS_ISA_DPC_ID, ++ IPU6_FW_PSYS_ISA_SIS_A_ID, ++ IPU6_FW_PSYS_ISA_SIS_B_ID, ++ IPU6_FW_PSYS_ISA_B2B_ID, ++ IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID, ++ IPU6_FW_PSYS_ISA_R2I_DS_A_ID, ++ IPU6_FW_PSYS_ISA_R2I_DS_B_ID, ++ IPU6_FW_PSYS_ISA_AWB_ID, ++ IPU6_FW_PSYS_ISA_AE_ID, ++ IPU6_FW_PSYS_ISA_AF_ID, ++ IPU6_FW_PSYS_ISA_DOL_ID, ++ IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID, ++ IPU6_FW_PSYS_ISA_X2B_MD_ID, ++ IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, ++ IPU6_FW_PSYS_ISA_PAF_ID, ++ IPU6_FW_PSYS_BB_ACC_GDC0_ID, ++ IPU6_FW_PSYS_BB_ACC_TNR_ID, ++ IPU6_FW_PSYS_N_CELL_ID ++}; ++ ++enum { ++ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0, ++ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID, ++ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID, ++}; ++ ++/* Excluding PMEM */ ++#define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6_FW_PSYS_N_MEM_TYPE_ID - 1) ++#define IPU6_FW_PSYS_N_DEV_DFM_ID \ ++ (IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1) ++ ++#define IPU6_FW_PSYS_VMEM0_MAX_SIZE 0x0800 ++/* 
Transfer VMEM0 words, ref HAS Transfer*/ ++#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 ++/* Transfer VMEM1 words, ref HAS Transfer*/ ++#define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE 0x0800 ++#define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ ++#define IPU6_FW_PSYS_BAMEM0_MAX_SIZE 0x0800 ++#define IPU6_FW_PSYS_DMEM0_MAX_SIZE 0x4000 ++#define IPU6_FW_PSYS_DMEM1_MAX_SIZE 0x1000 ++#define IPU6_FW_PSYS_DMEM2_MAX_SIZE 0x1000 ++#define IPU6_FW_PSYS_DMEM3_MAX_SIZE 0x1000 ++#define IPU6_FW_PSYS_PMEM0_MAX_SIZE 0x0500 ++ ++#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 ++#define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 0 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 43 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 8 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 ++#define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 ++ ++#define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 ++#define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 ++ ++struct ipu6_fw_psys_program_manifest_ext { ++ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++ u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID]; ++ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; ++ u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; ++ u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; ++ u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; ++ u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; ++ u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT]; ++}; ++ ++struct ipu6_fw_psys_process_ext { ++ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; ++ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; ++ u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; ++}; ++ ++#endif /* IPU6_PLATFORM_RESOURCES_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c +new file mode 100644 +index 0000000..b6a90f8 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c +@@ -0,0 +1,556 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++ ++#include "ipu6-dma.h" ++#include "ipu6-ppg.h" ++ ++static bool enable_suspend_resume; ++module_param(enable_suspend_resume, bool, 0664); ++MODULE_PARM_DESC(enable_suspend_resume, "enable fw ppg suspend/resume api"); ++ ++static struct ipu_psys_kcmd * ++ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state) ++{ ++ struct ipu_psys_kcmd *kcmd; ++ ++ if (list_empty(&kppg->kcmds_new_list)) ++ return NULL; ++ ++ list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) { ++ if (kcmd->state == state) ++ return kcmd; ++ } ++ ++ return NULL; ++} ++ ++struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_kcmd *kcmd; ++ ++ WARN(!mutex_is_locked(&kppg->mutex), "ppg locking error"); ++ ++ if (list_empty(&kppg->kcmds_processing_list)) ++ return NULL; ++ ++ list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) { ++ if (kcmd->state == KCMD_STATE_PPG_STOP) ++ return kcmd; ++ } ++ ++ return NULL; ++} ++ ++static struct ipu_psys_buffer_set * ++__get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size) ++{ ++ struct ipu6_bus_device *adev = fh->psys->adev; ++ struct ipu_psys_buffer_set *kbuf_set; ++ struct 
ipu_psys_scheduler *sched = &fh->sched; ++ ++ mutex_lock(&sched->bs_mutex); ++ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { ++ if (!kbuf_set->buf_set_size && ++ kbuf_set->size >= buf_set_size) { ++ kbuf_set->buf_set_size = buf_set_size; ++ mutex_unlock(&sched->bs_mutex); ++ return kbuf_set; ++ } ++ } ++ ++ mutex_unlock(&sched->bs_mutex); ++ /* no suitable buffer available, allocate new one */ ++ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); ++ if (!kbuf_set) ++ return NULL; ++ ++ kbuf_set->kaddr = ipu6_dma_alloc(adev, buf_set_size, ++ &kbuf_set->dma_addr, GFP_KERNEL, 0); ++ if (!kbuf_set->kaddr) { ++ kfree(kbuf_set); ++ return NULL; ++ } ++ ++ kbuf_set->buf_set_size = buf_set_size; ++ kbuf_set->size = buf_set_size; ++ mutex_lock(&sched->bs_mutex); ++ list_add(&kbuf_set->list, &sched->buf_sets); ++ mutex_unlock(&sched->bs_mutex); ++ ++ return kbuf_set; ++} ++ ++static struct ipu_psys_buffer_set * ++ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd, ++ struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_buffer_set *kbuf_set; ++ size_t buf_set_size; ++ u32 *keb; ++ ++ buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd); ++ ++ kbuf_set = __get_buf_set(fh, buf_set_size); ++ if (!kbuf_set) { ++ dev_err(dev, "failed to create buffer set\n"); ++ return NULL; ++ } ++ ++ kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd, ++ kbuf_set->kaddr, ++ 0); ++ ++ ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set, ++ kbuf_set->dma_addr); ++ keb = kcmd->kernel_enable_bitmap; ++ ipu_fw_psys_ppg_buffer_set_set_keb(kbuf_set->buf_set, keb); ++ ipu6_dma_sync_single(psys->adev, kbuf_set->dma_addr, buf_set_size); ++ ++ return kbuf_set; ++} ++ ++int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, ++ struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct 
ipu_psys_buffer_set *kbuf_set; ++ unsigned int i; ++ int ret; ++ ++ kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg); ++ if (!kbuf_set) { ++ ret = -EINVAL; ++ goto error; ++ } ++ kcmd->kbuf_set = kbuf_set; ++ kbuf_set->kcmd = kcmd; ++ ++ for (i = 0; i < kcmd->nbuffers; i++) { ++ struct ipu_fw_psys_terminal *terminal; ++ u32 buffer; ++ ++ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); ++ if (!terminal) ++ continue; ++ ++ buffer = (u32)kcmd->kbufs[i]->dma_addr + ++ kcmd->buffers[i].data_offset; ++ ++ ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer); ++ if (ret) { ++ dev_err(dev, "Unable to set bufset\n"); ++ goto error; ++ } ++ } ++ ++ return 0; ++ ++error: ++ dev_err(dev, "failed to get buffer set\n"); ++ return ret; ++} ++ ++void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg) ++{ ++ struct device *dev; ++ u8 queue_id; ++ int old_ppg_state; ++ ++ if (!psys || !kppg) ++ return; ++ ++ dev = &psys->adev->auxdev.dev; ++ ++ mutex_lock(&kppg->mutex); ++ old_ppg_state = kppg->state; ++ if (kppg->state == PPG_STATE_STOPPING) { ++ struct ipu_psys_kcmd tmp_kcmd = { ++ .kpg = kppg->kpg, ++ }; ++ ++ kppg->state = PPG_STATE_STOPPED; ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd); ++ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, queue_id); ++ pm_runtime_put(dev); ++ } else { ++ if (kppg->state == PPG_STATE_SUSPENDING) { ++ kppg->state = PPG_STATE_SUSPENDED; ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ } else if (kppg->state == PPG_STATE_STARTED || ++ kppg->state == PPG_STATE_RESUMED) { ++ kppg->state = PPG_STATE_RUNNING; ++ } ++ ++ /* Kick l-scheduler thread for FW callback, ++ * also for checking if need to enter power gating ++ */ ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ } ++ if (old_ppg_state != kppg->state) ++ dev_dbg(dev, "s_change:%s: %p %d 
-> %d\n", __func__, kppg, ++ old_ppg_state, kppg->state); ++ ++ mutex_unlock(&kppg->mutex); ++} ++ ++int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, ++ KCMD_STATE_PPG_START); ++ unsigned int i; ++ int ret; ++ ++ if (!kcmd) { ++ dev_err(dev, "failed to find start kcmd!\n"); ++ return -EINVAL; ++ } ++ ++ dev_dbg(dev, "start ppg id %d, addr 0x%p\n", ++ ipu_fw_psys_pg_get_id(kcmd), kppg); ++ ++ kppg->state = PPG_STATE_STARTING; ++ for (i = 0; i < kcmd->nbuffers; i++) { ++ struct ipu_fw_psys_terminal *terminal; ++ ++ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); ++ if (!terminal) ++ continue; ++ ++ ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0, ++ kcmd->buffers[i].len); ++ if (ret) { ++ dev_err(dev, "Unable to set terminal\n"); ++ return ret; ++ } ++ } ++ ++ ret = ipu_fw_psys_pg_submit(kcmd); ++ if (ret) { ++ dev_err(dev, "failed to submit kcmd!\n"); ++ return ret; ++ } ++ ++ ret = ipu_psys_allocate_resources(dev, kcmd->kpg->pg, kcmd->pg_manifest, ++ &kcmd->kpg->resource_alloc, ++ &psys->res_pool_running); ++ if (ret) { ++ dev_err(dev, "alloc resources failed!\n"); ++ return ret; ++ } ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "failed to power on psys\n"); ++ goto error; ++ } ++ ++ ret = ipu_psys_kcmd_start(psys, kcmd); ++ if (ret) { ++ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); ++ goto error; ++ } ++ ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, kppg->state, PPG_STATE_STARTED); ++ kppg->state = PPG_STATE_STARTED; ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ ++ return 0; ++ ++error: ++ pm_runtime_put_noidle(dev); ++ ipu_psys_reset_process_cell(dev, kcmd->kpg->pg, kcmd->pg_manifest, ++ kcmd->kpg->pg->process_count); ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ ++ dev_err(dev, "failed to start ppg\n"); ++ return ret; ++} 
++ ++int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd tmp_kcmd = { ++ .kpg = kppg->kpg, ++ .fh = kppg->fh, ++ }; ++ int ret; ++ ++ dev_dbg(dev, "resume ppg id %d, addr 0x%p\n", ++ ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg); ++ ++ kppg->state = PPG_STATE_RESUMING; ++ if (enable_suspend_resume) { ++ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, ++ kppg->manifest, ++ &kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ if (ret) { ++ dev_err(dev, "failed to allocate res\n"); ++ return -EIO; ++ } ++ ++ ret = ipu_fw_psys_ppg_resume(&tmp_kcmd); ++ if (ret) { ++ dev_err(dev, "failed to resume ppg\n"); ++ goto error; ++ } ++ } else { ++ kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY; ++ ret = ipu_fw_psys_pg_submit(&tmp_kcmd); ++ if (ret) { ++ dev_err(dev, "failed to submit kcmd!\n"); ++ return ret; ++ } ++ ++ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, ++ kppg->manifest, ++ &kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ if (ret) { ++ dev_err(dev, "failed to allocate res\n"); ++ return ret; ++ } ++ ++ ret = ipu_psys_kcmd_start(psys, &tmp_kcmd); ++ if (ret) { ++ dev_err(dev, "failed to start kcmd!\n"); ++ goto error; ++ } ++ } ++ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", ++ __func__, kppg, kppg->state, PPG_STATE_RESUMED); ++ kppg->state = PPG_STATE_RESUMED; ++ ++ return 0; ++ ++error: ++ ipu_psys_reset_process_cell(dev, kppg->kpg->pg, kppg->manifest, ++ kppg->kpg->pg->process_count); ++ ipu_psys_free_resources(&kppg->kpg->resource_alloc, ++ &psys->res_pool_running); ++ ++ return ret; ++} ++ ++int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, ++ KCMD_STATE_PPG_STOP); ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd kcmd_temp; ++ int ppg_id, ret = 0; ++ ++ if (kcmd) { ++ 
list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); ++ } else { ++ dev_dbg(dev, "Exceptional stop happened!\n"); ++ kcmd_temp.kpg = kppg->kpg; ++ kcmd_temp.fh = kppg->fh; ++ kcmd = &kcmd_temp; ++ /* delete kppg in stop list to avoid this ppg resuming */ ++ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); ++ } ++ ++ ppg_id = ipu_fw_psys_pg_get_id(kcmd); ++ dev_dbg(dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg); ++ ++ if (kppg->state & PPG_STATE_SUSPENDED) { ++ if (enable_suspend_resume) { ++ dev_dbg(dev, "need resume before stop!\n"); ++ kcmd_temp.kpg = kppg->kpg; ++ kcmd_temp.fh = kppg->fh; ++ ret = ipu_fw_psys_ppg_resume(&kcmd_temp); ++ if (ret) ++ dev_err(dev, "ppg(%d) failed to resume\n", ++ ppg_id); ++ } else if (kcmd != &kcmd_temp) { ++ u8 queue_id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); ++ ++ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, ++ queue_id); ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, ++ kppg, kppg->state, PPG_STATE_STOPPED); ++ pm_runtime_put(dev); ++ kppg->state = PPG_STATE_STOPPED; ++ ++ return 0; ++ } else { ++ return 0; ++ } ++ } ++ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, ++ PPG_STATE_STOPPING); ++ kppg->state = PPG_STATE_STOPPING; ++ ret = ipu_fw_psys_pg_abort(kcmd); ++ if (ret) ++ dev_err(dev, "ppg(%d) failed to abort\n", ppg_id); ++ ++ return ret; ++} ++ ++int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd tmp_kcmd = { ++ .kpg = kppg->kpg, ++ .fh = kppg->fh, ++ }; ++ int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd); ++ int ret = 0; ++ ++ dev_dbg(dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg); ++ ++ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, ++ PPG_STATE_SUSPENDING); ++ kppg->state = PPG_STATE_SUSPENDING; ++ if (enable_suspend_resume) ++ ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd); ++ else ++ 
ret = ipu_fw_psys_pg_abort(&tmp_kcmd); ++ if (ret) ++ dev_err(dev, "failed to %s ppg(%d)\n", ++ enable_suspend_resume ? "suspend" : "stop", ret); ++ ++ return ret; ++} ++ ++static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg) ++{ ++ return !list_empty(&kppg->kcmds_new_list); ++} ++ ++/* ++ * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware ++ * Sometimes, if the ppg is at suspended state, this function will return true ++ * to reschedule and let the resume command scheduled before the buffer sets ++ * enqueuing. ++ */ ++bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg) ++{ ++ struct ipu_psys_kcmd *kcmd, *kcmd0; ++ struct ipu_psys *psys = kppg->fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ bool need_resume = false; ++ ++ mutex_lock(&kppg->mutex); ++ ++ if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED | ++ PPG_STATE_RUNNING)) { ++ if (ipu_psys_ppg_is_bufset_existing(kppg)) { ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_new_list, list) { ++ int ret; ++ ++ if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) { ++ need_resume = true; ++ break; ++ } ++ ++ ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd); ++ if (ret) { ++ dev_err(dev, ++ "kppg 0x%p fail to qbufset %d", ++ kppg, ret); ++ break; ++ } ++ list_move_tail(&kcmd->list, ++ &kppg->kcmds_processing_list); ++ dev_dbg(dev, ++ "kppg %d %p queue kcmd 0x%p fh 0x%p\n", ++ ipu_fw_psys_pg_get_id(kcmd), ++ kppg, kcmd, kcmd->fh); ++ } ++ } ++ } ++ ++ mutex_unlock(&kppg->mutex); ++ return need_resume; ++} ++ ++void ipu_psys_enter_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ 
mutex_lock(&kppg->mutex); ++ /* ++ * Only for SUSPENDED kppgs, STOPPED kppgs has already ++ * power down and new kppgs might come now. ++ */ ++ if (kppg->state != PPG_STATE_SUSPENDED) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ } ++ pm_runtime_put(dev); ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} ++ ++void ipu_psys_exit_power_gating(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ int ret = 0; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ mutex_lock(&fh->mutex); ++ sched = &fh->sched; ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ /* Only for SUSPENDED kppgs */ ++ if (kppg->state != PPG_STATE_SUSPENDED) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ } ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "failed to power gating\n"); ++ pm_runtime_put_noidle(dev); ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++} +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h +new file mode 100644 +index 0000000..676a4ea +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h +@@ -0,0 +1,38 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2020 - 2024 Intel Corporation ++ */ ++ ++#ifndef IPU6_PPG_H ++#define IPU6_PPG_H ++ ++#include "ipu-psys.h" ++/* starting from '2' in case of someone passes true or false */ ++enum SCHED_LIST { ++ SCHED_START_LIST = 2, ++ SCHED_STOP_LIST ++}; ++ ++enum ipu_psys_power_gating_state { ++ PSYS_POWER_NORMAL = 0, ++ PSYS_POWER_GATING, ++ PSYS_POWER_GATED ++}; ++ ++int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, ++ struct ipu_psys_ppg *kppg); ++struct ipu_psys_kcmd 
*ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg); ++void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type); ++void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, ++ enum SCHED_LIST type); ++int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg); ++int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg); ++int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg); ++int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg); ++void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg); ++bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg); ++void ipu_psys_enter_power_gating(struct ipu_psys *psys); ++void ipu_psys_exit_power_gating(struct ipu_psys *psys); ++ ++#endif /* IPU6_PPG_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c +new file mode 100644 +index 0000000..ff19256 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c +@@ -0,0 +1,209 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#ifdef CONFIG_DEBUG_FS ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-platform-regs.h" ++ ++/* ++ * GPC (Gerneral Performance Counters) ++ */ ++#define IPU_PSYS_GPC_NUM 16 ++ ++#ifndef CONFIG_PM ++#define pm_runtime_get_sync(d) 0 ++#define pm_runtime_put(d) 0 ++#endif ++ ++struct ipu_psys_gpc { ++ bool enable; ++ unsigned int route; ++ unsigned int source; ++ unsigned int sense; ++ unsigned int gpcindex; ++ void *prit; ++}; ++ ++struct ipu_psys_gpcs { ++ bool gpc_enable; ++ struct ipu_psys_gpc gpc[IPU_PSYS_GPC_NUM]; ++ void *prit; ++}; ++ ++static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val) ++{ ++ struct ipu_psys_gpcs *psys_gpcs = data; ++ struct ipu_psys *psys = psys_gpcs->prit; ++ ++ mutex_lock(&psys->mutex); ++ ++ *val = psys_gpcs->gpc_enable; ++ ++ mutex_unlock(&psys->mutex); ++ return 0; ++} ++ ++static int ipu6_psys_gpc_global_enable_set(void 
*data, u64 val) ++{ ++ struct ipu_psys_gpcs *psys_gpcs = data; ++ struct ipu_psys *psys = psys_gpcs->prit; ++ void __iomem *base; ++ int idx, res; ++ ++ if (val != 0 && val != 1) ++ return -EINVAL; ++ ++ if (!psys || !psys->pdata || !psys->pdata->base) ++ return -EINVAL; ++ ++ mutex_lock(&psys->mutex); ++ ++ base = psys->pdata->base + IPU_GPC_BASE; ++ ++ res = pm_runtime_get_sync(&psys->adev->dev); ++ if (res < 0) { ++ pm_runtime_put(&psys->adev->dev); ++ mutex_unlock(&psys->mutex); ++ return res; ++ } ++ ++ if (val == 0) { ++ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); ++ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); ++ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); ++ psys_gpcs->gpc_enable = false; ++ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { ++ psys_gpcs->gpc[idx].enable = 0; ++ psys_gpcs->gpc[idx].sense = 0; ++ psys_gpcs->gpc[idx].route = 0; ++ psys_gpcs->gpc[idx].source = 0; ++ } ++ pm_runtime_put(&psys->adev->dev); ++ } else { ++ /* Set gpc reg and start all gpc here. ++ * RST free running local timer. ++ */ ++ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); ++ writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST); ++ ++ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { ++ /* Enable */ ++ writel(psys_gpcs->gpc[idx].enable, ++ base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx); ++ /* Setting (route/source/sense) */ ++ writel((psys_gpcs->gpc[idx].sense ++ << IPU_GPC_SENSE_OFFSET) ++ + (psys_gpcs->gpc[idx].route ++ << IPU_GPC_ROUTE_OFFSET) ++ + (psys_gpcs->gpc[idx].source ++ << IPU_GPC_SOURCE_OFFSET), ++ base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx); ++ } ++ ++ /* Soft reset and Overall Enable. 
*/ ++ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); ++ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); ++ writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); ++ ++ psys_gpcs->gpc_enable = true; ++ } ++ ++ mutex_unlock(&psys->mutex); ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_globe_enable_fops, ++ ipu6_psys_gpc_global_enable_get, ++ ipu6_psys_gpc_global_enable_set, "%llu\n"); ++ ++static int ipu6_psys_gpc_count_get(void *data, u64 *val) ++{ ++ struct ipu_psys_gpc *psys_gpc = data; ++ struct ipu_psys *psys = psys_gpc->prit; ++ void __iomem *base; ++ int res; ++ ++ if (!psys || !psys->pdata || !psys->pdata->base) ++ return -EINVAL; ++ ++ mutex_lock(&psys->mutex); ++ ++ base = psys->pdata->base + IPU_GPC_BASE; ++ ++ res = pm_runtime_get_sync(&psys->adev->dev); ++ if (res < 0) { ++ pm_runtime_put(&psys->adev->dev); ++ mutex_unlock(&psys->mutex); ++ return res; ++ } ++ ++ *val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex); ++ ++ mutex_unlock(&psys->mutex); ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops, ++ ipu6_psys_gpc_count_get, ++ NULL, "%llu\n"); ++ ++int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys) ++{ ++ struct dentry *gpcdir; ++ struct dentry *dir; ++ struct dentry *file; ++ int idx; ++ char gpcname[10]; ++ struct ipu_psys_gpcs *psys_gpcs; ++ ++ psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL); ++ if (!psys_gpcs) ++ return -ENOMEM; ++ ++ gpcdir = debugfs_create_dir("gpc", psys->debugfsdir); ++ if (IS_ERR(gpcdir)) ++ return -ENOMEM; ++ ++ psys_gpcs->prit = psys; ++ file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs, ++ &psys_gpc_globe_enable_fops); ++ if (IS_ERR(file)) ++ goto err; ++ ++ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { ++ sprintf(gpcname, "gpc%d", idx); ++ dir = debugfs_create_dir(gpcname, gpcdir); ++ if (IS_ERR(dir)) ++ goto err; ++ ++ debugfs_create_bool("enable", 0600, dir, ++ &psys_gpcs->gpc[idx].enable); ++ ++ debugfs_create_u32("source", 0600, dir, ++ 
&psys_gpcs->gpc[idx].source); ++ ++ debugfs_create_u32("route", 0600, dir, ++ &psys_gpcs->gpc[idx].route); ++ ++ debugfs_create_u32("sense", 0600, dir, ++ &psys_gpcs->gpc[idx].sense); ++ ++ psys_gpcs->gpc[idx].gpcindex = idx; ++ psys_gpcs->gpc[idx].prit = psys; ++ file = debugfs_create_file("count", 0400, dir, ++ &psys_gpcs->gpc[idx], ++ &psys_gpc_count_fops); ++ if (IS_ERR(file)) ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ debugfs_remove_recursive(gpcdir); ++ return -ENOMEM; ++} ++#endif +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c +new file mode 100644 +index 0000000..281207a +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c +@@ -0,0 +1,1054 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ipu6.h" ++#include "ipu6-dma.h" ++#include "ipu-psys.h" ++#include "ipu6-ppg.h" ++#include "ipu6-platform-regs.h" ++#include "ipu6-platform-buttress-regs.h" ++ ++MODULE_IMPORT_NS("DMA_BUF"); ++ ++static bool early_pg_transfer; ++module_param(early_pg_transfer, bool, 0664); ++MODULE_PARM_DESC(early_pg_transfer, ++ "Copy PGs back to user after resource allocation"); ++ ++bool enable_power_gating = true; ++module_param(enable_power_gating, bool, 0664); ++MODULE_PARM_DESC(enable_power_gating, "enable power gating"); ++ ++static void ipu6_set_sp_info_bits(void __iomem *base) ++{ ++ int i; ++ ++ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, ++ base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); ++ ++ for (i = 0; i < 4; i++) ++ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, ++ base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); ++ for (i = 0; i < 4; i++) ++ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, ++ base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); ++} ++ ++#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000 
++void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ unsigned int i; ++ u32 val; ++ ++ /* power domain req */ ++ dev_dbg(dev, "power %s psys sub-domains", on ? "UP" : "DOWN"); ++ if (on) ++ writel(IPU_PSYS_SUBDOMAINS_POWER_MASK, ++ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); ++ else ++ writel(0x0, ++ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); ++ ++ i = 0; ++ do { ++ usleep_range(10, 20); ++ val = readl(psys->adev->isp->base + ++ IPU_PSYS_SUBDOMAINS_POWER_STATUS); ++ if (!(val & BIT(31))) { ++ dev_dbg(dev, "PS sub-domains req done with status 0x%x", ++ val); ++ break; ++ } ++ i++; ++ } while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT); ++ ++ if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT) ++ dev_warn(dev, "Psys sub-domains %s req timeout!", ++ on ? "UP" : "DOWN"); ++} ++ ++void ipu_psys_setup_hw(struct ipu_psys *psys) ++{ ++ void __iomem *base = psys->pdata->base; ++ void __iomem *spc_regs_base = ++ base + psys->pdata->ipdata->hw_variant.spc_offset; ++ void __iomem *psys_iommu0_ctrl; ++ u32 irqs; ++ const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG; ++ const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR; ++ const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL; ++ ++ if (!psys->adev->isp->secure_mode) { ++ /* configure access blocker for non-secure mode */ ++ writel(NCI_AB_ACCESS_MODE_RW, ++ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + ++ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3)); ++ writel(NCI_AB_ACCESS_MODE_RW, ++ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + ++ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4)); ++ writel(NCI_AB_ACCESS_MODE_RW, ++ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + ++ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5)); ++ } ++ psys_iommu0_ctrl = base + ++ psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + ++ IPU6_MMU_INFO_OFFSET; ++ writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl); ++ ++ 
ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); ++ ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); ++ ++ /* Enable FW interrupt #0 */ ++ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); ++ irqs = IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); ++ writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); ++ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); ++} ++ ++static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_scheduler *sched = &kcmd->fh->sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ ++ mutex_lock(&kcmd->fh->mutex); ++ if (list_empty(&sched->ppgs)) ++ goto not_found; ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ if (ipu_fw_psys_pg_get_token(kcmd) ++ != kppg->token) ++ continue; ++ mutex_unlock(&kcmd->fh->mutex); ++ return kppg; ++ } ++ ++not_found: ++ mutex_unlock(&kcmd->fh->mutex); ++ return NULL; ++} ++ ++/* ++ * Called to free up all resources associated with a kcmd. ++ * After this the kcmd doesn't anymore exist in the driver. 
++ */ ++static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_scheduler *sched; ++ ++ if (!kcmd) ++ return; ++ ++ kppg = ipu_psys_identify_kppg(kcmd); ++ sched = &kcmd->fh->sched; ++ ++ if (kcmd->kbuf_set) { ++ mutex_lock(&sched->bs_mutex); ++ kcmd->kbuf_set->buf_set_size = 0; ++ mutex_unlock(&sched->bs_mutex); ++ kcmd->kbuf_set = NULL; ++ } ++ ++ if (kppg) { ++ mutex_lock(&kppg->mutex); ++ if (!list_empty(&kcmd->list)) ++ list_del(&kcmd->list); ++ mutex_unlock(&kppg->mutex); ++ } ++ ++ kfree(kcmd->pg_manifest); ++ kfree(kcmd->kbufs); ++ kfree(kcmd->buffers); ++ kfree(kcmd); ++} ++ ++static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, ++ struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_psys_kbuffer *kpgbuf; ++ unsigned int i; ++ int ret, prevfd, fd; ++ ++ fd = -1; ++ prevfd = -1; ++ ++ if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) ++ return NULL; ++ ++ if (!cmd->pg_manifest_size) ++ return NULL; ++ ++ kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); ++ if (!kcmd) ++ return NULL; ++ ++ kcmd->state = KCMD_STATE_PPG_NEW; ++ kcmd->fh = fh; ++ INIT_LIST_HEAD(&kcmd->list); ++ ++ mutex_lock(&fh->mutex); ++ fd = cmd->pg; ++ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); ++ if (!kpgbuf || !kpgbuf->sgt) { ++ dev_err(dev, "%s kbuf %p with fd %d not found.\n", ++ __func__, kpgbuf, fd); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ ++ /* check and remap if possible */ ++ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); ++ if (!kpgbuf || !kpgbuf->sgt) { ++ dev_err(dev, "%s remap failed\n", __func__); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ mutex_unlock(&fh->mutex); ++ ++ kcmd->pg_user = kpgbuf->kaddr; ++ kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); ++ if (!kcmd->kpg) ++ goto error; ++ ++ memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); ++ ++ kcmd->pg_manifest = 
kzalloc(cmd->pg_manifest_size, GFP_KERNEL); ++ if (!kcmd->pg_manifest) ++ goto error; ++ ++ ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, ++ cmd->pg_manifest_size); ++ if (ret) ++ goto error; ++ ++ kcmd->pg_manifest_size = cmd->pg_manifest_size; ++ ++ kcmd->user_token = cmd->user_token; ++ kcmd->issue_id = cmd->issue_id; ++ kcmd->priority = cmd->priority; ++ if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) ++ goto error; ++ ++ /* ++ * Kernel enable bitmap be used only. ++ */ ++ memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap, ++ sizeof(cmd->kernel_enable_bitmap)); ++ ++ kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); ++ kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), ++ GFP_KERNEL); ++ if (!kcmd->buffers) ++ goto error; ++ ++ kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), ++ GFP_KERNEL); ++ if (!kcmd->kbufs) ++ goto error; ++ ++ /* should be stop cmd for ppg */ ++ if (!cmd->buffers) { ++ kcmd->state = KCMD_STATE_PPG_STOP; ++ return kcmd; ++ } ++ ++ if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) ++ goto error; ++ ++ ret = copy_from_user(kcmd->buffers, cmd->buffers, ++ kcmd->nbuffers * sizeof(*kcmd->buffers)); ++ if (ret) ++ goto error; ++ ++ for (i = 0; i < kcmd->nbuffers; i++) { ++ struct ipu_fw_psys_terminal *terminal; ++ ++ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); ++ if (!terminal) ++ continue; ++ ++ if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) { ++ kcmd->state = KCMD_STATE_PPG_START; ++ continue; ++ } ++ if (kcmd->state == KCMD_STATE_PPG_START) { ++ dev_err(dev, "buffer.flags & DMA_HANDLE must be 0\n"); ++ goto error; ++ } ++ ++ mutex_lock(&fh->mutex); ++ fd = kcmd->buffers[i].base.fd; ++ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); ++ if (!kpgbuf || !kpgbuf->sgt) { ++ dev_err(dev, ++ "%s kcmd->buffers[%d] %p fd %d not found.\n", ++ __func__, i, kpgbuf, fd); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ ++ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); ++ if (!kpgbuf || 
!kpgbuf->sgt) { ++ dev_err(dev, "%s remap failed\n", ++ __func__); ++ mutex_unlock(&fh->mutex); ++ goto error; ++ } ++ ++ mutex_unlock(&fh->mutex); ++ kcmd->kbufs[i] = kpgbuf; ++ if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || ++ kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) ++ goto error; ++ ++ if ((kcmd->kbufs[i]->flags & IPU_BUFFER_FLAG_NO_FLUSH) || ++ (kcmd->buffers[i].flags & IPU_BUFFER_FLAG_NO_FLUSH) || ++ prevfd == kcmd->buffers[i].base.fd) ++ continue; ++ ++ prevfd = kcmd->buffers[i].base.fd; ++ ++ /* ++ * TODO: remove exported buffer sync here as the cache ++ * coherency should be done by the exporter ++ */ ++ if (kcmd->kbufs[i]->kaddr) ++ clflush_cache_range(kcmd->kbufs[i]->kaddr, ++ kcmd->kbufs[i]->len); ++ } ++ ++ if (kcmd->state != KCMD_STATE_PPG_START) ++ kcmd->state = KCMD_STATE_PPG_ENQUEUE; ++ ++ return kcmd; ++error: ++ ipu_psys_kcmd_free(kcmd); ++ ++ dev_dbg(dev, "failed to copy cmd\n"); ++ ++ return NULL; ++} ++ ++static struct ipu_psys_buffer_set * ++ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr) ++{ ++ struct ipu_psys_fh *fh; ++ struct ipu_psys_buffer_set *kbuf_set; ++ struct ipu_psys_scheduler *sched; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ sched = &fh->sched; ++ mutex_lock(&sched->bs_mutex); ++ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { ++ if (kbuf_set->buf_set && ++ kbuf_set->buf_set->ipu_virtual_address == addr) { ++ mutex_unlock(&sched->bs_mutex); ++ return kbuf_set; ++ } ++ } ++ mutex_unlock(&sched->bs_mutex); ++ } ++ ++ return NULL; ++} ++ ++static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys, ++ dma_addr_t pg_addr) ++{ ++ struct ipu_psys_scheduler *sched; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_fh *fh; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ sched = &fh->sched; ++ mutex_lock(&fh->mutex); ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ if (pg_addr != 
kppg->kpg->pg_dma_addr) ++ continue; ++ mutex_unlock(&fh->mutex); ++ return kppg; ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return NULL; ++} ++ ++#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 ++static void ipu_buttress_set_psys_ratio(struct ipu6_device *isp, ++ unsigned int psys_divisor, ++ unsigned int psys_qos_floor) ++{ ++ struct ipu6_buttress_ctrl *ctrl = isp->psys->ctrl; ++ ++ mutex_lock(&isp->buttress.power_mutex); ++ ++ if (ctrl->ratio == psys_divisor && ctrl->qos_floor == psys_qos_floor) ++ goto out_mutex_unlock; ++ ++ ctrl->ratio = psys_divisor; ++ ctrl->qos_floor = psys_qos_floor; ++ ++ /* ++ * According to documentation driver initiates DVFS ++ * transition by writing wanted ratio, floor ratio and start ++ * bit. No need to stop PS first ++ */ ++ writel(BUTTRESS_FREQ_CTL_START | ++ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | ++ psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); ++ ++out_mutex_unlock: ++ mutex_unlock(&isp->buttress.power_mutex); ++} ++ ++static void ipu_buttress_set_psys_freq(struct ipu6_device *isp, ++ unsigned int freq) ++{ ++ unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; ++ ++ dev_dbg(&isp->psys->auxdev.dev, "freq:%u\n", freq); ++ ++ ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); ++} ++ ++static void ++ipu_buttress_add_psys_constraint(struct ipu6_device *isp, ++ struct ipu6_psys_constraint *constraint) ++{ ++ struct ipu6_buttress *b = &isp->buttress; ++ ++ mutex_lock(&b->cons_mutex); ++ list_add(&constraint->list, &b->constraints); ++ mutex_unlock(&b->cons_mutex); ++} ++ ++static void ++ipu_buttress_remove_psys_constraint(struct ipu6_device *isp, ++ struct ipu6_psys_constraint *constraint) ++{ ++ struct ipu6_buttress *b = &isp->buttress; ++ struct ipu6_psys_constraint *c; ++ unsigned int min_freq = 0; ++ ++ mutex_lock(&b->cons_mutex); ++ list_del(&constraint->list); ++ ++ list_for_each_entry(c, &b->constraints, list) ++ if (c->min_freq > min_freq) ++ min_freq = c->min_freq; ++ ++ 
ipu_buttress_set_psys_freq(isp, min_freq); ++ mutex_unlock(&b->cons_mutex); ++} ++ ++/* ++ * Move kcmd into completed state (due to running finished or failure). ++ * Fill up the event struct and notify waiters. ++ */ ++void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, ++ struct ipu_psys_kcmd *kcmd, int error) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ ++ kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; ++ kcmd->ev.user_token = kcmd->user_token; ++ kcmd->ev.issue_id = kcmd->issue_id; ++ kcmd->ev.error = error; ++ list_move_tail(&kcmd->list, &kppg->kcmds_finished_list); ++ ++ if (kcmd->constraint.min_freq) ++ ipu_buttress_remove_psys_constraint(psys->adev->isp, ++ &kcmd->constraint); ++ ++ if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { ++ struct ipu_psys_kbuffer *kbuf; ++ ++ kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, ++ kcmd->pg_user); ++ if (kbuf && kbuf->valid) ++ memcpy(kcmd->pg_user, ++ kcmd->kpg->pg, kcmd->kpg->pg_size); ++ else ++ dev_dbg(dev, "Skipping unmapped buffer\n"); ++ } ++ ++ kcmd->state = KCMD_STATE_PPG_COMPLETE; ++ wake_up_interruptible(&fh->wait); ++} ++ ++/* ++ * Submit kcmd into psys queue. If running fails, complete the kcmd ++ * with an error. ++ * ++ * Found a runnable PG. Move queue to the list tail for round-robin ++ * scheduling and run the PG. Start the watchdog timer if the PG was ++ * started successfully. Enable PSYS power if requested. 
++ */ ++int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ int ret; ++ ++ if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) ++ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); ++ ++ ret = ipu_fw_psys_pg_start(kcmd); ++ if (ret) { ++ dev_err(dev, "failed to start kcmd!\n"); ++ return ret; ++ } ++ ++ ipu_fw_psys_pg_dump(psys, kcmd, "run"); ++ ++ ret = ipu_fw_psys_pg_disown(kcmd); ++ if (ret) { ++ dev_err(dev, "failed to start kcmd!\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_resource_pool *rpr; ++ int queue_id; ++ int ret; ++ ++ rpr = &psys->res_pool_running; ++ ++ kppg = kzalloc(sizeof(*kppg), GFP_KERNEL); ++ if (!kppg) ++ return -ENOMEM; ++ ++ kppg->fh = fh; ++ kppg->kpg = kcmd->kpg; ++ kppg->state = PPG_STATE_START; ++ kppg->pri_base = kcmd->priority; ++ kppg->pri_dynamic = 0; ++ INIT_LIST_HEAD(&kppg->list); ++ ++ mutex_init(&kppg->mutex); ++ INIT_LIST_HEAD(&kppg->kcmds_new_list); ++ INIT_LIST_HEAD(&kppg->kcmds_processing_list); ++ INIT_LIST_HEAD(&kppg->kcmds_finished_list); ++ INIT_LIST_HEAD(&kppg->sched_list); ++ ++ kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL); ++ if (!kppg->manifest) { ++ kfree(kppg); ++ return -ENOMEM; ++ } ++ memcpy(kppg->manifest, kcmd->pg_manifest, ++ kcmd->pg_manifest_size); ++ ++ queue_id = ipu_psys_allocate_cmd_queue_res(rpr); ++ if (queue_id == -ENOSPC) { ++ dev_err(dev, "no available queue\n"); ++ kfree(kppg->manifest); ++ kfree(kppg); ++ mutex_unlock(&psys->mutex); ++ return -ENOMEM; ++ } ++ ++ /* ++ * set token as start cmd will immediately be followed by a ++ * enqueue cmd so that kppg could be retrieved. 
++ */ ++ kppg->token = (u64)kcmd->kpg; ++ ipu_fw_psys_pg_set_token(kcmd, kppg->token); ++ ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id); ++ ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, ++ kcmd->kpg->pg_dma_addr); ++ if (ret) { ++ ipu_psys_free_cmd_queue_res(rpr, queue_id); ++ ++ kfree(kppg->manifest); ++ kfree(kppg); ++ return -EIO; ++ } ++ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); ++ ++ mutex_lock(&fh->mutex); ++ list_add_tail(&kppg->list, &sched->ppgs); ++ mutex_unlock(&fh->mutex); ++ ++ mutex_lock(&kppg->mutex); ++ list_add(&kcmd->list, &kppg->kcmds_new_list); ++ mutex_unlock(&kppg->mutex); ++ ++ dev_dbg(dev, "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n", ++ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id); ++ ++ /* Kick l-scheduler thread */ ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ ++ return 0; ++} ++ ++static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_fh *fh = kcmd->fh; ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg; ++ struct ipu_psys_resource_pool *rpr; ++ unsigned long flags; ++ u8 id; ++ bool resche = true; ++ ++ rpr = &psys->res_pool_running; ++ if (kcmd->state == KCMD_STATE_PPG_START) ++ return ipu_psys_kcmd_send_to_ppg_start(kcmd); ++ ++ kppg = ipu_psys_identify_kppg(kcmd); ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ kcmd->kpg->pg_size = 0; ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ if (!kppg) { ++ dev_err(dev, "token not match\n"); ++ return -EINVAL; ++ } ++ ++ kcmd->kpg = kppg->kpg; ++ ++ dev_dbg(dev, "%s ppg(%d, 0x%p) kcmd %p\n", ++ (kcmd->state == KCMD_STATE_PPG_STOP) ? 
"STOP" : "ENQUEUE", ++ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd); ++ ++ if (kcmd->state == KCMD_STATE_PPG_STOP) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_STOPPED) { ++ dev_dbg(dev, "kppg 0x%p stopped!\n", kppg); ++ id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); ++ ipu_psys_free_cmd_queue_res(rpr, id); ++ ipu_psys_kcmd_complete(kppg, kcmd, 0); ++ pm_runtime_put(dev); ++ resche = false; ++ } else { ++ list_add(&kcmd->list, &kppg->kcmds_new_list); ++ } ++ mutex_unlock(&kppg->mutex); ++ } else { ++ int ret; ++ ++ ret = ipu_psys_ppg_get_bufset(kcmd, kppg); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&kppg->mutex); ++ list_add_tail(&kcmd->list, &kppg->kcmds_new_list); ++ mutex_unlock(&kppg->mutex); ++ } ++ ++ if (resche) { ++ /* Kick l-scheduler thread */ ++ atomic_set(&psys->wakeup_count, 1); ++ wake_up_interruptible(&psys->sched_cmd_wq); ++ } ++ return 0; ++} ++ ++int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ size_t pg_size; ++ int ret; ++ ++ kcmd = ipu_psys_copy_cmd(cmd, fh); ++ if (!kcmd) ++ return -EINVAL; ++ ++ pg_size = ipu_fw_psys_pg_get_size(kcmd); ++ if (pg_size > kcmd->kpg->pg_size) { ++ dev_dbg(dev, "pg size mismatch %lu %lu\n", pg_size, ++ kcmd->kpg->pg_size); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ if (ipu_fw_psys_pg_get_protocol(kcmd) != ++ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG) { ++ dev_err(dev, "No support legacy pg now\n"); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ if (cmd->min_psys_freq) { ++ kcmd->constraint.min_freq = cmd->min_psys_freq; ++ ipu_buttress_add_psys_constraint(psys->adev->isp, ++ &kcmd->constraint); ++ } ++ ++ ret = ipu_psys_kcmd_send_to_ppg(kcmd); ++ if (ret) ++ goto error; ++ ++ dev_dbg(dev, "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", ++ cmd->user_token, cmd->issue_id, cmd->priority); ++ ++ return 0; ++ ++error: ++ ipu_psys_kcmd_free(kcmd); ++ ++ 
return ret; ++} ++ ++static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, ++ struct ipu_psys_kcmd *kcmd) ++{ ++ struct ipu_psys_fh *fh; ++ struct ipu_psys_kcmd *kcmd0; ++ struct ipu_psys_ppg *kppg, *tmp; ++ struct ipu_psys_scheduler *sched; ++ ++ list_for_each_entry(fh, &psys->fhs, list) { ++ sched = &fh->sched; ++ mutex_lock(&fh->mutex); ++ if (list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ continue; ++ } ++ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ list_for_each_entry(kcmd0, ++ &kppg->kcmds_processing_list, ++ list) { ++ if (kcmd0 == kcmd) { ++ mutex_unlock(&kppg->mutex); ++ mutex_unlock(&fh->mutex); ++ return true; ++ } ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ mutex_unlock(&fh->mutex); ++ } ++ ++ return false; ++} ++ ++void ipu_psys_handle_events(struct ipu_psys *psys) ++{ ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_fw_psys_event event; ++ struct ipu_psys_ppg *kppg; ++ bool error; ++ u32 hdl; ++ u16 cmd, status; ++ int res; ++ ++ do { ++ memset(&event, 0, sizeof(event)); ++ if (!ipu_fw_psys_rcv_event(psys, &event)) ++ break; ++ ++ if (!event.context_handle) ++ break; ++ ++ dev_dbg(dev, "ppg event: 0x%x, %d, status %d\n", ++ event.context_handle, event.command, event.status); ++ ++ error = false; ++ /* ++ * event.command == CMD_RUN shows this is fw processing frame ++ * done as pPG mode, and event.context_handle should be pointer ++ * of buffer set; so we make use of this pointer to lookup ++ * kbuffer_set and kcmd ++ */ ++ hdl = event.context_handle; ++ cmd = event.command; ++ status = event.status; ++ ++ kppg = NULL; ++ kcmd = NULL; ++ if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) { ++ struct ipu_psys_buffer_set *kbuf_set; ++ /* ++ * Need change ppg state when the 1st running is done ++ * (after PPG started/resumed) ++ */ ++ kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl); ++ if (kbuf_set) ++ kcmd = kbuf_set->kcmd; ++ if (!kbuf_set || !kcmd) 
++ error = true; ++ else ++ kppg = ipu_psys_identify_kppg(kcmd); ++ } else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP || ++ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND || ++ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) { ++ /* ++ * STOP/SUSPEND/RESUME cmd event would run this branch; ++ * only stop cmd queued by user has stop_kcmd and need ++ * to notify user to dequeue. ++ */ ++ kppg = ipu_psys_lookup_ppg(psys, hdl); ++ if (kppg) { ++ mutex_lock(&kppg->mutex); ++ if (kppg->state == PPG_STATE_STOPPING) { ++ kcmd = ipu_psys_ppg_get_stop_kcmd(kppg); ++ if (!kcmd) ++ error = true; ++ } ++ mutex_unlock(&kppg->mutex); ++ } ++ } else { ++ dev_err(dev, "invalid event\n"); ++ continue; ++ } ++ ++ if (error || !kppg) { ++ dev_err(dev, "event error, command %d\n", cmd); ++ break; ++ } ++ ++ dev_dbg(dev, "event to kppg 0x%p, kcmd 0x%p\n", kppg, kcmd); ++ ++ ipu_psys_ppg_complete(psys, kppg); ++ ++ if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) { ++ res = (status == IPU_PSYS_EVENT_CMD_COMPLETE || ++ status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ? 
++ 0 : -EIO; ++ mutex_lock(&kppg->mutex); ++ ipu_psys_kcmd_complete(kppg, kcmd, res); ++ mutex_unlock(&kppg->mutex); ++ } ++ } while (1); ++} ++ ++int ipu_psys_fh_init(struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp; ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ int i; ++ ++ mutex_init(&sched->bs_mutex); ++ INIT_LIST_HEAD(&sched->buf_sets); ++ INIT_LIST_HEAD(&sched->ppgs); ++ ++ /* allocate and map memory for buf_sets */ ++ for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) { ++ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); ++ if (!kbuf_set) ++ goto out_free_buf_sets; ++ kbuf_set->kaddr = ipu6_dma_alloc(psys->adev, ++ IPU_PSYS_BUF_SET_MAX_SIZE, ++ &kbuf_set->dma_addr, ++ GFP_KERNEL, 0); ++ if (!kbuf_set->kaddr) { ++ kfree(kbuf_set); ++ goto out_free_buf_sets; ++ } ++ kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE; ++ list_add(&kbuf_set->list, &sched->buf_sets); ++ } ++ ++ return 0; ++ ++out_free_buf_sets: ++ list_for_each_entry_safe(kbuf_set, kbuf_set_tmp, ++ &sched->buf_sets, list) { ++ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, ++ kbuf_set->dma_addr, 0); ++ list_del(&kbuf_set->list); ++ kfree(kbuf_set); ++ } ++ mutex_destroy(&sched->bs_mutex); ++ ++ return -ENOMEM; ++} ++ ++int ipu_psys_fh_deinit(struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_ppg *kppg, *kppg0; ++ struct ipu_psys_kcmd *kcmd, *kcmd0; ++ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0; ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ struct ipu_psys_resource_pool *rpr; ++ struct ipu_psys_resource_alloc *alloc; ++ u8 id; ++ ++ mutex_lock(&fh->mutex); ++ if (!list_empty(&sched->ppgs)) { ++ list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) { ++ unsigned long flags; ++ ++ mutex_lock(&kppg->mutex); ++ if (!(kppg->state & ++ (PPG_STATE_STOPPED | ++ PPG_STATE_STOPPING))) { ++ struct ipu_psys_kcmd tmp = { ++ .kpg = 
kppg->kpg, ++ }; ++ ++ rpr = &psys->res_pool_running; ++ alloc = &kppg->kpg->resource_alloc; ++ id = ipu_fw_psys_ppg_get_base_queue_id(&tmp); ++ ipu_psys_ppg_stop(kppg); ++ ipu_psys_free_resources(alloc, rpr); ++ ipu_psys_free_cmd_queue_res(rpr, id); ++ dev_dbg(dev, ++ "s_change:%s %p %d -> %d\n", ++ __func__, kppg, kppg->state, ++ PPG_STATE_STOPPED); ++ kppg->state = PPG_STATE_STOPPED; ++ if (psys->power_gating != PSYS_POWER_GATED) ++ pm_runtime_put(dev); ++ } ++ list_del(&kppg->list); ++ mutex_unlock(&kppg->mutex); ++ ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_new_list, list) { ++ kcmd->pg_user = NULL; ++ mutex_unlock(&fh->mutex); ++ ipu_psys_kcmd_free(kcmd); ++ mutex_lock(&fh->mutex); ++ } ++ ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_processing_list, ++ list) { ++ kcmd->pg_user = NULL; ++ mutex_unlock(&fh->mutex); ++ ipu_psys_kcmd_free(kcmd); ++ mutex_lock(&fh->mutex); ++ } ++ ++ list_for_each_entry_safe(kcmd, kcmd0, ++ &kppg->kcmds_finished_list, ++ list) { ++ kcmd->pg_user = NULL; ++ mutex_unlock(&fh->mutex); ++ ipu_psys_kcmd_free(kcmd); ++ mutex_lock(&fh->mutex); ++ } ++ ++ spin_lock_irqsave(&psys->pgs_lock, flags); ++ kppg->kpg->pg_size = 0; ++ spin_unlock_irqrestore(&psys->pgs_lock, flags); ++ ++ mutex_destroy(&kppg->mutex); ++ kfree(kppg->manifest); ++ kfree(kppg); ++ } ++ } ++ mutex_unlock(&fh->mutex); ++ ++ mutex_lock(&sched->bs_mutex); ++ list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) { ++ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, ++ kbuf_set->dma_addr, 0); ++ list_del(&kbuf_set->list); ++ kfree(kbuf_set); ++ } ++ mutex_unlock(&sched->bs_mutex); ++ mutex_destroy(&sched->bs_mutex); ++ ++ return 0; ++} ++ ++struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh) ++{ ++ struct ipu_psys_scheduler *sched = &fh->sched; ++ struct device *dev = &fh->psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd; ++ struct ipu_psys_ppg *kppg; ++ ++ mutex_lock(&fh->mutex); ++ if 
(list_empty(&sched->ppgs)) { ++ mutex_unlock(&fh->mutex); ++ return NULL; ++ } ++ ++ list_for_each_entry(kppg, &sched->ppgs, list) { ++ mutex_lock(&kppg->mutex); ++ if (list_empty(&kppg->kcmds_finished_list)) { ++ mutex_unlock(&kppg->mutex); ++ continue; ++ } ++ ++ kcmd = list_first_entry(&kppg->kcmds_finished_list, ++ struct ipu_psys_kcmd, list); ++ mutex_unlock(&fh->mutex); ++ mutex_unlock(&kppg->mutex); ++ dev_dbg(dev, "get completed kcmd 0x%p\n", kcmd); ++ return kcmd; ++ } ++ mutex_unlock(&fh->mutex); ++ ++ return NULL; ++} ++ ++long ipu_ioctl_dqevent(struct ipu_psys_event *event, ++ struct ipu_psys_fh *fh, unsigned int f_flags) ++{ ++ struct ipu_psys *psys = fh->psys; ++ struct device *dev = &psys->adev->auxdev.dev; ++ struct ipu_psys_kcmd *kcmd = NULL; ++ int rval; ++ ++ dev_dbg(dev, "IOC_DQEVENT\n"); ++ ++ if (!(f_flags & O_NONBLOCK)) { ++ rval = wait_event_interruptible(fh->wait, ++ (kcmd = ++ ipu_get_completed_kcmd(fh))); ++ if (rval == -ERESTARTSYS) ++ return rval; ++ } ++ ++ if (!kcmd) { ++ kcmd = ipu_get_completed_kcmd(fh); ++ if (!kcmd) ++ return -ENODATA; ++ } ++ ++ *event = kcmd->ev; ++ ipu_psys_kcmd_free(kcmd); ++ ++ return 0; ++} +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c +new file mode 100644 +index 0000000..4a1a869 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c +@@ -0,0 +1,393 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2020 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include "ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6-platform-resources.h" ++#include "ipu6ep-platform-resources.h" ++ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++static const u8 ipu6ep_fw_psys_cell_types[IPU6EP_FW_PSYS_N_CELL_ID] = { ++ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, ++ IPU6_FW_PSYS_VP_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ 
IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* AF */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ ++ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ ++ IPU6_FW_PSYS_GDC_TYPE_ID, ++ IPU6_FW_PSYS_TNR_TYPE_ID, ++}; ++ ++static const u16 ipu6ep_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, ++}; ++ ++static const u16 ipu6ep_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { ++ IPU6_FW_PSYS_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, ++ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM0_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM1_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM2_MAX_SIZE, ++ IPU6_FW_PSYS_DMEM3_MAX_SIZE, ++ IPU6_FW_PSYS_PMEM0_MAX_SIZE ++}; ++ ++static const u16 ipu6ep_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { ++ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, ++ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, ++}; ++ ++static 
const u8 ++ipu6ep_fw_psys_c_mem[IPU6EP_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { ++ { ++ /* IPU6_FW_PSYS_SP0_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM0_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_SP1_ID */ ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_DMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_VP0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_DMEM3_ID, ++ IPU6_FW_PSYS_VMEM0_ID, ++ IPU6_FW_PSYS_BAMEM0_ID, ++ IPU6_FW_PSYS_PMEM0_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC1_ID BNLM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC2_ID DM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC3_ID ACM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC9_ID GLTM */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ACC10_ID XNR */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_ICA_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_LSC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_DPC_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2B_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AWB_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AE_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_AF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ 
IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_ISA_PAF_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_LB_VMEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ }, ++ { ++ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ ++ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ IPU6_FW_PSYS_N_MEM_ID, ++ } ++}; ++ ++static const struct ipu_fw_resource_definitions ipu6ep_defs = { ++ .cells = ipu6ep_fw_psys_cell_types, ++ .num_cells = IPU6EP_FW_PSYS_N_CELL_ID, ++ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, ++ ++ .dev_channels = ipu6ep_fw_num_dev_channels, ++ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, ++ ++ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, ++ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, ++ .ext_mem_ids = ipu6ep_fw_psys_mem_size, ++ ++ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, ++ ++ .dfms = ipu6ep_fw_psys_dfms, ++ ++ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, ++ .cell_mem = &ipu6ep_fw_psys_c_mem[0][0], ++}; ++ ++const struct ipu_fw_resource_definitions *ipu6ep_res_defs = &ipu6ep_defs; +diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h +new file mode 100644 +index 0000000..8420149 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h +@@ -0,0 +1,42 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2020 - 2024 Intel Corporation */ ++ ++#ifndef IPU6EP_PLATFORM_RESOURCES_H ++#define IPU6EP_PLATFORM_RESOURCES_H ++ ++#include ++#include ++ ++enum { ++ IPU6EP_FW_PSYS_SP0_ID = 0, ++ IPU6EP_FW_PSYS_VP0_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_BNLM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_DM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_ACM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_GTC_YUV1_ID, ++ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, ++ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, ++ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_GAMMASTAR_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_GLTM_ID, ++ IPU6EP_FW_PSYS_PSA_ACC_XNR_ID, ++ IPU6EP_FW_PSYS_PSA_VCSC_ID, /* VCSC */ ++ IPU6EP_FW_PSYS_ISA_ICA_ID, ++ IPU6EP_FW_PSYS_ISA_LSC_ID, ++ IPU6EP_FW_PSYS_ISA_DPC_ID, ++ IPU6EP_FW_PSYS_ISA_SIS_A_ID, ++ IPU6EP_FW_PSYS_ISA_SIS_B_ID, ++ IPU6EP_FW_PSYS_ISA_B2B_ID, ++ IPU6EP_FW_PSYS_ISA_B2R_R2I_SIE_ID, ++ IPU6EP_FW_PSYS_ISA_R2I_DS_A_ID, ++ IPU6EP_FW_PSYS_ISA_AWB_ID, ++ IPU6EP_FW_PSYS_ISA_AE_ID, ++ IPU6EP_FW_PSYS_ISA_AF_ID, ++ IPU6EP_FW_PSYS_ISA_X2B_MD_ID, ++ IPU6EP_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, ++ IPU6EP_FW_PSYS_ISA_PAF_ID, ++ IPU6EP_FW_PSYS_BB_ACC_GDC0_ID, ++ IPU6EP_FW_PSYS_BB_ACC_TNR_ID, ++ IPU6EP_FW_PSYS_N_CELL_ID ++}; ++#endif /* IPU6EP_PLATFORM_RESOURCES_H */ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c +new file mode 100644 +index 0000000..41b6ba6 +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c +@@ -0,0 +1,194 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2015 - 2024 Intel Corporation ++ ++#include ++#include ++ ++#include 
"ipu-psys.h" ++#include "ipu-fw-psys.h" ++#include "ipu6se-platform-resources.h" ++ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++/* resources table */ ++ ++/* ++ * Cell types by cell IDs ++ */ ++static const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = { ++ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID, ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID*/ ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID /* PAF */ ++}; ++ ++static const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = { ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, ++}; ++ ++static const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = { ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, ++ IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE, ++ IPU6SE_FW_PSYS_DMEM0_MAX_SIZE, ++ IPU6SE_FW_PSYS_DMEM1_MAX_SIZE ++}; ++ ++static const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = { ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE ++}; ++ ++static const u8 ++ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = { ++ { /* 
IPU6SE_FW_PSYS_SP0_ID */ ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_DMEM0_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_ICA_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_LSC_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_DPC_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_B2B_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ ++ { /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_DM_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* 
IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_AWB_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_AE_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_AF_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ }, ++ { /* IPU6SE_FW_PSYS_ISA_PAF_ID */ ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, ++ IPU6SE_FW_PSYS_LB_VMEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ IPU6SE_FW_PSYS_N_MEM_ID, ++ } ++}; ++ ++static const struct ipu_fw_resource_definitions ipu6se_defs = { ++ .cells = ipu6se_fw_psys_cell_types, ++ .num_cells = IPU6SE_FW_PSYS_N_CELL_ID, ++ .num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID, ++ ++ .dev_channels = ipu6se_fw_num_dev_channels, ++ .num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID, ++ ++ .num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID, ++ .num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID, ++ .ext_mem_ids = ipu6se_fw_psys_mem_size, ++ ++ .num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID, ++ ++ .dfms = ipu6se_fw_psys_dfms, ++ ++ .cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID, ++ .cell_mem = &ipu6se_fw_psys_c_mem[0][0], ++}; ++ ++const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h +new file mode 
100644 +index 0000000..81c618d +--- /dev/null ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2018 - 2024 Intel Corporation */ ++ ++#ifndef IPU6SE_PLATFORM_RESOURCES_H ++#define IPU6SE_PLATFORM_RESOURCES_H ++ ++#include ++#include ++#include "ipu-platform-resources.h" ++ ++#define IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 1 ++ ++enum { ++ IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, ++ IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, ++ IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, ++ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, ++ IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_DMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_VMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_BAMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_PMEM_TYPE_ID, ++ IPU6SE_FW_PSYS_N_MEM_TYPE_ID ++}; ++ ++enum ipu6se_mem_id { ++ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0, /* TRANSFER VMEM 0 */ ++ IPU6SE_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ ++ IPU6SE_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ ++ IPU6SE_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ ++ IPU6SE_FW_PSYS_N_MEM_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, ++ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID, ++ IPU6SE_FW_PSYS_N_DEV_CHN_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0, ++ IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID, ++ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, ++ IPU6SE_FW_PSYS_N_CELL_TYPE_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_SP0_ID = 0, ++ IPU6SE_FW_PSYS_ISA_ICA_ID, ++ IPU6SE_FW_PSYS_ISA_LSC_ID, ++ IPU6SE_FW_PSYS_ISA_DPC_ID, ++ IPU6SE_FW_PSYS_ISA_B2B_ID, ++ IPU6SE_FW_PSYS_ISA_BNLM_ID, ++ IPU6SE_FW_PSYS_ISA_DM_ID, ++ IPU6SE_FW_PSYS_ISA_R2I_SIE_ID, 
++ IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID, ++ IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID, ++ IPU6SE_FW_PSYS_ISA_AWB_ID, ++ IPU6SE_FW_PSYS_ISA_AE_ID, ++ IPU6SE_FW_PSYS_ISA_AF_ID, ++ IPU6SE_FW_PSYS_ISA_PAF_ID, ++ IPU6SE_FW_PSYS_N_CELL_ID ++}; ++ ++enum { ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0, ++ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, ++}; ++ ++/* Excluding PMEM */ ++#define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1) ++#define IPU6SE_FW_PSYS_N_DEV_DFM_ID \ ++ (IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1) ++#define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE 0x0800 ++/* Transfer VMEM0 words, ref HAS Transfer*/ ++#define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 ++#define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ ++#define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE 0x4000 ++#define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE 0x1000 ++ ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 22 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 22 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 22 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 ++#define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 ++ ++#define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 ++#define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 ++#define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 ++ ++#endif /* IPU6SE_PLATFORM_RESOURCES_H */ +-- +2.43.0 + diff --git a/patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch b/patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch new file mode 100644 index 0000000..7ecfc93 --- /dev/null +++ b/patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch @@ -0,0 +1,134 @@ +From f365735ea4b498870c456759fc756a1a332faaab Mon Sep 17 00:00:00 2001 +From: hepengpx +Date: Fri, 16 Jan 2026 02:01:37 -0700 +Subject: [PATCH 70/77] media: intel-ipu6: support IPU6 PSYS FW trace dump for + upstream driver + +[media][pci]Support IPU6 PSYS 
FW trace dump for upstream driver +Signed-off-by: hepengpx +--- + .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 6 +++ + .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 1 + + .../media/pci/intel/ipu6/psys/ipu-psys.c | 44 ++++++++++++++++++- + 3 files changed, 50 insertions(+), 1 deletion(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +index adcee20..077f140 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c +@@ -891,6 +891,12 @@ int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle) + } + EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); + ++bool is_ipu_trace_enable(void) ++{ ++ return ipu_trace_enable; ++} ++EXPORT_SYMBOL_GPL(is_ipu_trace_enable); ++ + MODULE_AUTHOR("Samu Onkalo "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel ipu trace support"); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +index f66d889..fe89d1b 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h +@@ -144,4 +144,5 @@ void ipu_trace_restore(struct device *dev); + void ipu_trace_uninit(struct device *dev); + void ipu_trace_stop(struct device *dev); + int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); ++bool is_ipu_trace_enable(void); + #endif +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +index dcd0dce..6faa582 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -31,6 +31,7 @@ + #include "ipu-psys.h" + #include "ipu6-platform-regs.h" + #include "ipu6-fw-com.h" ++#include "ipu6-trace.h" + + static bool async_fw_init; + module_param(async_fw_init, bool, 0664); +@@ -1201,6 +1202,8 @@ static int ipu_psys_sched_cmd(void *ptr) + 
return 0; + } + ++#include "../ipu6-trace.h" ++ + static void start_sp(struct ipu6_bus_device *adev) + { + struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); +@@ -1211,7 +1214,7 @@ static void start_sp(struct ipu6_bus_device *adev) + val |= IPU6_PSYS_SPC_STATUS_START | + IPU6_PSYS_SPC_STATUS_RUN | + IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; +- val |= psys->icache_prefetch_sp ? ++ val |= (psys->icache_prefetch_sp || is_ipu_trace_enable()) ? + IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; + writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); + } +@@ -1304,6 +1307,40 @@ static void run_fw_init_work(struct work_struct *work) + } + } + ++struct ipu_trace_block psys_trace_blocks[] = { ++ { ++ .offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE, ++ .type = IPU_TRACE_BLOCK_TUN, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE, ++ .type = IPU_TRACE_BLOCK_TM, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE, ++ .type = IPU_TRACE_BLOCK_TM, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPC_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_MMU_GPC_BASE, ++ .type = IPU_TRACE_BLOCK_GPC, ++ }, ++ { ++ .offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, ++ .type = IPU_TRACE_TIMER_RST, ++ }, ++ { ++ .type = IPU_TRACE_BLOCK_END, ++ } ++}; ++ + static int ipu6_psys_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) + { +@@ -1442,6 +1479,9 @@ static int ipu6_psys_probe(struct auxiliary_device *auxdev, + strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, + sizeof(psys->caps.dev_model)); + ++ ipu_trace_init(adev->isp, psys->pdata->base, &auxdev->dev, ++ psys_trace_blocks); ++ + mutex_unlock(&ipu_psys_mutex); + + dev_info(dev, "psys probe minor: %d\n", minor); +@@ -1503,6 +1543,8 @@ static void ipu6_psys_remove(struct auxiliary_device *auxdev) + + clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); + ++ 
ipu_trace_uninit(&auxdev->dev); ++ + mutex_unlock(&ipu_psys_mutex); + + mutex_destroy(&psys->mutex); +-- +2.43.0 + diff --git a/patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch b/patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch new file mode 100644 index 0000000..0e273f9 --- /dev/null +++ b/patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch @@ -0,0 +1,84 @@ +From 73f893117a273db351a6a2cf50b817b6ea3f1735 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Fri, 16 Jan 2026 08:41:24 -0700 +Subject: [PATCH 71/77] Revert "media: intel/ipu6: Constify ipu6_buttress_ctrl + structure" + +This reverts commit 6ad57f8f86de024d10c764f829a50530e7121958. + +Signed-off-by: Florent Pirou +--- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c | 2 +- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h | 4 ++-- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c | 4 ++-- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 4 ++-- + 4 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c +index 5cee274..37d88dd 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c +@@ -82,7 +82,7 @@ static void ipu6_bus_release(struct device *dev) + + struct ipu6_bus_device * + ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, +- void *pdata, const struct ipu6_buttress_ctrl *ctrl, ++ void *pdata, struct ipu6_buttress_ctrl *ctrl, + char *name) + { + struct auxiliary_device *auxdev; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +index 16be932..73dcf73 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +@@ -27,7 +27,7 @@ struct ipu6_bus_device { + struct ipu6_mmu *mmu; + struct ipu6_device *isp; + struct 
ipu_subsystem_trace_config *trace_cfg; +- const struct ipu6_buttress_ctrl *ctrl; ++ struct ipu6_buttress_ctrl *ctrl; + const struct firmware *fw; + struct sg_table fw_sgt; + u64 *pkg_dir; +@@ -49,7 +49,7 @@ struct ipu6_auxdrv_data { + + struct ipu6_bus_device * + ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, +- void *pdata, const struct ipu6_buttress_ctrl *ctrl, ++ void *pdata, struct ipu6_buttress_ctrl *ctrl, + char *name); + int ipu6_bus_add_device(struct ipu6_bus_device *adev); + void ipu6_bus_del_devices(struct pci_dev *pdev); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index c243142..0a7b019 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -445,8 +445,8 @@ irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) + return ret; + } + +-int ipu6_buttress_power(struct device *dev, +- const struct ipu6_buttress_ctrl *ctrl, bool on) ++int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, ++ bool on) + { + struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; + u32 pwr_sts, val; +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +index 8bc5eeb..3b22dfb 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +@@ -65,8 +65,8 @@ int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt); + void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt); +-int ipu6_buttress_power(struct device *dev, +- const struct ipu6_buttress_ctrl *ctrl, bool on); ++int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, ++ bool on); + bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp); + int ipu6_buttress_authenticate(struct ipu6_device *isp); + int 
ipu6_buttress_reset_authentication(struct ipu6_device *isp); +-- +2.43.0 + diff --git a/patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch b/patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch new file mode 100644 index 0000000..d7d2e93 --- /dev/null +++ b/patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch @@ -0,0 +1,37 @@ +From ab4266d7926dd398e0ff9ffad8888b20c68fa663 Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Mon, 17 Nov 2025 07:44:33 -0700 +Subject: [PATCH 72/77] ipu7 dkms : v4l2-core makefile adaptation 6.17 + +Signed-off-by: florent pirou +--- + 6.17.0/drivers/media/v4l2-core/Makefile | 3 +++ + 6.17.0/drivers/media/v4l2-core/v4l2-dev.c | 1 + + 2 files changed, 4 insertions(+) + +diff --git a/6.17.0/drivers/media/v4l2-core/Makefile b/6.17.0/drivers/media/v4l2-core/Makefile +index 2177b9d..82c235f 100644 +--- a/6.17.0/drivers/media/v4l2-core/Makefile ++++ b/6.17.0/drivers/media/v4l2-core/Makefile +@@ -2,6 +2,9 @@ + # + # Makefile for the V4L2 core + # ++# ipu7-dkms appendix ++CC := ${CC} -I ${M}/../../../include-overrides -I ${M}/../tuners -I ${M}/../dvb-core -I ${M}/../dvb-frontends ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + + ccflags-y += -I$(srctree)/drivers/media/dvb-frontends + ccflags-y += -I$(srctree)/drivers/media/tuners +diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c +index c369235..1997370 100644 +--- a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c ++++ b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c +@@ -1245,3 +1245,4 @@ MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, + MODULE_DESCRIPTION("Video4Linux2 core driver"); + MODULE_LICENSE("GPL"); + MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + diff --git a/patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch b/patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch new file mode 100644 index 
0000000..b694a18 --- /dev/null +++ b/patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -0,0 +1,28 @@ +From 92714a8bb27332674cebe3a5adc46ff8ba99fe12 Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Mon, 17 Nov 2025 07:45:23 -0700 +Subject: [PATCH 73/77] drivers: media: set v4l2_subdev_enable_streams_api=true + for WA 6.17 + +Signed-off-by: hepengpx +Signed-off-by: florent pirou +--- + 6.17.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c +index 4fd25fe..60a0556 100644 +--- a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c +@@ -32,7 +32,7 @@ + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +-static bool v4l2_subdev_enable_streams_api; ++static bool v4l2_subdev_enable_streams_api = true; + #endif + + /* +-- +2.43.0 + diff --git a/patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch b/patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch new file mode 100644 index 0000000..196e208 --- /dev/null +++ b/patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch @@ -0,0 +1,27 @@ +From 52fec029544562294137641ca6cb48df710536f8 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Fri, 16 Jan 2026 03:20:05 -0700 +Subject: [PATCH 74/78] ipu7 dkms : v4l2-core makefile adaptation 6.18 + +Signed-off-by: Florent Pirou +--- + 6.18.0/drivers/media/v4l2-core/Makefile | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/6.18.0/drivers/media/v4l2-core/Makefile b/6.18.0/drivers/media/v4l2-core/Makefile +index 2177b9d..82c235f 100644 +--- a/6.18.0/drivers/media/v4l2-core/Makefile ++++ b/6.18.0/drivers/media/v4l2-core/Makefile +@@ -2,6 +2,9 @@ + # + # Makefile for the V4L2 core + # ++# ipu7-dkms appendix ++CC := ${CC} -I ${M}/../../../include-overrides -I ${M}/../tuners -I ${M}/../dvb-core -I ${M}/../dvb-frontends 
++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + + ccflags-y += -I$(srctree)/drivers/media/dvb-frontends + ccflags-y += -I$(srctree)/drivers/media/tuners +-- +2.43.0 + diff --git a/patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch b/patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch new file mode 100644 index 0000000..512fab4 --- /dev/null +++ b/patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -0,0 +1,27 @@ +From 6732cbe871d179380f48bbfae76abef6cd720e3a Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Fri, 16 Jan 2026 03:22:26 -0700 +Subject: [PATCH 75/78] drivers: media: set v4l2_subdev_enable_streams_api=true + for WA 6.18 + +Signed-off-by: Florent Pirou +--- + 6.18.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c +index 25e66bf..cf6ac8a 100644 +--- a/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c +@@ -56,7 +56,7 @@ struct v4l2_subdev_stream_config { + * 'v4l2_subdev_enable_streams_api' to 1 below. + */ + +-static bool v4l2_subdev_enable_streams_api; ++static bool v4l2_subdev_enable_streams_api = true; + #endif + + /* +-- +2.43.0 + diff --git a/patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch b/patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch new file mode 100644 index 0000000..1861e78 --- /dev/null +++ b/patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch @@ -0,0 +1,95 @@ +From 560e0edd91435231530af29e668b682500a7d38e Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Fri, 16 Jan 2026 10:10:16 -0700 +Subject: [PATCH 76/78] Revert "media: v4l2-subdev: Make struct + v4l2_subdev_stream_config private" + +This reverts commit 5195b777552d2e2fa735c6cad75797efa132bd60. 
+ +Signed-off-by: Florent Pirou +--- + 6.18.0/drivers/media/v4l2-core/v4l2-subdev.c | 24 ------------------- + 6.18.0/include-overrides/media/v4l2-subdev.h | 25 +++++++++++++++++++- + 2 files changed, 24 insertions(+), 25 deletions(-) + +diff --git a/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c +index cf6ac8a..b2b7125 100644 +--- a/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/6.18.0/drivers/media/v4l2-core/v4l2-subdev.c +@@ -26,30 +26,6 @@ + #include + #include + +-/** +- * struct v4l2_subdev_stream_config - Used for storing stream configuration. +- * +- * @pad: pad number +- * @stream: stream number +- * @enabled: has the stream been enabled with v4l2_subdev_enable_streams() +- * @fmt: &struct v4l2_mbus_framefmt +- * @crop: &struct v4l2_rect to be used for crop +- * @compose: &struct v4l2_rect to be used for compose +- * @interval: frame interval +- * +- * This structure stores configuration for a stream. +- */ +-struct v4l2_subdev_stream_config { +- u32 pad; +- u32 stream; +- bool enabled; +- +- struct v4l2_mbus_framefmt fmt; +- struct v4l2_rect crop; +- struct v4l2_rect compose; +- struct v4l2_fract interval; +-}; +- + #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + /* + * The Streams API is an experimental feature. To use the Streams API, set +diff --git a/6.18.0/include-overrides/media/v4l2-subdev.h b/6.18.0/include-overrides/media/v4l2-subdev.h +index e0bb58c..ad37aaf 100644 +--- a/6.18.0/include-overrides/media/v4l2-subdev.h ++++ b/6.18.0/include-overrides/media/v4l2-subdev.h +@@ -36,7 +36,6 @@ struct v4l2_event_subscription; + struct v4l2_fh; + struct v4l2_subdev; + struct v4l2_subdev_fh; +-struct v4l2_subdev_stream_config; + struct tuner_setup; + struct v4l2_mbus_frame_desc; + struct led_classdev; +@@ -684,6 +683,30 @@ struct v4l2_subdev_pad_config { + struct v4l2_fract interval; + }; + ++/** ++ * struct v4l2_subdev_stream_config - Used for storing stream configuration. 
++ * ++ * @pad: pad number ++ * @stream: stream number ++ * @enabled: has the stream been enabled with v4l2_subdev_enable_streams() ++ * @fmt: &struct v4l2_mbus_framefmt ++ * @crop: &struct v4l2_rect to be used for crop ++ * @compose: &struct v4l2_rect to be used for compose ++ * @interval: frame interval ++ * ++ * This structure stores configuration for a stream. ++ */ ++struct v4l2_subdev_stream_config { ++ u32 pad; ++ u32 stream; ++ bool enabled; ++ ++ struct v4l2_mbus_framefmt fmt; ++ struct v4l2_rect crop; ++ struct v4l2_rect compose; ++ struct v4l2_fract interval; ++}; ++ + /** + * struct v4l2_subdev_stream_configs - A collection of stream configs. + * +-- +2.43.0 + diff --git a/patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch b/patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch new file mode 100644 index 0000000..b04042c --- /dev/null +++ b/patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch @@ -0,0 +1,48 @@ +From df2a50dc1637fc0a2db4093eefa9d8f22f4ff5df Mon Sep 17 00:00:00 2001 +From: florent pirou +Date: Thu, 4 Dec 2025 00:50:25 -0700 +Subject: [PATCH 77/78] ipu6-dkms: add isys makefile adaptation 6.17 + +Signed-off-by: florent pirou +--- + 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 5 +++++ + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 1 + + 3 files changed, 7 insertions(+) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +index 67c13c9..3e2ce53 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++CC := ${CC} -I ${M}/../../../include-overrides ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ ++export CONFIG_VIDEO_INTEL_IPU6 = m ++ + intel-ipu6-y := ipu6.o \ + ipu6-bus.o \ + ipu6-dma.o \ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 7d10583..eaa7e68 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1757,3 +1757,4 @@ MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 input system driver"); + MODULE_IMPORT_NS("INTEL_IPU6"); + MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 291b745..56be152 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -959,3 +959,4 @@ MODULE_AUTHOR("Yunliang Ding "); + MODULE_AUTHOR("Hongju Wang "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 PCI driver"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + diff --git a/patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch b/patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch new file mode 100644 index 0000000..ecc321e --- /dev/null +++ b/patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch @@ -0,0 +1,89 @@ +From 8bb193b2ad17562e020f2096b7e70c82e96a94aa Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Fri, 16 Jan 2026 03:34:49 -0700 +Subject: [PATCH 78/78] ipu6-dkms: align ipu7 acpi pdata device parser on 6.18 + +Signed-off-by: Florent Pirou +--- + 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 9 ++++++++- + 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 1 + + 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 8 ++++---- + 3 files changed, 13 insertions(+), 5 deletions(-) + +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +index 3e2ce53..d33bfa0 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile ++++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile +@@ -1,8 +1,13 @@ + # SPDX-License-Identifier: GPL-2.0-only + +-CC := ${CC} -I ${M}/../../../include-overrides 
++KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') ++ ++CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../../$(KERNEL_VERSION)/include-overrides -DCONFIG_INTEL_IPU_ACPI -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET + ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + ++# Path to ipu_acpi module build directory ++KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers ++ + export CONFIG_VIDEO_INTEL_IPU6 = m + + intel-ipu6-y := ipu6.o \ +@@ -27,4 +32,6 @@ intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-dwc-phy.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o ++ ++subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +index 3768c48..d5435c8 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +@@ -102,6 +102,7 @@ struct isys_iwake_watermark { + struct ipu6_isys_csi2_config { + u32 nlanes; + u32 port; ++ enum v4l2_mbus_type bus_type; + }; + + struct sensor_async_sd { +diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +index 56be152..618bc49 100644 +--- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +@@ -35,12 +35,12 @@ + #include "ipu6-trace.h" + + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) +-#include ++#include + #endif + + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + static int isys_init_acpi_add_device(struct device *dev, void *priv, +- struct ipu6_isys_csi2_config *csi2, ++ void *csi2, + bool reprobe) + { + return 0; +@@ -438,14 +438,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, + #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) + if (!spdata) { + dev_dbg(&pdev->dev, "No subdevice info provided"); +- ret = 
ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, NULL, + isys_init_acpi_add_device); + if (acpi_pdata && (*acpi_pdata->subdevs)) { + pdata->spdata = acpi_pdata; + } + } else { + dev_dbg(&pdev->dev, "Subdevice info found"); +- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, ++ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, (void **)&spdata, + isys_init_acpi_add_device); + } + if (ret) +-- +2.43.0 + From 01eef2111e162fb5623a22b88566492c4844fc0f Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 21:34:08 -0700 Subject: [PATCH 07/14] ipu-dkms: initial ipu7 submodules PTL_Beta_IoT * ipu7-isys: add video VIDIOC_S_EXT_CTRLS callbacks by streamid * ipu7-isys : add d4xx pixel format support * ipu7-isys : allow virtual-channels on 16 csi2 input video capture * ipu-acpi: decouple the PLATFORM ACPI driver and IPU driver Signed-off-by: Florent Pirou --- .gitmodules | 3 + dkms.conf | 39 +- include/uapi/linux/ipu-isys.h | 20 + include/uapi/linux/ipu7-psys.h | 246 ++++++ ipu7-drivers | 1 + ...e-the-PLATFORM-ACPI-driver-and-IPU-d.patch | 649 ++++++++++++++ ...virtual-channels-on-16-csi2-input-vi.patch | 29 + ...7-isys-add-d4xx-pixel-format-support.patch | 124 +++ ...deo-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch | 796 ++++++++++++++++++ ...odules-suffix-versioning-same-as-dkm.patch | 70 ++ 10 files changed, 1974 insertions(+), 3 deletions(-) create mode 100644 .gitmodules create mode 100644 include/uapi/linux/ipu-isys.h create mode 100644 include/uapi/linux/ipu7-psys.h create mode 160000 ipu7-drivers create mode 100644 patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch create mode 100644 patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch create mode 100644 patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch create mode 100644 
patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch create mode 100644 patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..0092983 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ipu7-drivers"] + path = ipu7-drivers + url = https://github.com/intel/ipu7-drivers diff --git a/dkms.conf b/dkms.conf index c203227..31a1864 100644 --- a/dkms.conf +++ b/dkms.conf @@ -1,5 +1,6 @@ PACKAGE_NAME="intel-mipi-gmsl-dkms" -PACKAGE_VERSION="#MODULE_VERSION#" +PACKAGE_VERSION="20260112-0eci1" +#PACKAGE_VERSION="#MODULE_VERSION#" BUILD_EXCLUSIVE_KERNEL="^(6\.(1[278])\.)" VERSION_SUFFIX="${PACKAGE_VERSION}" @@ -22,12 +23,16 @@ MAKE="make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX} mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/ && \ if [ -d ${KBASE}/drivers/media/pci/intel/ipu6 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu6 ; fi &&\ make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ - mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/" + mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/ && \ + if [ -d ${KBASE}/drivers/media/pci/intel/ipu7 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu7 ; fi &&\ + make V=1 -C 
${kernel_source_dir} M=${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ + mkdir -p ${KBASE}/drivers/media/pci/intel/ipu7/ && cp ${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7/intel-ipu7.ko ${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7/intel-ipu7-isys.ko ${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/intel-ipu7-psys.ko ${KBASE}/drivers/media/pci/intel/ipu7/" -MAKE_MATCH[1]="^(6.1[27])" +MAKE_MATCH[1]="^(6.1[278])" CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && \ make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean clean && \ + make -C ${kernel_source_dir} M=${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7 clean && \ make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 clean" AUTOINSTALL="yes" @@ -154,6 +159,19 @@ PATCH_MATCH[$j]="^(6.1[78])" PATCH[$((++j))]="0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" PATCH_MATCH[$j]="^(6.1[78])" + +# Make IPU7 ISYS+PSYS out-of-tree patch from PTL release for iot on 2025-09-23 +PATCH[$((++j))]="0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch" +PATCH_MATCH[$j]="^(6.1[278])" +PATCH[$((++j))]="0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch" +PATCH_MATCH[$j]="^(6.1[278])" +PATCH[$((++j))]="0102-ipu7-isys-add-d4xx-pixel-format-support.patch" +PATCH_MATCH[$j]="^(6.1[278])" +PATCH[$((++j))]="0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch" +PATCH_MATCH[$j]="^(6.1[278])" +PATCH[$((++j))]="0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch" +PATCH_MATCH[$j]="^(6.1[278])" + i=0 BUILT_MODULE_NAME[$i]="videodev" @@ -177,6 +195,21 @@ BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu6" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no +BUILT_MODULE_NAME[$((++i))]="intel-ipu7" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu7" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + 
+BUILT_MODULE_NAME[$((++i))]="intel-ipu7-isys" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu7" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="intel-ipu7-psys" +BUILT_MODULE_LOCATION[$i]="drivers/media/pci/intel/ipu7" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no + BUILT_MODULE_NAME[$((++i))]="ipu-acpi" BUILT_MODULE_LOCATION[$i]="drivers/media/platform/intel" DEST_MODULE_LOCATION[$i]="/updates" diff --git a/include/uapi/linux/ipu-isys.h b/include/uapi/linux/ipu-isys.h new file mode 100644 index 0000000..94bd27d --- /dev/null +++ b/include/uapi/linux/ipu-isys.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2016 - 2020 Intel Corporation */ + +#ifndef UAPI_LINUX_IPU_ISYS_H +#define UAPI_LINUX_IPU_ISYS_H + +#define V4L2_CID_IPU_BASE (V4L2_CID_USER_BASE + 0x1080) + +#define V4L2_CID_IPU_STORE_CSI2_HEADER (V4L2_CID_IPU_BASE + 2) +#define V4L2_CID_IPU_ISYS_COMPRESSION (V4L2_CID_IPU_BASE + 3) + +#define V4L2_CID_IPU_QUERY_SUB_STREAM (V4L2_CID_IPU_BASE + 4) +#define V4L2_CID_IPU_SET_SUB_STREAM (V4L2_CID_IPU_BASE + 5) + +#define V4L2_CID_IPU_ENUMERATE_LINK (V4L2_CID_IPU_BASE + 6) + +#define VIDIOC_IPU_GET_DRIVER_VERSION \ + _IOWR('v', BASE_VIDIOC_PRIVATE + 3, uint32_t) + +#endif /* UAPI_LINUX_IPU_ISYS_H */ diff --git a/include/uapi/linux/ipu7-psys.h b/include/uapi/linux/ipu7-psys.h new file mode 100644 index 0000000..f7b841f --- /dev/null +++ b/include/uapi/linux/ipu7-psys.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2013 - 2023 Intel Corporation */ + +#ifndef _UAPI_IPU_PSYS_H +#define _UAPI_IPU_PSYS_H + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +struct ipu_psys_capability { + uint32_t version; + uint8_t driver[20]; + uint8_t dev_model[32]; + uint32_t reserved[17]; +} __attribute__ ((packed)); + +/** + * PSYS event error to user + */ +enum ipu_psys_event_error { + IPU_PSYS_EVT_ERROR_NONE = 0U, + 
IPU_PSYS_EVT_ERROR_INTERNAL = 1U, + IPU_PSYS_EVT_ERROR_FRAME = 2U, + IPU_PSYS_EVT_ERROR_FORCE_CLOSED = 3U, + IPU_PSYS_EVT_ERROR_MAX +} __attribute__ ((packed)); + +/** + * struct ipu_psys_event - event back from driver to user for requested tasks + * @graph_id: unique id per graph + * @node_ctx_id: unique logical context id per cb + * @frame_id: unique id per frame, originally assigned by user + * @error: error code of ipu_psys_event_error type + */ +struct ipu_psys_event { + uint8_t graph_id; + uint8_t node_ctx_id; + uint8_t frame_id; + uint32_t error; + int32_t reserved[2]; +} __attribute__ ((packed)); + +/** + * struct ipu_psys_buffer - for input/output terminals + * @len: total allocated size @ base address + * @userptr: user pointer + * @fd: DMA-BUF handle + * @data_offset:offset to valid data + * @bytes_used: amount of valid data including offset + * @flags: flags + */ +struct ipu_psys_buffer { + uint64_t len; + union { + int fd; + void __user *userptr; + uint64_t reserved; + } base; + uint32_t data_offset; + uint32_t bytes_used; + uint32_t flags; + uint32_t reserved[2]; +} __attribute__ ((packed)); + +/**< Max number of logical node */ +#define MAX_GRAPH_NODES 5U +/**< Max number of profile */ +#define MAX_GRAPH_NODE_PROFILES 1U +#define MAX_GRAPH_LINKS 10U +#define MAX_GRAPH_TERMINALS 32U + +/** + * Settings per node on the bitmap + * @teb: Terminal Enable bitmap + * @deb: Device Enable bitmap + * @rbm: Routing bitmap + * @reb: Routing Enable bitmap + */ +struct node_profile { + uint32_t teb[2]; + uint32_t deb[4]; + uint32_t rbm[4]; + uint32_t reb[4]; +} __attribute__ ((packed)); + +/** + * struct node_ternimal - terminal description on the node + * + * Terminal is the logical connection entity that is in the node, + * it can be different types, one node could have multiple terminal. 
+ * + * @term_id: id of the terminal + * @buf_size: payload(PAC or SDI) size of the certain terminal + */ +struct node_ternimal { + uint8_t term_id; + uint32_t buf_size; +} __attribute__ ((packed)); + +/** + * struct graph_node - Description of graph that will be used for device + * and graph open purpose + * + * Node is the logical entity of a graph, one graph could have multiple + * nodes and it could have connection between each node with terminal. + * + * @node_rsrc_id: Physical node id + * @node_ctx_id: Logical node id, unique per graph + * @num_terms: Number of enabled terms in the node + * @profiles: bitmap settings on the node + * @terminals: terminal info on the node + * @num_frags: Number of fragments + */ +struct graph_node { + uint8_t node_rsrc_id; + uint8_t node_ctx_id; + uint8_t num_terms; + struct node_profile profiles[MAX_GRAPH_NODE_PROFILES]; + struct node_ternimal terminals[MAX_GRAPH_TERMINALS]; +} __attribute__ ((packed)); + +/** + * struct graph_link_ep - link endpoint description + * + * Link endpoint is used to describe the connection between different nodes. + * + * @node_ctx_id: Node ID as described in the list of nodes in the fgraph + * @term_id: Term ID as described in the list of terms in the fgraph + */ +struct graph_link_ep { + uint8_t node_ctx_id; + uint8_t term_id; +} __attribute__ ((packed)); + +/** + * All local links (links between nodes within a subsystem) require this + * value to be set. + */ +#define IPU_PSYS_FOREIGN_KEY_NONE UINT16_MAX +/** None value of TOP pbk id if not used */ +#define IPU_PSYS_LINK_PBK_ID_NONE UINT8_MAX +/** None value of TOP pbk slot id if not used */ +#define IPU_PSYS_LINK_PBK_SLOT_ID_NONE UINT8_MAX +/** Static Offline */ +#define IPU_PSYS_LINK_STREAMING_MODE_SOFF 0U + +/** + * struct graph_link - graph link to connect between cbs + * + * The sink and source links are defined with terminal information. 
+ * + * @ep_src: Source side of the link + * @ep_dst: Destination side of the link + * @foreign_key: MUST set to IPU_PSYS_FOREIGN_KEY_NONE + * @streaming_mode: Value should be set from IPU_PSYS_LINK_STREAMING_MODE_X + * @pbk_id: TOP PBK id that used to connected to external IP + * @pbk_slot_id: TOP PBK slot id that used to connected to external IP + * @delayed_link: A delay link between producer N and consumer N+1 frame + */ +struct graph_link { + struct graph_link_ep ep_src; + struct graph_link_ep ep_dst; + uint16_t foreign_key; + uint8_t streaming_mode; + uint8_t pbk_id; + uint8_t pbk_slot_id; + uint8_t delayed_link; +} __attribute__ ((packed)); + +/** + * struct ipu_psys_graph_info + * + * Topology that describes an IPU internal connection includes CB and terminal + * information. + * + * @graph_id: id of graph, set initial to 0xFF by user, returned by driver + * @num_nodes: number of nodes in graph + * @nodes: node entity + * @links: link entity + */ +struct ipu_psys_graph_info { + uint8_t graph_id; + uint8_t num_nodes; + struct graph_node __user *nodes; + struct graph_link links[MAX_GRAPH_LINKS]; +} __attribute__ ((packed)); + +/** + * struct ipu_psys_term_buffers + * + * Descprion of each terminal payload buffer + * + * @term_id: terminal id + * @term_buf: terminal buffer + */ +struct ipu_psys_term_buffers { + uint8_t term_id; + struct ipu_psys_buffer term_buf; +} __attribute__ ((packed)); + +/** + * struct ipu_psys_task_request + * + * Task request is for user to send a request associated with terminal + * payload and expect IPU to process, each task request would expect + * an event, @see ipu_psys_event + * + * @graph_id: graph id returned from graph open + * @node_ctx_id: unique logical context id per cb + * @frame_id: frame id + * @payload_reuse_bm: Any terminal marked here must be enabled + * @term_buf_count: the number of terminal buffers + * @task_buffers: terminal buffers on the task request + * @num_frags: the number of fragments + * @frag_buffers: 
the buffer information of fragments + */ +struct ipu_psys_task_request { + uint8_t graph_id; + uint8_t node_ctx_id; + uint8_t frame_id; + uint32_t payload_reuse_bm[2]; + uint8_t term_buf_count; + struct ipu_psys_term_buffers __user *task_buffers; +} __attribute__ ((packed)); + +#define IPU_BUFFER_FLAG_INPUT (1 << 0) +#define IPU_BUFFER_FLAG_OUTPUT (1 << 1) +#define IPU_BUFFER_FLAG_MAPPED (1 << 2) +#define IPU_BUFFER_FLAG_NO_FLUSH (1 << 3) +#define IPU_BUFFER_FLAG_DMA_HANDLE (1 << 4) +#define IPU_BUFFER_FLAG_USERPTR (1 << 5) + +#define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability) +#define IPU_IOC_MAPBUF _IOWR('A', 2, int) +#define IPU_IOC_UNMAPBUF _IOWR('A', 3, int) +#define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer) +#define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer) +#define IPU_IOC_DQEVENT _IOWR('A', 6, struct ipu_psys_event) +#define IPU_IOC_GRAPH_OPEN _IOWR('A', 7, struct ipu_psys_graph_info) +#define IPU_IOC_TASK_REQUEST _IOWR('A', 8, struct ipu_psys_task_request) +#define IPU_IOC_GRAPH_CLOSE _IOWR('A', 9, int) + +#endif /* _UAPI_IPU_PSYS_H */ diff --git a/ipu7-drivers b/ipu7-drivers new file mode 160000 index 0000000..62a3704 --- /dev/null +++ b/ipu7-drivers @@ -0,0 +1 @@ +Subproject commit 62a3704433c35d3bdfa679fc4dee74e133ce815c diff --git a/patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch b/patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch new file mode 100644 index 0000000..52683d1 --- /dev/null +++ b/patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch @@ -0,0 +1,649 @@ +From 3fa91bddbefdfabb8c1510df9c3eb85f9688467f Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Sat, 17 Jan 2026 01:54:42 -0700 +Subject: [PATCH 100/104] ipu-acpi: decouple the PLATFORM ACPI driver and IPU + driver + +Signed-off-by: Florent Pirou +--- + ipu7-drivers/Makefile | 6 +- + .../drivers/media/pci/intel/ipu7/ipu7-isys.c | 8 +-- + .../drivers/media/pci/intel/ipu7/ipu7.c | 8 +-- + 
.../drivers/media/platform/intel/Kconfig | 2 +- + .../drivers/media/platform/intel/Makefile | 6 +- + .../media/platform/intel/ipu-acpi-common.c | 9 ++- + .../media/platform/intel/ipu-acpi-pdata.c | 57 ++++++++++----- + .../drivers/media/platform/intel/ipu-acpi.c | 61 +++++++++++++--- + ipu7-drivers/include/media/ipu-acpi-pdata.h | 7 +- + ipu7-drivers/include/media/ipu-acpi.h | 70 ++++++++++++++++--- + 10 files changed, 177 insertions(+), 57 deletions(-) + +diff --git a/ipu7-drivers/Makefile b/ipu7-drivers/Makefile +index deb1761..ee53758 100644 +--- a/ipu7-drivers/Makefile ++++ b/ipu7-drivers/Makefile +@@ -8,7 +8,7 @@ MODSRC := $(shell pwd) + export EXTERNAL_BUILD = 1 + export CONFIG_VIDEO_INTEL_IPU7 = m + export CONFIG_IPU_BRIDGE = y +-export CONFIG_INTEL_IPU7_ACPI = m ++export CONFIG_INTEL_IPU_ACPI = m + + obj-y += drivers/media/pci/intel/ipu7/ + obj-y += drivers/media/platform/intel/ +@@ -16,8 +16,8 @@ subdir-ccflags-y += -I$(src)/include + + subdir-ccflags-$(CONFIG_IPU_BRIDGE) += \ + -DCONFIG_IPU_BRIDGE +-subdir-ccflags-$(CONFIG_INTEL_IPU7_ACPI) += \ +- -DCONFIG_INTEL_IPU7_ACPI ++subdir-ccflags-$(CONFIG_INTEL_IPU_ACPI) += \ ++ -DCONFIG_INTEL_IPU_ACPI + subdir-ccflags-y += $(subdir-ccflags-m) + + all: +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c +index ed3e37b..767104d 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c +@@ -178,7 +178,7 @@ static int isys_register_ext_subdev(struct ipu7_isys *isys, + client = isys_find_i2c_subdev(adapter, sd_info); + if (client) { + dev_warn(dev, "Device exists\n"); +-#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) ++#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) + /* TODO: remove i2c_unregister_device() */ + i2c_unregister_device(client); + #else +@@ -266,7 +266,7 @@ static int isys_fw_log_init(struct ipu7_isys *isys) + return 0; + } + +-#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) ++#if 
IS_ENABLED(CONFIG_INTEL_IPU_ACPI) + /* The .bound() notifier callback when a match is found */ + static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, +@@ -580,7 +580,7 @@ static int isys_tpg_create_media_links(struct ipu7_isys *isys) + + #endif + +-#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) ++#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) + static int isys_register_devices(struct ipu7_isys *isys) + { + struct device *dev = &isys->adev->auxdev.dev; +@@ -875,7 +875,7 @@ static const struct dev_pm_ops isys_pm_ops = { + .resume = isys_resume, + }; + +-#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) ++#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) + static void isys_remove(struct auxiliary_device *auxdev) + { + struct ipu7_isys *isys = dev_get_drvdata(&auxdev->dev); +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c +index 20c88d5..96c3d2c 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c +@@ -40,8 +40,8 @@ + #include "ipu7-mmu.h" + #include "ipu7-platform-regs.h" + +-#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) +-#include ++#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) ++#include + + #endif + #define IPU_PCI_BAR 0 +@@ -2626,8 +2626,8 @@ static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto out_ipu_bus_del_devices; + } + +-#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) +- ipu_get_acpi_devices(&dev->platform_data); ++#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) ++ ipu_get_acpi_devices_new(&dev->platform_data); + #endif + isp->isys = ipu7_isys_init(pdev, dev, isys_ctrl, isys_base, + dev->platform_data, +diff --git a/ipu7-drivers/drivers/media/platform/intel/Kconfig b/ipu7-drivers/drivers/media/platform/intel/Kconfig +index f8affa6..b8a7a36 100644 +--- a/ipu7-drivers/drivers/media/platform/intel/Kconfig ++++ b/ipu7-drivers/drivers/media/platform/intel/Kconfig +@@ -8,7 +8,7 @@ config INTEL_IPU7_FPGA_PDATA + development and mainly 
used for pixter or sensor enablement + without ACPI support. + +-config INTEL_IPU7_ACPI ++config INTEL_IPU_ACPI + tristate "Enable IPU ACPI driver" + default VIDEO_INTEL_IPU7 + depends on I2C +diff --git a/ipu7-drivers/drivers/media/platform/intel/Makefile b/ipu7-drivers/drivers/media/platform/intel/Makefile +index d0c41b7..409d55c 100644 +--- a/ipu7-drivers/drivers/media/platform/intel/Makefile ++++ b/ipu7-drivers/drivers/media/platform/intel/Makefile +@@ -1,5 +1,5 @@ + # SPDX-License-Identifier: GPL-2.0 +-# Copyright (c) 2010 - 2022 Intel Corporation. ++# Copyright (c) 2010 - 2025 Intel Corporation. + + is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) + ifeq ($(is_kernel_lt_6_10), 1) +@@ -10,8 +10,8 @@ endif + + ccflags-y += -I$(src)/../../pci/intel/ipu7/ + +-ifneq ($(filter y m, $(CONFIG_INTEL_IPU7_ACPI)),) +-obj-$(CONFIG_INTEL_IPU7_ACPI) += ipu-acpi.o \ ++ifneq ($(filter y m, $(CONFIG_INTEL_IPU_ACPI)),) ++obj-$(CONFIG_INTEL_IPU_ACPI) += ipu-acpi.o \ + ipu-acpi-pdata.o \ + ipu-acpi-common.o + else +diff --git a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c +index 936c158..1804932 100644 +--- a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c ++++ b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + /* +- * Copyright (c) 2016-2024 Intel Corporation. ++ * Copyright (c) 2016-2025 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version +@@ -13,7 +13,10 @@ + * + */ + #include ++#include + #include ++#include ++ + #include + #include + +@@ -365,8 +368,12 @@ int ipu_acpi_get_dep_data(struct device *dev, + continue; + + /* Process device IN3472 created by acpi */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) ++ if (acpi_bus_get_device(dep_devices.handles[i], &device)) { ++#else + device = acpi_fetch_acpi_dev(dep_devices.handles[i]); + if (!device) { ++#endif + pr_err("IPU ACPI: Failed to get ACPI device"); + return -ENODEV; + } +diff --git a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c +index 0a84dcc..be75bb4 100644 +--- a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c ++++ b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c +@@ -12,18 +12,31 @@ + * GNU General Public License for more details. 
+ * + */ +- + #include + #include + + #define MIN_SENSOR_I2C 1 + #define MIN_SERDES_I2C 3 + #define SUFFIX_BASE 97 ++#define MSG_LEN 128 + +-struct ipu7_isys_subdev_pdata acpi_subdev_pdata = { +- .subdevs = (struct ipu7_isys_subdev_info *[]) { +- NULL, +- } ++static struct ipu_isys_subdev_pdata *ptr_built_in_pdata; ++ ++void set_built_in_pdata(struct ipu_isys_subdev_pdata *pdata) ++{ ++ ptr_built_in_pdata = pdata; ++}; ++EXPORT_SYMBOL(set_built_in_pdata); ++ ++static struct ipu_isys_clk_mapping clk_mapping[] = { ++ { CLKDEV_INIT(NULL, NULL, NULL), NULL } ++}; ++ ++struct ipu_isys_subdev_pdata acpi_subdev_pdata = { ++ .subdevs = (struct ipu_isys_subdev_info *[]) { ++ NULL, NULL, NULL, NULL, NULL, ++ }, ++ .clk_map = clk_mapping, + }; + + struct serdes_local serdes_info; +@@ -74,7 +87,8 @@ static int get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) + */ + static void update_i2c_bus_id(void) + { +- struct ipu7_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; ++ struct ipu_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; ++ + for (int i = 0; subdevs[i] != NULL; i++) { + subdevs[i]->i2c.i2c_adapter_id = + get_i2c_bus_id(subdevs[i]->i2c.i2c_adapter_id, +@@ -83,9 +97,9 @@ static void update_i2c_bus_id(void) + } + } + +-struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void) ++struct ipu_isys_subdev_pdata *get_acpi_subdev_pdata(void) + { +- struct ipu7_isys_subdev_pdata *ptr; ++ struct ipu_isys_subdev_pdata *ptr; + + update_i2c_bus_id(); + ptr = &acpi_subdev_pdata; +@@ -122,7 +136,7 @@ static void print_serdes_sdinfo(struct serdes_subdev_info *sdinfo) + (int)sd_mpdata->gpio_powerup_seq[i]); + } + +-static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) ++static void print_serdes_subdev(struct ipu_isys_subdev_info *sd) + { + struct serdes_platform_data *sd_pdata = sd->i2c.board_info.platform_data; + int i; +@@ -153,6 +167,7 @@ static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) + pr_debug("\t\tlink_freq_mbps \t\t= %d", 
sd_pdata->link_freq_mbps); + pr_debug("\t\tdeser_nlanes \t\t= %d", sd_pdata->deser_nlanes); + pr_debug("\t\tser_nlanes \t\t= %d", sd_pdata->ser_nlanes); ++ pr_debug("\t\tser_name \t\t= %s", sd_pdata->ser_name); + + for (i = 0; i < serdes_info.rx_port; i++) { + sd_sdinfo = &sd_pdata->subdev_info[i]; +@@ -167,7 +182,7 @@ static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) + + } + +-static void print_subdev(struct ipu7_isys_subdev_info *sd) ++static void print_subdev(struct ipu_isys_subdev_info *sd) + { + struct sensor_platform_data *spdata = sd->i2c.board_info.platform_data; + int i; +@@ -198,7 +213,7 @@ static void print_subdev(struct ipu7_isys_subdev_info *sd) + pr_debug("\t\treset_pin \t\t= %d", spdata->reset_pin); + pr_debug("\t\tdetect_pin \t\t= %d", spdata->detect_pin); + +- for (i = 0; i < IPU7_SPDATA_GPIO_NUM; i++) ++ for (i = 0; i < IPU_SPDATA_GPIO_NUM; i++) + pr_debug("\t\tgpios[%d] \t\t= %d", i, spdata->gpios[i]); + } + +@@ -226,11 +241,11 @@ static void set_common_gpio(struct control_logic_data *ctl_data, + ctl_data->gpio[i].func); + } + +-static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, ++static int set_csi2(struct ipu_isys_subdev_info **sensor_sd, + unsigned int lanes, unsigned int port, + unsigned int bus_type) + { +- struct ipu7_isys_csi2_config *csi2_config; ++ struct ipu_isys_csi2_config *csi2_config; + + csi2_config = kzalloc(sizeof(*csi2_config), GFP_KERNEL); + if (!csi2_config) +@@ -250,7 +265,7 @@ static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, + return 0; + } + +-static void set_i2c(struct ipu7_isys_subdev_info **sensor_sd, ++static void set_i2c(struct ipu_isys_subdev_info **sensor_sd, + struct device *dev, + const char *sensor_name, + unsigned int addr, +@@ -274,7 +289,7 @@ static void set_serdes_sd_pdata(struct serdes_module_pdata **module_pdata, + + #define PORT_NR 8 + +-static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, ++static int set_serdes_subdev(struct ipu_isys_subdev_info 
**serdes_sd, + struct device *dev, + struct serdes_platform_data **pdata, + const char *sensor_name, +@@ -305,6 +320,7 @@ static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, + /* board info */ + strscpy(serdes_sdinfo[i].board_info.type, sensor_name, I2C_NAME_SIZE); + serdes_sdinfo[i].board_info.addr = serdes_info.sensor_map_addr + i; ++ + serdes_sdinfo[i].board_info.platform_data = module_pdata[i]; + + /* serdes_subdev_info */ +@@ -326,7 +342,7 @@ static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, + return 0; + } + +-static int set_pdata(struct ipu7_isys_subdev_info **sensor_sd, ++static int set_pdata(struct ipu_isys_subdev_info **sensor_sd, + struct device *dev, + const char *sensor_name, + const char *hid_name, +@@ -423,7 +439,7 @@ static void set_serdes_info(struct device *dev, const char *sensor_name, + } + + static int populate_sensor_pdata(struct device *dev, +- struct ipu7_isys_subdev_info **sensor_sd, ++ struct ipu_isys_subdev_info **sensor_sd, + struct sensor_bios_data *cam_data, + struct control_logic_data *ctl_data, + enum connection_type connect, +@@ -433,7 +449,7 @@ static int populate_sensor_pdata(struct device *dev, + int sensor_physical_addr, + int link_freq) + { +- struct ipu7_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; ++ struct ipu_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; + int i = 0; + int ret; + +@@ -453,6 +469,7 @@ static int populate_sensor_pdata(struct device *dev, + cam_data->i2c_num); + return -1; + } ++ + /* Others use DISCRETE Control Logic */ + if (ctl_data->type != CL_DISCRETE) { + dev_err(dev, "IPU ACPI: Control Logic Type\n"); +@@ -530,12 +547,13 @@ int get_sensor_pdata(struct device *dev, + { + struct sensor_bios_data *cam_data; + struct control_logic_data *ctl_data; +- struct ipu7_isys_subdev_info *sensor_sd; ++ struct ipu_isys_subdev_info *sensor_sd; + int rval; + + cam_data = kzalloc(sizeof(*cam_data), GFP_KERNEL); + if (!cam_data) + return -ENOMEM; 
++ + cam_data->dev = dev; + + ctl_data = kzalloc(sizeof(*ctl_data), GFP_KERNEL); +@@ -543,6 +561,7 @@ int get_sensor_pdata(struct device *dev, + kfree(cam_data); + return -ENOMEM; + } ++ + ctl_data->dev = dev; + + sensor_sd = kzalloc(sizeof(*sensor_sd), GFP_KERNEL); +diff --git a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c +index deed3bb..a5a54b4 100644 +--- a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c ++++ b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c +@@ -32,6 +32,8 @@ + #include + #include + #include ++#include ++ + #include + #include + +@@ -73,7 +75,7 @@ static const struct ipu_acpi_devices supported_devices[] = { + + static int get_table_index(const char *acpi_name) + { +- unsigned int i; ++ int i; + + for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { + if (!strncmp(supported_devices[i].hid_name, acpi_name, +@@ -143,8 +145,8 @@ static int ipu_acpi_test(struct device *dev, void *priv) + + if (acpi_idx < 0) + return 0; +- else +- dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); ++ ++ dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); + + const char *target_hid = supported_devices[acpi_idx].hid_name; + +@@ -161,10 +163,10 @@ static int ipu_acpi_test(struct device *dev, void *priv) + if (!adev) { + dev_dbg(dev, "No ACPI device found for %s\n", target_hid); + return 0; +- } else { +- set_primary_fwnode(dev, &adev->fwnode); +- dev_dbg(dev, "Assigned fwnode to %s\n", dev_name(dev)); + } ++ ++ set_primary_fwnode(dev, &adev->fwnode); ++ dev_dbg(dev, "Assigned fwnode to %s\n", dev_name(dev)); + } + + if (ACPI_COMPANION(dev) != adev) { +@@ -185,15 +187,53 @@ static int ipu_acpi_test(struct device *dev, void *priv) + return 0; /* Continue iteration */ + } + ++/* Scan all i2c devices and pick ones which we can handle */ ++ + /* Try to get all IPU related devices mentioned in BIOS and all related information +- * return a new generated existing pdata ++ * If there 
is existing ipu_isys_subdev_pdata, update the existing pdata ++ * If not, return a new generated existing pdata + */ + +-int ipu_get_acpi_devices(void **spdata) ++int ipu_get_acpi_devices(void *driver_data, ++ struct device *dev, ++ void **spdata, ++ void **built_in_pdata, ++ int (*fn) ++ (struct device *, void *, ++ void *csi2, ++ bool reprobe)) + { + struct ipu_i2c_helper helper = {0}; + int rval; +- struct ipu7_isys_subdev_pdata *ptr = NULL; ++ ++ if (!built_in_pdata) ++ dev_dbg(dev, "Built-in pdata not found"); ++ else { ++ dev_dbg(dev, "Built-in pdata found"); ++ set_built_in_pdata(*built_in_pdata); ++ } ++ ++ if ((!fn) || (!driver_data)) ++ return -ENODEV; ++ ++ rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); ++ if (rval < 0) ++ return rval; ++ ++ if (!built_in_pdata) { ++ dev_dbg(dev, "Return ACPI generated pdata"); ++ *spdata = get_acpi_subdev_pdata(); ++ } else ++ dev_dbg(dev, "Return updated built-in pdata"); ++ ++ return 0; ++} ++EXPORT_SYMBOL(ipu_get_acpi_devices); ++ ++int ipu_get_acpi_devices_new(void **spdata) ++{ ++ int rval; ++ struct ipu_isys_subdev_pdata *ptr = NULL; + + rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); + if (rval < 0) +@@ -205,10 +245,11 @@ int ipu_get_acpi_devices(void **spdata) + + return 0; + } +-EXPORT_SYMBOL(ipu_get_acpi_devices); ++EXPORT_SYMBOL(ipu_get_acpi_devices_new); + + static int __init ipu_acpi_init(void) + { ++ set_built_in_pdata(NULL); + return 0; + } + +diff --git a/ipu7-drivers/include/media/ipu-acpi-pdata.h b/ipu7-drivers/include/media/ipu-acpi-pdata.h +index e207441..c6ec18e 100644 +--- a/ipu7-drivers/include/media/ipu-acpi-pdata.h ++++ b/ipu7-drivers/include/media/ipu-acpi-pdata.h +@@ -9,6 +9,7 @@ + + #define CL_EMPTY 0 + #define CL_DISCRETE 1 ++#define SERDES_MAX_PORT 4 + #define SERDES_MAX_GPIO_POWERUP_SEQ 4 + #define LOOP_SIZE 10 + +@@ -26,7 +27,7 @@ int get_sensor_pdata(struct device *dev, + int sensor_physical_addr, + int link_freq); + +-struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void); 
++struct ipu_isys_subdev_pdata *get_acpi_subdev_pdata(void); + + struct sensor_platform_data { + unsigned int port; +@@ -34,11 +35,11 @@ struct sensor_platform_data { + uint32_t i2c_slave_address; + int irq_pin; + unsigned int irq_pin_flags; +- char irq_pin_name[IPU7_SPDATA_IRQ_PIN_NAME_LEN]; ++ char irq_pin_name[IPU_SPDATA_IRQ_PIN_NAME_LEN]; + int reset_pin; + int detect_pin; + char suffix; +- int gpios[IPU7_SPDATA_GPIO_NUM]; ++ int gpios[IPU_SPDATA_GPIO_NUM]; + }; + + struct serdes_platform_data { +diff --git a/ipu7-drivers/include/media/ipu-acpi.h b/ipu7-drivers/include/media/ipu-acpi.h +index bc63240..1f8e09e 100644 +--- a/ipu7-drivers/include/media/ipu-acpi.h ++++ b/ipu7-drivers/include/media/ipu-acpi.h +@@ -16,16 +16,53 @@ + #ifndef MEDIA_INTEL_IPU_ACPI_H + #define MEDIA_INTEL_IPU_ACPI_H + +-#include "ipu7-isys.h" ++#include ++#include ++ ++#include ++ ++struct ipu_isys_csi2_config { ++ unsigned int nlanes; ++ unsigned int port; ++ enum v4l2_mbus_type bus_type; ++}; ++ ++struct ipu_isys_subdev_i2c_info { ++ struct i2c_board_info board_info; ++ int i2c_adapter_id; ++ char i2c_adapter_bdf[32]; ++}; ++ ++struct ipu_isys_subdev_info { ++ struct ipu_isys_csi2_config *csi2; ++ struct ipu_isys_subdev_i2c_info i2c; ++#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) ++ char *acpi_hid; ++#endif ++}; ++ ++struct ipu_isys_clk_mapping { ++ struct clk_lookup clkdev_data; ++ char *platform_clock_name; ++}; ++ ++struct ipu_isys_subdev_pdata { ++ struct ipu_isys_subdev_info **subdevs; ++ struct ipu_isys_clk_mapping *clk_map; ++}; + + #define MAX_ACPI_SENSOR_NUM 4 + #define MAX_ACPI_I2C_NUM 12 + #define MAX_ACPI_GPIO_NUM 12 + + #define GPIO_RESET 0x0 ++#define GPIO_POWER_EN 0xb ++#define GPIO_READY_STAT 0x13 ++ ++#define IPU_SPDATA_GPIO_NUM 4 ++#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 + +-#define IPU7_SPDATA_GPIO_NUM 4 +-#define IPU7_SPDATA_IRQ_PIN_NAME_LEN 16 ++void set_built_in_pdata(struct ipu_isys_subdev_pdata *pdata); + + enum connection_type { + TYPE_DIRECT, +@@ -122,6 +159,11 @@ 
struct ipu_gpio_info { + bool valid; + }; + ++struct ipu_irq_info { ++ int irq_pin; ++ char irq_pin_name[IPU_SPDATA_IRQ_PIN_NAME_LEN]; ++}; ++ + /* Each I2C client linked to 1 set of CTL Logic */ + struct control_logic_data { + struct device *dev; +@@ -133,9 +175,7 @@ struct control_logic_data { + bool completed; + }; + +-int ipu_get_acpi_devices(void **spdata); +- +-struct ipu7_isys_subdev_pdata *get_built_in_pdata(void); ++struct ipu_isys_subdev_pdata *get_built_in_pdata(void); + + int ipu_acpi_get_cam_data(struct device *dev, + struct sensor_bios_data *sensor); +@@ -146,17 +186,29 @@ int ipu_acpi_get_dep_data(struct device *dev, + int ipu_acpi_get_control_logic_data(struct device *dev, + struct control_logic_data **ctl_data); + ++struct intel_ipu6_regulator { ++ char *src_dev_name; ++ char *src_rail; ++ char *dest_rail; ++}; ++ + struct ipu_i2c_helper { + int (*fn)(struct device *dev, void *priv, +- struct ipu7_isys_csi2_config *csi2, ++ struct ipu_isys_csi2_config *csi2, + bool reprobe); + void *driver_data; + }; + ++struct ipu_i2c_new_dev { ++ struct list_head list; ++ struct i2c_board_info info; ++ unsigned short int bus; ++}; ++ + struct ipu_camera_module_data { + struct list_head list; +- struct ipu7_isys_subdev_info sd; +- struct ipu7_isys_csi2_config csi2; ++ struct ipu_isys_subdev_info sd; ++ struct ipu_isys_csi2_config csi2; + unsigned int ext_clk; + void *pdata; /* Ptr to generated platform data*/ + void *priv; /* Private for specific subdevice */ +-- +2.43.0 + diff --git a/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch b/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch new file mode 100644 index 0000000..5e7e075 --- /dev/null +++ b/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch @@ -0,0 +1,29 @@ +From dbfe2a70deae4b31e089e52d9ad40a1be37970da Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Sat, 17 Jan 2026 02:05:34 -0700 +Subject: [PATCH 101/104] ipu7-isys : allow 
virtual-channels on 16 csi2 input + video capture + +* ipu7-isys : extend per csi2 video captures v4l2 to 16 sensors input streams + +Signed-off-by: Florent Pirou +--- + ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.h +index 6c23b80..dfca0d2 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.h ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.h +@@ -20,7 +20,7 @@ struct ipu7_isys_stream; + #define INVALID_VC_ID -1 + #define IPU7_NR_OF_CSI2_SINK_PADS 1U + #define IPU7_CSI2_PAD_SINK 0U +-#define IPU7_NR_OF_CSI2_SRC_PADS 8U ++#define IPU7_NR_OF_CSI2_SRC_PADS 16U + #define IPU7_CSI2_PAD_SRC 1U + #define IPU7_NR_OF_CSI2_PADS (IPU7_NR_OF_CSI2_SINK_PADS + \ + IPU7_NR_OF_CSI2_SRC_PADS) +-- +2.43.0 + diff --git a/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch b/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch new file mode 100644 index 0000000..04c3b04 --- /dev/null +++ b/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch @@ -0,0 +1,124 @@ +From 3849f87c48328b61f1ed81817b8c3f29473c6624 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Sat, 17 Jan 2026 02:08:28 -0700 +Subject: [PATCH 102/104] ipu7-isys : add d4xx pixel format support + +Signed-off-by: Florent Pirou +--- + .../drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h | 1 + + .../drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c | 3 +++ + .../drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c | 3 +++ + .../drivers/media/pci/intel/ipu7/ipu7-isys-video.c | 9 +++++++++ + ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h | 4 ++-- + 5 files changed, 18 insertions(+), 2 deletions(-) + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h +index 45db85e..dcb9c49 100644 
+--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h +@@ -126,6 +126,7 @@ enum ipu7_insys_frame_format_type { + IPU_INSYS_FRAME_FORMAT_ARGB888 = 31, + IPU_INSYS_FRAME_FORMAT_BGRA888 = 32, + IPU_INSYS_FRAME_FORMAT_ABGR888 = 33, ++ IPU_INSYS_FRAME_FORMAT_VYUY = 34, + N_IPU_INSYS_FRAME_FORMAT + }; + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c +index 0c931a7..88b12f5 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c +@@ -28,11 +28,13 @@ + #include "ipu7-isys-csi-phy.h" + + static const u32 csi2_supported_codes[] = { ++ MEDIA_BUS_FMT_Y8_1X8, + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, ++ MEDIA_BUS_FMT_VYUY8_1X16, + MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, +@@ -46,6 +48,7 @@ static const u32 csi2_supported_codes[] = { + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, ++ MEDIA_BUS_FMT_FIXED, + 0, + }; + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c +index 98b6ef6..b99b5e9 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c +@@ -19,6 +19,7 @@ + #include "ipu7-isys.h" + #include "ipu7-isys-subdev.h" + ++ + unsigned int ipu7_isys_mbus_code_to_mipi(u32 code) + { + switch (code) { +@@ -30,6 +31,7 @@ unsigned int ipu7_isys_mbus_code_to_mipi(u32 code) + return MIPI_CSI2_DT_YUV422_10B; + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: ++ case MEDIA_BUS_FMT_VYUY8_1X16: + return MIPI_CSI2_DT_YUV422_8B; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case 
MEDIA_BUS_FMT_SGBRG12_1X12: +@@ -42,6 +44,7 @@ unsigned int ipu7_isys_mbus_code_to_mipi(u32 code) + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return MIPI_CSI2_DT_RAW10; ++ case MEDIA_BUS_FMT_Y8_1X8: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +index 12fdf55..2993d62 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +@@ -83,12 +83,21 @@ const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = { + IPU_INSYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, + IPU_INSYS_FRAME_FORMAT_UYVY}, ++ {V4L2_PIX_FMT_Z16, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, ++ IPU_INSYS_FRAME_FORMAT_UYVY}, + {V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, + IPU_INSYS_FRAME_FORMAT_YUYV}, ++ {V4L2_PIX_FMT_Y8I, 16, 16, MEDIA_BUS_FMT_VYUY8_1X16, ++ IPU_INSYS_FRAME_FORMAT_YUYV}, + {V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, + IPU_INSYS_FRAME_FORMAT_RGB565}, + {V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, + IPU_INSYS_FRAME_FORMAT_RGBA888}, ++ {V4L2_PIX_FMT_Y12I, 32, 24, MEDIA_BUS_FMT_RGB888_1X24, ++ IPU_INSYS_FRAME_FORMAT_RGBA888}, ++ {V4L2_PIX_FMT_GREY, 8, 8, MEDIA_BUS_FMT_Y8_1X8, ++ IPU_INSYS_FRAME_FORMAT_RAW8}, ++ {V4L2_META_FMT_D4XX, 8, 8, MEDIA_BUS_FMT_FIXED, 0}, + }; + + static int video_open(struct file *file) +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h +index c9ca2fb..8a505b8 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h +@@ -46,8 +46,8 @@ struct dentry; + #define IPU_ISYS_SIZE_SEND_QUEUE 40U + #define IPU_ISYS_NUM_RECV_QUEUE 1U + +-#define IPU_ISYS_MIN_WIDTH 2U +-#define 
IPU_ISYS_MIN_HEIGHT 2U ++#define IPU_ISYS_MIN_WIDTH 1U ++#define IPU_ISYS_MIN_HEIGHT 1U + #define IPU_ISYS_MAX_WIDTH 8160U + #define IPU_ISYS_MAX_HEIGHT 8190U + +-- +2.43.0 + diff --git a/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch b/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch new file mode 100644 index 0000000..907b329 --- /dev/null +++ b/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch @@ -0,0 +1,796 @@ +From f25d8a31605419e73bd9a0a30d23feee864e5a8f Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Sat, 17 Jan 2026 02:09:32 -0700 +Subject: [PATCH 103/104] ipu7-isys: add video VIDIOC_S_EXT_CTRLS callbacks by + streamid + +Signed-off-by: Florent Pirou +--- + .../pci/intel/ipu7/ipu7-isys-video-ext.h | 41 ++ + .../media/pci/intel/ipu7/ipu7-isys-video.c | 602 +++++++++++++++++- + .../media/pci/intel/ipu7/ipu7-isys-video.h | 4 + + 3 files changed, 643 insertions(+), 4 deletions(-) + create mode 100644 ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video-ext.h + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video-ext.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video-ext.h +new file mode 100644 +index 0000000..91b2eca +--- /dev/null ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video-ext.h +@@ -0,0 +1,41 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2013 - 2025 Intel Corporation ++ */ ++ ++#include ++ ++enum ipu7_isys_enum_link_state { ++ IPU7_ISYS_LINK_STATE_DISABLED = 0, ++ IPU7_ISYS_LINK_STATE_ENABLED = 1, ++ IPU7_ISYS_LINK_STATE_DONE = 2, ++ IPU7_ISYS_LINK_STATE_MD = 3, ++ IPU7_ISYS_LINK_STATE_MAX, ++}; ++ ++static struct media_pad *other_pad(struct media_pad *pad); ++bool has_src_pad_stream_active(struct v4l2_subdev *sd, u32 stream, u32 pad); ++bool get_src_pad_by_src_stream(struct v4l2_subdev *sd, u32 stream, u32 *s_pad); ++ ++/* needed for callback */ ++extern int ipu7_isys_inherit_ctrls(struct ipu7_isys_video 
*av, ++ struct v4l2_subdev *sd, void *data); ++extern int ipu7_isys_set_fmt_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data); ++extern int ipu7_isys_enum_frameintervals_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data); ++extern int ipu7_isys_enum_fmt_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data); ++extern int ipu7_isys_enum_framesizes_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data); ++extern int ipu7_isys_set_parm_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data); ++extern int ipu7_isys_get_parm_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data); ++ ++extern int media_pipeline_enumerate_by_stream_cb( ++ struct ipu7_isys_video *av, ++ int (*cb_fn)(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, ++ void *data), ++ void *data); +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +index 2993d62..cc48706 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +@@ -38,6 +38,9 @@ + #include "ipu7-fw-isys.h" + #include "ipu7-isys.h" + #include "ipu7-isys-video.h" ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++#include "ipu7-isys-video-ext.h" ++#endif + #include "ipu7-platform-regs.h" + + const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = { +@@ -102,8 +105,8 @@ const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = { + + static int video_open(struct file *file) + { +-#ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET + struct ipu7_isys_video *av = video_drvdata(file); ++#ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET + struct ipu7_isys *isys = av->isys; + struct ipu7_bus_device *adev = isys->adev; + +@@ -115,6 +118,11 @@ static int video_open(struct file *file) + } + mutex_unlock(&isys->reset_mutex); + ++#endif ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ /* 
ipu_isys inherit remote sd ctrls */ ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_ENABLED && media_pad_remote_pad_first(&av->pad)) ++ media_pipeline_enumerate_by_stream_cb(av, ipu7_isys_inherit_ctrls, NULL); + #endif + return v4l2_fh_open(file); + } +@@ -139,6 +147,7 @@ static int video_release(struct file *file) + } + + #endif ++ + const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat) + { + unsigned int i; +@@ -168,7 +177,7 @@ static int ipu7_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f) + { + unsigned int i, num_found; +- ++#ifndef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS + for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { + if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + continue; +@@ -188,12 +197,62 @@ static int ipu7_isys_vidioc_enum_fmt(struct file *file, void *fh, + } + + return -EINVAL; ++#else ++ struct ipu7_isys_video *av = video_drvdata(file); ++ const struct ipu7_isys_pixelformat *pfmt; ++ struct v4l2_subdev_mbus_code_enum mce; ++ unsigned int j; ++ int ret=-EINVAL; ++ ++ /* Code found */ ++ for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { ++ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ++ continue; ++ ++ if (f->mbus_code && f->mbus_code != ipu7_isys_pfmts[i].code) ++ continue; ++ ++ /* poll remote sensors */ ++ ret = 0; ++ pfmt = &ipu7_isys_pfmts[i]; ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DONE) { ++ ++ mce.index = f->index; ++ mce.pad = 0; ++ ++ ret = media_pipeline_enumerate_by_stream_cb(av, ++ ipu7_isys_enum_fmt_subdev, &mce); ++ if (!ret) { ++ for (j = 0; j < ARRAY_SIZE(ipu7_isys_pfmts); j++) { ++ if (mce.code != ipu7_isys_pfmts[j].code) ++ continue; ++ ++ pfmt = &ipu7_isys_pfmts[j]; ++ break; ++ } ++ } ++ } else if (num_found < f->index) { ++ num_found++; ++ continue; ++ } ++ break; ++ } ++ ++ if (!ret) { ++ f->pixelformat = pfmt->pixelformat; ++ f->mbus_code = pfmt->code; ++ f->flags = 0; ++ } ++ ++ return ret; ++#endif + } + + static int 
ipu7_isys_vidioc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) + { + unsigned int i; ++#ifndef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS + + if (fsize->index > 0) + return -EINVAL; +@@ -214,8 +273,187 @@ static int ipu7_isys_vidioc_enum_framesizes(struct file *file, void *fh, + } + + return -EINVAL; ++#else ++ struct ipu7_isys_video *av = video_drvdata(file); ++ struct v4l2_subdev_frame_size_enum fse; ++ int ret = -EINVAL; ++ ++ for (i = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { ++ if (fsize->pixel_format != ipu7_isys_pfmts[i].pixelformat) ++ continue; ++ ++ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; ++ fsize->stepwise.min_width = IPU_ISYS_MIN_WIDTH; ++ fsize->stepwise.max_width = IPU_ISYS_MAX_WIDTH; ++ fsize->stepwise.min_height = IPU_ISYS_MIN_HEIGHT; ++ fsize->stepwise.max_height = IPU_ISYS_MAX_HEIGHT; ++ fsize->stepwise.step_width = 2; ++ fsize->stepwise.step_height = 2; ++ ++ ret = 0; ++ ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DONE) { ++ ++ fse.code = ipu7_isys_pfmts[i].code; ++ fse.index = fsize->index; ++ ++ ret = media_pipeline_enumerate_by_stream_cb(av, ++ ipu7_isys_enum_framesizes_subdev, &fse); ++ if (!ret) { ++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; ++ if (fse.max_width > 0 && fse.max_height > 0) { ++ fsize->discrete.width = fse.max_width; ++ fsize->discrete.height = fse.max_height; ++ } else { ++ fsize->discrete.width = 0; ++ fsize->discrete.height = 0; ++ ret = -EINVAL; ++ } ++ } ++ } ++ break; ++ } ++ return ret; ++#endif + } + ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++/* extended v4l2_ctrl_ops */ ++static int ipu7_isys_video_s_ctrl(struct v4l2_ctrl *ctrl) ++{ ++ struct ipu7_isys_video *av = ctrl->priv; ++ struct ipu7_isys *isys = av->isys; ++ mutex_lock(&isys->mutex); ++ ++ switch (ctrl->id) { ++ case V4L2_CID_IPU_ENUMERATE_LINK: ++ av->enum_link_state = ctrl->val; ++ break; ++ } ++ ++ mutex_unlock(&isys->mutex); ++ return 0; ++} ++ ++static const struct v4l2_ctrl_ops ipu7_isys_video_ctrl_ops = { ++ .s_ctrl = 
ipu7_isys_video_s_ctrl, ++}; ++ ++static const struct v4l2_ctrl_config ipu7_isys_video_enum_link = { ++ .ops = &ipu7_isys_video_ctrl_ops, ++ .id = V4L2_CID_IPU_ENUMERATE_LINK, ++ .name = "Enumerate graph link", ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .min = 0, ++ .max = IPU7_ISYS_LINK_STATE_MAX, ++ .step = 1, ++ .def = 0, ++}; ++ ++/* extended v4l2_ioctl_ops */ ++static struct media_pad *other_pad(struct media_pad *pad) ++{ ++ struct media_link *link; ++ ++ list_for_each_entry(link, &pad->entity->links, list) { ++ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) ++ != MEDIA_LNK_FL_DATA_LINK) ++ continue; ++ ++ return link->source == pad ? link->sink : link->source; ++ } ++ ++ WARN_ON(1); ++ return NULL; ++} ++ ++static int ipu7_isys_get_parm(struct file *file, void *fh, ++ struct v4l2_streamparm *a) ++{ ++ struct ipu7_isys_video *av = video_drvdata(file); ++ struct v4l2_subdev_frame_interval fi; ++ int ret = -ENOLINK; ++ ++ a->parm.capture.timeperframe.numerator = 1; ++ a->parm.capture.timeperframe.denominator = 30; ++ ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DISABLED) ++ return 0; ++ ++ if (media_pad_remote_pad_first(&av->pad)) { ++ ret = media_pipeline_enumerate_by_stream_cb(av, ++ ipu7_isys_get_parm_subdev, &fi); ++ ++ if (!ret) { ++ a->parm.capture.timeperframe.numerator = fi.interval.numerator; ++ a->parm.capture.timeperframe.denominator = fi.interval.denominator; ++ } ++ } ++ return ret; ++} ++ ++static int ipu7_isys_set_parm(struct file *file, void *fh, ++ struct v4l2_streamparm *a) ++{ ++ struct ipu7_isys_video *av = video_drvdata(file); ++ struct v4l2_subdev_frame_interval fi; ++ int ret = -ENOLINK; ++ ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DISABLED) ++ return 0; ++ ++ fi.interval.numerator = a->parm.capture.timeperframe.numerator; ++ fi.interval.denominator = a->parm.capture.timeperframe.denominator; ++ ++ if (media_pad_remote_pad_first(&av->pad)) ++ ret = media_pipeline_enumerate_by_stream_cb(av, ++ ipu7_isys_set_parm_subdev, &fi); ++ ++ 
return ret; ++} ++ ++static int ipu7_isys_enum_frameintervals(struct file *file, void *fh, ++ struct v4l2_frmivalenum *intervals) ++{ ++ struct ipu7_isys_video *av = video_drvdata(file); ++ struct v4l2_subdev_frame_interval_enum fie; ++ unsigned int i, num_found; ++ int ret=-EINVAL; ++ ++ /* Code found */ ++ for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { ++ if (intervals->pixel_format != ipu7_isys_pfmts[i].pixelformat) ++ continue; ++ ++ intervals->type = V4L2_FRMIVAL_TYPE_DISCRETE; ++ intervals->discrete.numerator = 1; ++ intervals->discrete.denominator = 30; ++ ++ ret = 0; ++ ++ /* poll remote sensors */ ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DONE) { ++ ++ fie.code = ipu7_isys_pfmts[i].code; ++ fie.index = intervals->index; ++ fie.width = intervals->width; ++ fie.height = intervals->height; ++ ++ ret = media_pipeline_enumerate_by_stream_cb(av, ++ ipu7_isys_enum_frameintervals_subdev, &fie); ++ if (!ret && ++ fie.interval.numerator > 0 && ++ fie.interval.denominator > 0) { ++ intervals->discrete.numerator = fie.interval.numerator; ++ intervals->discrete.denominator = fie.interval.denominator; ++ } ++ } ++ break; ++ } ++ return ret; ++} ++#endif ++ + static int ipu7_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) + { +@@ -282,6 +520,30 @@ static int ipu7_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, + + __ipu_isys_vidioc_try_fmt_vid_cap(av, f); + ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DONE) ++ { ++ int ret = 0; ++ struct v4l2_subdev_format fmt = { ++ .which = V4L2_SUBDEV_FORMAT_ACTIVE, ++ .pad = 0, ++ }; ++ struct v4l2_mbus_framefmt format = { ++ .width = f->fmt.pix.width, ++ .height = f->fmt.pix.height, ++ .code = ipu7_isys_get_isys_format(f->fmt.pix.pixelformat)->code, ++ .field = f->fmt.pix.field, ++ .colorspace = f->fmt.pix.colorspace, ++ .ycbcr_enc = f->fmt.pix.ycbcr_enc, ++ .quantization = f->fmt.pix.quantization, ++ .xfer_func = 
f->fmt.pix.xfer_func, ++ }; ++ fmt.format = format; ++ ret = media_pipeline_enumerate_by_stream_cb(av, ipu7_isys_set_fmt_subdev, &fmt); ++ if (ret) ++ return -EINVAL; ++ } ++#endif + return 0; + } + +@@ -290,9 +552,51 @@ static int ipu7_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, + { + struct ipu7_isys_video *av = video_drvdata(file); + ++#ifndef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS + ipu7_isys_vidioc_try_fmt_vid_cap(file, fh, f); +- av->pix_fmt = f->fmt.pix; ++#else ++ struct media_pad *source_pad = media_pad_remote_pad_first(&av->pad); ++ struct media_pad *remote_pad = source_pad; ++ struct v4l2_subdev *sd = NULL; ++ ++ if (vb2_is_busy(&av->aq.vbq)) ++ return -EBUSY; ++ ++ __ipu_isys_vidioc_try_fmt_vid_cap(av, f); ++ ++ if (av->enum_link_state == IPU7_ISYS_LINK_STATE_DONE) { ++ ++ int ret = 0; ++ struct v4l2_subdev_format fmt = { ++ .which = V4L2_SUBDEV_FORMAT_ACTIVE, ++ .pad = 0, ++ }; ++ struct v4l2_mbus_framefmt format = { ++ .width = f->fmt.pix.width, ++ .height = f->fmt.pix.height, ++ .code = ipu7_isys_get_isys_format(f->fmt.pix.pixelformat)->code, ++ .field = f->fmt.pix.field, ++ .colorspace = f->fmt.pix.colorspace, ++ .ycbcr_enc = f->fmt.pix.ycbcr_enc, ++ .quantization = f->fmt.pix.quantization, ++ .xfer_func = f->fmt.pix.xfer_func, ++ }; ++ fmt.format = format; ++ ret = media_pipeline_enumerate_by_stream_cb(av, ipu7_isys_set_fmt_subdev, &fmt); ++ if (ret) ++ return -EINVAL; + ++ /* TODO: set format for CSI-2 ++ do { ++ // Non-subdev nodes can be safely ignored here. 
++ if (!is_media_entity_v4l2_subdev(remote_pad->entity)) ++ continue; ++ // Set only for IPU CSI entities ++ } while ((remote_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]))); ++ */ ++ } ++#endif ++ av->pix_fmt = f->fmt.pix; + return 0; + } + +@@ -374,7 +678,9 @@ static int link_validate(struct media_link *link) + } + + v4l2_subdev_unlock_state(s_state); +- ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ //av->enum_link_state = IPU7_ISYS_LINK_STATE_DONE; ++#endif + return 0; + unlock: + v4l2_subdev_unlock_state(s_state); +@@ -1009,6 +1315,266 @@ out_media_entity_stop_streaming_firmware: + return ret; + } + ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++int ipu7_isys_set_fmt_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ struct v4l2_subdev_format *fmt = ++ (struct v4l2_subdev_format *)data; ++ /* v4l2_subdev_call doesn't call, access directly */ ++ if (sd && sd->ops && sd->ops->pad && sd->ops->pad->set_fmt) ++ ret = sd->ops->pad->set_fmt(sd, NULL, fmt); ++ ++ return ret; ++} ++ ++int ipu7_isys_get_parm_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ struct v4l2_subdev_frame_interval *fi = ++ (struct v4l2_subdev_frame_interval *)data; ++ struct v4l2_subdev_state *state = v4l2_subdev_get_unlocked_active_state(sd); ++ /* v4l2_subdev_call doesn't call, access directly */ ++ if (sd && sd->ops && sd->ops->pad && sd->ops->pad->get_frame_interval) ++ ret = sd->ops->pad->get_frame_interval(sd, state, fi); ++ ++ return ret; ++} ++ ++int ipu7_isys_set_parm_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ struct v4l2_subdev_frame_interval *fi = ++ (struct v4l2_subdev_frame_interval *)data; ++ struct v4l2_subdev_state *state = v4l2_subdev_get_unlocked_active_state(sd); ++ /* v4l2_subdev_call doesn't call, access directly */ ++ if (sd && sd->ops && sd->ops->pad && sd->ops->pad->set_frame_interval) ++ ret = 
sd->ops->pad->set_frame_interval(sd, state, fi); ++ ++ return ret; ++} ++ ++int ipu7_isys_enum_fmt_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ struct v4l2_subdev_mbus_code_enum *mce = ++ (struct v4l2_subdev_mbus_code_enum *)data; ++ /* v4l2_subdev_call doesn't call, access directly */ ++ if (sd && sd->ops && sd->ops->pad && sd->ops->pad->enum_mbus_code) ++ ret = sd->ops->pad->enum_mbus_code(sd, NULL, mce); ++ ++ return ret; ++} ++ ++int ipu7_isys_enum_framesizes_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ struct v4l2_subdev_frame_size_enum *fse = ++ (struct v4l2_subdev_frame_size_enum *)data; ++ /* v4l2_subdev_call doesn't call, access directly */ ++ if (sd && sd->ops && sd->ops->pad && sd->ops->pad->enum_frame_size) ++ ret = sd->ops->pad->enum_frame_size(sd, NULL, fse); ++ ++ return ret; ++} ++ ++int ipu7_isys_enum_frameintervals_subdev(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ struct v4l2_subdev_frame_interval_enum *fie = ++ (struct v4l2_subdev_frame_interval_enum *)data; ++ /* v4l2_subdev_call doesn't call, access directly */ ++ if (sd && sd->ops && sd->ops->pad && sd->ops->pad->enum_frame_interval) ++ ret = sd->ops->pad->enum_frame_interval(sd, NULL, fie); ++ ++ return ret; ++} ++ ++int ipu7_isys_inherit_ctrls(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, void *data) ++{ ++ int ret = 0; ++ ret = v4l2_ctrl_add_handler(&av->ctrl_handler, ++ sd->ctrl_handler, NULL, true); ++ return ret; ++} ++ ++bool has_src_pad_stream_active(struct v4l2_subdev *sd, u32 stream, u32 pad) ++{ ++ struct v4l2_subdev_state *state; ++ struct v4l2_subdev_route *routes; ++ bool has_active_source_stream = false; ++ unsigned int i; ++ ++ state = v4l2_subdev_lock_and_get_active_state(sd); ++ if (!state) ++ return has_active_source_stream; ++ ++ dev_dbg(sd->dev, "%s(): check %s pad/stream: %u/%u has V4L2_SUBDEV_ROUTE_FL_ACTIVE 
flag\n", ++ __func__, ++ sd->name, ++ pad, ++ stream); ++ routes = state->routing.routes; ++ for (i = 0; i < state->routing.num_routes; i++) { ++ dev_dbg(sd->dev, "%s(): %s pad/stream: %u/%u flags=0x%x\n", ++ __func__, ++ sd->name, ++ routes[i].source_pad, ++ routes[i].source_stream, ++ routes[i].flags); ++ if (routes[i].source_pad == pad && ++ routes[i].source_stream == stream && ++ routes[i].flags == V4L2_SUBDEV_ROUTE_FL_ACTIVE) { ++ dev_dbg(sd->dev, "%s(): %s pad/stream: %u/%u has V4L2_SUBDEV_ROUTE_FL_ACTIVE flag\n", ++ __func__, ++ sd->name, ++ routes[i].source_pad, ++ routes[i].source_stream); ++ has_active_source_stream = true; ++ break; ++ } ++ } ++ ++ v4l2_subdev_unlock_state(state); ++ ++ return has_active_source_stream; ++} ++ ++bool get_src_pad_by_src_stream(struct v4l2_subdev *sd, u32 stream, u32 *s_pad) ++{ ++ struct v4l2_subdev_state *state; ++ struct v4l2_subdev_route *routes; ++ bool has_source_pad = false; ++ unsigned int i; ++ ++ if (!s_pad) ++ return has_source_pad; ++ ++ state = v4l2_subdev_lock_and_get_active_state(sd); ++ if (!state) ++ return has_source_pad; ++ ++ routes = state->routing.routes; ++ for (i = 0; i < state->routing.num_routes; i++) { ++ if (routes[i].source_stream == stream) { ++ dev_dbg(sd->dev, "%s(): Found %s pad/stream: %u/%u\n", ++ __func__, ++ sd->name, ++ routes[i].source_pad, ++ stream); ++ *s_pad = routes[i].source_pad; ++ has_source_pad = true; ++ break; ++ } ++ } ++ ++ v4l2_subdev_unlock_state(state); ++ return has_source_pad; ++} ++ ++int media_pipeline_enumerate_by_stream_cb( ++ struct ipu7_isys_video *av, ++ int (*cb_fn)(struct ipu7_isys_video *av, ++ struct v4l2_subdev *sd, ++ void *data), ++ void *data) ++{ ++ int i; ++ struct media_graph graph; ++ struct media_entity *entity = &av->vdev.entity; ++ struct media_device *mdev = entity->graph_obj.mdev; ++ struct device *dev = entity->graph_obj.mdev->dev; ++ struct media_pad *remote_pad = media_pad_remote_pad_unique(&av->pad); ++ struct ipu7_isys_stream *stream = 
av->stream; ++ struct ipu7_isys_subdev *asd; ++ struct v4l2_subdev *sd; ++ unsigned int pad_id; ++ u32 stream_id; ++ int ret = -ENOLINK; ++ ++ if (!remote_pad) { ++ dev_err(dev, ++ "%s:%d no remote pad found\n", ++ __func__, __LINE__); ++ goto error_graph_walk_source; ++ } ++ ++ pad_id = remote_pad->index; ++ sd = media_entity_to_v4l2_subdev(remote_pad->entity); ++ if(!sd) { ++ dev_err(dev, ++ "%s():%d no stream found on remote pad %u\n", ++ __func__, __LINE__, pad_id); ++ goto error_graph_walk_source; ++ } ++ stream_id = ipu7_isys_get_src_stream_by_src_pad(sd, pad_id); ++ dev_dbg(dev, ++ "%s(): Check %s (vc=%u) inheritance from %s pad/stream %u/%u\n", ++ __func__, av->vdev.name, av->vc, ++ sd->name, pad_id, stream_id); ++ ++ mutex_lock(&mdev->graph_mutex); ++ ret = media_graph_walk_init(&graph, mdev); ++ if (ret) ++ goto error_graph_walk_start_enum; ++ ++ media_graph_walk_start(&graph, entity); ++ while ((entity = media_graph_walk_next(&graph))) { ++ dev_dbg(dev, ++ "%s(): walk entity name: %s, type:%x, func:%x\n", ++ __func__, ++ entity->name, ++ entity->obj_type, ++ entity->function); ++ ++ sd = (entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV && \ ++ entity->function == MEDIA_ENT_F_CAM_SENSOR) \ ++ ?
media_entity_to_v4l2_subdev(entity) : NULL; ++ ++ if (!sd) ++ continue; ++ if (!strlen(sd->name)) ++ continue; ++ ++ /* pre-filter sub-devices */ ++ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) ++ continue; ++ ++ /* access only subdevices with src pad on same stream id */ ++ if(!get_src_pad_by_src_stream(sd, stream_id, &pad_id)) ++ continue; ++ ++ /* access only subdevices on source stream active route */ ++ if(!has_src_pad_stream_active(sd, stream_id, pad_id)) ++ continue; ++ ++ dev_dbg(dev, "%s(): %s pad/stream: %u/%u\n", ++ __func__, ++ sd->name, ++ pad_id, ++ stream_id); ++ ++ /* call function once */ ++ ret = cb_fn(av, sd, data); ++ break; ++ } ++ av->enum_link_state = IPU7_ISYS_LINK_STATE_DONE; ++ ++error_graph_walk_start_enum: ++ media_graph_walk_cleanup(&graph); ++ mutex_unlock(&mdev->graph_mutex); ++error_graph_walk_source: ++ return ret; ++} ++#endif ++ + static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { + .vidioc_querycap = ipu7_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = ipu7_isys_vidioc_enum_fmt, +@@ -1025,6 +1591,11 @@ static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ .vidioc_g_parm = ipu7_isys_get_parm, ++ .vidioc_s_parm = ipu7_isys_set_parm, ++ .vidioc_enum_frameintervals = ipu7_isys_enum_frameintervals, ++#endif + }; + + static const struct media_entity_operations entity_ops = { +@@ -1297,8 +1868,28 @@ int ipu7_isys_video_init(struct ipu7_isys_video *av) + if (ret) + goto out_media_entity_cleanup; + ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ /* create v4l2 ctrl for capture video node */ ++ ret = v4l2_ctrl_handler_init(&av->ctrl_handler, 0); ++ if (ret) { ++ dev_err(&av->isys->adev->auxdev.dev, ++ "failed to init v4l2 ctrl handler for %s\n", av->vdev.name); ++ goto out_ctrl_handler_cleanup; ++ } ++ v4l2_ctrl_new_custom(&av->ctrl_handler, 
&ipu7_isys_video_enum_link, av); ++ ++ dev_dbg(&av->isys->adev->auxdev.dev, ++ "ext-ctrls: %s: set video_enum_link\n", av->vdev.name); ++ ++ av->vdev.ctrl_handler = &av->ctrl_handler; + return ret; + ++out_ctrl_handler_cleanup: ++ v4l2_ctrl_handler_free(&av->ctrl_handler); ++#else ++ return ret; ++#endif ++ + out_media_entity_cleanup: + vb2_video_unregister_device(&av->vdev); + media_entity_cleanup(&av->vdev.entity); +@@ -1314,6 +1905,9 @@ out_mutex_destroy: + + void ipu7_isys_video_cleanup(struct ipu7_isys_video *av) + { ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ v4l2_ctrl_handler_free(&av->ctrl_handler); ++#endif + vb2_video_unregister_device(&av->vdev); + media_entity_cleanup(&av->vdev.entity); + mutex_destroy(&av->mutex); +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.h +index e6d1da2..c36be8d 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.h ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.h +@@ -97,6 +97,10 @@ struct ipu7_isys_video { + unsigned int skipframe; + unsigned int start_streaming; + #endif ++#ifdef CONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ struct v4l2_ctrl_handler ctrl_handler; ++ unsigned int enum_link_state; /* state for link enumeration by vc */ ++#endif + }; + + #define ipu7_isys_queue_to_video(__aq) \ +-- +2.43.0 + diff --git a/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch b/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch new file mode 100644 index 0000000..b0eb6b2 --- /dev/null +++ b/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch @@ -0,0 +1,70 @@ +From 15e1f9949d2934c4b417b25b17fb1c51ce1f3341 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Sat, 17 Jan 2026 02:10:39 -0700 +Subject: [PATCH 104/104] ipu7-isys: make modules suffix versioning same as + dkms + +Signed-off-by: Florent Pirou +--- + ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile 
| 9 +++++++++ + ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c | 1 + + ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c | 1 + + .../drivers/media/pci/intel/ipu7/psys/ipu-psys.c | 1 + + 4 files changed, 12 insertions(+) + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile +index 5041701..14d46ff 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile +@@ -1,5 +1,12 @@ + # SPDX-License-Identifier: GPL-2.0 + # Copyright (c) 2017 - 2025 Intel Corporation. ++KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') ++ ++CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../../$(KERNEL_VERSION)/include-overrides -DCONFIG_DEBUG_FS -DCONFIG_INTEL_IPU_ACPI -DCONFIG_VIDEO_INTEL_IPU7_ISYS_RESET -DCONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS ++ ++KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers ++ ++ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + + is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) + ifeq ($(is_kernel_lt_6_10), 1) +@@ -32,6 +39,8 @@ intel-ipu7-isys-objs += ipu7-isys-tpg.o + endif + obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-isys.o + ++subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ + obj-$(CONFIG_VIDEO_INTEL_IPU7) += psys/ + + ccflags-y += -I$(src)/ +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c +index 767104d..ba4f8db 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c +@@ -1627,3 +1627,4 @@ MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); + MODULE_IMPORT_NS(INTEL_IPU7); + MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); + #endif ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c 
b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c +index 96c3d2c..095ea9a 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c +@@ -2891,3 +2891,4 @@ MODULE_AUTHOR("Qingwu Zhang "); + MODULE_AUTHOR("Intel"); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel ipu7 pci driver"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c +index b20e238..cc0de0f 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c +@@ -1548,3 +1548,4 @@ MODULE_IMPORT_NS("DMA_BUF"); + MODULE_IMPORT_NS(INTEL_IPU7); + MODULE_IMPORT_NS(DMA_BUF); + #endif ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + From 70fa1b43b13ed1c750c67a5c086e4ab411d5bb41 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sat, 17 Jan 2026 09:45:08 -0700 Subject: [PATCH 08/14] media: i2c: add max9x serdes drivers * add max9x non-upstream driver code (orig. 
d3embbedded) --- drivers/media/i2c/Makefile | 7 +- drivers/media/i2c/max9x/Makefile | 4 + drivers/media/i2c/max9x/max9295.c | 770 +++++++ drivers/media/i2c/max9x/max9295.h | 143 ++ drivers/media/i2c/max9x/max9296.c | 882 ++++++++ drivers/media/i2c/max9x/max9296.h | 144 ++ drivers/media/i2c/max9x/max96717.c | 600 ++++++ drivers/media/i2c/max9x/max96717.h | 91 + drivers/media/i2c/max9x/max96724.c | 1168 +++++++++++ drivers/media/i2c/max9x/max96724.h | 242 +++ drivers/media/i2c/max9x/max9x_pdata.h | 103 + drivers/media/i2c/max9x/serdes.c | 2700 +++++++++++++++++++++++++ drivers/media/i2c/max9x/serdes.h | 459 +++++ 13 files changed, 7312 insertions(+), 1 deletion(-) create mode 100644 drivers/media/i2c/max9x/Makefile create mode 100644 drivers/media/i2c/max9x/max9295.c create mode 100644 drivers/media/i2c/max9x/max9295.h create mode 100644 drivers/media/i2c/max9x/max9296.c create mode 100644 drivers/media/i2c/max9x/max9296.h create mode 100644 drivers/media/i2c/max9x/max96717.c create mode 100644 drivers/media/i2c/max9x/max96717.h create mode 100644 drivers/media/i2c/max9x/max96724.c create mode 100644 drivers/media/i2c/max9x/max96724.h create mode 100644 drivers/media/i2c/max9x/max9x_pdata.h create mode 100644 drivers/media/i2c/max9x/serdes.c create mode 100644 drivers/media/i2c/max9x/serdes.h diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index adbaa74..ba17411 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -1,7 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2010 - 2025 Intel Corporation. 
+KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') + +CC := ${CC} -I ${M}/$(KERNEL_VERSION)/include-overrides obj-$(CONFIG_VIDEO_AR0234) += ar0234.o obj-$(CONFIG_VIDEO_AR0820) += ar0820.o -obj-$(CONFIG_VIDEO_ISX031) += isx031.o \ No newline at end of file +obj-$(CONFIG_VIDEO_ISX031) += isx031.o + +ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" diff --git a/drivers/media/i2c/max9x/Makefile b/drivers/media/i2c/max9x/Makefile new file mode 100644 index 0000000..fa2c006 --- /dev/null +++ b/drivers/media/i2c/max9x/Makefile @@ -0,0 +1,4 @@ +ccflags-y += -I$(src)/../../pci/intel/ipu7/ + +obj-m += max9x.o +max9x-y += serdes.o max9296.o max96724.o max9295.o max96717.o \ No newline at end of file diff --git a/drivers/media/i2c/max9x/max9295.c b/drivers/media/i2c/max9x/max9295.c new file mode 100644 index 0000000..a7ccbdf --- /dev/null +++ b/drivers/media/i2c/max9x/max9295.c @@ -0,0 +1,770 @@ +/* + * max9295.c - Maxim MAX9295 CSI-2 to GMSL2/GMSL1 Serializer + * + * Copyright (c) 2020, D3 Engineering. All rights reserved. + * Copyright (c) 2023-2024, Define Design Deploy Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation.
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "max9295.h" + +static const char *const max9295_gpio_chip_names[] = { + "MFP1", + "MFP2", + "MFP3", + "MFP4", + "MFP5", + "MFP6", + "MFP7", + "MFP8", + "MFP9", + "MFP10", + "MFP11", +}; + +/* Declarations */ +static int max9295_set_pipe_csi_enabled(struct max9x_common *common, + unsigned int pipe_id, unsigned int csi_id, bool enable); +static int max9295_set_pipe_data_types_enabled(struct max9x_common *common, unsigned int pipe_id, bool enable); +static int max9295_set_video_pipe_double_loading(struct max9x_common *common, unsigned int pipe_id, unsigned int bpp); +static int max9295_set_video_pipe_pixel_padding(struct max9x_common *common, + unsigned int pipe_id, unsigned int min_bpp, unsigned int max_bpp); +static int max9295_max_elements(struct max9x_common *common, enum max9x_element_type element); +static int max9295_enable_serial_link(struct max9x_common *common, unsigned int link); +static int max9295_disable_serial_link(struct max9x_common *common, unsigned int link); +static int max9295_set_local_control_channel_enabled(struct max9x_common *common, bool enabled); +static int max9295_select_serial_link(struct max9x_common *common, unsigned int link); +static int max9295_deselect_serial_link(struct max9x_common *common, unsigned int link); +static int max9295_verify_devid(struct max9x_common *common); +static int max9295_enable(struct max9x_common *common); +static int max9295_disable(struct max9x_common *common); +static int max9295_remap_addr(struct max9x_common *common); +static int max9295_add_translate_addr(struct max9x_common *common, + unsigned int i2c_id, unsigned int virt_addr, unsigned int phys_addr); +static int max9295_remove_translate_addr(struct max9x_common *common, + unsigned int i2c_id, unsigned int virt_addr, unsigned int phys_addr); +static int max9295_reset(struct max9x_common *common); + +/* max9295 gpio */ +static struct max9x_common 
*from_gpio_chip(struct gpio_chip *chip); +static int max9295_gpio_get_direction(struct gpio_chip *chip, unsigned int offset); +static int max9295_gpio_direction_input(struct gpio_chip *chip, unsigned int offset); +static int max9295_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value); +static int max9295_gpio_get(struct gpio_chip *chip, unsigned int offset); +static void max9295_gpio_set(struct gpio_chip *chip, unsigned int offset, int value); +static int max9295_setup_gpio(struct max9x_common *common); +/* max9295 gpio */ + +static struct max9x_common_ops max9295_common_ops = { + .max_elements = max9295_max_elements, + .soft_reset = max9295_reset, + .enable = max9295_enable, + .disable = max9295_disable, + .verify_devid = max9295_verify_devid, + .remap_addr = max9295_remap_addr, + .setup_gpio = max9295_setup_gpio, +}; + +static struct max9x_serial_link_ops max9295_serial_link_ops = { + .enable = max9295_enable_serial_link, + .disable = max9295_disable_serial_link, + .select = max9295_select_serial_link, + .deselect = max9295_deselect_serial_link, +}; + +static struct max9x_translation_ops max9295_translation_ops = { + .add = max9295_add_translate_addr, + .remove = max9295_remove_translate_addr, +}; + +static struct max9x_common *from_gpio_chip(struct gpio_chip *chip) +{ + return container_of(chip, struct max9x_common, gpio_chip); +} + +static int max9295_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + unsigned int val; + int ret; + + TRY(ret, regmap_read(map, MAX9295_GPIO_A(offset), &val)); + + return (FIELD_GET(MAX9295_GPIO_A_OUT_DIS_FIELD, val) == 0U ? 
+ GPIOD_OUT_LOW : GPIOD_IN); +} + +static int max9295_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + + return regmap_update_bits(map, MAX9295_GPIO_A(offset), + MAX9295_GPIO_A_OUT_DIS_FIELD, + MAX9X_FIELD_PREP(MAX9295_GPIO_A_OUT_DIS_FIELD, 1U)); +} + +static int max9295_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + unsigned int mask = 0; + unsigned int val; + + mask = MAX9295_GPIO_A_OUT_DIS_FIELD | MAX9295_GPIO_A_OUT_FIELD | + MAX9295_GPIO_A_RX_EN_FIELD; + + // Enable the GPIO as an output + val = MAX9X_FIELD_PREP(MAX9295_GPIO_A_OUT_DIS_FIELD, 0U); + // Write out the initial value to the GPIO + val |= MAX9X_FIELD_PREP(MAX9295_GPIO_A_OUT_FIELD, (value == 0 ? 0U : 1U)); + // Disable remote control over SerDes link + val |= MAX9X_FIELD_PREP(MAX9295_GPIO_A_RX_EN_FIELD, 0U); + + return regmap_update_bits(map, MAX9295_GPIO_A(offset), mask, val); +} + +static int max9295_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + unsigned int val; + int ret; + + TRY(ret, regmap_read(map, MAX9295_GPIO_A(offset), &val)); + + return FIELD_GET(MAX9295_GPIO_A_IN_FIELD, val); +} + +static void max9295_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + + regmap_update_bits(map, MAX9295_GPIO_A(offset), + MAX9295_GPIO_A_OUT_FIELD, + MAX9X_FIELD_PREP(MAX9295_GPIO_A_OUT_FIELD, (value == 0 ? 
0U : 1U))); +} + +static int max9295_setup_gpio(struct max9x_common *common) +{ + struct device *dev = common->dev; + int ret; + struct max9x_gpio_pdata *gpio_pdata; + + if (dev->platform_data) { + struct max9x_pdata *pdata = dev->platform_data; + gpio_pdata = &pdata->gpio; + } + + // Functions + if (gpio_pdata && gpio_pdata->label) + common->gpio_chip.label = gpio_pdata->label; + else + common->gpio_chip.label = dev_name(dev); + + dev_dbg(dev, "gpio_chip label is %s, dev_name is %s", + common->gpio_chip.label, dev_name(dev)); + + common->gpio_chip.parent = dev; + common->gpio_chip.get_direction = max9295_gpio_get_direction; + common->gpio_chip.direction_input = max9295_gpio_direction_input; + common->gpio_chip.direction_output = max9295_gpio_direction_output; + common->gpio_chip.get = max9295_gpio_get; + common->gpio_chip.set = max9295_gpio_set; + common->gpio_chip.ngpio = MAX9295_NUM_GPIO; + common->gpio_chip.can_sleep = 1; + common->gpio_chip.base = -1; + if (gpio_pdata && gpio_pdata->names) + common->gpio_chip.names = gpio_pdata->names; + else + common->gpio_chip.names = max9295_gpio_chip_names; + + ret = devm_gpiochip_add_data(dev, &common->gpio_chip, common); + if (ret < 0) { + dev_err(dev, "gpio_init: Failed to add max9295_gpio\n"); + return ret; + } + + return 0; +} + +static int max9295_set_pipe_csi_enabled(struct max9x_common *common, + unsigned int pipe_id, unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + dev_dbg(dev, "Video-pipe %d, csi %d: %s, %d lanes", pipe_id, csi_id, + (enable ? 
"enable" : "disable"), common->csi_link[csi_id].config.num_lanes); + + // Select number of lanes for CSI port csi_id + TRY(ret, regmap_update_bits(map, MAX9295_MIPI_RX_1, + MAX9295_MIPI_RX_1_SEL_CSI_LANES_FIELD(csi_id), + MAX9X_FIELD_PREP(MAX9295_MIPI_RX_1_SEL_CSI_LANES_FIELD(csi_id), + common->csi_link[csi_id].config.num_lanes - 1)) + ); + + // Select CSI port csi_id for video pipe pipe_id + // Enable CSI port csi_id (9295A only has port B, 9295B has both ports) + TRY(ret, regmap_update_bits(map, MAX9295_FRONTTOP_0, + MAX9295_FRONTTOP_0_SEL_CSI_FIELD(pipe_id) | MAX9295_FRONTTOP_0_START_CSI_FIELD(csi_id), + MAX9X_FIELD_PREP(MAX9295_FRONTTOP_0_SEL_CSI_FIELD(pipe_id), csi_id) | + MAX9X_FIELD_PREP(MAX9295_FRONTTOP_0_START_CSI_FIELD(csi_id), enable ? 1U : 0U)) + ); + + // Start video pipe pipe_id from CSI port csi_id + TRY(ret, regmap_update_bits(map, MAX9295_FRONTTOP_9, + MAX9295_FRONTTOP_9_START_VIDEO_FIELD(pipe_id, csi_id), + MAX9X_FIELD_PREP(MAX9295_FRONTTOP_9_START_VIDEO_FIELD(pipe_id, csi_id), enable ? 1U : 0U)) + ); + + // Enable video transmit for pipe + TRY(ret, regmap_update_bits(map, MAX9295_REG2, + MAX9295_REG2_VID_TX_EN_FIELD(pipe_id), + MAX9X_FIELD_PREP(MAX9295_REG2_VID_TX_EN_FIELD(pipe_id), enable ? 1U : 0U)) + ); + + return 0; +} + +static int max9295_set_pipe_data_types_enabled(struct max9x_common *common, + unsigned int pipe_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int data_type_slot, dt; + int ret; + + for (data_type_slot = 0; data_type_slot < common->video_pipe[pipe_id].config.num_data_types; data_type_slot++) { + dt = common->video_pipe[pipe_id].config.data_type[data_type_slot]; + dev_dbg(dev, "Video-pipe %d, data type %d: (%#.2x: %s)", + pipe_id, data_type_slot, dt, (enable ? 
"enable" : "disable")); + + TRY(ret, regmap_update_bits(map, MAX9295_MEM_DT_SEL(pipe_id, data_type_slot), + MAX9295_MEM_DT_SEL_DT_FIELD | MAX9295_MEM_DT_SEL_EN_FIELD, + MAX9X_FIELD_PREP(MAX9295_MEM_DT_SEL_DT_FIELD, dt) | + MAX9X_FIELD_PREP(MAX9295_MEM_DT_SEL_EN_FIELD, enable ? 1U : 0U)) + ); + } + + return 0; +} + +/** + * max9295_set_video_pipe_double_loading() - Configure Double Loading Mode on a video pipe + * @common: max9x_common + * @pipe_id: Target pipe's ID + * @bpp: Original BPP to double. This can be 0 (disables), 8, 10, or 12. + * + * Double loading mode squeezes two input pixels together such that they are + * treated as a single pixel by the video pipe. Using this method increases + * bandwidth efficiency. + * + * See: GMSL2 Customers User Guide Section 30.5.1.1.1.2 "Double Loading Mode" + * See: GMSL2 Customers User Guide Section 43.3.4.5.1 "Double Mode (Serializer)" + */ +static int max9295_set_video_pipe_double_loading(struct max9x_common *common, + unsigned int pipe_id, unsigned int bpp) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + struct max9x_serdes_pipe_config *config = &common->video_pipe[pipe_id].config; + unsigned int reg; + unsigned int fields; + int ret; + + if (pipe_id >= MAX9295_NUM_VIDEO_PIPES) + return -EINVAL; + + // Reset double loading registers to defaults + TRY(ret, regmap_update_bits(map, MAX9295_FRONTTOP_10, + MAX9295_FRONTTOP_10_DBL8_FIELD(pipe_id), + 0x0)); + + TRY(ret, regmap_update_bits(map, MAX9295_FRONTTOP_11, + MAX9295_FRONTTOP_11_DBL10_FIELD(pipe_id) | + MAX9295_FRONTTOP_11_DBL12_FIELD(pipe_id), + 0x0)); + + // Enable double pixel mode for pipe + switch (bpp) { + case 0: + //bpp not used on this pipe, but still valid input + break; + case 8: + reg = MAX9295_FRONTTOP_10; + fields = MAX9295_FRONTTOP_10_DBL8_FIELD(pipe_id); + break; + case 10: + reg = MAX9295_FRONTTOP_11; + fields = MAX9295_FRONTTOP_11_DBL10_FIELD(pipe_id); + break; + case 12: + reg = MAX9295_FRONTTOP_11; + fields = 
MAX9295_FRONTTOP_11_DBL12_FIELD(pipe_id); + break; + default: + dev_err(dev, "Unsupported BPP for pixel doubling: %u", bpp); + return -EINVAL; + } + + // Enable pixel doubling for specified pipe + if (bpp != 0) { + dev_info(dev, "Configuring double loading mode for pipe %u (%ubpp -> %ubpp)", + pipe_id, bpp, (bpp * 2)); + TRY(ret, regmap_update_bits(map, reg, fields, 0xFF)); + } + + // We share MAX9295_SOFT_BPP_FIELD/MAX9295_SOFT_BPP_EN_FIELD with + // max9295_set_video_pipe_pixel_padding(), only write to it if zero + // padding is disabled and double loading mode is enabled. Zero padding + // takes precedence and handles the 'both are disabled' case. + if (config->soft_min_pixel_bpp == 0 && config->soft_max_pixel_bpp == 0) { + // Override output bpp + TRY(ret, regmap_update_bits(map, MAX9295_SOFT_BPP(pipe_id), + MAX9295_SOFT_BPP_EN_FIELD | MAX9295_SOFT_BPP_FIELD, + MAX9X_FIELD_PREP(MAX9295_SOFT_BPP_EN_FIELD, bpp == 0 ? 0U : 1U) | + MAX9X_FIELD_PREP(MAX9295_SOFT_BPP_FIELD, bpp * 2)) + ); + } + + return 0; +} + +/** + * max9295_set_video_pipe_pixel_padding() - Configure zero padding on a video pipe + * @common: max9x_common + * @pipe_id: Target pipe's ID + * @min_bpp: Smallest BPP value of data in the pipe. Must be >= 8. + * @max_bpp: Largest BPP value of data in the pipe. Must be <= 16. + * + * Normally, video pipes can only transmit data with the same BPP value. Zero + * padding is a method to allow data with multiple BPP values to be transmitted + * over the same video pipe by padding the smaller BPP data to be the same BPP + * as the largest BPP data. The deserializer will automatically recover the + * data's original BPP based on datatype information transmitted alongside the + * padded data. 
+ * + * See: GMSL2 Customers User Guide Section 43.3.4.5.2 "Zero Padding" + */ +static int max9295_set_video_pipe_pixel_padding(struct max9x_common *common, + unsigned int pipe_id, unsigned int min_bpp, unsigned int max_bpp) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + struct max9x_serdes_pipe_config *config = &common->video_pipe[pipe_id].config; + int ret; + bool enable = (min_bpp != 0) && (max_bpp != 0); + + if (enable) + dev_dbg(dev, "Configuring zero padding for pipe %u (%u <= bpp <= %u)", pipe_id, min_bpp, max_bpp); + else + dev_dbg(dev, "%s: pipe %u, min_bpp: %u, max_bpp: %u not enabled", __func__, pipe_id, min_bpp, max_bpp); + + if (pipe_id >= MAX9295_NUM_VIDEO_PIPES) + return -EINVAL; + + /* Auto bpp should be disabled to override bpp */ + TRY(ret, regmap_update_bits(map, MAX9295_VIDEO_TX0(pipe_id), + MAX9295_VIDEO_TX0_AUTO_BPP_EN_FIELD, + MAX9X_FIELD_PREP(MAX9295_VIDEO_TX0_AUTO_BPP_EN_FIELD, enable ? 0U : 1U)) + ); + + if (enable) + TRY(ret, regmap_update_bits(map, MAX9295_VIDEO_TX1(pipe_id), + MAX9295_VIDEO_TX1_BPP_FIELD, + MAX9X_FIELD_PREP(MAX9295_VIDEO_TX1_BPP_FIELD, max_bpp)) + ); + + // We share MAX9295_SOFT_BPP_FIELD/MAX9295_SOFT_BPP_EN_FIELD with + // max9295_set_video_pipe_double_loading(), only write to it if zero + // padding is enabled (this function takes precedence) or if both zero + // padding is disabled and double loading is disabled. + if (enable || (!enable && config->dbl_pixel_bpp == 0)) { + TRY(ret, regmap_update_bits(map, MAX9295_SOFT_BPP(pipe_id), + MAX9295_SOFT_BPP_EN_FIELD | MAX9295_SOFT_BPP_FIELD, + MAX9X_FIELD_PREP(MAX9295_SOFT_BPP_EN_FIELD, enable ?
1U : 0U) | + MAX9X_FIELD_PREP(MAX9295_SOFT_BPP_FIELD, min_bpp)) + ); + } + + return 0; +} + +static int max9295_max_elements(struct max9x_common *common, enum max9x_element_type element) +{ + switch (element) { + case MAX9X_SERIAL_LINK: + return MAX9295_NUM_SERIAL_LINKS; + case MAX9X_VIDEO_PIPE: + return MAX9295_NUM_VIDEO_PIPES; + case MAX9X_MIPI_MAP: + return MAX9295_NUM_MIPI_MAPS; + case MAX9X_CSI_LINK: + return MAX9295_NUM_CSI_LINKS; + case MAX9X_DATA_TYPES: + return MAX9295_NUM_DATA_TYPES; + default: + break; + } + + return 0; +} + +static int max9295_enable_serial_link(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + unsigned int pipe_id; + int ret; + + dev_dbg(dev, "Link %d: Enable", link_id); + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + + TRY(ret, max9295_set_pipe_data_types_enabled(common, pipe_id, true)); + + TRY(ret, max9295_set_video_pipe_double_loading(common, pipe_id, config->dbl_pixel_bpp)); + + TRY(ret, max9295_set_video_pipe_pixel_padding(common, pipe_id, + config->soft_min_pixel_bpp, config->soft_max_pixel_bpp)); + + TRY(ret, max9295_set_pipe_csi_enabled(common, pipe_id, config->src_csi, true)); + } + + return 0; +} + +static int max9295_disable_serial_link(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + unsigned int pipe_id; + int ret; + + dev_dbg(dev, "Link %d: Disable", link_id); + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + + TRY(ret, max9295_set_pipe_data_types_enabled(common, pipe_id, false)); + + TRY(ret, max9295_set_pipe_csi_enabled(common, pipe_id, config->src_csi, false)); + } + + return 
0; +} + +/* + * Enable RCLK (27MHz) output on MFP4 pin. This pin is routed on some imager boards + * to the imager instead of an on-board oscillator. + */ +static int max9295_enable_rclk(struct max9x_common *common) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + if (!common->external_refclk_enable) { + dev_dbg(dev, "Enable RCLK: external-refclk-enable not present, not enabling external refclk"); + return 0; + } + + dev_info(dev, "Enable RCLK: 27MHz"); + + // Configure pre-defined 27MHz frequency (0b01 = 27MHz) + TRY(ret, regmap_update_bits(map, MAX9295_REF_VTG0, + MAX9295_REFGEN_PREDEF_FREQ_FIELD, + MAX9X_FIELD_PREP(MAX9295_REFGEN_PREDEF_FREQ_FIELD, 1U)) + ); + + // Enable reference generation + TRY(ret, regmap_update_bits(map, MAX9295_REF_VTG0, + MAX9295_REFGEN_EN_FIELD, + MAX9X_FIELD_PREP(MAX9295_REFGEN_EN_FIELD, 1U)) + ); + + // Configure reference generation output on MFP4 + TRY(ret, regmap_update_bits(map, MAX9295_REF_VTG1, + MAX9295_PCLK_GPIO_FIELD, + MAX9X_FIELD_PREP(MAX9295_PCLK_GPIO_FIELD, 4U)) + ); + + // Enable output + TRY(ret, regmap_update_bits(map, MAX9295_REF_VTG1, + MAX9295_PCLK_EN_FIELD, + MAX9X_FIELD_PREP(MAX9295_PCLK_EN_FIELD, 1U)) + ); + + TRY(ret, regmap_update_bits(map, MAX9295_REG3, + MAX9295_RCLK_SEL_FIELD, + MAX9X_FIELD_PREP(MAX9295_RCLK_SEL_FIELD, 3U)) + ); + + TRY(ret, regmap_update_bits(map, MAX9295_REG6, + MAX9295_RCLK_EN_FIELD, + MAX9X_FIELD_PREP(MAX9295_RCLK_EN_FIELD, 1U)) + ); + + return 0; +} + +static int max9295_set_local_control_channel_enabled(struct max9x_common *common, bool enabled) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "set rem cc %s", (enabled ? "enable" : "disable")); + + return regmap_update_bits(map, MAX9295_PHY_REM_CTRL, MAX9295_PHY_LOCAL_CTRL_DIS_FIELD, + MAX9X_FIELD_PREP(MAX9295_PHY_LOCAL_CTRL_DIS_FIELD, (enabled ? 
0U : 1U))); +} + +static int max9295_select_serial_link(struct max9x_common *common, unsigned int link) +{ + return max9295_set_local_control_channel_enabled(common, true); +} + +static int max9295_deselect_serial_link(struct max9x_common *common, unsigned int link) +{ + return max9295_set_local_control_channel_enabled(common, false); +} + +static int max9295_verify_devid(struct max9x_common *common) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int dev_id, dev_rev; + int ret; + + // Fetch and output chip name + revision + TRY(ret, regmap_read(map, MAX9295_DEV_ID, &dev_id)); + TRY(ret, regmap_read(map, MAX9295_DEV_REV, &dev_rev)); + + switch (dev_id) { + case MAX9295A: + dev_info(dev, "Detected MAX9295A revision %ld", + FIELD_GET(MAX9295_DEV_REV_FIELD, dev_rev)); + break; + case MAX9295B: + dev_info(dev, "Detected MAX9295B revision %ld", + FIELD_GET(MAX9295_DEV_REV_FIELD, dev_rev)); + break; + case MAX9295E: + dev_info(dev, "Detected MAX9295E revision %ld", + FIELD_GET(MAX9295_DEV_REV_FIELD, dev_rev)); + break; + default: + dev_err(dev, "Unknown device ID %d revision %ld", dev_id, + FIELD_GET(MAX9295_DEV_REV_FIELD, dev_rev)); + return -EINVAL; + } + + return 0; +} + +static int max9295_enable(struct max9x_common *common) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + TRY(ret, max9295_verify_devid(common)); + + // Turn on RCLK/PCLK + ret = max9295_enable_rclk(common); + if (ret) + dev_warn(dev, "Failed to enable RCLK output"); + + // Initialize local CC to off + TRY(ret, max9295_set_local_control_channel_enabled(common, false)); + + /* Clear the pipe maps */ + TRY(ret, regmap_write(map, MAX9295_FRONTTOP_9, 0)); + + /* Clear the csi port selections */ + TRY(ret, regmap_write(map, MAX9295_FRONTTOP_0, MAX9295_FRONTTOP_0_LINE_INFO)); + + return 0; +} + +static int max9295_disable(struct max9x_common *common) +{ + struct device *dev = common->dev; + + dev_dbg(dev, "Disable"); + + return 0; 
+} + +//TODO: more efforts to remap +static int max9295_remap_addr(struct max9x_common *common) +{ + int ret; + struct device *dev = common->dev; + const unsigned int RETRY_MS_MIN = 32; + const unsigned int RETRY_MS_MAX = 512; + unsigned int ms; + + if (!common->phys_client) + return 0; + + if (!common->phys_map) + return 0; + + dev_info(dev, "Remap address from 0x%02x to 0x%02x", + common->phys_client->addr, common->client->addr); + + TRY(ret, regmap_update_bits(common->phys_map, MAX9295_REG0, + MAX9295_REG0_DEV_ADDR_FIELD, + FIELD_PREP(MAX9295_REG0_DEV_ADDR_FIELD, common->client->addr)) + ); + + /* + * Use lower bits of I2C address as unique TX_SRC_ID to prevent + * conflicts for info frames, SPI, etc. (Leave video pipes alone) + */ + TRY_DEV_HERE(ret, regmap_update_bits(common->map, MAX9295_CFGI_INFOFR_TR3, MAX9295_TR3_TX_SRC_ID, + MAX9X_FIELD_PREP(MAX9295_TR3_TX_SRC_ID, common->client->addr)), dev); + TRY_DEV_HERE(ret, regmap_update_bits(common->map, MAX9295_CFGL_SPI_TR3, MAX9295_TR3_TX_SRC_ID, + MAX9X_FIELD_PREP(MAX9295_TR3_TX_SRC_ID, common->client->addr)), dev); + /* + * This exponential retry works around for MAX96724X. 
+ */ + for (ms = RETRY_MS_MIN; ms <= RETRY_MS_MAX; ms <<= 1) { + ret = regmap_update_bits(common->map, MAX9295_CFGC_CC_TR3, MAX9295_TR3_TX_SRC_ID, + MAX9X_FIELD_PREP(MAX9295_TR3_TX_SRC_ID, common->client->addr)); + if (ret) { + dev_warn(dev, + "configure MAX9295_CFGC_CC_TR3 failed, trying again (waiting %d ms)", ms); + msleep(ms); + } else + break; + } + + TRY_DEV_HERE(ret, regmap_update_bits(common->map, MAX9295_CFGL_GPIO_TR3, MAX9295_TR3_TX_SRC_ID, + MAX9X_FIELD_PREP(MAX9295_TR3_TX_SRC_ID, common->client->addr)), dev); + TRY_DEV_HERE(ret, regmap_update_bits(common->map, MAX9295_CFGL_IIC_X, MAX9295_TR3_TX_SRC_ID, + MAX9X_FIELD_PREP(MAX9295_TR3_TX_SRC_ID, common->client->addr)), dev); + TRY_DEV_HERE(ret, regmap_update_bits(common->map, MAX9295_CFGL_IIC_Y, MAX9295_TR3_TX_SRC_ID, + MAX9X_FIELD_PREP(MAX9295_TR3_TX_SRC_ID, common->client->addr)), dev); + + return 0; +} + +static int max9295_add_translate_addr(struct max9x_common *common, + unsigned int i2c_id, unsigned int virt_addr, unsigned int phys_addr) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int alias; + unsigned int src; + int ret; + + for (alias = 0; alias < MAX9295_NUM_ALIASES; alias++) { + TRY(ret, regmap_read(map, MAX9295_I2C_SRC(i2c_id, alias), &src)); + + src = FIELD_GET(MAX9295_I2C_SRC_FIELD, src); + if (src == virt_addr || src == 0) { + dev_dbg(dev, "SRC %02x = %02x, DST %02x = %02x", + MAX9295_I2C_SRC(i2c_id, alias), virt_addr, + MAX9295_I2C_DST(i2c_id, alias), phys_addr); + TRY(ret, regmap_write(map, MAX9295_I2C_DST(i2c_id, alias), + MAX9X_FIELD_PREP(MAX9295_I2C_DST_FIELD, phys_addr)) + ); + + TRY(ret, regmap_write(map, MAX9295_I2C_SRC(i2c_id, alias), + MAX9X_FIELD_PREP(MAX9295_I2C_SRC_FIELD, virt_addr)) + ); + } + } + + return 0; +} + +static int max9295_remove_translate_addr(struct max9x_common *common, + unsigned int i2c_id, unsigned int virt_addr, unsigned int phys_addr) +{ + struct regmap *map = common->map; + unsigned int alias; + unsigned int src; + 
int ret; + + for (alias = 0; alias < MAX9295_NUM_ALIASES; alias++) { + TRY(ret, regmap_read(map, MAX9295_I2C_SRC(i2c_id, alias), &src)); + src = FIELD_GET(MAX9295_I2C_SRC_FIELD, src); + if (src == virt_addr) { + return regmap_write(map, MAX9295_I2C_DST(i2c_id, alias), + MAX9X_FIELD_PREP(MAX9295_I2C_DST_FIELD, 0)); + } + } + + return 0; +} + +static int max9295_reset(struct max9x_common *common) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + dev_dbg(dev, "Reset"); + + /* Reset entire chip by CTRL0_RST_ALL: 0x10[7]*/ + TRY(ret, regmap_write(map, MAX9295_CTRL0, MAX9295_CTRL0_RST_ALL)); + usleep_range(45000, 45050); + + return 0; +} + +int max9295_get_ops(struct max9x_common_ops **common_ops, struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops) +{ + (*common_ops) = &max9295_common_ops; + (*serial_ops) = &max9295_serial_link_ops; + (*csi_ops) = NULL; + (*lf_ops) = NULL; + (*trans_ops) = &max9295_translation_ops; + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Josh Watts "); +MODULE_AUTHOR("Yan, Dongcheng "); +MODULE_DESCRIPTION("Maxim MAX9295 CSI-2/parallel to GMSL2 Serializer driver"); diff --git a/drivers/media/i2c/max9x/max9295.h b/drivers/media/i2c/max9x/max9295.h new file mode 100644 index 0000000..de45056 --- /dev/null +++ b/drivers/media/i2c/max9x/max9295.h @@ -0,0 +1,143 @@ +/* + * max9295.h - Maxim MAX9295 registers and constants. + * + * Copyright (c) 2020, D3 Engineering. All rights reserved. + * Copyright (c) 2023-2024, Define Design Deploy Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. + +#ifndef _MAX9295_H_ +#define _MAX9295_H_ + +#include +#include "serdes.h" + +enum max9295_dev_id { + MAX9295A = 0x91, + MAX9295B = 0x93, + MAX9295E = 0x9B +}; + +enum max9295_gpio_out_type { + MAX9295_GPIO_OUT_TYPE_OPEN_DRAIN = 0, + MAX9295_GPIO_OUT_TYPE_PUSH_PULL = 1, +}; + +enum max9295_gpio_pull_updn_sel { + MAX9295_GPIO_PULL_UPDN_SEL_NONE = 0, + MAX9295_GPIO_PULL_UPDN_SEL_UP = 1, + MAX9295_GPIO_PULL_UPDN_SEL_DOWN = 2, +}; + +#define MAX9295_NUM_ALIASES 2 /* 2 per i2c bus */ +#define MAX9295_NUM_SERIAL_LINKS 1 +#define MAX9295_NUM_VIDEO_PIPES 4 +#define MAX9295_NUM_MIPI_MAPS 16 +#define MAX9295_NUM_CSI_LINKS 2 // Only 1 port, but it is technically Port B +#define MAX9295_NUM_GPIO 11 +#define MAX9295_NUM_DATA_TYPES 4 + +#define MAX9295_REG0 (0x0) +#define MAX9295_REG0_DEV_ADDR_FIELD GENMASK(7, 1) + +#define MAX9295_PHY_REM_CTRL (0x1) +#define MAX9295_PHY_REM_CTRL_DIS_FIELD BIT(4) +#define MAX9295_PHY_LOCAL_CTRL_DIS_FIELD BIT(5) + +#define MAX9295_REG2 (0x2) +#define MAX9295_REG2_VID_TX_EN_FIELD(pipe_id) BIT((pipe_id) + 4) + +#define MAX9295_DEV_ID (0xD) +#define MAX9295_DEV_REV (0xE) +#define MAX9295_DEV_REV_FIELD GENMASK(3, 0) +#define MAX9295_CTRL0 (0x10) +#define MAX9295_CTRL0_RST_ALL BIT(7) + +#define MAX9295_CFGI_INFOFR_TR3 (0x7B) +#define MAX9295_CFGL_SPI_TR3 (0x83) +#define MAX9295_CFGC_CC_TR3 (0x8B) +#define MAX9295_CFGL_GPIO_TR3 (0x93) +#define MAX9295_CFGL_IIC_X (0xA3) +#define MAX9295_CFGL_IIC_Y (0xAB) +#define MAX9295_TR3_TX_SRC_ID GENMASK(2, 0) + +#define MAX9295_VIDEO_TX0(pipe_id) (0x100 + (pipe_id) * 8) 
+#define MAX9295_VIDEO_TX0_AUTO_BPP_EN_FIELD BIT(3) +#define MAX9295_VIDEO_TX1(pipe_id) (0x101 + (pipe_id) * 8) +#define MAX9295_VIDEO_TX1_BPP_FIELD GENMASK(5, 0) + +#define MAX9295_GPIO(gpio) (0x2BE + ((gpio) * 3)) +#define MAX9295_GPIO_A(gpio) (MAX9295_GPIO(gpio) + 0) +#define MAX9295_GPIO_A_OUT_DIS_FIELD BIT(0) +#define MAX9295_GPIO_A_TX_EN_FIELD BIT(1) +#define MAX9295_GPIO_A_RX_EN_FIELD BIT(2) +#define MAX9295_GPIO_A_IN_FIELD BIT(3) +#define MAX9295_GPIO_A_OUT_FIELD BIT(4) +#define MAX9295_GPIO_A_RES_CFG_FIELD BIT(7) +#define MAX9295_GPIO_B(gpio) (MAX9295_GPIO(gpio) + 1) +#define MAX9295_GPIO_B_TX_ID GENMASK(4, 0) +#define MAX9295_GPIO_B_OUT_TYPE_FIELD BIT(5) +#define MAX9295_GPIO_B_PULL_UPDN_SEL_FIELD GENMASK(7, 6) +#define MAX9295_GPIO_C(gpio) (MAX9295_GPIO(gpio) + 2) +#define MAX9295_GPIO_C_RX_ID GENMASK(4, 0) + +#define MAX9295_FRONTTOP_0 (0x308) +#define MAX9295_FRONTTOP_0_LINE_INFO BIT(6) +#define MAX9295_FRONTTOP_0_SEL_CSI_FIELD(pipe_id) BIT(pipe_id) +#define MAX9295_FRONTTOP_0_START_CSI_FIELD(csi_id) BIT((csi_id) + 4) +#define MAX9295_FRONTTOP_9 (0x311) +#define MAX9295_FRONTTOP_9_START_VIDEO_FIELD(pipe_id, csi_id) BIT((pipe_id) + 4 * (csi_id)) + +// Double loading mode +#define MAX9295_FRONTTOP_10 (0x312) +#define MAX9295_FRONTTOP_10_DBL8_FIELD(pipe_id) BIT(pipe_id) +#define MAX9295_FRONTTOP_11 (0x313) +#define MAX9295_FRONTTOP_11_DBL10_FIELD(pipe_id) BIT(pipe_id) +#define MAX9295_FRONTTOP_11_DBL12_FIELD(pipe_id) BIT((pipe_id) + 4) + +#define MAX9295_MEM_DT_SEL(pipe_id, dt_slot) (0x314 + (dt_slot) / 2 * 0xC2 + 2 * (pipe_id) + (dt_slot)) +#define MAX9295_MEM_DT_SEL_DT_FIELD GENMASK(5, 0) +#define MAX9295_MEM_DT_SEL_EN_FIELD BIT(6) + +// Software BPP override (used for double loading mode and zero padding) +#define MAX9295_SOFT_BPP(pipe_id) (0x31C + (pipe_id)) +#define MAX9295_SOFT_BPP_EN_FIELD BIT(5) +#define MAX9295_SOFT_BPP_FIELD GENMASK(4, 0) + +#define MAX9295_MIPI_RX (0x330) +#define MAX9295_MIPI_RX_1 (MAX9295_MIPI_RX + 1) +#define 
MAX9295_MIPI_RX_1_SEL_CSI_LANES_FIELD(csi_id) (GENMASK(1, 0) << (csi_id * 4)) + +// I2C SRC/DST +#define MAX9295_I2C_SRC(i2c_id, n) ((i2c_id == 0 ? 0x42 : (0x550 + (4 * ((i2c_id) - 1)))) + (2 * (n)) + 0) +#define MAX9295_I2C_SRC_FIELD GENMASK(7, 1) +#define MAX9295_I2C_DST(i2c_id, n) ((i2c_id == 0 ? 0x42 : (0x550 + (4 * ((i2c_id) - 1)))) + (2 * (n)) + 1) +#define MAX9295_I2C_DST_FIELD GENMASK(7, 1) + +// RCLK registers +#define MAX9295_REG6 (0x6) +#define MAX9295_RCLK_EN_FIELD BIT(5) +#define MAX9295_REG3 (0x3) +#define MAX9295_RCLK_SEL_FIELD GENMASK(1, 0) +#define MAX9295_REF_VTG0 (0x3F0) +#define MAX9295_REFGEN_PREDEF_FREQ_FIELD GENMASK(5, 4) +#define MAX9295_REFGEN_PREDEF_ALT_FIELD BIT(3) +#define MAX9295_REFGEN_EN_FIELD BIT(0) +#define MAX9295_REF_VTG1 (0x3F1) +#define MAX9295_PCLK_GPIO_FIELD GENMASK(5, 1) +#define MAX9295_PCLK_EN_FIELD BIT(0) + +#endif /* _MAX9295_H_ */ diff --git a/drivers/media/i2c/max9x/max9296.c b/drivers/media/i2c/max9x/max9296.c new file mode 100644 index 0000000..0c34466 --- /dev/null +++ b/drivers/media/i2c/max9x/max9296.c @@ -0,0 +1,882 @@ +/* + * max9296.c - Maxim MAX9296 GMSL2/GMSL1 to CSI-2 Deserializer + * + * Copyright (c) 2023-2024, Define Design Deploy Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "max9296.h" + +// Params +int max9296_serial_link_timeout_ms = MAX9296_DEFAULT_SERIAL_LINK_TIMEOUT_MS; +module_param(max9296_serial_link_timeout_ms, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(max9296_serial_link_timeout_ms, "Timeout for serial link in milliseconds"); + +// Declarations +static int max9296_set_phy_mode(struct max9x_common *common, unsigned int phy_mode); +static int max9296_set_phy_enabled(struct max9x_common *common, unsigned int csi_id, bool enable); +static int max9296_set_phy_lane_map(struct max9x_common *common, unsigned int csi_id, unsigned int phy_lane_map); +static int max9296_set_phy_dpll_enabled(struct max9x_common *common, unsigned int csi_id, bool enable); +static int max9296_set_phy_dpll_freq(struct max9x_common *common, unsigned int csi_id, unsigned int freq_mhz); +static int max9296_set_mipi_lane_cnt(struct max9x_common *common, unsigned int csi_id, int num_lanes); +static int max9296_set_initial_deskew(struct max9x_common *common, unsigned int csi_id, + bool enable, unsigned int width); +static int max9296_configure_csi_dphy(struct max9x_common *common); +static int max9296_enable(struct max9x_common *common); +static int max9296_max_elements(struct max9x_common *common, enum max9x_element_type element); +static int max9296_get_serial_link_lock(struct max9x_common *common, unsigned int link_id, bool *locked); +static int max9296_serial_link_reset(struct max9x_common *common, unsigned int link_id); +static int max9296_set_serial_link_rate(struct max9x_common *common, unsigned int link_id); +static int max9296_set_video_pipe_src(struct max9x_common *common, unsigned int pipe_id, + unsigned int link_id, unsigned int src_pipe); +static int max9296_set_video_pipe_maps_enabled(struct max9x_common *common, unsigned int pipe_id, int num_maps); +static int max9296_set_video_pipe_map(struct max9x_common *common, unsigned 
int pipe_id, + unsigned int map_id, struct max9x_serdes_mipi_map *mipi_map); +static int max9296_set_csi_double_loading_mode(struct max9x_common *common, unsigned int csi_id, unsigned int bpp); +static int max9296_set_csi_link_enabled(struct max9x_common *common, unsigned int csi_id, bool enable); +static int max9296_set_video_pipe_enabled(struct max9x_common *common, unsigned int pipe_id, bool enable); +static int max9296_set_serial_link_routing(struct max9x_common *common, unsigned int link_id); +static int max9296_disable_serial_link(struct max9x_common *common, unsigned int link_id); +static int max9296_enable_serial_link(struct max9x_common *common, unsigned int link_id); +static int max9296_isolate_serial_link(struct max9x_common *common, unsigned int link); +static int max9296_deisolate_serial_link(struct max9x_common *common, unsigned int link); +static int max9296_wait_link_lock(struct max9x_common *common, int link); +static int max9296_enable_csi_link(struct max9x_common *common, unsigned int csi_link_id); +static int max9296_disable_csi_link(struct max9x_common *common, unsigned int csi_link_id); + +/* Currently unused */ +static int max9296_conf_phy_maps(struct max9x_common *common, unsigned int csi_id, unsigned int *phy_lane_map); + +// Functions +static int max9296_set_phy_mode(struct max9x_common *common, unsigned int phy_mode) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI: phy_mode=%d", phy_mode); + + return regmap_update_bits(map, MAX9296_MIPI_PHY0, + MAX9296_MIPI_PHY0_MODE_FIELD, + MAX9X_FIELD_PREP(MAX9296_MIPI_PHY0_MODE_FIELD, phy_mode)); +} + +static int max9296_set_phy_enabled(struct max9x_common *common, unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: %s", csi_id, (enable ? 
"enable" : "disable")); + + return regmap_update_bits(map, MAX9296_MIPI_PHY_ENABLE, + MAX9296_MIPI_PHY_ENABLE_FIELD(csi_id), + MAX9X_FIELD_PREP(MAX9296_MIPI_PHY_ENABLE_FIELD(csi_id), enable ? 1U : 0U)); +} + +static int max9296_set_phy_lane_map(struct max9x_common *common, unsigned int csi_id, unsigned int phy_lane_map) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: phy_lane_map=0x%04x", csi_id, phy_lane_map); + + return regmap_update_bits(map, MAX9296_MIPI_PHY_LANE_MAP(csi_id), + MAX9296_MIPI_PHY_LANE_MAP_FIELD(csi_id, 0) + | MAX9296_MIPI_PHY_LANE_MAP_FIELD(csi_id, 1), + phy_lane_map); +} + +static int max9296_set_phy_dpll_enabled(struct max9x_common *common, unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: DPLL %s", csi_id, (enable ? "on" : "off")); + + return regmap_update_bits(map, MAX9296_DPLL_RESET(csi_id), + MAX9296_DPLL_RESET_SOFT_RST_FIELD, + MAX9X_FIELD_PREP(MAX9296_DPLL_RESET_SOFT_RST_FIELD, enable ? 
1U : 0U)); +} + +static int max9296_set_phy_dpll_freq(struct max9x_common *common, unsigned int csi_id, unsigned int freq_mhz) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + if (WARN_ONCE(freq_mhz > 0 && freq_mhz < MAX9296_DPLL_FREQ_MHZ_MULTIPLE, + "CSI frequency must be greater than %d MHz", MAX9296_DPLL_FREQ_MHZ_MULTIPLE)) + return -EINVAL; + + dev_dbg(dev, "CSI link %d: freq %u MHz, %u mult", csi_id, freq_mhz, freq_mhz / MAX9296_DPLL_FREQ_MHZ_MULTIPLE); + + return regmap_update_bits(map, MAX9296_DPLL_FREQ(csi_id), + MAX9296_DPLL_FREQ_FIELD, + MAX9X_FIELD_PREP(MAX9296_DPLL_FREQ_FIELD, freq_mhz / MAX9296_DPLL_FREQ_MHZ_MULTIPLE)); +} + + +static int max9296_set_mipi_lane_cnt(struct max9x_common *common, unsigned int csi_id, int num_lanes) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: %d lanes", csi_id, num_lanes); + + return regmap_update_bits(map, MAX9296_MIPI_TX_LANE_CNT(csi_id), + MAX9296_MIPI_TX_LANE_CNT_FIELD, + MAX9X_FIELD_PREP(MAX9296_MIPI_TX_LANE_CNT_FIELD, + (common->csi_link[csi_id].config.num_lanes - 1))); +} + +static int max9296_set_initial_deskew(struct max9x_common *common, unsigned int csi_id, + bool enable, unsigned int width) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: Initial deskew %s", csi_id, enable ? 
"enabled" : "disabled"); + + /* clamp initial deskew width to 7 which is 8*32k UI*/ + if (width > 7) + width = 7; + + return regmap_write(map, MAX9296_MIPI_TX_DESKEW_INIT(csi_id), + MAX9X_FIELD_PREP(MAX9296_MIPI_TX_DESKEW_INIT_AUTO_EN, enable) | + MAX9X_FIELD_PREP(MAX9296_MIPI_TX_DESKEW_INIT_WIDTH, width)); +} + +static int max9296_conf_phy_maps(struct max9x_common *common, unsigned int csi_id, + unsigned int *phy_lane_map) +{ + struct device *dev = common->dev; + int i; + const unsigned int phy_count = 2; + const unsigned int controller = csi_id / phy_count; + + if (common->csi_link[csi_id].config.num_lanes != common->csi_link[csi_id].config.num_maps) { + dev_err(dev, "CSI%u number of maps %u must match number of lanes %u.", csi_id, + common->csi_link[csi_id].config.num_maps, + common->csi_link[csi_id].config.num_lanes); + return -EINVAL; + } + + for (i = 0; i < common->csi_link[csi_id].config.num_maps; i++) { + const struct max9x_serdes_phy_map *map = &common->csi_link[csi_id].config.map[i]; + + if (map->int_csi >= 4) { + dev_err(dev, "CSI%u does not have %u Lanes can not map.", csi_id, + map->int_csi + 1); + return -EINVAL; + } + if (map->phy_lane >= 2) { + dev_err(dev, "Each PHY has 2 lanes can not map %u.", map->phy_ind); + return -EINVAL; + } + if (map->phy_ind < (controller * phy_count) || + map->phy_ind >= ((controller + 1) * phy_count)) { + dev_err(dev, "CSI%u does not have PHYs %u can not map.", csi_id, + map->phy_ind); + return -EINVAL; + } + phy_lane_map[map->phy_ind] |= MAX9X_FIELD_PREP( + MAX9296_MIPI_PHY_LANE_MAP_FIELD(map->phy_ind, map->phy_lane), map->int_csi); + } + return 0; +} + +/* + * MAX9296A has four PHYs, but does not support single-PHY configurations, + * only double-PHY configurations, even when only using two lanes. + * For PHY 0 + PHY 1, PHY 1 is the master PHY. + * For PHY 2 + PHY 3, PHY 2 is the master PHY. + * Clock is always on the master PHY. + * For first pair of PHYs, first lanes are on the master PHY. 
+ * For second pair of PHYs, first lanes are on the master PHY too. + * + * PHY 0 + 1 + * CLK = PHY 1 + * PHY1 Lane 0 = D0 + * PHY1 Lane 1 = D1 + * PHY0 Lane 0 = D2 + * PHY0 Lane 1 = D3 + * + * PHY 2 + 3 + * CLK = PHY 2 + * PHY2 Lane 0 = D0 + * PHY2 Lane 1 = D1 + * PHY3 Lane 0 = D2 + * PHY3 Lane 1 = D3 + */ +static int max9296_configure_csi_dphy(struct max9x_common *common) +{ + struct device *dev = common->dev; + unsigned int phy_mode; + unsigned int phy_lane_map[MAX9296_NUM_CSI_LINKS] = {0}; + unsigned int csi_id; + unsigned int PHY1, PHY2; + int ret; + + for (csi_id = 0; csi_id < MAX9296_NUM_CSI_LINKS; csi_id++) { + dev_dbg(dev, "CSI link %d: enabled=%d, num_lanes=%d, freq_mhz=%d init_deskew=%d", + csi_id, common->csi_link[csi_id].enabled, + common->csi_link[csi_id].config.num_lanes, + common->csi_link[csi_id].config.freq_mhz, + common->csi_link[csi_id].config.auto_init_deskew_enabled); + } + + PHY1 = common->csi_link[0].enabled ? common->csi_link[0].config.num_lanes : 0; + PHY2 = common->csi_link[1].enabled ? common->csi_link[1].config.num_lanes : 0; + /* Each csi controller has 4 lanes. each phy has 2 lanes*/ + if ((PHY1 + PHY2) > 4) { + dev_err(dev, "CSI controller 1 has more than %u lanes. PHY0: %u, PHY1: %u", 4, PHY1, + PHY1); + return -EINVAL; + } + + PHY1 = common->csi_link[2].enabled ? common->csi_link[2].config.num_lanes : 0; + PHY2 = common->csi_link[3].enabled ? common->csi_link[3].config.num_lanes : 0; + /* Each csi controller has 4 lanes. each phy has 2 lanes */ + if ((PHY1 + PHY2) > 4) { + dev_err(dev, "CSI controller 2 has more than %u lanes. 
PHY2: %u, PHY3: %u", 4, PHY1, + PHY1); + return -EINVAL; + } + + if (common->csi_link[1].enabled && common->csi_link[1].config.num_lanes == 4) { + if (common->csi_link[0].enabled) + dev_warn(dev, "CSI link 0 enabled, but CSI link 1 is 4 lanes."); + + /* lane 1 is master for CSI controller 1*/ + max9296_conf_phy_maps(common, 1, phy_lane_map); + if (common->csi_link[2].enabled && common->csi_link[2].config.num_lanes == 4) { + if (common->csi_link[3].enabled) + dev_warn(dev, "CSI link 3 enabled, but CSI link 2 is 4 lanes."); + dev_dbg(dev, "CSI phy mode is set to 2X4Lanes"); + /* lane 2 is master for CSI controller 2*/ + max9296_conf_phy_maps(common, 2, phy_lane_map); + phy_mode = MAX9296_MIPI_PHY_2X4; // 2x 4lane + } else { + dev_dbg(dev, "CSI phy mode is set to A: 4Lanes B: 2X2Lanes"); + max9296_conf_phy_maps(common, 2, phy_lane_map); + max9296_conf_phy_maps(common, 3, phy_lane_map); + phy_mode = MAX9296_MIPI_PHY_1X4A_22; // A: 1x 4lane B: 2x 2lane + } + } else { + max9296_conf_phy_maps(common, 1, phy_lane_map); + max9296_conf_phy_maps(common, 2, phy_lane_map); + if (common->csi_link[2].enabled && common->csi_link[2].config.num_lanes == 4) { + if (common->csi_link[3].enabled) + dev_warn(dev, "CSI link 3 enabled, but CSI link 2 is 4 lanes."); + dev_dbg(dev, "CSI phy mode is set to A: 2X2Lanes B: 4Lanes"); + /* lane 2 is master for CSI controller 2*/ + max9296_conf_phy_maps(common, 2, phy_lane_map); + phy_mode = MAX9296_MIPI_PHY_1X4B_22; // B: 1x 4lane A: 2x 2lane + } else { + dev_dbg(dev, "CSI phy mode is set to 4X2Lanes"); + max9296_conf_phy_maps(common, 2, phy_lane_map); + max9296_conf_phy_maps(common, 3, phy_lane_map); + phy_mode = MAX9296_MIPI_PHY_4X2; // 4x 2lane + } + } + + TRY(ret, max9296_set_phy_mode(common, phy_mode)); + + for (csi_id = 0; csi_id < MAX9296_NUM_CSI_LINKS; csi_id++) { + struct max9x_serdes_csi_config *config = + &common->csi_link[csi_id].config; + + TRY(ret, max9296_set_phy_enabled(common, csi_id, false)); + + TRY(ret, 
max9296_set_phy_dpll_enabled(common, csi_id, false)); + + TRY(ret, max9296_set_phy_lane_map(common, csi_id, + phy_lane_map[csi_id]) + ); + + TRY(ret, max9296_set_mipi_lane_cnt(common, csi_id, + config->num_lanes) + ); + } + + return 0; +} + +static int max9296_set_all_reset(struct max9x_common *common, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "Reset ALL %s", (enable ? "enable" : "disable")); + + return regmap_update_bits(map, MAX9296_CTRL0, + MAX9296_CTRL0_RESET_ALL_FIELD, + MAX9X_FIELD_PREP(MAX9296_CTRL0_RESET_ALL_FIELD, enable ? 1U : 0U)); +} + +static int max9296_soft_reset(struct max9x_common *common) +{ + struct device *dev = common->dev; + int ret; + + dev_dbg(dev, "Soft reset"); + + TRY(ret, max9296_set_all_reset(common, 1)); + /* lock time for reset_all / PWDNB in spec, I2C Wake time is 2.25ms */ + usleep_range(45000, 45050); + TRY(ret, max9296_set_all_reset(common, 0)); + + return 0; +} + +static int max9296_max_elements(struct max9x_common *common, enum max9x_element_type element) +{ + switch (element) { + case MAX9X_SERIAL_LINK: + return MAX9296_NUM_SERIAL_LINKS; + case MAX9X_VIDEO_PIPE: + return MAX9296_NUM_VIDEO_PIPES; + case MAX9X_MIPI_MAP: + return MAX9296_NUM_MIPI_MAPS; + case MAX9X_CSI_LINK: + return MAX9296_NUM_CSI_LINKS; + default: + break; + } + + return 0; +} + +static struct max9x_common_ops max9296_common_ops = { + .enable = max9296_enable, + .soft_reset = max9296_soft_reset, + .max_elements = max9296_max_elements, +}; + +static int max9296_set_video_pipe_src(struct max9x_common *common, unsigned int pipe_id, + unsigned int link_id, unsigned int src_pipe) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + /* + * link_id ignored for max9296, pipe routing done through stream-ids, + * which must be unique across links + */ + dev_dbg(dev, "Video-pipe %d: src_pipe=%u", pipe_id, src_pipe); + + return regmap_update_bits(map, MAX9296_VIDEO_PIPE_SEL(pipe_id), + 
MAX9296_VIDEO_PIPE_STR_SEL_FIELD, + MAX9X_FIELD_PREP(MAX9296_VIDEO_PIPE_STR_SEL_FIELD, src_pipe)); +} + +static int max9296_set_video_pipe_maps_enabled(struct max9x_common *common, unsigned int pipe_id, int num_maps) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int val = 0; + int ret; + + if (num_maps > 0) + val = GENMASK(num_maps - 1, 0); + + dev_dbg(dev, "Video-pipe %d: num_maps=%u", pipe_id, num_maps); + + TRY(ret, regmap_write(map, MAX9296_MAP_EN_L(pipe_id), + MAX9X_FIELD_PREP(MAX9296_MAP_EN_FIELD, val)) + ); + + TRY(ret, regmap_write(map, MAX9296_MAP_EN_H(pipe_id), + MAX9X_FIELD_PREP(MAX9296_MAP_EN_FIELD, val >> 8)) + ); + + return 0; +} + +static int max9296_set_video_pipe_map(struct max9x_common *common, unsigned int pipe_id, + unsigned int map_id, struct max9x_serdes_mipi_map *mipi_map) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + dev_dbg(dev, "Video-pipe %d, map %d: VC%d:DT%02x->VC%d:DT%02x, dst_csi=%d ", + pipe_id, map_id, mipi_map->src_vc, mipi_map->src_dt, + mipi_map->dst_vc, mipi_map->dst_dt, mipi_map->dst_csi); + + TRY(ret, regmap_write(map, MAX9296_MAP_SRC_L(pipe_id, map_id), + MAX9X_FIELD_PREP(MAX9296_MAP_SRC_L_VC_FIELD, mipi_map->src_vc) | + MAX9X_FIELD_PREP(MAX9296_MAP_SRC_L_DT_FIELD, mipi_map->src_dt)) + ); + + TRY(ret, regmap_write(map, MAX9296_MAP_DST_L(pipe_id, map_id), + MAX9X_FIELD_PREP(MAX9296_MAP_DST_L_VC_FIELD, mipi_map->dst_vc) | + MAX9X_FIELD_PREP(MAX9296_MAP_DST_L_DT_FIELD, mipi_map->dst_dt)) + ); + + TRY(ret, regmap_write(map, MAX9296_MAP_SRCDST_H(pipe_id, map_id), + MAX9X_FIELD_PREP(MAX9296_MAP_SRCDST_H_SRC_VC_FIELD, mipi_map->src_vc) | + MAX9X_FIELD_PREP(MAX9296_MAP_SRCDST_H_DST_VC_FIELD, mipi_map->dst_vc)) + ); + + TRY(ret, regmap_update_bits(map, MAX9296_MAP_DPHY_DEST(pipe_id, map_id), + MAX9296_MAP_DPHY_DEST_FIELD(map_id), + MAX9X_FIELD_PREP(MAX9296_MAP_DPHY_DEST_FIELD(map_id), mipi_map->dst_csi)) + ); + + return 0; +} + +/** + * 
max9296_set_csi_double_loading_mode() - Configure Double Loading Mode on a CSI controller + * @common: max9x_common + * @csi_id: Target CSI controller's ID + * @bpp: Original BPP to double. This can be 0 (disables), 8, 10, or 12. + * + * Double loading mode squeezes two input pixels together such that they are + * treated as a single pixel by the video pipe. Using this method increases + * bandwidth efficiency. + * + * See: GMSL2 Customers User Guide Section 30.5.1.1.1.2 "Double Loading Mode" + * See: GMSL2 Customers User Guide Section 43.4.2.3 "Double Mode (Deserializer)" + */ +static int max9296_set_csi_double_loading_mode(struct max9x_common *common, unsigned int csi_id, unsigned int bpp) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int value; + + switch (bpp) { + case 0: + value = 0; + break; + case 8: + value = FIELD_PREP(MAX9296_MIPI_TX_ALT_MEM_8BPP, 1U); + // To fully support 8bpp, additional register writes are + // needed for 'bpp8dbl' and 'bpp8dbl_mode' fields on each pipe. 
+ dev_err(dev, "8 BPP currently unsupported for pixel doubling"); + return -EINVAL; + case 10: + value = FIELD_PREP(MAX9296_MIPI_TX_ALT_MEM_10BPP, 1U); + break; + case 12: + value = FIELD_PREP(MAX9296_MIPI_TX_ALT_MEM_12BPP, 1U); + break; + default: + dev_err(dev, "Unsupported BPP for pixel doubling: %u", bpp); + return -EINVAL; + } + + if (bpp > 0) + dev_info(dev, "Configuring double loading mode on CSI %d: %u bpp -> %u bpp", + csi_id, bpp, (bpp * 2)); + + // Enable alt mem mapping + return regmap_update_bits(map, MAX9296_MIPI_TX_ALT_MEM(csi_id), + MAX9296_MIPI_TX_ALT_MEM_FIELD, value); +} + +static int max9296_set_csi_link_enabled(struct max9x_common *common, unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct max9x_serdes_csi_link *csi_link; + int ret; + + if (csi_id > common->num_csi_links) + return -EINVAL; + + csi_link = &common->csi_link[csi_id]; + + if (WARN_ONCE(enable && csi_link->enabled == false, "Tried to enable a disabled CSI port???")) + return -EINVAL; + + if (WARN_ONCE(enable && csi_link->config.num_lanes == 0, "Tried to enable CSI port with no lanes???")) + return -EINVAL; + + if (enable) + csi_link->usecount++; + else if (csi_link->usecount > 0) + csi_link->usecount--; + + dev_dbg(dev, "CSI link %d: %s (%d users)", csi_id, (enable ? 
"enable" : "disable"), csi_link->usecount); + + if (enable && csi_link->usecount == 1) { + // Enable && first user + ret = max9296_set_initial_deskew(common, csi_id, csi_link->config.auto_init_deskew_enabled, + csi_link->config.initial_deskew_width); + if (ret) + return ret; + + ret = max9296_set_phy_dpll_freq(common, csi_id, csi_link->config.freq_mhz); + if (ret) + return ret; + + ret = max9296_set_phy_dpll_enabled(common, csi_id, true); + if (ret) + return ret; + + ret = max9296_set_phy_enabled(common, csi_id, true); + if (ret) + return ret; + + } else if (!enable && csi_link->usecount == 0) { + // Disable && no more users + ret = max9296_set_phy_enabled(common, csi_id, false); + if (ret) + return ret; + + ret = max9296_set_phy_dpll_enabled(common, csi_id, false); + if (ret) + return ret; + + } + + return 0; +} + +static int max9296_set_video_pipe_enabled(struct max9x_common *common, unsigned int pipe_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "Video-pipe %d: %s", pipe_id, (enable ? "enable" : "disable")); + + return regmap_update_bits(map, MAX9296_VIDEO_PIPE_EN(pipe_id), + MAX9296_VIDEO_PIPE_EN_FIELD(pipe_id), + MAX9X_FIELD_PREP(MAX9296_VIDEO_PIPE_EN_FIELD(pipe_id), enable ? 
1U : 0U)); +} + +/***** max9296_serial_link_ops auxiliary functions *****/ +static int max9296_set_serial_link_rate(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + struct max9x_serdes_serial_config *config = &common->serial_link[link_id].config; + int tx_rate, rx_rate; + + tx_rate = max9x_serdes_mhz_to_rate(max9296_tx_rates, ARRAY_SIZE(max9296_tx_rates), config->tx_freq_mhz); + if (tx_rate < 0) + return tx_rate; + + rx_rate = max9x_serdes_mhz_to_rate(max9296_rx_rates, ARRAY_SIZE(max9296_rx_rates), config->rx_freq_mhz); + if (rx_rate < 0) + return rx_rate; + + dev_dbg(dev, "Serial-link %d: TX=%d MHz RX=%d MHz", link_id, config->tx_freq_mhz, config->rx_freq_mhz); + + return regmap_update_bits(map, MAX9296_PHY_REM_CTRL, + MAX9296_PHY_REM_CTRL_TX_FIELD | MAX9296_PHY_REM_CTRL_RX_FIELD, + MAX9X_FIELD_PREP(MAX9296_PHY_REM_CTRL_TX_FIELD, tx_rate) | + MAX9X_FIELD_PREP(MAX9296_PHY_REM_CTRL_RX_FIELD, rx_rate)); +} + +static int max9296_set_serial_link_routing(struct max9x_common *common, unsigned int link_id) +{ + unsigned int pipe_id; + unsigned int map_id; + int ret; + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + if (config->src_link != link_id) + continue; + + ret = max9296_set_video_pipe_src(common, pipe_id, config->src_link, config->src_pipe); + if (ret) + return ret; + + ret = max9296_set_video_pipe_maps_enabled(common, pipe_id, config->num_maps); + if (ret) + return ret; + + for (map_id = 0; map_id < config->num_maps; map_id++) { + ret = max9296_set_video_pipe_map(common, pipe_id, map_id, &config->map[map_id]); + if (ret) + return ret; + + ret = max9296_set_csi_double_loading_mode(common, + config->map[map_id].dst_csi, config->dbl_pixel_bpp); + if (ret) + return ret; + + if 
(common->csi_link[config->map[map_id].dst_csi].config.auto_start) { + ret = max9296_set_csi_link_enabled(common, config->map[map_id].dst_csi, true); + if (ret) + return ret; + } + } + + ret = max9296_set_video_pipe_enabled(common, pipe_id, true); + if (ret) + return ret; + } + + return 0; +} + +static int max9296_serial_link_reset(struct max9x_common *common, unsigned int link_id) +{ + struct regmap *map = common->map; + + /* GMSL RX rate must be the same as the SER. This is set in REG1(0x1)[1:0] */ + return regmap_update_bits(map, MAX9296_CTRL0, MAX9296_CTRL0_RESET_ONESHOT_FIELD, + MAX9X_FIELD_PREP(MAX9296_CTRL0_RESET_ONESHOT_FIELD, 1U)); +} + +static int max9296_get_serial_link_lock(struct max9x_common *common, unsigned int link_id, bool *locked) +{ + struct regmap *map = common->map; + unsigned int val; + int ret; + + /* Only looks at link A refer to header file */ + ret = regmap_read(map, MAX9296_PHY_LOCKED(link_id), &val); + if (ret) + return ret; + + if (FIELD_GET(MAX9296_PHY_LOCKED_FIELD, val) != 0) + *locked = true; + else + *locked = false; + + return 0; +} + +static int max9296_wait_link_lock(struct max9x_common *common, int link) +{ + bool locked; + int ret; + ulong timeout = jiffies + msecs_to_jiffies(max9296_serial_link_timeout_ms); + + do { + ret = max9296_get_serial_link_lock(common, link, &locked); + if (ret == 0 && locked) + return 0; + + usleep_range(1000, 2000); + } while (time_is_after_jiffies(timeout)); + + return -ETIMEDOUT; +} +/***** max9296_serial_link_ops auxiliary functions *****/ + +/***** max9296_serial_link_ops *****/ +static int max9296_isolate_serial_link(struct max9x_common *common, unsigned int link) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int link_cfg; + unsigned int auto_link; + int ret; + + dev_dbg(dev, "Isolate link %d", link); + + auto_link = 0; + link_cfg = (link == 0) ? 
MAX9296_LINK_A : MAX9296_LINK_B; + + TRY_DEV_HERE(ret, regmap_update_bits(map, MAX9296_CTRL0, + MAX9296_CTRL0_AUTO_CFG_FIELD | MAX9296_CTRL0_LINK_CFG_FIELD, + FIELD_PREP(MAX9296_CTRL0_AUTO_CFG_FIELD, auto_link) + | FIELD_PREP(MAX9296_CTRL0_LINK_CFG_FIELD, link_cfg)), + dev); + + TRY_DEV_HERE(ret, max9296_serial_link_reset(common, link), dev); + + TRY_DEV_HERE(ret, max9296_wait_link_lock(common, link), dev); + + return 0; +} + +static int max9296_deisolate_serial_link(struct max9x_common *common, unsigned int link) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int link_cfg; + unsigned int auto_link = 0; + int ret; + bool link_a = common->serial_link[0].detected; + bool link_b = common->serial_link[1].detected; + + if (link_a && link_b) + link_cfg = MAX9296_LINK_SPLIT; + else if (link_a) + link_cfg = MAX9296_LINK_A; + else if (link_b) + link_cfg = MAX9296_LINK_B; + else { + dev_err(dev, "No link was detected"); + return -1; + } + + dev_dbg(dev, "Deisolate link %d (link_cfg=%d)", link, link_cfg); + + TRY_DEV_HERE(ret, regmap_update_bits( + map, + MAX9296_CTRL0, + MAX9296_CTRL0_AUTO_CFG_FIELD + |MAX9296_CTRL0_LINK_CFG_FIELD, + FIELD_PREP(MAX9296_CTRL0_AUTO_CFG_FIELD, auto_link) + |FIELD_PREP(MAX9296_CTRL0_LINK_CFG_FIELD, link_cfg)), + dev); + + TRY_DEV_HERE(ret, max9296_serial_link_reset(common, link), dev); + + TRY_DEV_HERE(ret, max9296_wait_link_lock(common, link), dev); + + return 0; +} + +static int max9296_enable_serial_link(struct max9x_common *common, unsigned int link_id) +{ + int ret; + + if (WARN_ON_ONCE(link_id >= common->num_serial_links)) + return -EINVAL; + + if (WARN_ONCE(common->serial_link[link_id].config.link_type + != MAX9X_LINK_TYPE_GMSL2, "Only GMSL2 is supported!")) + return -EINVAL; + + // GMSL2 + ret = max9296_set_serial_link_rate(common, link_id); + if (ret) + return ret; + + ret = max9296_isolate_serial_link(common, link_id); + if (ret) + return ret; + + common->serial_link[link_id].detected = true; + + 
ret = max9296_set_serial_link_routing(common, link_id); + if (ret) + return ret; + + max9296_deisolate_serial_link(common, link_id); + + return 0; +} + +static int max9296_disable_serial_link(struct max9x_common *common, unsigned int link_id) +{ + unsigned int pipe_id; + unsigned int map_id; + int ret; + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + if (config->src_link != link_id) + continue; + + ret = max9296_set_video_pipe_enabled(common, pipe_id, false); + if (ret) + return ret; + + ret = max9296_set_video_pipe_maps_enabled(common, pipe_id, 0); + if (ret) + return ret; + + for (map_id = 0; map_id < config->num_maps; map_id++) { + ret = max9296_set_csi_link_enabled(common, config->map[map_id].dst_csi, false); + if (ret) + return ret; + } + } + + return 0; +} + +static struct max9x_serial_link_ops max9296_serial_link_ops = { + .enable = max9296_enable_serial_link, + .disable = max9296_disable_serial_link, + .isolate = max9296_isolate_serial_link, + .deisolate = max9296_deisolate_serial_link, +}; +/***** max9296_serial_link_ops *****/ + +static int max9296_enable_csi_link(struct max9x_common *common, unsigned int csi_link_id) +{ + return max9296_set_csi_link_enabled(common, csi_link_id, true); +} + +static int max9296_disable_csi_link(struct max9x_common *common, unsigned int csi_link_id) +{ + return max9296_set_csi_link_enabled(common, csi_link_id, false); +} + +static struct max9x_csi_link_ops max9296_csi_link_ops = { + .enable = max9296_enable_csi_link, + .disable = max9296_disable_csi_link, +}; + +static int max9296_enable(struct max9x_common *common) +{ + struct device *dev = common->dev; + int link_id; + int ret; + + dev_dbg(dev, "Enable"); + + for (link_id = 0; link_id < common->num_serial_links; link_id++) { + ret = max9296_disable_serial_link(common, link_id); + if (ret) + 
return ret; + } + + ret = max9296_configure_csi_dphy(common); + + if (ret) + return ret; + + return 0; +} + +int max9296_get_ops(struct max9x_common_ops **common_ops, struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops) +{ + *common_ops = &max9296_common_ops; + *serial_ops = &max9296_serial_link_ops; + *csi_ops = &max9296_csi_link_ops; + *lf_ops = NULL; + *trans_ops = NULL; + + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Josh Watts "); +MODULE_AUTHOR("Jacob Kiggins "); +MODULE_AUTHOR("Yan Dongcheng "); +MODULE_DESCRIPTION("Maxim MAX9296 Dual GMSL2/GMSL1 to CSI-2 Deserializer driver"); diff --git a/drivers/media/i2c/max9x/max9296.h b/drivers/media/i2c/max9x/max9296.h new file mode 100644 index 0000000..38c3446 --- /dev/null +++ b/drivers/media/i2c/max9x/max9296.h @@ -0,0 +1,144 @@ +/* + * max9296.h - Maxim 9296 registers and constants. + * + * Copyright (c) 2023-2024 Define Design Deploy Corp. All Rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. 
+ +#ifndef _MAX9296_H_ +#define _MAX9296_H_ + +#include +#include "serdes.h" + +enum max9296_dev_id { + MAX9296A = 0x94 +}; + +enum max9296_phy_mode { + MAX9296_MIPI_PHY_4X2 = BIT(0), // four 2-lane ports + MAX9296_MIPI_PHY_1X4 = BIT(1), // one 4-lane (CSI2 is master (TODO: Which port???)) + MAX9296_MIPI_PHY_2X4 = BIT(2), // two 4-lane ports (CSI1 is master for port A, CSI2 for port B) + MAX9296_MIPI_PHY_1X4A_22 = BIT(3), // one 4-lane (PHY0+PHY1, CSI1 for port A) port and two 2-lane ports + MAX9296_MIPI_PHY_1X4B_22 = BIT(4), // one 4-lane (PHY2+PHY3, CSI2 for port B) port and two 2-lane ports +}; + +enum max9296_link_mode { + MAX9296_LINK_AB, + MAX9296_LINK_A, + MAX9296_LINK_B, + MAX9296_LINK_SPLIT +}; + +#define MAX9296_NUM_SERIAL_LINKS 2 +#define MAX9296_NUM_VIDEO_PIPES 4 +#define MAX9296_NUM_MIPI_MAPS 16 +#define MAX9296_NUM_CSI_LINKS 4 /* Total Number of PHYs */ +/* 2 CSI controllers, 2 PHYs per controller, and 2 lanes per PHY */ + +#define MAX9296_DEFAULT_SERIAL_LINK_TIMEOUT_MS 250 + +#define MAX9296_DPLL_FREQ_MHZ_MULTIPLE 100 + +#define MAX9296_FLD_OFS(n, bits_per_field, count) (((n) % (count)) * (bits_per_field)) +#define MAX9296_OFFSET_GENMASK(offset, h, l) GENMASK(offset + h, offset + l) + +#define MAX9296_CTRL0 0x10 +#define MAX9296_CTRL0_AUTO_CFG_FIELD BIT(4) +#define MAX9296_CTRL0_LINK_CFG_FIELD GENMASK(1, 0) +#define MAX9296_CTRL0_RESET_LINK_FIELD BIT(6) // One bit to reset whole link +#define MAX9296_CTRL0_RESET_ALL_FIELD BIT(7) +#define MAX9296_CTRL0_RESET_ONESHOT_FIELD BIT(5) + +#define MAX9296_PHY_REM_CTRL (0x1) +#define MAX9296_PHY_REM_CTRL_TX_FIELD (GENMASK(1, 0) << 2) +#define MAX9296_PHY_REM_CTRL_RX_FIELD GENMASK(1, 0) +#define MAX9296_PHY_REM_CTRL_DIS_FIELD BIT(4) + +/* + *CTRL3(0x13) LINK_MODE is set to link A. 
+ */ +#define MAX9296_PHY_LOCKED(link) (0x13) /* Based on link mode */ +#define MAX9296_PHY_LOCKED_FIELD BIT(3) + +#define MAX9296_VIDEO_PIPE_SEL(pipe) (0x50 + pipe) +#define MAX9296_VIDEO_PIPE_STR_SEL_FIELD GENMASK(1, 0) + +#define MAX9296_VIDEO_PIPE_EN(pipe) (0x2) +#define MAX9296_VIDEO_PIPE_EN_FIELD(pipe) (BIT(pipe) << 4) + +#define MAX9296_DPLL_FREQ(phy) (0x31D + ((phy) * 3)) +#define MAX9296_DPLL_FREQ_FIELD GENMASK(4, 0) + +#define MAX9296_MIPI_TX_EXT(pipe) (0x500 + ((pipe) * 0x10)) + +#define MAX9296_MIPI_PHY0 0x330 +#define MAX9296_MIPI_PHY0_MODE_FIELD GENMASK(4, 0) +#define MAX9296_MIPI_PHY_ENABLE 0x332 +#define MAX9296_MIPI_PHY_ENABLE_FIELD(csi) BIT((csi) + 4) +#define MAX9296_MIPI_PHY_LANE_MAP(csi) (0x333 + (csi) / 2) +#define MAX9296_MIPI_PHY_LANE_MAP_FIELD(csi, lane) \ + (GENMASK(1, 0) << (MAX9296_FLD_OFS(csi, 4, 2) + MAX9296_FLD_OFS(lane, 2, 2))) + +// Note that CSIs and pipes overlap: +#define MAX9296_MIPI_TX(pipe) (0x400 + ((pipe) * 0x40)) + +/* Offsets for MAX9296_MIPI_TX under 0x0B are only for lanes 1 and 3 */ +#define MAX9296_MIPI_TX_LANE_CNT(csi) (MAX9296_MIPI_TX(csi) + 0x0A) +#define MAX9296_MIPI_TX_LANE_CNT_FIELD GENMASK(7, 6) +#define MAX9296_MIPI_TX_DESKEW_INIT(csi) (MAX9296_MIPI_TX(csi) + 0x03) +#define MAX9296_MIPI_TX_DESKEW_INIT_AUTO_EN BIT(7) +#define MAX9296_MIPI_TX_DESKEW_INIT_WIDTH GENMASK(2, 0) +#define MAX9296_MAP_EN_L(pipe) (MAX9296_MIPI_TX(pipe) + 0x0B) +#define MAX9296_MAP_EN_H(pipe) (MAX9296_MIPI_TX(pipe) + 0x0C) +#define MAX9296_MAP_EN_FIELD GENMASK(7, 0) +#define MAX9296_MAP_SRC_L(pipe, map) (MAX9296_MIPI_TX(pipe) + 0x0D + ((map) * 2)) +#define MAX9296_MAP_SRC_L_VC_FIELD GENMASK(7, 6) +#define MAX9296_MAP_SRC_L_DT_FIELD GENMASK(5, 0) +#define MAX9296_MAP_DST_L(pipe, map) (MAX9296_MIPI_TX(pipe) + 0x0D + ((map) * 2) + 1) +#define MAX9296_MAP_DST_L_VC_FIELD GENMASK(7, 6) +#define MAX9296_MAP_DST_L_DT_FIELD GENMASK(5, 0) +#define MAX9296_MAP_SRCDST_H(pipe, map) (MAX9296_MIPI_TX_EXT(pipe) + (map)) +#define 
MAX9296_MAP_SRCDST_H_SRC_VC_FIELD GENMASK(7, 5) +#define MAX9296_MAP_SRCDST_H_DST_VC_FIELD GENMASK(4, 2) +#define MAX9296_MAP_DPHY_DEST(pipe, map) (MAX9296_MIPI_TX(pipe) + 0x2D + ((map) / 4)) +#define MAX9296_MAP_DPHY_DEST_FIELD(map) (GENMASK(1, 0) << MAX9296_FLD_OFS(map, 2, 4)) + +#define MAX9296_MIPI_TX_ALT_MEM(csi) (MAX9296_MIPI_TX(csi) + 0x33) +#define MAX9296_MIPI_TX_ALT_MEM_FIELD GENMASK(2, 0) +#define MAX9296_MIPI_TX_ALT_MEM_8BPP BIT(1) +#define MAX9296_MIPI_TX_ALT_MEM_10BPP BIT(2) +#define MAX9296_MIPI_TX_ALT_MEM_12BPP BIT(0) + +#define MAX9296_GPIO_A(gpio_num) (0x2B0 + 3 * gpio_num) +#define MAX9296_GPIO_B(gpio_num) (MAX9296_GPIO_A(gpio_num) + 1) +#define MAX9296_GPIO_B_TX_ID GENMASK(4, 0) +#define MAX9296_GPIO_C(gpio_num) (MAX9296_GPIO_A(gpio_num) + 2) +#define MAX9296_GPIO_C_RX_ID GENMASK(4, 0) + +#define MAX9296_DPLL_RESET(phy) (0x1C00 + ((phy) * 0x100)) +#define MAX9296_DPLL_RESET_SOFT_RST_FIELD BIT(0) + +static struct max9x_serdes_rate_table max9296_rx_rates[] = { + { .val = 1, .freq_mhz = 3000}, // 3 GHz + { .val = 2, .freq_mhz = 6000}, // 6 GHz +}; + +static struct max9x_serdes_rate_table max9296_tx_rates[] = { + { .val = 0, .freq_mhz = 187}, // 187.5 MHz +}; + +#endif /* _MAX9296_H_ */ diff --git a/drivers/media/i2c/max9x/max96717.c b/drivers/media/i2c/max9x/max96717.c new file mode 100644 index 0000000..178ad1c --- /dev/null +++ b/drivers/media/i2c/max9x/max96717.c @@ -0,0 +1,600 @@ +/* + * max96717_main.c - Maxim MAX96717 CSI-2 to GMSL2/GMSL1 Serializer + * + * Copyright (c) 2022, D3 Engineering. All rights reserved. + * Copyright (c) 2023, Define Design Deploy Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "max96717.h" + +static const struct regmap_config max96717_regmap_config = { + .reg_bits = 16, + .val_bits = 8, +}; + +// Declarations +static int max96717_set_pipe_csi_enabled(struct max9x_common *common, unsigned int pipe_id, + unsigned int csi_id, bool enable); +static int max96717_video_pipe_double_pixel(struct max9x_common *common, unsigned int pipe_id, unsigned int bpp); +static int max96717_max_elements(struct max9x_common *common, enum max9x_element_type element); +static int max96717_enable_serial_link(struct max9x_common *common, unsigned int link); +static int max96717_disable_serial_link(struct max9x_common *common, unsigned int link); +static int max96717_enable(struct max9x_common *common); +static int max96717_disable(struct max9x_common *common); +static int max96717_pixel_mode(struct max9x_common *common, bool pixel); + +static struct max9x_common *from_gpio_chip(struct gpio_chip *chip); +static int max96717_gpio_get_direction(struct gpio_chip *chip, unsigned int offset); +static int max96717_gpio_direction_input(struct gpio_chip *chip, unsigned int offset); +static int max96717_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value); +static int max96717_gpio_get(struct gpio_chip *chip, unsigned int offset); +static void max96717_gpio_set(struct gpio_chip *chip, unsigned int offset, int value); +static int max96717_setup_gpio(struct max9x_common *common); + +static struct 
max9x_common_ops max96717_common_ops = { + .max_elements = max96717_max_elements, + .enable = max96717_enable, + .disable = max96717_disable, +}; + +static struct max9x_serial_link_ops max96717_serial_link_ops = { + .enable = max96717_enable_serial_link, + .disable = max96717_disable_serial_link, +}; + +static struct max9x_translation_ops max96717_translation_ops; + +static struct max9x_common *from_gpio_chip(struct gpio_chip *chip) +{ + return container_of(chip, struct max9x_common, gpio_chip); +} + +static int max96717_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + unsigned int val; + int ret; + + ret = regmap_read(map, MAX96717_GPIO_A(offset), &val); + if (ret) + return ret; + + return (FIELD_GET(MAX96717_GPIO_A_OUT_DIS_FIELD, val) == 0U ? + GPIOD_OUT_LOW : GPIOD_IN); +} + +static int max96717_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + + return regmap_update_bits(map, MAX96717_GPIO_A(offset), + MAX96717_GPIO_A_OUT_DIS_FIELD, + MAX9X_FIELD_PREP(MAX96717_GPIO_A_OUT_DIS_FIELD, 1U)); +} + +static int max96717_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct max9x_common *common = from_gpio_chip(chip); + struct regmap *map = common->map; + unsigned int mask = 0; + unsigned int val; + + mask = MAX96717_GPIO_A_OUT_DIS_FIELD | MAX96717_GPIO_A_OUT_FIELD | + MAX96717_GPIO_A_RX_EN_FIELD; + + // Enable the GPIO as an output + val = MAX9X_FIELD_PREP(MAX96717_GPIO_A_OUT_DIS_FIELD, 0U); + // Write out the initial value to the GPIO + val |= MAX9X_FIELD_PREP(MAX96717_GPIO_A_OUT_FIELD, (value == 0 ? 
0U : 1U));
+	// Disable remote control over SerDes link
+	val |= MAX9X_FIELD_PREP(MAX96717_GPIO_A_RX_EN_FIELD, 0U);
+
+	return regmap_update_bits(map, MAX96717_GPIO_A(offset), mask, val);
+}
+
+static int max96717_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	struct max9x_common *common = from_gpio_chip(chip);
+	struct regmap *map = common->map;
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(map, MAX96717_GPIO_A(offset), &val);
+	if (ret)
+		return ret;
+
+	return FIELD_GET(MAX96717_GPIO_A_IN_FIELD, val);
+}
+
+static void max96717_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+	struct max9x_common *common = from_gpio_chip(chip);
+	struct regmap *map = common->map;
+
+	regmap_update_bits(map, MAX96717_GPIO_A(offset),
+			   MAX96717_GPIO_A_OUT_FIELD,
+			   MAX9X_FIELD_PREP(MAX96717_GPIO_A_OUT_FIELD, (value == 0 ? 0U : 1U)));
+}
+
+static int max96717_setup_gpio(struct max9x_common *common)
+{
+	struct device *dev = common->dev;
+	int ret;
+	struct max9x_gpio_pdata *gpio_pdata = NULL;
+
+	if (common->dev->platform_data) {
+		struct max9x_pdata *pdata = common->dev->platform_data;
+		gpio_pdata = &pdata->gpio;
+	}
+
+	// Functions
+	if (gpio_pdata && gpio_pdata->label)
+		common->gpio_chip.label = gpio_pdata->label;
+	else
+		common->gpio_chip.label = dev_name(dev);
+
+	dev_dbg(dev, "gpio_chip label is %s, dev_name is %s",
+		common->gpio_chip.label, dev_name(dev));
+
+	// Functions
+	common->gpio_chip.label = MAX96717_NAME;
+	common->gpio_chip.parent = dev;
+	common->gpio_chip.get_direction = max96717_gpio_get_direction;
+	common->gpio_chip.direction_input = max96717_gpio_direction_input;
+	common->gpio_chip.direction_output = max96717_gpio_direction_output;
+	common->gpio_chip.get = max96717_gpio_get;
+	common->gpio_chip.set = max96717_gpio_set;
+	common->gpio_chip.ngpio = MAX96717_NUM_GPIO;
+	common->gpio_chip.can_sleep = 1;
+	common->gpio_chip.base = -1;
+	if (gpio_pdata && gpio_pdata->names)
+		common->gpio_chip.names = gpio_pdata->names;
+
+
ret = devm_gpiochip_add_data(dev, &common->gpio_chip, common); + if (ret < 0) { + dev_err(dev, "gpio_init: Failed to add max96717_gpio\n"); + return ret; + } + + return 0; +} + +static int max96717_set_pipe_csi_enabled(struct max9x_common *common, + unsigned int pipe_id, + unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + dev_dbg(dev, "Video-pipe %d, csi %d: %s, %d lanes", \ + pipe_id, csi_id, (enable ? "enable" : "disable"), \ + common->csi_link[csi_id].config.num_lanes); + + // Select number of lanes for CSI port csi_id + ret = regmap_update_bits(map, MAX96717_MIPI_RX_1, + MAX96717_MIPI_RX_1_SEL_CSI_LANES_FIELD(csi_id), + MAX9X_FIELD_PREP(MAX96717_MIPI_RX_1_SEL_CSI_LANES_FIELD(csi_id), + common->csi_link[csi_id].config.num_lanes - 1)); + if (ret) + return ret; + + // Select CSI port csi_id for video pipe pipe_id + ret = regmap_update_bits(map, MAX96717_FRONTTOP_0, + MAX96717_FRONTTOP_0_SEL_CSI_FIELD(pipe_id) + | MAX96717_FRONTTOP_0_START_CSI_FIELD(csi_id), + MAX9X_FIELD_PREP(MAX96717_FRONTTOP_0_SEL_CSI_FIELD(pipe_id), csi_id) + | MAX9X_FIELD_PREP(MAX96717_FRONTTOP_0_START_CSI_FIELD(csi_id), enable ? 1U : 0U)); + if (ret) + return ret; + + // Start video pipe pipe_id from CSI port csi_id + ret = regmap_update_bits(map, MAX96717_FRONTTOP_9, + MAX96717_FRONTTOP_9_START_VIDEO_FIELD(pipe_id, csi_id), + MAX9X_FIELD_PREP(MAX96717_FRONTTOP_9_START_VIDEO_FIELD(pipe_id, csi_id), enable ? 1U : 0U)); + if (ret) + return ret; + + return 0; +} + +/** + * max96717_video_pipe_double_pixel() - Configure Double Loading Mode on a video pipe + * @common: max9x_common + * @pipe_id: Target pipe's ID + * @bpp: Original BPP to double. This can be 0 (disables), 8, 10, or 12. + * + * Double loading mode squeezes two input pixels together such that they are + * treated as a single pixel by the video pipe. Using this method increases + * bandwidth efficiency. 
+ * + * See: GMSL2 Customers User Guide Section 30.5.1.1.1.2 "Double Loading Mode" + * See: GMSL2 Customers User Guide Section 43.3.4.5.1 "Double Mode (Serializer)" + */ +static int max96717_video_pipe_double_pixel(struct max9x_common *common, + unsigned int pipe_id, unsigned int bpp) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + unsigned int reg; + unsigned int fields; + unsigned int vals; + unsigned int dbl_bpp; + + // Clear all the double pixel mode fields + ret = regmap_update_bits(map, + MAX96717_FRONTTOP_10, + MAX96717_FRONTTOP_10_DBL8_FIELD(pipe_id), + 0x0); + if (ret) + return ret; + + ret = regmap_update_bits(map, + MAX96717_FRONTTOP_11, + MAX96717_FRONTTOP_11_DBL10_FIELD(pipe_id) | + MAX96717_FRONTTOP_11_DBL12_FIELD(pipe_id), + 0x0); + if (ret) + return ret; + + // Enable/disable double pixel mode for pipe + switch (bpp) { + case 0: + //bpp not used on this pipe, but still valid input + break; + case 8: + reg = MAX96717_FRONTTOP_10; + fields = MAX96717_FRONTTOP_10_DBL8_FIELD(pipe_id); + break; + case 10: + reg = MAX96717_FRONTTOP_11; + fields = MAX96717_FRONTTOP_11_DBL10_FIELD(pipe_id); + break; + case 12: + reg = MAX96717_FRONTTOP_11; + fields = MAX96717_FRONTTOP_11_DBL12_FIELD(pipe_id); + break; + default: + dev_err(dev, "Unsupported BPP for pixel doubling: %u", bpp); + return -EINVAL; + } + + // Enable pixel doubling for specified pipe + if (bpp != 0) { + dev_info(dev, "Configure double loading mode for pipe %u (%ubpp -> %ubpp)", + pipe_id, bpp, (bpp * 2)); + ret = regmap_update_bits(map, reg, fields, 0xFF); + if (ret) + return ret; + } + + // Enable software BPP override and set BPP value + dbl_bpp = bpp * 2; + + // Override output bpp + reg = MAX96717_FRONTTOP_2X(pipe_id); + + fields = MAX96717_FRONTTOP_2X_BPP_EN_FIELD; + vals = MAX9X_FIELD_PREP(MAX96717_FRONTTOP_2X_BPP_EN_FIELD, bpp == 0 ? 
0U : 1U); + + fields |= MAX96717_FRONTTOP_2X_BPP_FIELD; + vals |= MAX9X_FIELD_PREP(MAX96717_FRONTTOP_2X_BPP_FIELD, dbl_bpp); + + return regmap_update_bits(map, reg, fields, vals); +} + +static int max96717_max_elements(struct max9x_common *common, + enum max9x_element_type element) +{ + switch (element) { + case MAX9X_SERIAL_LINK: + return MAX96717_NUM_SERIAL_LINKS; + case MAX9X_VIDEO_PIPE: + return MAX96717_NUM_VIDEO_PIPES; + case MAX9X_MIPI_MAP: + return MAX96717_NUM_MIPI_MAPS; + case MAX9X_CSI_LINK: + return MAX96717_NUM_CSI_LINKS; + default: + break; + } + + return 0; +} + +static int max96717_enable_serial_link(struct max9x_common *common, + unsigned int link_id) +{ + struct device *dev = common->dev; + unsigned int pipe_id; + int ret; + + dev_info(dev, "Link %d: Enable", link_id); + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + ret = max96717_set_pipe_csi_enabled(common, pipe_id, + config->src_csi, true); + if (ret) + return ret; + ret = max96717_video_pipe_double_pixel(common, pipe_id, + config->dbl_pixel_bpp); + if (ret) + return ret; + } + + return 0; +} + +static int max96717_disable_serial_link(struct max9x_common *common, + unsigned int link_id) +{ + struct device *dev = common->dev; + unsigned int pipe_id; + int ret; + + dev_info(dev, "Link %d: Disable", link_id); + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + + ret = max96717_set_pipe_csi_enabled(common, pipe_id, + config->src_csi, false); + if (ret) + return ret; + } + + return 0; +} + +static int max96717_disable(struct max9x_common *common) +{ + struct device *dev = common->dev; + + dev_dbg(dev, "Disable"); + + return 0; +} + +static int 
max96717_pixel_mode(struct max9x_common *common, bool pixel)
+{
+	struct device *dev = common->dev;
+	struct regmap *map = common->map;
+
+	dev_dbg(dev, "Pixel mode: %d", pixel);
+
+	// Register actually enables tunnel mode. Clearing the bit "enables" pixel mode.
+	return regmap_write(map, MAX96717_EXT11,
+			    MAX9X_FIELD_PREP(MAX96717_TUNNEL_MODE, !pixel));
+}
+
+/**
+ * Enable the MAX96717 to replicate a frame sync signal from the deserializer.
+ * NOTE: Currently the MAX96717 driver supports frame sync across its
+ * GPIO8.
+ */
+static int max96717_enable_frame_sync(struct max9x_common *common)
+{
+	struct device_node *node = common->dev->of_node;
+	struct device *dev = common->dev;
+	struct regmap *map = common->map;
+
+	int ret;
+	u32 deserializer_tx_id;
+
+	ret = of_property_read_u32(node, "fsync-tx-id", &deserializer_tx_id);
+	// Not necessarily problematic, no frame sync tx found
+	if (ret == -ENODATA || ret == -EINVAL) {
+		dev_info(dev, "Frame sync GPIO tx id not found");
+		return 0;
+	}
+	// Other errors are problematic
+	else if (ret < 0) {
+		dev_err(dev, "Failed to read frame sync tx id with err %d",
+			ret);
+		return ret;
+	}
+
+	ret = regmap_write(map, MAX96717_GPIO_C(8), deserializer_tx_id);
+	if (ret) {
+		dev_err(dev, "Failed to write des frame sync id with err: %d",
+			ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int max96717_get_datatype(struct max9x_common *common)
+{
+	struct device_node *node = common->dev->of_node;
+	struct device *dev = common->dev;
+	struct regmap *map = common->map;
+	int ret; u32 datatype;
+
+	ret = of_property_read_u32(node, "data-type", &datatype);
+	if (ret == -ENODATA || ret == -EINVAL) {
+		dev_dbg(dev, "Data-type not found not filtering");
+		return regmap_write(map, MAX96717_FRONTTOP_16, 0);
+	}
+	// Other errors are problematic
+	else if (ret < 0) {
+		dev_err(dev, "Problem reading in data-type with err %d", ret);
+		return ret;
+	}
+
+	dev_dbg(dev, "Setting image data type to %x", datatype);
+	/* Filter out metadata,
only use the image datatype. + * MAX96717_FRONTTOP_16: mem_dt1_selz. + * Bit 6 is enable + */ + return regmap_write(map, MAX96717_FRONTTOP_16, + (datatype & MAX96717_FRONTTOP_16_FIELD) | MAX96717_FRONTTOP_16_ENABLE); +} + +static int max96717_enable(struct max9x_common *common) +{ + struct device *dev = common->dev; + int ret; + + dev_dbg(dev, "setup gpio"); + ret = max96717_setup_gpio(common); + if (ret) + return ret; + + dev_dbg(dev, "setup datatype"); + ret = max96717_get_datatype(common); + if (ret) + return ret; + + dev_dbg(dev, "setup pixel mode"); + // this driver MUST be in pixel mode as that is what our driver architecture supports + ret = max96717_pixel_mode(common, true); + if (ret) + return ret; + + dev_dbg(dev, "setup frame sync"); + ret = max96717_enable_frame_sync(common); + if (ret) + return ret; + + return 0; +} + +int max96717_get_ops(struct max9x_common_ops **common_ops, struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops) +{ + *common_ops = &max96717_common_ops; + *serial_ops = &max96717_serial_link_ops; + *csi_ops = NULL; + *lf_ops = NULL; + *trans_ops = &max96717_translation_ops; + return 0; +} + +#if 0 +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0) +static int max96717_probe(struct i2c_client *client, const struct i2c_device_id *id) +#else +static int max96717_probe(struct i2c_client *client) +#endif +{ + struct device *dev = &client->dev; + struct max9x_common *ser = NULL; + int ret = 0; + + dev_info(dev, "Probing"); + + ser = devm_kzalloc(dev, sizeof(*ser), GFP_KERNEL); + if (!ser) { + dev_err(dev, "Failed to allocate memory."); + return -ENOMEM; + } + + ret = max9x_common_init_i2c_client( + ser, + client, + &max96717_regmap_config, + &max96717_common_ops, + &max96717_serial_link_ops, + NULL, /* csi_link_os */ + NULL); + if (ret) + return ret; + + return 0; + +err: + max9x_destroy(ser); + return ret; +} + +#if LINUX_VERSION_CODE 
< KERNEL_VERSION(6, 1, 0) +static int max96717_remove(struct i2c_client *client) +#else +static void max96717_remove(struct i2c_client *client) +#endif +{ + struct device *dev = &client->dev; + struct max9x_common *ser = NULL; + + dev_info(dev, "Removing"); + + ser = max9x_client_to_common(client); + + max9x_destroy(ser); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + return 0; +#endif +} + +static struct i2c_device_id max96717_idtable[] = { + {MAX96717_NAME, 0}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, max96717_idtable); + +static struct of_device_id max96717_of_match[] = { + { .compatible = MAX96717_NAME}, + {}, +}; +MODULE_DEVICE_TABLE(of, max96717_of_match); + +static struct i2c_driver max96717_driver = { + .driver = { + .name = MAX96717_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(max96717_of_match), + }, + .probe = max96717_probe, + .remove = max96717_remove, + .id_table = max96717_idtable, +}; + +module_i2c_driver(max96717_driver); +#endif +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cody Burrows "); +MODULE_AUTHOR("Josh Watts "); +MODULE_AUTHOR("He Jiabin "); +MODULE_DESCRIPTION("Maxim MAX96717 CSI-2 to GMSL2 Serializer driver"); diff --git a/drivers/media/i2c/max9x/max96717.h b/drivers/media/i2c/max9x/max96717.h new file mode 100644 index 0000000..23e06e5 --- /dev/null +++ b/drivers/media/i2c/max9x/max96717.h @@ -0,0 +1,91 @@ +/* + * max96717.h - Maxim MAX96717 registers and constants. + * + * Copyright (c) 2022, D3 Engineering. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. + +#ifndef _MAX96717_H_ +#define _MAX96717_H_ + +#include +#include "serdes.h" + +#define MAX96717_NAME "max96717" + +enum max96717_gpio_out_type { + MAX96717_GPIO_OUT_TYPE_OPEN_DRAIN = 0, + MAX96717_GPIO_OUT_TYPE_PUSH_PULL = 1, +}; + +enum max96717_gpio_pull_updn_sel { + MAX96717_GPIO_PULL_UPDN_SEL_NONE = 0, + MAX96717_GPIO_PULL_UPDN_SEL_UP = 1, + MAX96717_GPIO_PULL_UPDN_SEL_DOWN = 2, +}; + +#define MAX96717_NUM_SERIAL_LINKS 1 +#define MAX96717_NUM_VIDEO_PIPES 1 +#define MAX96717_NUM_MIPI_MAPS 1 +#define MAX96717_NUM_CSI_LINKS 1 +#define MAX96717_NUM_GPIO 11 + +#define MAX96717_GPIO(gpio) (0x2BE + ((gpio) * 3)) +#define MAX96717_GPIO_A(gpio) (MAX96717_GPIO(gpio) + 0) +#define MAX96717_GPIO_A_OUT_DIS_FIELD BIT(0) +#define MAX96717_GPIO_A_TX_EN_FIELD BIT(1) +#define MAX96717_GPIO_A_RX_EN_FIELD BIT(2) +#define MAX96717_GPIO_A_IN_FIELD BIT(3) +#define MAX96717_GPIO_A_OUT_FIELD BIT(4) +#define MAX96717_GPIO_A_RES_CFG_FIELD BIT(7) +#define MAX96717_GPIO_B(gpio) (MAX96717_GPIO(gpio) + 1) +#define MAX96717_GPIO_B_TX_ID GENMASK(4, 0) +#define MAX96717_GPIO_B_OUT_TYPE_FIELD BIT(5) +#define MAX96717_GPIO_B_PULL_UPDN_SEL_FIELD GENMASK(7, 6) +#define MAX96717_GPIO_C(gpio) (MAX96717_GPIO(gpio) + 2) +#define MAX96717_GPIO_C_RX_ID GENMASK(4, 0) + +/* + * Some macros refer to a pipe despite not using it + * this is to make code easier to port between drivers + * in spite of the max96717 only having 1 video pipe + */ +#define MAX96717_FRONTTOP_0 (0x308) +#define MAX96717_FRONTTOP_0_SEL_CSI_FIELD(pipe_id) BIT(pipe_id + 2) +#define MAX96717_FRONTTOP_0_START_CSI_FIELD(csi_id) BIT((csi_id) + 5) +#define MAX96717_FRONTTOP_9 (0x311) +#define MAX96717_FRONTTOP_9_START_VIDEO_FIELD(pipe_id, csi_id) BIT((pipe_id + 2) + 4 * (csi_id + 1)) +#define MAX96717_FRONTTOP_10 (0x312) +#define 
MAX96717_FRONTTOP_10_DBL8_FIELD(pipe_id) BIT(2) +#define MAX96717_FRONTTOP_11 (0x313) +#define MAX96717_FRONTTOP_11_DBL10_FIELD(pipe_id) BIT(2) +#define MAX96717_FRONTTOP_11_DBL12_FIELD(pipe_id) BIT(6) +#define MAX96717_FRONTTOP_16 (0x318) +#define MAX96717_FRONTTOP_16_FIELD GENMASK(5, 0) +#define MAX96717_FRONTTOP_16_ENABLE BIT(6) +#define MAX96717_FRONTTOP_2X(pipe_id) (0x31E) +#define MAX96717_FRONTTOP_2X_BPP_EN_FIELD BIT(5) +#define MAX96717_FRONTTOP_2X_BPP_FIELD GENMASK(4, 0) + + +#define MAX96717_MIPI_RX (0x330) +#define MAX96717_MIPI_RX_1 (MAX96717_MIPI_RX + 1) +#define MAX96717_MIPI_RX_1_SEL_CSI_LANES_FIELD(csi_id) (GENMASK(1, 0) << ((csi_id + 1) * 4)) + +#define MAX96717_EXT11 (0x383) +#define MAX96717_TUNNEL_MODE BIT(7) + +#endif /* _MAX96717_H_ */ diff --git a/drivers/media/i2c/max9x/max96724.c b/drivers/media/i2c/max9x/max96724.c new file mode 100644 index 0000000..3459a27 --- /dev/null +++ b/drivers/media/i2c/max9x/max96724.c @@ -0,0 +1,1168 @@ +/* + * max96724.c - Maxim MAX96724 GMSL2/GMSL1 to CSI-2 Deserializer + * + * Copyright (c) 2023-2024, Define Design Deploy Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "max96724.h" + +// Params +int max96724_serial_link_timeout_ms = MAX96724_DEFAULT_SERIAL_LINK_TIMEOUT_MS; +module_param(max96724_serial_link_timeout_ms, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(max96724_serial_link_timeout_ms, "Timeout for serial link in milliseconds"); + + +// Declarations +static int max96724_set_phy_mode(struct max9x_common *common, unsigned int phy_mode); +static int max96724_set_phy_enabled(struct max9x_common *common, unsigned int csi_id, bool enable); +static int max96724_set_phy_lane_map(struct max9x_common *common, unsigned int csi_id, unsigned int phy_lane_map); +static int max96724_set_phy_dpll_freq(struct max9x_common *common, unsigned int csi_id, unsigned int freq_mhz); +static int max96724_set_mipi_lane_cnt(struct max9x_common *common, unsigned int csi_id, int num_lanes); +static int max96724_set_initial_deskew(struct max9x_common *common, unsigned int csi_id, bool enable); +static int max96724_configure_csi_dphy(struct max9x_common *common); +static int max96724_enable(struct max9x_common *common); +static int max96724_max_elements(struct max9x_common *common, enum max9x_element_type element); +static int max96724_get_serial_link_lock(struct max9x_common *common, unsigned int link_id, bool *locked); +static int max96724_set_serial_link_state(struct max9x_common *common, unsigned int link_id, bool enable); +static int max96724_serial_link_reset(struct max9x_common *common, unsigned int link_id); +static int max96724_set_serial_link_rate(struct max9x_common *common, unsigned int link_id); +static int max96724_set_video_pipe_src(struct max9x_common *common, unsigned int pipe_id, + unsigned int link_id, unsigned int src_pipe); +static int max96724_set_video_pipe_maps_enabled(struct max9x_common *common, unsigned int pipe_id, int num_maps); +static int max96724_set_video_pipe_map(struct max9x_common 
*common, unsigned int pipe_id, unsigned int map_id, + struct max9x_serdes_mipi_map *mipi_map); +static int max96724_set_csi_link_enabled(struct max9x_common *common, unsigned int csi_id, bool enable); +static int max96724_csi_double_pixel(struct max9x_common *common, unsigned int csi_id, unsigned int bpp); +static int max96724_set_video_pipe_enabled(struct max9x_common *common, unsigned int pipe_id, bool enable); +static int max96724_set_serial_link_routing(struct max9x_common *common, unsigned int link_id); +static int max96724_disable_serial_link(struct max9x_common *common, unsigned int link_id); +static int max96724_enable_serial_link(struct max9x_common *common, unsigned int link_id); +static int max96724_set_remote_control_channel_enabled(struct max9x_common *common, unsigned int link_id, bool enabled); +static int max96724_select_serial_link(struct max9x_common *common, unsigned int link); +static int max96724_deselect_serial_link(struct max9x_common *common, unsigned int link); +static int max96724_enable_native_frame_sync(struct max9x_common *common); +static int max96724_enable_gpio_frame_sync(struct max9x_common *common); +static int max96724_disable_line_fault(struct max9x_common *common, unsigned int line); +static int max96724_enable_line_fault(struct max9x_common *common, unsigned int line); +static int max96724_set_line_fault(struct max9x_common *common, unsigned int line, bool enable); +static int max96724_get_line_fault(struct max9x_common *common, unsigned int line); + +// Functions +static int max96724_set_phy_mode(struct max9x_common *common, unsigned int phy_mode) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI: phy_mode=%d", phy_mode); + + return regmap_update_bits(map, MAX96724_MIPI_PHY0, + MAX96724_MIPI_PHY0_MODE_FIELD, + MAX9X_FIELD_PREP(MAX96724_MIPI_PHY0_MODE_FIELD, phy_mode)); +} + +static int max96724_set_phy_enabled(struct max9x_common *common, + unsigned int csi_id, bool enable) +{ + 
struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + struct mutex *lock = &common->link_mutex; + + mutex_lock(lock); + dev_dbg(dev, "CSI link %d: %s", csi_id, (enable ? "enable" : "disable")); + + ret = regmap_update_bits(map, MAX96724_MIPI_PHY_ENABLE, + MAX96724_MIPI_PHY_ENABLE_FIELD(csi_id), + MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_ENABLE_FIELD(csi_id), enable ? 1U : 0U)); + mutex_unlock(lock); + return ret; +} + +static int max96724_set_phy_lane_map(struct max9x_common *common, + unsigned int csi_id, unsigned int phy_lane_map) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: phy_lane_map=0x%04x", csi_id, phy_lane_map); + + return regmap_update_bits(map, MAX96724_MIPI_PHY_LANE_MAP(csi_id), + MAX96724_MIPI_PHY_LANE_MAP_FIELD(csi_id, 0) + | MAX96724_MIPI_PHY_LANE_MAP_FIELD(csi_id, 1), + phy_lane_map); +} + +static int max96724_set_phy_dpll_freq(struct max9x_common *common, + unsigned int csi_id, unsigned int freq_mhz) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "CSI link %d: freq %u MHz, %u mult", csi_id, freq_mhz, + freq_mhz / MAX96724_DPLL_FREQ_MHZ_MULTIPLE); + + return regmap_update_bits(map, MAX96724_DPLL_FREQ(csi_id), + MAX96724_DPLL_FREQ_FIELD, + MAX9X_FIELD_PREP(MAX96724_DPLL_FREQ_FIELD, freq_mhz / MAX96724_DPLL_FREQ_MHZ_MULTIPLE)); +} + +static int max96724_set_initial_deskew(struct max9x_common *common, unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int val; + unsigned int width; + + dev_dbg(dev, "CSI link %d: Initial deskew %s", csi_id, enable ? 
"enabled" : "disabled"); + + val = MAX9X_FIELD_PREP(MAX96724_MIPI_TX_DESKEW_INIT_AUTO_EN, enable); + + if (enable) { + width = common->csi_link[csi_id].config.initial_deskew_width; + dev_dbg(dev, "Initial deskew width: 0x%03x", width); + + if (width > MAX96724_INIT_DESKEW_WIDTH_MAX) { + dev_err(dev, "Unsupported initial deskew width!"); + return -EINVAL; + } + + val |= MAX9X_FIELD_PREP(MAX96724_MIPI_TX_DESKEW_WIDTH_FIELD, width); + } + + return regmap_write(map, MAX96724_MIPI_TX_DESKEW_INIT(csi_id), val); +} + +static int max96724_set_mipi_lane_cnt(struct max9x_common *common, + unsigned int csi_id, int num_lanes) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + + dev_dbg(dev, "CSI link %d: %d lanes", csi_id, num_lanes); + + ret = regmap_update_bits(map, MAX96724_MIPI_TX_LANE_CNT(csi_id), + MAX96724_MIPI_TX_VCX_EN_FIELD, + MAX9X_FIELD_PREP(MAX96724_MIPI_TX_VCX_EN_FIELD, MAX96724_VC_2_BITS)); + + if (ret) { + dev_err(dev, "Failed to configure virtual channel extension!"); + return ret; + } + + ret = regmap_update_bits( + map, MAX96724_MIPI_TX_LANE_CNT(csi_id), + MAX96724_MIPI_TX_CPHY_EN_FIELD, + MAX9X_FIELD_PREP(MAX96724_MIPI_TX_CPHY_EN_FIELD, + common->csi_link[csi_id].config.bus_type == + V4L2_MBUS_CSI2_CPHY ? 
+ 1 : + 0)); + + if (ret) { + dev_err(dev, "Failed to enable CSI2 %d to CPHY mode!", csi_id); + return ret; + } + + if (num_lanes > 0) { + ret = regmap_update_bits( + map, MAX96724_MIPI_TX_LANE_CNT(csi_id), + MAX96724_MIPI_TX_LANE_CNT_FIELD, + MAX9X_FIELD_PREP(MAX96724_MIPI_TX_LANE_CNT_FIELD, + (num_lanes - 1))); + + if (ret) { + dev_err(dev, + " Failed to set CSI2 controller %d with %d lanes !", + csi_id, num_lanes); + return ret; + } + } + + return ret; +} + +static int max96724_configure_csi_dphy(struct max9x_common *common) +{ + struct device *dev = common->dev; + unsigned int phy_mode; + unsigned int phy_lane_map[MAX96724_NUM_CSI_LINKS]; + unsigned int csi_id; + int ret; + + for (csi_id = 0; csi_id < MAX96724_NUM_CSI_LINKS; csi_id++) { + dev_dbg(dev, "CSI link %d: enabled=%d, num_lanes=%d, freq_mhz=%d, init_deskew=%d", + csi_id, + common->csi_link[csi_id].enabled, + common->csi_link[csi_id].config.num_lanes, + common->csi_link[csi_id].config.freq_mhz, + common->csi_link[csi_id].config.auto_init_deskew_enabled); + } + + //TODO: Allow DT to override lane mapping? 
+ + // Determine correct phy_mode and associate lane mapping + if (common->csi_link[0].config.num_lanes <= 2 + && common->csi_link[1].config.num_lanes <= 2 + && common->csi_link[2].config.num_lanes <= 2 + && common->csi_link[3].config.num_lanes <= 2) { + + phy_mode = MAX96724_MIPI_PHY_4X2; + + phy_lane_map[0] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 1), 1); + phy_lane_map[1] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 1), 1); + phy_lane_map[2] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 1), 1); + phy_lane_map[3] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 1), 1); + + } else if (common->csi_link[0].config.num_lanes == 0 + && common->csi_link[1].config.num_lanes >= 3 + && common->csi_link[2].config.num_lanes >= 3 + && common->csi_link[3].config.num_lanes == 0) { + + phy_mode = MAX96724_MIPI_PHY_2X4; + + phy_lane_map[0] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 1), 1); + phy_lane_map[1] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 0), 2) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 1), 3); + phy_lane_map[2] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 1), 1); + phy_lane_map[3] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 0), 2) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 1), 3); + + } else if (common->csi_link[0].config.num_lanes == 0 + && common->csi_link[1].config.num_lanes >= 3 + && common->csi_link[2].config.num_lanes <= 2 + && common->csi_link[3].config.num_lanes <= 2) { + + phy_mode = MAX96724_MIPI_PHY_1X4A_22; + + phy_lane_map[0] = 
MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 1), 1); + phy_lane_map[1] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 0), 2) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 1), 3); + phy_lane_map[2] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 1), 1); + phy_lane_map[3] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 1), 1); + + } else if (common->csi_link[0].config.num_lanes <= 2 + && common->csi_link[1].config.num_lanes <= 2 + && common->csi_link[2].config.num_lanes >= 3 + && common->csi_link[3].config.num_lanes == 0) { + + phy_mode = MAX96724_MIPI_PHY_1X4B_22; + + phy_lane_map[0] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(0, 1), 1); + phy_lane_map[1] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(1, 1), 1); + phy_lane_map[2] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 0), 0) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(2, 1), 1); + phy_lane_map[3] = MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 0), 2) + | MAX9X_FIELD_PREP(MAX96724_MIPI_PHY_LANE_MAP_FIELD(3, 1), 3); + + } else { + dev_err(dev, "Invalid CSI configuration! 
Supported modes: 4x2, 2x4, 1x4+2x2, 2x2+1x4"); + return -EINVAL; + } + + ret = max96724_set_phy_mode(common, phy_mode); + if (ret) + return ret; + + for (csi_id = 0; csi_id < MAX96724_NUM_CSI_LINKS; csi_id++) { + struct max9x_serdes_csi_config *config = &common->csi_link[csi_id].config; + + ret = max96724_set_phy_enabled(common, csi_id, false); + if (ret) + return ret; + + ret = max96724_set_phy_lane_map(common, csi_id, phy_lane_map[csi_id]); + if (ret) + return ret; + + ret = max96724_set_mipi_lane_cnt(common, csi_id, + common->csi_link[csi_id].config.num_lanes); + if (ret) + return ret; + + ret = max96724_set_initial_deskew(common, csi_id, + common->csi_link[csi_id].config.auto_init_deskew_enabled); + if (ret) + return ret; + + if (WARN_ONCE(config->freq_mhz > 0 && config->freq_mhz < MAX96724_DPLL_FREQ_MHZ_MULTIPLE, + "CSI frequency must be greater than %d MHz", MAX96724_DPLL_FREQ_MHZ_MULTIPLE)) + return -EINVAL; + + ret = max96724_set_phy_dpll_freq(common, csi_id, config->freq_mhz); + if (ret) + return ret; + } + + return 0; +} + +static int max96724_set_all_reset(struct max9x_common *common, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "Reset %s", (enable ? "enable" : "disable")); + + return regmap_update_bits(map, MAX96724_RESET_ALL, + MAX96724_RESET_ALL_FIELD, + MAX9X_FIELD_PREP(MAX96724_RESET_ALL_FIELD, enable ? 
1U : 0U)); +} + +static int max96724_soft_reset(struct max9x_common *common) +{ + struct device *dev = common->dev; + int ret; + + dev_dbg(dev, "Soft reset"); + + ret = max96724_set_all_reset(common, 1); + if (ret) + return ret; + + msleep(1); + + ret = max96724_set_all_reset(common, 0); + if (ret) + return ret; + + return 0; +} + +static int max96724_max_elements(struct max9x_common *common, enum max9x_element_type element) +{ + switch (element) { + case MAX9X_SERIAL_LINK: + return MAX96724_NUM_SERIAL_LINKS; + case MAX9X_VIDEO_PIPE: + return MAX96724_NUM_VIDEO_PIPES; + case MAX9X_MIPI_MAP: + return MAX96724_NUM_MIPI_MAPS; + case MAX9X_CSI_LINK: + return MAX96724_NUM_CSI_LINKS; + case MAX9X_LINE_FAULT: + return MAX96724_NUM_LINE_FAULTS; + default: + break; + } + + return 0; +} + +static struct max9x_common_ops max96724_common_ops = { + .enable = max96724_enable, + .soft_reset = max96724_soft_reset, + .max_elements = max96724_max_elements, +}; + +static int max96724_get_serial_link_lock(struct max9x_common *common, unsigned int link_id, bool *locked) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int val; + int ret; + + ret = regmap_read(map, MAX96724_PHY_LOCKED(link_id), &val); + if (ret) { + dev_err(dev, "failed to read link lock with err %d", ret); + return ret; + } + + if (FIELD_GET(MAX96724_PHY_LOCKED_FIELD, val) != 0) + *locked = true; + else + *locked = false; + + return 0; +} + +static int max96724_set_serial_link_state(struct max9x_common *common, unsigned int link_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + enum max9x_serdes_link_type link_type = common->serial_link[link_id].config.link_type; + int ret; + struct mutex *lock = &common->link_mutex; + + mutex_lock(lock); + dev_dbg(dev, "Serial-link %d: %s", link_id, (enable ? 
"up" : "down")); + + ret = regmap_update_bits(map, MAX96724_LINK_CTRL, + MAX96724_LINK_CTRL_EN_FIELD(link_id) + | MAX96724_LINK_CTRL_GMSL_FIELD(link_id), + MAX9X_FIELD_PREP(MAX96724_LINK_CTRL_EN_FIELD(link_id), enable ? 1U : 0U) + | MAX9X_FIELD_PREP(MAX96724_LINK_CTRL_GMSL_FIELD(link_id), link_type == MAX9X_LINK_TYPE_GMSL2 ? 1U : 0U)); + mutex_unlock(lock); + return ret; +} + +static int max96724_serial_link_reset(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + struct mutex *lock = &common->link_mutex; + + mutex_lock(lock); + dev_dbg(dev, "Serial-link %d reset", link_id); + + ret = regmap_update_bits(map, MAX96724_RESET_CTRL, + MAX96724_RESET_CTRL_FIELD(link_id), + MAX9X_FIELD_PREP(MAX96724_RESET_CTRL_FIELD(link_id), 1U)); + mutex_unlock(lock); + + return ret; +} + +static int max96724_set_serial_link_rate(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + struct max9x_serdes_serial_config *config = &common->serial_link[link_id].config; + unsigned int tx_rate, rx_rate; + + tx_rate = max9x_serdes_mhz_to_rate(max96724_tx_rates, ARRAY_SIZE(max96724_tx_rates), config->tx_freq_mhz); + if (tx_rate < 0) + return tx_rate; + + rx_rate = max9x_serdes_mhz_to_rate(max96724_rx_rates, ARRAY_SIZE(max96724_rx_rates), config->rx_freq_mhz); + if (rx_rate < 0) + return rx_rate; + + dev_dbg(dev, "Serial-link %d: TX=%d MHz RX=%d MHz", link_id, config->tx_freq_mhz, config->rx_freq_mhz); + + return regmap_update_bits(map, MAX96724_PHY_RATE_CTRL(link_id), + MAX96724_PHY_RATE_CTRL_TX_FIELD(link_id) + | MAX96724_PHY_RATE_CTRL_RX_FIELD(link_id), + MAX9X_FIELD_PREP(MAX96724_PHY_RATE_CTRL_TX_FIELD(link_id), tx_rate) + | MAX9X_FIELD_PREP(MAX96724_PHY_RATE_CTRL_RX_FIELD(link_id), rx_rate)); +} + +static int max96724_set_video_pipe_src(struct max9x_common *common, + unsigned int pipe_id, unsigned int link_id, unsigned int src_pipe) 
+{ + struct device *dev = common->dev; + struct regmap *map = common->map; + + dev_dbg(dev, "Video-pipe %d: src_link=%u, src_pipe=%u", pipe_id, link_id, src_pipe); + + return regmap_update_bits(map, MAX96724_VIDEO_PIPE_SEL(pipe_id), + MAX96724_VIDEO_PIPE_SEL_LINK_FIELD(pipe_id) + | MAX96724_VIDEO_PIPE_SEL_INPUT_FIELD(pipe_id), + MAX9X_FIELD_PREP(MAX96724_VIDEO_PIPE_SEL_LINK_FIELD(pipe_id), link_id) + | MAX9X_FIELD_PREP(MAX96724_VIDEO_PIPE_SEL_INPUT_FIELD(pipe_id), src_pipe)); +} + +static int max96724_set_video_pipe_maps_enabled(struct max9x_common *common, + unsigned int pipe_id, int num_maps) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int val = 0; + int ret = 0; + struct mutex *lock = &common->link_mutex; + + if (num_maps > 0) + val = GENMASK(num_maps - 1, 0); + + mutex_lock(lock); + dev_dbg(dev, "Video-pipe %d: num_maps=%u", pipe_id, num_maps); + + ret = regmap_write(map, MAX96724_MAP_EN_L(pipe_id), + MAX9X_FIELD_PREP(MAX96724_MAP_EN_FIELD, val)); + if (ret) + goto unlock_exit; + + ret = regmap_write(map, MAX96724_MAP_EN_H(pipe_id), + MAX9X_FIELD_PREP(MAX96724_MAP_EN_FIELD, val >> 8)); + if (ret) + goto unlock_exit; + +unlock_exit: + mutex_unlock(lock); + return ret; +} + +static int max96724_set_video_pipe_map(struct max9x_common *common, + unsigned int pipe_id, unsigned int map_id, + struct max9x_serdes_mipi_map *mipi_map) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret = 0; + struct mutex *lock = &common->link_mutex; + + mutex_lock(lock); + dev_dbg(dev, "Video-pipe %d, map %d: VC%d:DT%02x->VC%d:DT%02x, dst_csi=%d ", + pipe_id, map_id, + mipi_map->src_vc, + mipi_map->src_dt, + mipi_map->dst_vc, + mipi_map->dst_dt, + mipi_map->dst_csi); + + ret = regmap_write(map, MAX96724_MAP_SRC_L(pipe_id, map_id), + MAX9X_FIELD_PREP(MAX96724_MAP_SRC_L_VC_FIELD, mipi_map->src_vc) + | MAX9X_FIELD_PREP(MAX96724_MAP_SRC_L_DT_FIELD, mipi_map->src_dt)); + if (ret) + goto unlock_exit; + + ret = 
regmap_write(map, MAX96724_MAP_DST_L(pipe_id, map_id), + MAX9X_FIELD_PREP(MAX96724_MAP_DST_L_VC_FIELD, mipi_map->dst_vc) + | MAX9X_FIELD_PREP(MAX96724_MAP_DST_L_DT_FIELD, mipi_map->dst_dt)); + if (ret) + goto unlock_exit; + + ret = regmap_write(map, MAX96724_MAP_SRCDST_H(pipe_id, map_id), + MAX9X_FIELD_PREP(MAX96724_MAP_SRCDST_H_SRC_VC_FIELD, mipi_map->src_vc) + | MAX9X_FIELD_PREP(MAX96724_MAP_SRCDST_H_DST_VC_FIELD, mipi_map->dst_vc)); + if (ret) + goto unlock_exit; + + ret = regmap_update_bits(map, MAX96724_MAP_DPHY_DEST(pipe_id, map_id), + MAX96724_MAP_DPHY_DEST_FIELD(map_id), + MAX9X_FIELD_PREP(MAX96724_MAP_DPHY_DEST_FIELD(map_id), mipi_map->dst_csi)); + if (ret) + goto unlock_exit; + +unlock_exit: + mutex_unlock(lock); + return ret; +} + +static int max96724_set_csi_link_enabled(struct max9x_common *common, + unsigned int csi_id, bool enable) +{ + struct device *dev = common->dev; + struct max9x_serdes_csi_link *csi_link; + int ret; + + if (csi_id > common->num_csi_links) + return -EINVAL; + + csi_link = &common->csi_link[csi_id]; + + if (WARN_ONCE(enable && csi_link->enabled == false, + "Tried to enable a disabled CSI port???")) + return -EINVAL; + + if (WARN_ONCE(enable && csi_link->config.num_lanes == 0, + "Tried to enable CSI port with no lanes???")) + return -EINVAL; + + // Keep track of number of enabled maps using this CSI link + if (enable) + csi_link->usecount++; + else if (csi_link->usecount > 0) + csi_link->usecount--; + + dev_dbg(dev, "CSI link %d: %s (%d users)", csi_id, + (enable ? 
"enable" : "disable"), csi_link->usecount); + + if (enable && csi_link->usecount == 1) { + // Enable && first user + + ret = max96724_set_phy_enabled(common, csi_id, true); + if (ret) + return ret; + + } else if (!enable && csi_link->usecount == 0) { + // Disable && no more users + + ret = max96724_set_phy_enabled(common, csi_id, false); + if (ret) + return ret; + + } + + return 0; +} + + +static int max96724_csi_double_pixel(struct max9x_common *common, + unsigned int csi_id, + unsigned int bpp) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + unsigned int value; + int ret; + + dev_info(dev, "CSI ALT Mem mapping for %u bpp on csi %i", bpp, csi_id); + + switch (bpp) { + case 0: + value = 0; + break; + case 8: + value = FIELD_PREP(MAX96724_MIPI_TX_ALT_MEM_8BPP, 1U); + dev_err(dev, "8 BPP currently unsupported for pixel doubling"); + return -EINVAL; + case 10: + value = FIELD_PREP(MAX96724_MIPI_TX_ALT_MEM_10BPP, 1U); + break; + case 12: + value = FIELD_PREP(MAX96724_MIPI_TX_ALT_MEM_12BPP, 1U); + break; + default: + dev_err(dev, "Unsupported BPP for pixel doubling: %u", bpp); + return -EINVAL; + } + + // Enable alt mem mapping + ret = regmap_update_bits( + map, MAX96724_MIPI_TX_ALT_MEM(csi_id), + MAX96724_MIPI_TX_ALT_MEM_FIELD, value); + + return ret; + +} + +static int max96724_set_video_pipe_enabled(struct max9x_common *common, + unsigned int pipe_id, bool enable) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret = 0; + struct mutex *lock = &common->link_mutex; + + mutex_lock(lock); + dev_dbg(dev, "Video-pipe %d: %s", pipe_id, (enable ? "enable" : "disable")); + + // Enable max96712 "legacy" mode + // Non "legacy" mode ignores pipe mapping, and selects all streams for pipe + // 0. The Jetson doesn't know what to do with that and throws spurious data + // stream errors. 
+ ret = regmap_update_bits(map, MAX96724_VIDEO_PIPE_EN, + MAX96724_VIDEO_PIPE_STREAM_SEL_ALL_FIELD, + MAX9X_FIELD_PREP(MAX96724_VIDEO_PIPE_STREAM_SEL_ALL_FIELD, + MAX96724_VIDEO_PIPE_LEGACY_MODE)); + if (ret) { + dev_err(dev, "Failed to clear select all streams bit"); + goto unlock_exit; + } + + ret = regmap_update_bits(map, MAX96724_VIDEO_PIPE_EN, + MAX96724_VIDEO_PIPE_EN_FIELD(pipe_id), + MAX9X_FIELD_PREP(MAX96724_VIDEO_PIPE_EN_FIELD(pipe_id), enable ? 1U : 0U)); + +unlock_exit: + mutex_unlock(lock); + return ret; +} + +static int max96724_set_serial_link_routing(struct max9x_common *common, + unsigned int link_id) +{ + unsigned int pipe_id; + unsigned int map_id; + int ret; + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if (common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + if (config->src_link != link_id) + continue; + + ret = max96724_set_video_pipe_src(common, pipe_id, + config->src_link, + config->src_pipe); + if (ret) + return ret; + + ret = max96724_set_video_pipe_maps_enabled(common, pipe_id, + config->num_maps); + if (ret) + return ret; + + for (map_id = 0; map_id < config->num_maps; map_id++) { + ret = max96724_set_video_pipe_map(common, pipe_id, map_id, + &config->map[map_id]); + if (ret) + return ret; + + ret = max96724_set_csi_link_enabled(common, + config->map[map_id].dst_csi, + true); + if (ret) + return ret; + + ret = max96724_csi_double_pixel(common, + config->map[map_id].dst_csi, + config->dbl_pixel_bpp); + if (ret) + return ret; + } + + ret = max96724_set_video_pipe_enabled(common, pipe_id, true); + if (ret) + return ret; + } + + return 0; +} + +static int max96724_disable_serial_link(struct max9x_common *common, + unsigned int link_id) +{ + unsigned int pipe_id; + unsigned int map_id; + int ret; + + for (pipe_id = 0; pipe_id < common->num_video_pipes; pipe_id++) { + struct max9x_serdes_pipe_config *config; + + if 
(common->video_pipe[pipe_id].enabled == false) + continue; + + config = &common->video_pipe[pipe_id].config; + if (config->src_link != link_id) + continue; + + ret = max96724_set_video_pipe_enabled(common, pipe_id, false); + if (ret) + return ret; + + ret = max96724_set_video_pipe_maps_enabled(common, pipe_id, 0); + if (ret) + return ret; + + for (map_id = 0; map_id < config->num_maps; map_id++) { + ret = max96724_set_csi_link_enabled(common, config->map[map_id].dst_csi, false); + if (ret) + return ret; + } + } + + ret = max96724_set_serial_link_state(common, link_id, false); + if (ret) + return ret; + + return 0; +} + +static int max96724_enable_serial_link(struct max9x_common *common, + unsigned int link_id) +{ + struct device *dev = common->dev; + int ret; + bool locked; + unsigned long timeout; + + if (WARN_ON_ONCE(link_id >= common->num_serial_links)) + return -EINVAL; + + if (WARN_ONCE(common->serial_link[link_id].config.link_type != MAX9X_LINK_TYPE_GMSL2, + "Only GMSL2 is supported!")) + return -EINVAL; + + // GMSL2 + ret = max96724_set_remote_control_channel_enabled(common, link_id, false); + if (ret) + return ret; + + ret = max96724_set_serial_link_state(common, link_id, true); + if (ret) + return ret; + + ret = max96724_set_serial_link_rate(common, link_id); + if (ret) + return ret; + + ret = max96724_serial_link_reset(common, link_id); + if (ret) + return ret; + + ret = max96724_set_serial_link_routing(common, link_id); + if (ret) + return ret; + + timeout = jiffies + msecs_to_jiffies(max96724_serial_link_timeout_ms); + do { + usleep_range(10 * 1000, 25 * 1000); // 10 to 25 ms + ret = max96724_get_serial_link_lock(common, link_id, &locked); + if (ret) + return ret; + } while (!locked && time_before(jiffies, timeout)); + + if (!locked) { + dev_err(dev, "Serial-link %d: Failed to lock!", link_id); + return -ETIMEDOUT; + } + + return 0; +} + +static int max96724_set_remote_control_channel_enabled(struct max9x_common *common, + unsigned int link_id, + bool 
enabled) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret; + struct mutex *lock = &common->link_mutex; + //TODO: Allow DT to choose which port gets enabled: bit 0 disables port 0, bit 1 disables port 1 + //See also 0x0E REG14 DIS_REM_CC_P2[3:0] + + // Note that the register value 1 *disables* port-to-remote command & control + mutex_lock(lock); + dev_dbg(dev, "set rem cc %s", (enabled ? "enable" : "disable")); + if (enabled) { + ret = regmap_write(map, MAX96724_REM_CC, + ~(MAX9X_FIELD_PREP(MAX96724_REM_CC_DIS_PORT_FIELD(link_id, 0), enabled ? 1 : 0))); + } else { + ret = regmap_write(map, MAX96724_REM_CC, ~(0)); + } + + mutex_unlock(lock); + return ret; +} + +static int max96724_select_serial_link(struct max9x_common *common, unsigned int link) +{ + return max96724_set_remote_control_channel_enabled(common, link, true); +} + +static int max96724_deselect_serial_link(struct max9x_common *common, unsigned int link) +{ + return max96724_set_remote_control_channel_enabled(common, link, false); +} + +static struct max9x_serial_link_ops max96724_serial_link_ops = { + .enable = max96724_enable_serial_link, + .disable = max96724_disable_serial_link, + .select = max96724_select_serial_link, + .deselect = max96724_deselect_serial_link, + .get_locked = max96724_get_serial_link_lock, +}; + +static int max96724_enable_native_frame_sync(struct max9x_common *common) +{ + struct device_node *node = common->dev->of_node; + struct device *dev = common->dev; + struct regmap *map = common->map; + int ret, i; + unsigned int val; + enum max96724_fsync_pin pin; + unsigned int fsync_freq; + unsigned int pclk_freq; + unsigned int fsync_period; + unsigned int fsync_tx_id; + bool fsync_master; + + if (!of_property_read_bool(node, "frame-sync-enable")) { + dev_info(dev, "Native frame sync not enabled"); + return regmap_write(map, MAX96724_FSYNC_0, + MAX9X_FIELD_PREP(MAX96724_FSYNC_MODE_FIELD, + MAX96724_FSYNC_GEN_OFF_GPIO_OFF)); + } + + fsync_master = 
of_property_read_bool(node, "frame-sync-master"); + if (fsync_master) + dev_dbg(dev, "Frame sync master mode"); + else + dev_dbg(dev, "Frame sync slave mode"); + + ret = of_property_read_u32(node, "frame-sync-pin", &val); + if (ret) { + dev_err(dev, "Missing property: frame-sync-pin"); + return ret; + } + + // check value of pin + switch (val) { + case MAX96724_FSYNC_PIN_MFP0: + case MAX96724_FSYNC_PIN_MFP7: + pin = val; + break; + + default: + dev_err(dev, "Invalid frame-sync-pin"); + return -EINVAL; + }; + + ret = of_property_read_u32(node, "frame-sync-tx-id", &val); + if (ret) { + dev_err(dev, "Missing property: frame-sync-tx-id"); + return -EINVAL; + } + + // check value of frame-sync-tx-id + fsync_tx_id = val & 0x1F; + if (fsync_tx_id != val) + dev_warn(dev, "Truncated frame-sync-tx-id to 5 bits!"); + + ret = of_property_read_u32(node, "pclk-freq", &pclk_freq); + if (ret) { + dev_err(dev, "Missing property: pclk-freq"); + return -EINVAL; + } + + ret = of_property_read_u32(node, "frame-sync-freq", &fsync_freq); + if (ret) { + dev_err(dev, "Missing property: frame-sync-freq;"); + return -EINVAL; + } + + // Reset register to known state + ret = regmap_write(map, MAX96724_FSYNC_15, 0xDF); + if (ret) { + dev_dbg(dev, "Failed to reset FSYNC state"); + return ret; + } + + // Disable AUTO FS links + val = MAX9X_FIELD_PREP(MAX96724_FS_GPIO_TYPE_FIELD, MAX96724_FS_GPIO_TYPE_GMSL2) | + MAX9X_FIELD_PREP(MAX96724_FS_USE_XTAL_FIELD, true) | + MAX9X_FIELD_PREP(MAX96724_AUTO_FS_LINKS_FIELD, 0); + // Enable all FS links manually + for (i = 0; i < 4; ++i) + val |= MAX9X_FIELD_PREP(MAX96724_FS_LINK_FIELD(i), 1); + + ret = regmap_write(map, MAX96724_FSYNC_15, val); + if (ret) { + dev_dbg(dev, "Failed to write FSYNC_15"); + return ret; + } + + // Calculate value of FSYNC_PERIOD registers + // FSYNC_PERIOD = number of pclk cycles per fsync period + fsync_period = pclk_freq / fsync_freq; + dev_dbg(dev, "Calculated FSYNC_PERIOD: 0x%06x", fsync_period); + + for (val = 
MAX96724_FSYNC_5; val <= MAX96724_FSYNC_7; ++val) { + ret = regmap_write(map, val, (uint8_t) fsync_period); + if (ret) { + dev_err(dev, "Failed to write FSYNC_PERIOD registers to 0x%03x", val); + return ret; + } + + fsync_period = fsync_period >> 8; + } + + ret = regmap_write(map, MAX96724_FSYNC_17, + MAX9X_FIELD_PREP(MAX96724_FSYNC_TX_ID_FIELD, fsync_tx_id) | + MAX9X_FIELD_PREP(MAX96724_FSYNC_ERR_THR_FIELD, 0)); + if (ret) { + dev_err(dev, "Failed to set FSYNC_17"); + return ret; + } + + ret = regmap_write(map, MAX96724_FSYNC_0, + MAX9X_FIELD_PREP(MAX96724_FSYNC_OUT_PIN_FIELD, pin) | + MAX9X_FIELD_PREP(MAX96724_EN_VS_GEN_FIELD, 0) | + MAX9X_FIELD_PREP(MAX96724_FSYNC_MODE_FIELD, + (fsync_master ? MAX96724_FSYNC_GEN_ON_GPIO_OUTPUT : MAX96724_FSYNC_GEN_OFF_GPIO_INPUT)) | + MAX9X_FIELD_PREP(MAX96724_FSYNC_METH_FIELD, MAX96724_FSYNC_METHOD_MANUAL)); + + return 0; +} + +static int max96724_enable_gpio_frame_sync(struct max9x_common *common) +{ + struct device_node *node = common->dev->of_node; + struct device *dev = common->dev; + struct regmap *map = common->map; + + u32 fsync_gpios[MAX96724_NUM_GPIOS]; + int num_fsync_gpios; + int i, gpio, gpio_tx_val, ret; + + // Clean up any previous values in the event the chip was not reset + // or GPIO forwarding needs to be toggled off + dev_dbg(dev, "Setting GPIO registers to default value"); + for (i = 0; i < MAX96724_NUM_GPIOS; i++) { + // Default values per the datasheet + TRY(ret, regmap_write(map, MAX96724_GPIO_REG(gpio), (BIT(7) | BIT(0)))); + + // Link A register has different fields from Links B, C, D + TRY(ret, regmap_write(map, MAX96724_GPIO_A_REG(gpio), (BIT(7) | BIT(5) | i))); + TRY(ret, regmap_write(map, MAX96724_GPIO_B_REG(gpio), i)); + TRY(ret, regmap_write(map, MAX96724_GPIO_C_REG(gpio), i)); + TRY(ret, regmap_write(map, MAX96724_GPIO_D_REG(gpio), i)); + } + + // Read DT to find fsync GPIOs + ret = of_property_read_variable_u32_array(node, "frame-sync-ports", + fsync_gpios, 0, MAX96724_NUM_GPIOS); + + if (ret == 
-ENODATA || ret == -EINVAL) { + dev_dbg(dev, "No frame sync GPIOs specified in DT"); + return 0; + } + + if (ret < 0) { + dev_err(dev, "Failed to parse DT frame-sync-ports, error %d", ret); + return ret; + } + + num_fsync_gpios = ret; + dev_info(dev, "Enabling %d frame sync GPIOs", num_fsync_gpios); + + // Configure MAX96724 to forward specified GPIOs + for (i = 0; i < num_fsync_gpios; i++) { + gpio = fsync_gpios[i]; + + if (gpio >= MAX96724_NUM_GPIOS) { + dev_warn(dev, "Skipping invalid GPIO %d in DT", gpio); + continue; + } + + // See: MAX96724 Users Guide "Configuring GPIO forwarding" + + // Enable GPIO for transmission + TRY(ret, regmap_write(map, MAX96724_GPIO_REG(gpio), + MAX96724_GPIO_RES_CFG | MAX96724_GPIO_TX_ENABLE | MAX96724_GPIO_OUTDRV_DISABLE)); + + // Configure transmission registers on Links A-D. + gpio_tx_val = MAX96724_GPIO_PUSH_PULL | gpio; + + TRY(ret, regmap_write(map, MAX96724_GPIO_A_REG(gpio), gpio_tx_val)); + TRY(ret, regmap_write(map, MAX96724_GPIO_B_REG(gpio), gpio_tx_val)); + TRY(ret, regmap_write(map, MAX96724_GPIO_C_REG(gpio), gpio_tx_val)); + TRY(ret, regmap_write(map, MAX96724_GPIO_D_REG(gpio), gpio_tx_val)); + } + + return 0; +} + +static int max96724_disable_line_fault(struct max9x_common *common, unsigned int line) +{ + return max96724_set_line_fault(common, line, false); +} + +static int max96724_enable_line_fault(struct max9x_common *common, unsigned int line) +{ + return max96724_set_line_fault(common, line, true); +} + +static int max96724_set_line_fault(struct max9x_common *common, unsigned int line, bool enable) +{ + int ret; + + ret = regmap_update_bits(common->map, + MAX96724_PU_LF, + MAX96724_PU_LF_FIELD(line), + enable ? 
0xFF : 0x00); + + return ret; +} + +static int max96724_get_line_fault(struct max9x_common *common, unsigned int line) +{ + unsigned int val = 0; + int ret = regmap_read(common->map, MAX96724_LF(line), &val); + + if (ret < 0) + return ret; + + return MAX96724_LF_VAL(val, line); +} + +static struct max9x_line_fault_ops max96724_line_fault_ops = { + .enable = max96724_enable_line_fault, + .disable = max96724_disable_line_fault, + .get_status = max96724_get_line_fault, +}; + +static int max96724_enable(struct max9x_common *common) +{ + struct device *dev = common->dev; + struct regmap *map = common->map; + int link_id; + int ret; + + dev_dbg(dev, "Enable"); + + for (link_id = 0; link_id < common->num_serial_links; link_id++) { + ret = max96724_disable_serial_link(common, link_id); + if (ret) + return ret; + } + + // Apply errata tuning + ret = regmap_multi_reg_write(map, max96724_errata_rev1, ARRAY_SIZE(max96724_errata_rev1)); + if (ret) + return ret; + + ret = max96724_configure_csi_dphy(common); + if (ret) + return ret; + + return 0; +} + +static int max96724_enable_csi_link(struct max9x_common *common, + unsigned int csi_link_id) +{ + return max96724_set_csi_link_enabled(common, csi_link_id, true); +} + +static int max96724_disable_csi_link(struct max9x_common *common, + unsigned int csi_link_id) +{ + return max96724_set_csi_link_enabled(common, csi_link_id, false); +} + +static struct max9x_csi_link_ops max96724_csi_link_ops = { + .enable = max96724_enable_csi_link, + .disable = max96724_disable_csi_link, +}; + +int max96724_get_ops(struct max9x_common_ops **common_ops, struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops) +{ + (*common_ops) = &max96724_common_ops; + (*serial_ops) = &max96724_serial_link_ops; + (*csi_ops) = &max96724_csi_link_ops; + (*lf_ops) = &max96724_line_fault_ops; + (*trans_ops) = NULL; + return 0; +} + +MODULE_LICENSE("GPL v2"); 
+MODULE_AUTHOR("Cody Burrows "); +MODULE_AUTHOR("Josh Watts "); +MODULE_AUTHOR("He Jiabin "); +MODULE_DESCRIPTION("Maxim MAX96724 Quad GMSL2/GMSL1 to CSI-2 Deserializer driver"); diff --git a/drivers/media/i2c/max9x/max96724.h b/drivers/media/i2c/max9x/max96724.h new file mode 100644 index 0000000..1e63114 --- /dev/null +++ b/drivers/media/i2c/max9x/max96724.h @@ -0,0 +1,242 @@ +/* + * max96724.h - Maxim MAX96724 registers and constants. + * + * Copyright (c) 2023-2024, Define Design Deploy Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. 
+ +#ifndef _MAX96724_H_ +#define _MAX96724_H_ + +#include +#include +#include "serdes.h" + +enum max96724_phy_mode { + MAX96724_MIPI_PHY_4X2 = BIT(0), // four 2-lane ports + MAX96724_MIPI_PHY_2X4 = BIT(2), // two 4-lane ports (CSI1 is master for port A, CSI2 for port B) + MAX96724_MIPI_PHY_1X4A_22 = BIT(3), // one 4-lane (PHY0+PHY1, CSI1 for port A) port and two 2-lane ports + MAX96724_MIPI_PHY_1X4B_22 = BIT(4), // one 4-lane (PHY2+PHY3, CSI2 for port B) port and two 2-lane ports +}; + +// described in detail on page 340 of the datasheet & reg doc +enum max96724_initial_deskew_width { + MAX96724_INIT_DESKEW_1x32UI = 0, + MAX96724_INIT_DESKEW_2x32UI, + MAX96724_INIT_DESKEW_3x32UI, + MAX96724_INIT_DESKEW_4x32UI, + MAX96724_INIT_DESKEW_5x32UI, + MAX96724_INIT_DESKEW_6x32UI, + MAX96724_INIT_DESKEW_7x32UI, + MAX96724_INIT_DESKEW_8x32UI, + MAX96724_INIT_DESKEW_WIDTH_MAX = MAX96724_INIT_DESKEW_8x32UI, +}; + +enum max96724_fsync_pin { + MAX96724_FSYNC_PIN_MFP0 = 0, + MAX96724_FSYNC_PIN_MFP7 = 1, +}; + +enum max96724_fsync_mode { + MAX96724_FSYNC_GEN_ON_GPIO_OFF = 0, + MAX96724_FSYNC_GEN_ON_GPIO_OUTPUT, + MAX96724_FSYNC_GEN_OFF_GPIO_INPUT, + MAX96724_FSYNC_GEN_OFF_GPIO_OFF, +}; + +enum max96724_fsync_method { + MAX96724_FSYNC_METHOD_MANUAL = 0, + MAX96724_FSYNC_METHOD_SEMI_AUTO, + MAX96724_FSYNC_METHOD_AUTO, +}; + +enum max96724_fsync_gpio_type { + MAX96724_FS_GPIO_TYPE_GMSL1 = 0, + MAX96724_FS_GPIO_TYPE_GMSL2, +}; + +enum max96724_virtual_channel_size { + MAX96724_VC_2_BITS = 0, + MAX96724_VC_4_BITS = 1, +}; + +#define MAX96724_NUM_SERIAL_LINKS 4 +#define MAX96724_NUM_VIDEO_PIPES 4 +#define MAX96724_NUM_MIPI_MAPS 16 +#define MAX96724_NUM_CSI_LINKS 4 +#define MAX96724_NUM_LINE_FAULTS 4 +// There are 16 GPIO registers and 11 GPIO TX/RX forwarding registers, but only 9 pins exposed +#define MAX96724_NUM_GPIOS 9 + +#define MAX96724_DEFAULT_SERIAL_LINK_TIMEOUT_MS 200 + +#define MAX96724_DPLL_FREQ_MHZ_MULTIPLE 100 + + +#define MAX96724_FLD_OFS(n, bits_per_field, count) (((n) % 
(count)) * (bits_per_field)) +#define MAX96724_OFFSET_GENMASK(offset, h, l) GENMASK(offset + h, offset + l) + +#define MAX96724_REM_CC 0x03 +#define MAX96724_REM_CC_DIS_PORT_FIELD(link, port) BIT(MAX96724_FLD_OFS(link, 2, 4) + (port % 2)) +#define MAX96724_LINK_CTRL 0x06 +#define MAX96724_LINK_CTRL_GMSL_FIELD(link) (BIT(link) << 4) +#define MAX96724_LINK_CTRL_EN_FIELD(link) BIT(link) +#define MAX96724_PHY_RATE_CTRL(link) (0x10 + ((link) / 2)) +#define MAX96724_PHY_RATE_CTRL_TX_FIELD(link) (GENMASK(1, 0) << (MAX96724_FLD_OFS(link, 4, 2) + 2)) +#define MAX96724_PHY_RATE_CTRL_RX_FIELD(link) (GENMASK(1, 0) << (MAX96724_FLD_OFS(link, 4, 2) + 0)) + +// Link lock registers +#define MAX96724_PHY_LOCKED(link) ((link) == 0 ? 0x1A : 0x0A + ((link) - 1)) +#define MAX96724_PHY_LOCKED_FIELD BIT(3) +#define MAX96724_RESET_ALL 0x13 +#define MAX96724_RESET_ALL_FIELD BIT(6) +#define MAX96724_RESET_CTRL 0x18 +#define MAX96724_RESET_CTRL_FIELD(link) BIT(link) +#define MAX96724_DEV_REV 0x4C +#define MAX96724_DEV_REV_FIELD GENMASK(3, 0) + +#define MAX96724_VIDEO_PIPE_SEL(pipe) (0xF0 + ((pipe) / 2)) +#define MAX96724_VIDEO_PIPE_SEL_LINK_FIELD(link) \ + (GENMASK(1, 0) << (MAX96724_FLD_OFS(link, 4, 2) + 2)) +#define MAX96724_VIDEO_PIPE_SEL_INPUT_FIELD(link) \ + (GENMASK(1, 0) << (MAX96724_FLD_OFS(link, 4, 2) + 0)) + +#define MAX96724_VIDEO_PIPE_EN 0xF4 +#define MAX96724_VIDEO_PIPE_EN_FIELD(pipe) BIT(pipe) +#define MAX96724_VIDEO_PIPE_STREAM_SEL_ALL_FIELD BIT(4) +#define MAX96724_VIDEO_PIPE_LEGACY_MODE 0 + +#define MAX96724_DPLL_FREQ(phy) (0x415 + ((phy) * 3)) +#define MAX96724_DPLL_FREQ_FIELD GENMASK(4, 0) + +#define MAX96724_MIPI_TX_EXT(pipe) (0x800 + ((pipe) * 0x10)) + +#define MAX96724_MIPI_PHY0 0x8A0 +#define MAX96724_MIPI_PHY0_MODE_FIELD GENMASK(4, 0) +#define MAX96724_MIPI_PHY_ENABLE 0x8A2 +#define MAX96724_MIPI_PHY_ENABLE_FIELD(csi) BIT((csi) + 4) +#define MAX96724_MIPI_PHY_LANE_MAP(csi) (0x8A3 + (csi) / 2) +#define MAX96724_MIPI_PHY_LANE_MAP_FIELD(csi, lane) \ + (GENMASK(1, 0) << 
(MAX96724_FLD_OFS(csi, 4, 2) + MAX96724_FLD_OFS(lane, 2, 2))) + +// Note that CSIs and pipes overlap: +// 0x901 through 0x90A and 0x933 through 0x934 are CSIs, repeated every 0x40 up to 4 times +// 0x90B through 0x932 are pipes, repeated every 0x40 up to 4 times +#define MAX96724_MIPI_TX(pipe) (0x900 + ((pipe) * 0x40)) +#define MAX96724_MIPI_TX_LANE_CNT(csi) (MAX96724_MIPI_TX(csi) + 0x0A) +#define MAX96724_MIPI_TX_LANE_CNT_FIELD GENMASK(7, 6) +#define MAX96724_MIPI_TX_CPHY_EN_FIELD BIT(5) +#define MAX96724_MIPI_TX_VCX_EN_FIELD BIT(4) +#define MAX96724_MIPI_TX_DESKEW_INIT(csi) (MAX96724_MIPI_TX(csi) + 0x03) +#define MAX96724_MIPI_TX_DESKEW_INIT_AUTO_EN BIT(7) +#define MAX96724_MIPI_TX_DESKEW_WIDTH_FIELD GENMASK(2, 0) +#define MAX96724_MAP_EN_L(pipe) (MAX96724_MIPI_TX(pipe) + 0x0B) +#define MAX96724_MAP_EN_H(pipe) (MAX96724_MIPI_TX(pipe) + 0x0C) +#define MAX96724_MAP_EN_FIELD GENMASK(7, 0) +#define MAX96724_MAP_SRC_L(pipe, map) (MAX96724_MIPI_TX(pipe) + 0x0D + ((map) * 2)) +#define MAX96724_MAP_SRC_L_VC_FIELD GENMASK(7, 6) +#define MAX96724_MAP_SRC_L_DT_FIELD GENMASK(5, 0) +#define MAX96724_MAP_DST_L(pipe, map) (MAX96724_MIPI_TX(pipe) + 0x0D + ((map) * 2) + 1) +#define MAX96724_MAP_DST_L_VC_FIELD GENMASK(7, 6) +#define MAX96724_MAP_DST_L_DT_FIELD GENMASK(5, 0) +#define MAX96724_MAP_SRCDST_H(pipe, map) (MAX96724_MIPI_TX_EXT(pipe) + (map)) +#define MAX96724_MAP_SRCDST_H_SRC_VC_FIELD GENMASK(7, 5) +#define MAX96724_MAP_SRCDST_H_DST_VC_FIELD GENMASK(4, 2) +#define MAX96724_MAP_DPHY_DEST(pipe, map) (MAX96724_MIPI_TX(pipe) + 0x2D + ((map) / 4)) +#define MAX96724_MAP_DPHY_DEST_FIELD(map) (GENMASK(1, 0) << MAX96724_FLD_OFS(map, 2, 4)) + +#define MAX96724_MIPI_TX_ALT_MEM(csi) (MAX96724_MIPI_TX(csi) + 0x33) +#define MAX96724_MIPI_TX_ALT_MEM_FIELD GENMASK(2, 0) +#define MAX96724_MIPI_TX_ALT_MEM_8BPP BIT(1) +#define MAX96724_MIPI_TX_ALT_MEM_10BPP BIT(2) +#define MAX96724_MIPI_TX_ALT_MEM_12BPP BIT(0) + +#define MAX96724_VID_TX(pipe) (0x100 + (0x12 * (pipe))) + +// GPIO Registers 
+#define MAX96724_GPIO_REG(n) (0x300 + 3 * n) +#define MAX96724_GPIO_RES_CFG BIT(7) +#define MAX96724_GPIO_TX_ENABLE BIT(1) +#define MAX96724_GPIO_OUTDRV_DISABLE BIT(0) + +#define MAX96724_GPIO_A_REG(n) (MAX96724_GPIO_REG(n) + 1) + +#define MAX96724_GPIO_B_REG(n) \ + ((n <= 2) ? (0x337 + 3 * n) : (n <= 7) ? (0x341 + 3 * (n - 3)) \ + : (0x351 + 3 * (n - 8))) + +#define MAX96724_GPIO_C_REG(n) \ + ((n == 0) ? (0x36D) : (n <= 5) ? (0x371 + 3 * (n - 1)) \ + : (0x381 + 3 * (n - 6))) + +#define MAX96724_GPIO_D_REG(n) \ + ((n <= 3) ? (0x3A4 + 3 * n) : (n <= 8) ? (0x3B1 + 3 * (n - 4)) \ + : (0x3C1 + 3 * (n - 9))) + +#define MAX96724_GPIO_PUSH_PULL BIT(5) + +// Native frame sync registers +#define MAX96724_FSYNC_0 (0x4A0) +#define MAX96724_FSYNC_OUT_PIN_FIELD BIT(5) +#define MAX96724_EN_VS_GEN_FIELD BIT(6) +#define MAX96724_FSYNC_MODE_FIELD GENMASK(3, 2) +#define MAX96724_FSYNC_METH_FIELD GENMASK(1, 0) + +#define MAX96724_FSYNC_5 (0x4A5) +#define MAX96724_FSYNC_PERIOD_L_FIELD GENMASK(7, 0) +#define MAX96724_FSYNC_6 (0x4A6) +#define MAX96724_FSYNC_PERIOD_M_FIELD GENMASK(7, 0) +#define MAX96724_FSYNC_7 (0x4A7) +#define MAX96724_FSYNC_PERIOD_H_FIELD GENMASK(7, 0) + +#define MAX96724_FSYNC_15 (0x4AF) +#define MAX96724_FS_GPIO_TYPE_FIELD BIT(7) +#define MAX96724_FS_USE_XTAL_FIELD BIT(6) +#define MAX96724_AUTO_FS_LINKS_FIELD BIT(4) +#define MAX96724_FS_LINK_FIELD(link) BIT(link) + +#define MAX96724_FSYNC_17 (0x4B1) +#define MAX96724_FSYNC_TX_ID_FIELD GENMASK(7, 3) +#define MAX96724_FSYNC_ERR_THR_FIELD GENMASK(2, 0) + +// Line fault status registers +#define MAX96724_LF(link) (0xE1 + ((link) / 2)) +#define MAX96724_LF_VAL(val, link) ((val >> ((link % 2) * 4)) & GENMASK(2, 0)) + +#define MAX96724_PU_LF (0xE0) +#define MAX96724_PU_LF_FIELD(line) BIT(line) + +static struct max9x_serdes_rate_table max96724_rx_rates[] = { + { .val = 1, .freq_mhz = 3000 }, // 3 Gbps + { .val = 2, .freq_mhz = 6000 }, // 6 Gbps +}; + +static struct max9x_serdes_rate_table max96724_tx_rates[] = { + { .val = 
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Intel Corporation.

#ifndef _MAX9X_PDATA_H_
#define _MAX9X_PDATA_H_

/* NOTE(review): the original #include targets were lost in extraction
 * (angle-bracket contents stripped); the structs below use
 * struct i2c_board_info / i2c_client and enum v4l2_mbus_type, so the
 * includes are presumably the I2C and V4L2 mediabus headers — verify. */
#include
#include

struct max9x_common;

/* Physical layer protocol used on a serializer<->deserializer link. */
enum max9x_serdes_link_type {
	MAX9X_LINK_TYPE_GMSL1 = 0,
	MAX9X_LINK_TYPE_GMSL2,
	MAX9X_LINK_TYPE_FPDLINK,
};

/* One downstream I2C device (serializer, sensor, EEPROM, ...) and how to
 * reach it: board_info carries the aliased (remapped) address, phys_addr
 * the device's real address on the far side of the link. */
struct max9x_subdev_pdata {
	unsigned int serial_link_id; // DES identify GMSL link/pipe id
	struct i2c_board_info board_info;
	unsigned short phys_addr; // Remap or translate subdev
};

/* Per-GMSL-link configuration (deserializer side). */
struct max9x_serial_link_pdata {
	unsigned int link_id;
	enum max9x_serdes_link_type link_type;
	unsigned int rx_freq_mhz;          // forward-channel rate
	unsigned int tx_freq_mhz;          // back-channel rate
	char poc_regulator[32];            // power-over-coax regulator name

	// SER
	struct i2c_client *des_client;
	unsigned int des_link_id;
};

/* One VC/DT remapping entry: (src_vc, src_dt) -> (dst_vc, dst_dt) on dst_csi. */
struct max9x_serdes_mipi_map {
	u16 src_vc;
	u16 src_dt;
	u16 dst_vc;
	u16 dst_dt;
	u16 dst_csi;
};

/* Routing of one video pipe; fields differ by role (DES vs SER). */
struct max9x_video_pipe_pdata {
	unsigned int serial_link_id; // DES: src-link, SER: dst-link
	unsigned int pipe_id; // X, Y, Z, U

	// DES
	struct max9x_serdes_mipi_map *maps;
	unsigned int num_maps;
	unsigned int src_pipe_id;

	// SER
	unsigned int src_csi_id;
	unsigned int *data_types;
	unsigned int num_data_types;
};

/* Mapping of an internal CSI lane onto a physical PHY lane. */
struct max9x_serdes_phy_map {
	u8 int_csi; // Internal CSI lane
	u8 phy_ind; // PHY index
	u8 phy_lane; // PHY lane index
};

/* CSI-2 output (DES) / input (SER) port configuration. */
struct max9x_csi_link_pdata {
	unsigned int link_id;
	enum v4l2_mbus_type bus_type;
	unsigned int num_lanes;
	struct max9x_serdes_phy_map *maps;
	unsigned int num_maps;
	unsigned int tx_rate_mbps;
	bool auto_initial_deskew;
	unsigned int initial_deskew_width;
	bool auto_start;
};

/* Optional GPIO-chip exposure for the device's MFP pins. */
struct max9x_gpio_pdata {
	const char *label;
	const char *const *names;
};

/* Top-level platform data for a max9x serializer or deserializer. */
struct max9x_pdata {
	unsigned short phys_addr; // Remap self
	char suffix[5];

	struct max9x_subdev_pdata *subdevs; // DES: the serializers, 1 per link; SER: sensor, eeprom, etc
	unsigned int num_subdevs;

	struct max9x_serial_link_pdata *serial_links; // DES only. SER is currently presumed to have only 1 active link
	unsigned int num_serial_links;

	struct max9x_video_pipe_pdata *video_pipes;
	unsigned int num_video_pipes;

	struct max9x_csi_link_pdata *csi_links;
	unsigned int num_csi_links;

	struct max9x_gpio_pdata gpio;

	bool external_refclk_enable;
};

#endif
 */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Intel Corporation

/* NOTE(review): the ten original #include targets were lost in extraction
 * (angle-bracket contents stripped) — restore from the real source. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "serdes.h"

// Advertised CSI-2 link frequencies (per-lane, in Hz), descending.
static const s64 max9x_op_sys_clock[] = {
	MAX9X_LINK_FREQ_MBPS_TO_HZ(2500),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(2400),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(2300),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(2200),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(2100),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(2000),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1900),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1800),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1700),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1600),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1500),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1400),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1300),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1200),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1100),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(1000),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(900),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(800),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(700),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(600),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(500),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(400),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(300),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(200),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(100),
	MAX9X_LINK_FREQ_MBPS_TO_HZ(80),
};

/*
 * Dispatch to the chip-specific get_ops() based on the chip type enum.
 * Unknown dev_id values return 0 with all op pointers untouched — callers
 * presumably rely on the max9x_chips table restricting dev_id; verify.
 */
int max9x_get_ops(char dev_id,
		struct max9x_common_ops **common_ops,
		struct max9x_serial_link_ops **serial_ops,
		struct max9x_csi_link_ops **csi_ops,
		struct max9x_line_fault_ops **lf_ops,
		struct max9x_translation_ops **trans_ops)
{
	int rval = 0;

	switch (dev_id) {
	case MAX9296:
		rval = max9296_get_ops(common_ops, serial_ops, csi_ops, lf_ops, trans_ops);
		break;
	case MAX96724:
		rval = max96724_get_ops(common_ops, serial_ops, csi_ops, lf_ops, trans_ops);
		break;
	case MAX9295:
		rval = max9295_get_ops(common_ops, serial_ops, csi_ops, lf_ops, trans_ops);
		break;
	case MAX96717:
		rval = max96717_get_ops(common_ops, serial_ops, csi_ops, lf_ops, trans_ops);
		break;
	default:
		break;
	}

	return rval;
}

// Per-chip identification data: expected DEV_ID register value, revision
// register address, serializer/deserializer role, and ops dispatcher.
static struct max9x_desc max9x_chips[] = {
	[MAX9296] = {
		.dev_id = 0x94,
		.rev_reg = 0xE,
		.serdes_type = MAX9X_DESERIALIZER,
		.chip_type = MAX9296,
		.get_max9x_ops = max9x_get_ops,
	},
	[MAX96724] = {
		.dev_id = 0xA2,
		.rev_reg = 0x4C,
		.serdes_type = MAX9X_DESERIALIZER,
		.chip_type = MAX96724,
		.get_max9x_ops = max9x_get_ops,
	},
	[MAX9295] = {
		.dev_id = 0x91,
		.rev_reg = 0xE,
		.serdes_type = MAX9X_SERIALIZER,
		.chip_type = MAX9295,
		.get_max9x_ops = max9x_get_ops,
	},
	/*need to check dev_id and others when used*/
	[MAX96717] = {
		.dev_id = 0x91,
		.rev_reg = 0xE,
		.serdes_type = MAX9X_SERIALIZER,
		.chip_type = MAX96717,
		.get_max9x_ops = max9x_get_ops,
	},
};

static const struct of_device_id max9x_of_match[] = {
	{ .compatible = "max9x,max9296", .data = &max9x_chips[MAX9296] },
	{ .compatible = "max9x,max96724", .data = &max9x_chips[MAX96724] },
	{ .compatible = "max9x,max9295", .data = &max9x_chips[MAX9295] },
	{ .compatible = "max9x,max96717", .data = &max9x_chips[MAX96717] },
	{}
};
MODULE_DEVICE_TABLE(of, max9x_of_match);

static const struct i2c_device_id max9x_id[] = {
	{ "max9x", MAX9296 },
	{ "max9296", MAX9296 },
	{ "max96724", MAX96724 },
	{ "max9295", MAX9295 },
	{ "max96717", MAX96717 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max9x_id);

// Callback signature for walking child DT nodes during parse.
typedef int (*max9x_serdes_parse_child_func)(struct max9x_common *common, struct device_node *node);

// Forward declarations (definitions appear later in this file)
static int max9x_soft_reset(struct max9x_common *common);
static int max9x_remap_addr(struct max9x_common *common);
static int max9x_setup_gpio(struct max9x_common *common);
static int max9x_enable(struct max9x_common *common);
static int max9x_disable(struct max9x_common *common);
static int max9x_verify_devid(struct max9x_common *common);

static int max9x_remap_serializers_resume(struct max9x_common *common, unsigned int link_id);
static int max9x_create_adapters_resume(struct max9x_common *common);

static int max9x_remap_serializers(struct max9x_common *common, unsigned int link_id);
static int max9x_create_adapters(struct max9x_common *common);
static int max9x_csi_link_to_pad(struct max9x_common *common, int csi_id);
static int max9x_serial_link_to_pad(struct max9x_common *common, int link_id);
static int max9x_register_v4l_subdev(struct max9x_common *common);
static int max9x_enable_serial_link(struct max9x_common *common, unsigned int link_id);
static int max9x_disable_serial_link(struct max9x_common *common, unsigned int link_id);
static int max9x_sysfs_create_get_link(struct max9x_common *common, unsigned int link_id);
static void max9x_sysfs_destroy_get_link(struct max9x_common *common, unsigned int link_id);

static int max9x_enable_line_faults(struct max9x_common *common);
static int max9x_disable_line_faults(struct max9x_common *common);
static void max9x_sysfs_destroy_line_fault_status(struct max9x_common *common, unsigned int line);

static int max9x_parse_pdata(struct max9x_common *common, struct max9x_pdata *pdata);
static int max9x_parse_serial_link_pdata(struct max9x_common *common,
		struct max9x_serial_link_pdata *max9x_serial_link_pdata);
static int max9x_parse_video_pipe_pdata(struct max9x_common *common,
		struct max9x_video_pipe_pdata *video_pipe_pdata);
static int max9x_parse_csi_link_pdata(struct max9x_common *common, struct max9x_csi_link_pdata *csi_link_pdata);
static int max9x_parse_subdev_pdata(struct max9x_common *common, struct max9x_subdev_pdata *subdev_pdata);

static int max9x_select_i2c_chan(struct i2c_mux_core *muxc, u32 chan_id);
static int max9x_deselect_i2c_chan(struct i2c_mux_core *muxc, u32 chan_id);

static int max9x_des_isolate_serial_link(struct max9x_common *common, unsigned int link_id);
static int max9x_des_deisolate_serial_link(struct max9x_common *common, unsigned int link_id);

static ssize_t max9x_link_status_show(struct device *dev, struct device_attribute *attr, char *buf);

static int max9x_setup_translations(struct max9x_common *common);
max9x_disable_translations(struct max9x_common *common); + +#define MAX9X_ALLOCATE_ELEMENTS(common, element_type, elements, num_elements) ({ \ + struct device *dev = common->dev; \ + int allocate_ret = 0; \ + (common)->num_elements = 0; \ + (common)->elements = NULL; \ + if ((common)->common_ops && (common)->common_ops->max_elements) { \ + (common)->num_elements = (common)->common_ops->max_elements((common), element_type); \ + if ((common)->num_elements > 0) { \ + (common)->elements = devm_kzalloc((common)->dev, \ + (common)->num_elements * sizeof(typeof(*((common)->elements))), GFP_KERNEL); \ + if (!(common)->elements) { \ + dev_err(dev, "Failed to allocated memory for " # element_type); \ + allocate_ret = -ENOMEM; \ + } \ + } \ + } \ + allocate_ret; \ +}) + +static struct max9x_pdata *pdata_ser(struct device *dev, struct max9x_subdev_pdata *sdinfo, const char *name, + unsigned int phys_addr, unsigned int virt_addr) +{ + struct max9x_pdata *pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + + dev_info(dev, "ser %s phys %02x virt %02x\n", name, phys_addr, virt_addr); + + sdinfo->board_info.platform_data = pdata; + strscpy(sdinfo->board_info.type, name, I2C_NAME_SIZE); + sdinfo->board_info.addr = virt_addr; + sdinfo->phys_addr = pdata->phys_addr = phys_addr; + + return pdata; +} + +static struct max9x_pdata *pdata_sensor(struct device *dev, struct max9x_subdev_pdata *sdinfo, const char *name, + unsigned int phys_addr, unsigned int virt_addr) +{ + dev_info(dev, "sen %s phys %02x virt %02x\n", name, phys_addr, virt_addr); + + strscpy(sdinfo->board_info.type, name, I2C_NAME_SIZE); + sdinfo->board_info.addr = virt_addr; + sdinfo->phys_addr = phys_addr; + + return NULL; +} + +static struct max9x_pdata *parse_ser_pdata(struct device *dev, const char *ser_name, char *suffix, + unsigned int ser_nlanes, unsigned int phys_addr, + unsigned int virt_addr, struct max9x_subdev_pdata *ser_sdinfo, + unsigned int sensor_dt) +{ + struct max9x_pdata *ser_pdata = pdata_ser(dev, 
ser_sdinfo, ser_name, phys_addr, virt_addr); + struct max9x_serial_link_pdata *ser_serial_link; + struct max9x_video_pipe_pdata *ser_video_pipe; + + snprintf(ser_pdata->suffix, sizeof(ser_pdata->suffix), "%s", suffix); + + ser_pdata->num_serial_links = 1; + ser_pdata->serial_links = devm_kzalloc(dev, ser_pdata->num_serial_links * sizeof(*ser_pdata->serial_links), + GFP_KERNEL); + + ser_serial_link = &ser_pdata->serial_links[0]; + ser_serial_link->link_id = 0; + ser_serial_link->link_type = MAX9X_LINK_TYPE_GMSL2; + ser_serial_link->rx_freq_mhz = 6000; + ser_serial_link->tx_freq_mhz = 187; + + ser_pdata->num_video_pipes = 1; + ser_pdata->video_pipes = devm_kzalloc(dev, + ser_pdata->num_video_pipes * sizeof(*ser_pdata->video_pipes), GFP_KERNEL); + + ser_video_pipe = &ser_pdata->video_pipes[0]; + ser_video_pipe->serial_link_id = 0; + ser_video_pipe->pipe_id = ser_sdinfo->serial_link_id; + ser_video_pipe->src_csi_id = 1; /* PHY B typically */ + + ser_video_pipe->num_data_types = 1; + ser_video_pipe->data_types = devm_kzalloc(dev, + ser_video_pipe->num_data_types * sizeof(*ser_video_pipe->data_types), GFP_KERNEL); + ser_video_pipe->data_types[0] = sensor_dt; + + ser_pdata->num_csi_links = 1; + ser_pdata->csi_links = devm_kzalloc(dev, ser_pdata->num_csi_links * sizeof(*ser_pdata->csi_links), GFP_KERNEL); + + struct max9x_csi_link_pdata *csi_link = &ser_pdata->csi_links[0]; + + csi_link->link_id = 1; + csi_link->num_lanes = ser_nlanes; + + return ser_pdata; +} + +static void parse_sensor_pdata(struct device *dev, const char *sensor_name, char *suffix, unsigned int ser_nlanes, + unsigned int phys_addr, unsigned int virt_addr, struct max9x_subdev_pdata *ser_sdinfo, + struct max9x_pdata *ser_pdata) +{ + ser_pdata->num_subdevs = 1; + ser_pdata->subdevs = devm_kzalloc(dev, ser_pdata->num_subdevs * sizeof(*ser_pdata->subdevs), GFP_KERNEL); + pdata_sensor(dev, &ser_pdata->subdevs[0], sensor_name, phys_addr, virt_addr); + + /* NOTE: i2c_dev_set_name() will prepend "i2c-" to this 
name */ + char *dev_name = devm_kzalloc(dev, I2C_NAME_SIZE, GFP_KERNEL); + + snprintf(dev_name, I2C_NAME_SIZE, "%s %s", sensor_name, suffix); + ser_pdata->subdevs[0].board_info.dev_name = dev_name; + + struct isx031_platform_data *sen_pdata = devm_kzalloc(dev, sizeof(*sen_pdata), GFP_KERNEL); + + if (!sen_pdata) + return; + + ser_pdata->subdevs[0].board_info.platform_data = sen_pdata; + sen_pdata->lanes = ser_nlanes; + sen_pdata->irq_pin_flags = 1; //workaround for identify D3. + snprintf(sen_pdata->suffix, sizeof(sen_pdata->suffix), "%s", suffix); +} + +static void *parse_serdes_pdata(struct device *dev) +{ + /* + * Assumptions: + * - All ports have same model of sensor + * - Single-port usage will always be port 0, never port 1 + * - Serializer always uses serial link 0 + */ + struct serdes_platform_data *serdes_pdata = dev->platform_data; + unsigned int num_ports = serdes_pdata->subdev_num; + unsigned int csi_port = (serdes_pdata->des_port / 90); + struct max9x_pdata *des_pdata = devm_kzalloc(dev, sizeof(*des_pdata), GFP_KERNEL); + + snprintf(des_pdata->suffix, sizeof(des_pdata->suffix), "%c", serdes_pdata->suffix); + des_pdata->num_serial_links = num_ports; + des_pdata->serial_links = devm_kzalloc(dev, + des_pdata->num_serial_links * sizeof(*des_pdata->serial_links), GFP_KERNEL); + + des_pdata->num_subdevs = num_ports; + des_pdata->subdevs = devm_kzalloc(dev, des_pdata->num_subdevs * sizeof(*des_pdata->subdevs), GFP_KERNEL); + + des_pdata->num_video_pipes = num_ports; + des_pdata->video_pipes = devm_kzalloc(dev, + des_pdata->num_video_pipes * sizeof(*des_pdata->video_pipes), GFP_KERNEL); + + for (unsigned int serial_link_id = 0; serial_link_id < des_pdata->num_serial_links; serial_link_id++) { + struct max9x_serial_link_pdata *serial_link = &des_pdata->serial_links[serial_link_id]; + unsigned int video_pipe_id = serial_link_id; + struct serdes_subdev_info *serdes_sdinfo = &serdes_pdata->subdev_info[serial_link_id]; + struct max9x_video_pipe_pdata 
*des_video_pipe = &des_pdata->video_pipes[video_pipe_id]; + struct max9x_subdev_pdata *ser_sdinfo = &des_pdata->subdevs[serial_link_id]; + const char *ser_name = serdes_pdata->ser_name; + const char *sensor_name = serdes_sdinfo->board_info.type; + unsigned int ser_alias = serdes_sdinfo->ser_alias; + unsigned int sensor_alias = serdes_sdinfo->board_info.addr; + unsigned int ser_phys_addr = serdes_sdinfo->ser_phys_addr; + unsigned int sensor_phys_addr = serdes_sdinfo->phy_i2c_addr; + unsigned int lanes = serdes_pdata->ser_nlanes; + unsigned int dt = serdes_sdinfo->sensor_dt; + + serial_link->link_id = serial_link_id; + serial_link->link_type = MAX9X_LINK_TYPE_GMSL2; + serial_link->rx_freq_mhz = 6000; + serial_link->tx_freq_mhz = 187; + + des_video_pipe->serial_link_id = serial_link_id; + des_video_pipe->pipe_id = video_pipe_id; + des_video_pipe->src_pipe_id = video_pipe_id; + des_video_pipe->num_maps = 3; + des_video_pipe->maps = devm_kzalloc(dev, + des_video_pipe->num_maps * sizeof(*des_video_pipe->maps), GFP_KERNEL); + + ser_sdinfo->serial_link_id = serial_link_id; + + SET_CSI_MAP(des_video_pipe->maps, 0, 0, 0x00, video_pipe_id, 0x00, csi_port); + SET_CSI_MAP(des_video_pipe->maps, 1, 0, 0x01, video_pipe_id, 0x01, csi_port); + SET_CSI_MAP(des_video_pipe->maps, 2, 0, dt, video_pipe_id, dt, csi_port); /* YUV422 8-bit */ + + struct max9x_pdata *ser_pdata = parse_ser_pdata(dev, ser_name, serdes_sdinfo->suffix, lanes, + ser_phys_addr, ser_alias, ser_sdinfo, dt); + + parse_sensor_pdata(dev, sensor_name, serdes_sdinfo->suffix, lanes, sensor_phys_addr, sensor_alias, + ser_sdinfo, ser_pdata); + + } + + des_pdata->num_csi_links = 1; + des_pdata->csi_links = devm_kzalloc(dev, des_pdata->num_csi_links * sizeof(*des_pdata->csi_links), GFP_KERNEL); + + do { + struct max9x_csi_link_pdata *csi_link = &des_pdata->csi_links[0]; + + csi_link->link_id = csi_port; + csi_link->bus_type = serdes_pdata->bus_type; + csi_link->num_lanes = serdes_pdata->deser_nlanes; + csi_link->tx_rate_mbps 
= 2000; + csi_link->auto_initial_deskew = true; + csi_link->initial_deskew_width = 7; + csi_link->auto_start = false; + csi_link->num_maps = 2; + csi_link->maps = devm_kzalloc(dev, csi_link->num_maps * sizeof(*csi_link->maps), GFP_KERNEL); + if (csi_port == 1) { + SET_PHY_MAP(csi_link->maps, 0, 0, 1, 0); /* 0 (DA0) -> PHY1.0 */ + SET_PHY_MAP(csi_link->maps, 1, 1, 1, 1); /* 1 (DA1) -> PHY1.1 */ + } else if (csi_port == 2) { + SET_PHY_MAP(csi_link->maps, 0, 2, 2, 0); /* 0 (DA0) -> PHY2.0 */ + SET_PHY_MAP(csi_link->maps, 1, 2, 2, 1); /* 1 (DA1) -> PHY2.1 */ + } + } while (0); + + return des_pdata; +} + +/* TODO: remap not hardcode according to pdata */ +static int max9x_remap_serializers_resume(struct max9x_common *common, unsigned int link_id) +{ + int ret; + struct max9x_serdes_serial_link *serial_link = &common->serial_link[link_id]; + unsigned int phys_addr, virt_addr; + struct i2c_client *virt_client; + struct regmap *virt_map; + unsigned int val; + const struct regmap_config regmap_config = { + .reg_bits = 16, + .val_bits = 8, + }; + + if (!serial_link->remote.pdata) + return 0; + + ret = max9x_select_i2c_chan(common->muxc, link_id); + if (ret) + return ret; + + ret = max9x_des_isolate_serial_link(common, link_id); + if (ret) + return ret; + + phys_addr = serial_link->remote.pdata->phys_addr; + virt_addr = serial_link->remote.pdata->board_info.addr; + if (phys_addr == virt_addr) + return 0; + + dev_info(common->dev, "Remap serializer from 0x%02x to 0x%02x", phys_addr, virt_addr); + + virt_client = i2c_new_dummy_device(common->client->adapter, virt_addr); + if (IS_ERR_OR_NULL(virt_client)) + goto err_regmap; + + virt_map = regmap_init_i2c(virt_client, ®map_config); + if (IS_ERR_OR_NULL(virt_map)) + goto err_virt_client; + + usleep_range(1000, 1050); + + ret = regmap_read(virt_map, MAX9X_DEV_ID, &val); + if (ret) + dev_err(common->dev, "Device not present after remap to 0x%02x", virt_addr); + else + dev_info(common->dev, "DEV_ID after: 0x%02x", val); + + 
regmap_exit(virt_map); + +err_virt_client: + i2c_unregister_device(virt_client); + +err_regmap: + max9x_deselect_i2c_chan(common->muxc, link_id); + max9x_des_deisolate_serial_link(common, link_id); + + return ret; +} + +static int max9x_create_adapters_resume(struct max9x_common *common) +{ + struct device *dev = common->dev; + unsigned int link_id; + const unsigned int RETRY_MS_MIN = 32; + const unsigned int RETRY_MS_MAX = 512; + unsigned int ms; + int err = 0; + + for (link_id = 0; link_id < common->num_serial_links; link_id++) { + dev_info(dev, "Serial-link %d: %senabled", + link_id, (common->serial_link[link_id].enabled ? "" : "not ")); + + if (!common->serial_link[link_id].enabled) + continue; + + /* This exponential retry works around a current problem in the locking code. */ + for (ms = RETRY_MS_MIN; ms <= RETRY_MS_MAX; ms <<= 1) { + err = max9x_enable_serial_link(common, link_id); + if (!err) + break; + + dev_warn(dev, + "enable link %d failed, trying again (waiting %d ms)", + link_id, ms); + msleep(ms); + } + if (ms > RETRY_MS_MAX) { + dev_err(dev, "failed to enable link %d after multiple retries", + link_id); + max9x_disable_serial_link(common, link_id); + common->serial_link[link_id].enabled = false; + } + + if (common->type == MAX9X_DESERIALIZER) { + err = max9x_remap_serializers_resume(common, link_id); + if (err) { + dev_err(dev, "failed to remap serializers on link %d", link_id); + max9x_disable_serial_link(common, link_id); + common->serial_link[link_id].enabled = false; + } + } + } + + for (link_id = 0; link_id < common->num_serial_links; link_id++) + max9x_setup_translations(common); + + return 0; +} + +int max9x_common_resume(struct max9x_common *common) +{ + struct max9x_common *des_common = NULL; + struct device *dev = common->dev; + u32 des_link; + u32 phys_addr, virt_addr; + int ret; + + const struct regmap_config regmap_config = { + .reg_bits = 16, + .val_bits = 8, + }; + + if (dev->platform_data) { + struct max9x_pdata *pdata = 
dev->platform_data; + + virt_addr = common->client->addr; + phys_addr = pdata->phys_addr ? pdata->phys_addr : virt_addr; + + if (common->type == MAX9X_SERIALIZER) { + WARN_ON(pdata->num_serial_links < 1); + + des_common = max9x_client_to_common(pdata->serial_links[0].des_client); + if (des_common) + des_link = pdata->serial_links[0].des_link_id; + } + } + + if (common->type == MAX9X_SERIALIZER && des_common) { + /* + * Isolate this link until after reset and potential address remapping, + * avoiding a race condition with two serializers resetting at the same time + */ + ret = max9x_des_isolate_serial_link(des_common, des_link); + if (ret) + goto enable_err; + } + + dev_info(dev, "create phys dummy device"); + + if (phys_addr != virt_addr) { + common->phys_client = i2c_new_dummy_device(common->client->adapter, phys_addr); + if (IS_ERR_OR_NULL(common->phys_client)) { + dev_err(dev, "Failed to create dummy device for phys_addr"); + ret = PTR_ERR(common->phys_client); + goto enable_err; + } + + common->phys_map = regmap_init_i2c(common->phys_client, ®map_config); + if (IS_ERR_OR_NULL(common->phys_map)) { + dev_err(dev, "Failed to create dummy device regmap for phys_addr"); + ret = PTR_ERR(common->phys_client); + goto enable_err; + } + } + + dev_info(dev, "Enable"); + + ret = max9x_enable(common); + if (ret) + dev_err(dev, "Failed to enable"); + +enable_err: + if (common->phys_map) { + regmap_exit(common->phys_map); + common->phys_map = NULL; + } + + if (common->phys_client) { + i2c_unregister_device(common->phys_client); + common->phys_client = NULL; + } + + if (common->type == MAX9X_SERIALIZER && des_common) { + max9x_des_deisolate_serial_link(des_common, des_link); + } + + ret = max9x_create_adapters_resume(common); + if (ret) + goto err_enable; + + return 0; + +err_enable: + max9x_disable(common); + + return ret; +} +EXPORT_SYMBOL(max9x_common_resume); + +int max9x_common_suspend(struct max9x_common *common) +{ + unsigned int link_id; + + dev_info(common->dev, "try 
to suspend"); + + max9x_disable_translations(common); + + for (link_id = 0; link_id < common->num_serial_links; link_id++) + max9x_disable_serial_link(common, link_id); + + max9x_disable(common); + + return 0; +} +EXPORT_SYMBOL(max9x_common_suspend); + +int max9x_common_init_i2c_client(struct max9x_common *common, + struct i2c_client *client, + const struct regmap_config *regmap_config, + struct max9x_common_ops *common_ops, + struct max9x_serial_link_ops *serial_link_ops, + struct max9x_csi_link_ops *csi_link_ops, + struct max9x_line_fault_ops *lf_ops) +{ + struct device *dev = &client->dev; + struct i2c_adapter *adap = to_i2c_adapter(dev->parent); + struct max9x_pdata *pdata = NULL; + u32 phys_addr, virt_addr; + int ret; + + common->dev = dev; + common->client = client; + + /* If no GPIO is found this will return NULL, and will not error */ + common->reset_gpio = devm_gpiod_get_optional(dev, MAX9X_RESET_GPIO_NAME, GPIOD_OUT_HIGH); + if (IS_ERR(common->reset_gpio)) { + dev_err(dev, "gpiod_get failed with error: %ld", PTR_ERR(common->reset_gpio)); + return PTR_ERR(common->reset_gpio); + } + + common->vdd_regulator = devm_regulator_get_optional(dev, MAX9X_VDD_REGULATOR_NAME); + if (IS_ERR_OR_NULL(common->vdd_regulator)) + dev_info(dev, "Missing VDD regulator"); + + common->map = devm_regmap_init_i2c(client, regmap_config); + if (IS_ERR_OR_NULL(common->map)) { + dev_err(dev, "Failed to create regmap."); + return PTR_ERR(common->map); + } + + if (dev->platform_data) { + pdata = dev->platform_data; + /* + * for serializer, the i2c phys_addr supposed to be updated as virt_addr + * when deserializer do max9x_remap_serializer, + * check here to avoid failure when directly read chipid from virt_addr + */ + virt_addr = common->client->addr; + phys_addr = pdata->phys_addr ? 
pdata->phys_addr : virt_addr; + + if (phys_addr != virt_addr) { + common->phys_client = i2c_new_dummy_device(common->client->adapter, phys_addr); + if (IS_ERR_OR_NULL(common->phys_client)) { + dev_err(dev, "Failed to create dummy device for phys_addr"); + ret = PTR_ERR(common->phys_client); + goto enable_err; + } + + common->phys_map = regmap_init_i2c(common->phys_client, regmap_config); + if (IS_ERR_OR_NULL(common->phys_map)) { + dev_err(dev, "Failed to create dummy device regmap for phys_addr"); + ret = PTR_ERR(common->phys_client); + goto enable_err; + } + } + } + + ret = max9x_verify_devid(common); + if (ret) + return ret; + + ret = common->des->get_max9x_ops(common->des->chip_type, &(common->common_ops), + &(common->serial_link_ops), &(common->csi_link_ops), &(common->line_fault_ops), + &(common->translation_ops)); + if (ret) { + dev_err(dev, "Failed to get ops."); + return ret; + } + + mutex_init(&common->link_mutex); + mutex_init(&common->isolate_mutex); + common->isolated_link = -1; + common->selected_link = -1; + + ret = MAX9X_ALLOCATE_ELEMENTS(common, MAX9X_CSI_LINK, csi_link, num_csi_links); + if (ret) + return ret; + + ret = MAX9X_ALLOCATE_ELEMENTS(common, MAX9X_VIDEO_PIPE, video_pipe, num_video_pipes); + if (ret) + return ret; + + ret = MAX9X_ALLOCATE_ELEMENTS(common, MAX9X_SERIAL_LINK, serial_link, num_serial_links); + if (ret) + return ret; + + ret = MAX9X_ALLOCATE_ELEMENTS(common, MAX9X_LINE_FAULT, line_fault, num_line_faults); + if (ret) + return ret; + + common->muxc = i2c_mux_alloc( + adap, dev, + common->num_serial_links, + 0, + I2C_MUX_LOCKED, + max9x_select_i2c_chan, + max9x_deselect_i2c_chan); + if (IS_ERR_OR_NULL(common->muxc)) { + dev_err(dev, "Failed to allocate mux core"); + return -ENOMEM; + } + common->muxc->priv = common; + + if (common->type == MAX9X_DESERIALIZER) + dev->platform_data = parse_serdes_pdata(dev); + + if (dev->platform_data) { + pdata = dev->platform_data; + + dev_dbg(dev, "Parse pdata"); + + ret = 
max9x_parse_pdata(common, pdata); + if (ret) + return ret; + } + + dev_dbg(dev, "Enable"); + + ret = max9x_enable(common); + if (ret) { + dev_err(dev, "Failed to enable"); + goto enable_err; + } + +enable_err: + if (common->phys_map) { + regmap_exit(common->phys_map); + common->phys_map = NULL; + } + + if (common->phys_client) { + i2c_unregister_device(common->phys_client); + common->phys_client = NULL; + } + + if (ret) + return ret; + + dev_dbg(dev, "Enable gpio"); + ret = max9x_setup_gpio(common); + if (ret) + goto err_enable; + + dev_dbg(dev, "Enable line faults"); + + ret = max9x_enable_line_faults(common); + if (ret) { + dev_err(dev, "Failed to enable line faults"); + goto err_enable; + } + + dev_dbg(dev, "Enable links"); + + ret = max9x_create_adapters(common); + if (ret) + goto err_enable; + + ret = max9x_register_v4l_subdev(common); + if (ret) + goto err_adapters; + + return 0; + +err_adapters: + i2c_mux_del_adapters(common->muxc); + +err_enable: + max9x_disable(common); + + return ret; +} +EXPORT_SYMBOL(max9x_common_init_i2c_client); + +void max9x_destroy(struct max9x_common *common) +{ + unsigned int link_id; + unsigned int line; + + dev_dbg(common->dev, "Destroy"); + + max9x_disable_translations(common); + + for (link_id = 0; link_id < common->num_serial_links; link_id++) { + max9x_disable_serial_link(common, link_id); + max9x_sysfs_destroy_get_link(common, link_id); + } + + for (line = 0; line < common->num_line_faults; line++) + max9x_sysfs_destroy_line_fault_status(common, line); + + max9x_disable(common); + + /* unregister devices? 
*/ + + v4l2_async_unregister_subdev(&common->v4l.sd); + media_entity_cleanup(&common->v4l.sd.entity); + + i2c_mux_del_adapters(common->muxc); + mutex_destroy(&common->link_mutex); + mutex_destroy(&common->isolate_mutex); +} +EXPORT_SYMBOL(max9x_destroy); + +/* + * max9x_serdes_mhz_to_rate()- Lookup register value for given frequency + * + * Return: Positive value on success, negative error on failure + */ +int max9x_serdes_mhz_to_rate(struct max9x_serdes_rate_table *table, + int len, unsigned int freq_mhz) +{ + int i; + + for (i = 0; i < len; i++) + if (table[i].freq_mhz == freq_mhz) + return table[i].val; + + WARN_ONCE(true, "Invalid GMSL frequency: %d MHz", freq_mhz); + + return -EINVAL; +} +EXPORT_SYMBOL(max9x_serdes_mhz_to_rate); + +struct max9x_common *max9x_phandle_to_common(struct device_node *node, const char *name) +{ + struct device_node *des_node = of_parse_phandle(node, name, 0); + struct i2c_client *c = of_find_i2c_device_by_node(des_node); + + if (!c) + return NULL; + + return max9x_client_to_common(c); +} +EXPORT_SYMBOL(max9x_phandle_to_common); + +struct max9x_common *max9x_sd_to_common(struct v4l2_subdev *sd) +{ + if (!sd) + return NULL; + + struct max9x_serdes_v4l *v4l = container_of(sd, struct max9x_serdes_v4l, sd); + + return container_of(v4l, struct max9x_common, v4l); +} +EXPORT_SYMBOL(max9x_sd_to_common); + +struct max9x_common *max9x_client_to_common(struct i2c_client *client) +{ + if (!client) + return NULL; + + return max9x_sd_to_common(i2c_get_clientdata(client)); +} +EXPORT_SYMBOL(max9x_client_to_common); + + +int max9x_soft_reset(struct max9x_common *common) +{ + if (common->common_ops && common->common_ops->soft_reset) + return common->common_ops->soft_reset(common); + + return 0; +} + +int max9x_remap_addr(struct max9x_common *common) +{ + if (common->common_ops && common->common_ops->remap_addr) + return common->common_ops->remap_addr(common); + + return 0; +} + +int max9x_setup_gpio(struct max9x_common *common) +{ + if 
(common->common_ops && common->common_ops->remap_addr) + return common->common_ops->setup_gpio(common); + + return 0; +} + +/* + * max9x_enable() - Enable reset and power to des. + */ +int max9x_enable(struct max9x_common *common) +{ + struct device *dev = common->dev; + unsigned int phys_addr, virt_addr; + int ret; + + if (common->regulator_enabled) + return 0; + + virt_addr = common->client->addr; + phys_addr = (common->phys_client ? common->phys_client->addr : virt_addr); + + if (!IS_ERR_OR_NULL(common->vdd_regulator)) { + ret = regulator_enable(common->vdd_regulator); + if (ret) { + dev_err(dev, "Failed to enable %s", MAX9X_VDD_REGULATOR_NAME); + return ret; + } + common->regulator_enabled = true; + } + + /* Ensure device is reset */ + if (!IS_ERR_OR_NULL(common->reset_gpio)) { + gpiod_set_value_cansleep(common->reset_gpio, 1); + usleep_range(1000, 1050); + gpiod_set_value_cansleep(common->reset_gpio, 0); + } else { + /* No reset_gpio, device requires soft-reset */ + if (phys_addr == virt_addr) { + /* No remapping necessary */ + ret = max9x_soft_reset(common); + if (ret) + goto err; + } else if (phys_addr != virt_addr) { + /* Try virt address first */ + ret = max9x_soft_reset(common); + if (ret) { + /* Nope? 
Try phys address next */ + struct regmap *virt_map = common->map; + + common->map = common->phys_map; + ret = max9x_soft_reset(common); + common->map = virt_map; + if (ret) + goto err; + } + } + } + + dev_info(dev, "sleep for startup after soft reset"); + usleep_range(100000, 100050); + + if (phys_addr != virt_addr) { + /* Device is now reset, but requires remap */ + ret = max9x_remap_addr(common); + if (ret) + goto err; + } + + if (common->common_ops && common->common_ops->enable) { + ret = common->common_ops->enable(common); + if (ret) + goto err; + } + + return 0; + +err: + if (!IS_ERR_OR_NULL(common->reset_gpio)) + gpiod_set_value_cansleep(common->reset_gpio, 1); + + if (common->regulator_enabled && !IS_ERR_OR_NULL(common->vdd_regulator)) + regulator_disable(common->vdd_regulator); + + common->regulator_enabled = false; + + return ret; +} + +/* + * max9x_disable() - Disable reset and power to des. + */ +int max9x_disable(struct max9x_common *common) +{ + struct device *dev = common->dev; + int ret; + + ret = max9x_disable_line_faults(common); + if (ret) { + dev_err(dev, "Failed to disable line faults"); + return ret; + } + + if (common->regulator_enabled) { + if (common->common_ops && common->common_ops->disable) + return common->common_ops->disable(common); + + common->regulator_enabled = false; + + if (!IS_ERR_OR_NULL(common->reset_gpio)) + gpiod_set_value_cansleep(common->reset_gpio, 1); + + if (!IS_ERR_OR_NULL(common->vdd_regulator)) { + ret = regulator_disable(common->vdd_regulator); + if (ret) { + dev_err(dev, "Failed to disable %s", MAX9X_VDD_REGULATOR_NAME); + return ret; + } + } + } + return 0; +} + +static int max9x_get_chip_type(unsigned int dev_id) +{ + unsigned int des_num = sizeof(max9x_chips) / sizeof(struct max9x_desc); + unsigned int i = 0; + + for (i = 0; i < des_num; i++) { + if (dev_id == max9x_chips[i].dev_id) + return i; + } + + return -1; +} + +int max9x_verify_devid(struct max9x_common *common) +{ + struct device *dev = common->dev; + 
struct regmap *map = common->map; + struct regmap *phys_map = common->phys_map; + unsigned int dev_id, dev_rev, chip_type; + int ret; + + /* + * Fetch and output chip name + revision + * try both virtual address and physical address + */ + ret = regmap_read(map, MAX9X_DEV_ID, &dev_id); + if (ret) { + dev_warn(dev, "Failed to read chip ID from virtual address"); + if (phys_map) { + ret = regmap_read(phys_map, MAX9X_DEV_ID, &dev_id); + if (ret) { + dev_err(dev, "Failed to read chip ID from phys address"); + return ret; + } + } else + return ret; + } + + chip_type = max9x_get_chip_type(dev_id); + if (chip_type == -1) { + dev_warn(dev, "Unknown chip ID 0x%x", dev_id); + return -1; + } + common->des = &max9x_chips[chip_type]; + common->type = common->des->serdes_type; + TRY(ret, regmap_read(map, common->des->rev_reg, &dev_rev)); + dev_rev = FIELD_GET(MAX9X_DEV_REV_FIELD, dev_rev); + + dev_info(dev, "Detected MAX9x chip ID 0x%x revision 0x%x", dev_id, dev_rev); + return 0; +} + +/* TODO: remap not hardcode according to pdata */ +int max9x_remap_serializers(struct max9x_common *common, unsigned int link_id) +{ + int ret; + struct max9x_serdes_serial_link *serial_link = &common->serial_link[link_id]; + unsigned int phys_addr, virt_addr; + struct i2c_client *phys_client, *virt_client; + struct regmap *phys_map, *virt_map; + unsigned int val; + const struct regmap_config regmap_config = { + .reg_bits = 16, + .val_bits = 8, + }; + + if (!serial_link->remote.pdata) + return 0; + + ret = max9x_select_i2c_chan(common->muxc, link_id); + if (ret) + return ret; + + ret = max9x_des_isolate_serial_link(common, link_id); + if (ret) + return ret; + + phys_addr = serial_link->remote.pdata->phys_addr; + virt_addr = serial_link->remote.pdata->board_info.addr; + if (phys_addr == virt_addr) + return 0; + + dev_info(common->dev, "Remap serializer from 0x%02x to 0x%02x", phys_addr, virt_addr); + + phys_client = i2c_new_dummy_device(common->client->adapter, phys_addr); + if 
(IS_ERR_OR_NULL(phys_client)) { + dev_err(common->dev, "Failed to create dummy client for phys_addr"); + return PTR_ERR(phys_client); + } + + phys_map = regmap_init_i2c(phys_client, ®map_config); + if (IS_ERR_OR_NULL(phys_map)) + goto err_client; + + virt_client = i2c_new_dummy_device(common->client->adapter, virt_addr); + if (IS_ERR_OR_NULL(virt_client)) + goto err_regmap; + + virt_map = regmap_init_i2c(virt_client, ®map_config); + if (IS_ERR_OR_NULL(virt_map)) + goto err_virt_client; + + ret = regmap_read(virt_map, MAX9X_DEV_ID, &val); + if (!ret) { + dev_info(common->dev, "Remap not necessary"); + ret = 0; + goto err_virt_regmap; + } + + ret = regmap_read(phys_map, MAX9X_DEV_ID, &val); + if (ret) { + dev_err(common->dev, "Device not present at 0x%02x", phys_addr); + goto err_virt_regmap; + } else { + dev_info(common->dev, "DEV_ID before: 0x%02x", val); + } + + ret = regmap_write(phys_map, 0x00, (virt_addr & 0x7f) << 1); + if (ret) { + dev_err(common->dev, "Failed to remap serialzier from 0x%02x to 0x%02x (%d)", + phys_addr, virt_addr, ret); + goto err_virt_regmap; + } + + usleep_range(1000, 1050); + + ret = regmap_read(virt_map, MAX9X_DEV_ID, &val); + if (ret) { + dev_err(common->dev, "Device not present after remap to 0x%02x", virt_addr); + goto err_virt_regmap; + } else { + dev_info(common->dev, "DEV_ID after: 0x%02x", val); + } + +err_virt_regmap: + regmap_exit(virt_map); +err_virt_client: + i2c_unregister_device(virt_client); + +err_regmap: + regmap_exit(phys_map); +err_client: + i2c_unregister_device(phys_client); + + max9x_deselect_i2c_chan(common->muxc, link_id); + + max9x_des_deisolate_serial_link(common, link_id); + + return ret; +} + +int max9x_create_adapters(struct max9x_common *common) +{ + struct device *dev = common->dev; + unsigned int link_id; + const unsigned int RETRY_MS_MIN = 32; + const unsigned int RETRY_MS_MAX = 512; + unsigned int ms; + int err = 0; + + for (link_id = 0; link_id < common->num_serial_links; link_id++) { + err = 
max9x_sysfs_create_get_link(common, link_id); + if (err) { + dev_err(dev, "failed to create sysfs lock status file for link %d", + link_id); + continue; + } + + dev_info(dev, "Serial-link %d: %senabled", + link_id, (common->serial_link[link_id].enabled ? "" : "not ")); + + if (!common->serial_link[link_id].enabled) + continue; + + /* This exponential retry works around a current problem in the locking code. */ + for (ms = RETRY_MS_MIN; ms <= RETRY_MS_MAX; ms <<= 1) { + err = max9x_enable_serial_link(common, link_id); + if (!err) + break; + + dev_warn(dev, + "enable link %d failed, trying again (waiting %d ms)", + link_id, ms); + msleep(ms); + } + if (ms > RETRY_MS_MAX) { + dev_err(dev, "failed to enable link %d after multiple retries", + link_id); + goto err_disable; + } + + if (common->type == MAX9X_DESERIALIZER) { + err = max9x_remap_serializers(common, link_id); + if (err) { + dev_err(dev, "failed to remap serializers on link %d", link_id); + goto err_disable; + } + } + + continue; +err_disable: + max9x_disable_serial_link(common, link_id); + common->serial_link[link_id].enabled = false; + } + + for (link_id = 0; link_id < common->num_serial_links; link_id++) { + max9x_setup_translations(common); + + err = i2c_mux_add_adapter(common->muxc, 0, link_id); + if (err) { + dev_err(dev, "failed to add adapter for link %d", + link_id); + max9x_disable_serial_link(common, link_id); + continue; + } + } + + return 0; +} + +static void max9x_des_s_csi_link(struct max9x_common *common, + unsigned int serial_link_id, int enable) +{ + unsigned int video_pipe_id; + int err = 0; + + for (video_pipe_id = 0; video_pipe_id < common->num_video_pipes; + video_pipe_id++) { + struct max9x_serdes_video_pipe *video_pipe = + &common->video_pipe[video_pipe_id]; + unsigned int map_id; + + if (!video_pipe->enabled) + continue; + + if (video_pipe->config.src_link != serial_link_id) + continue; + + for (map_id = 0; map_id < video_pipe->config.num_maps; + map_id++) { + unsigned int csi_link_id 
= + video_pipe->config.map[map_id].dst_csi; + + if (common->csi_link[csi_link_id].config.auto_start) + continue; /* Already started at probe */ + + if (enable) { + if (common->csi_link_ops->enable) { + err = common->csi_link_ops->enable( + common, csi_link_id); + if (err) + dev_warn( + common->dev, + "csi_link_ops->enable CSI %d failed: %d", + csi_link_id, err); + } + } else { + if (common->csi_link_ops->disable) { + err = common->csi_link_ops->disable( + common, csi_link_id); + if (err) + dev_warn( + common->dev, + "csi_link_ops->disable CSI %d failed: %d", + csi_link_id, err); + } + } + } + } +} + +static int _max9x_s_remote_stream(struct max9x_common *common, u32 sink_pad, + u32 sink_stream, int enable) +{ + struct media_pad *remote_pad; + struct v4l2_subdev *remote_sd; + int ret = 0; + + if (sink_pad < 0 || sink_pad >= common->v4l.num_pads) + return -EINVAL; + + remote_pad = media_pad_remote_pad_first(&common->v4l.pads[sink_pad]); + if (IS_ERR_OR_NULL(remote_pad)) { + dev_err(common->dev, "Failed to find remote pad for %s %u", + common->v4l.sd.entity.name, sink_pad); + return IS_ERR(remote_pad) ? PTR_ERR(remote_pad) : -ENODEV; + } + remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); + if (!remote_sd) { + dev_err(common->dev, "Failed to resolve entity %s to subdev", + remote_pad->entity->name); + return -ENODEV; + } + + if (common->type == MAX9X_DESERIALIZER) { + ret = enable ? v4l2_subdev_enable_streams(remote_sd, + remote_pad->index, + BIT(sink_stream)) : + v4l2_subdev_disable_streams(remote_sd, + remote_pad->index, + BIT(sink_stream)); + + } else { + ret = v4l2_subdev_call(remote_sd, video, s_stream, enable); + } + + if (ret) { + dev_err(common->dev, "Failed to %s stream %s %u:%u", + enable ? 
"enable" : "disable", remote_sd->entity.name, + remote_pad->index, sink_stream); + return ret; + } + + return ret; +} + +static int _max9x_des_set_stream(struct max9x_common *common, u32 sink_pad, + u32 sink_stream, int enable) +{ + u32 rxport; + int ret = 0; + + if (sink_pad < 0 || sink_pad >= common->v4l.num_pads) + return -EINVAL; + + rxport = sink_pad - common->num_csi_links; + if (rxport < 0 || rxport >= common->num_serial_links) { + dev_err(common->dev, "Failed to get rxport for pad: %u!", + sink_pad); + return -EINVAL; + } + if (enable) + max9x_des_s_csi_link(common, rxport, enable); + + ret = _max9x_s_remote_stream(common, sink_pad, sink_stream, enable); + if (ret) { + dev_err(common->dev, + "Failed to %s remote stream for sink %s %u:%u", + enable ? "enable" : "disable", + common->v4l.sd.entity.name, sink_pad, sink_stream); + return ret; + } + if (!enable) + max9x_des_s_csi_link(common, rxport, enable); + + return 0; +} + +static int _max9x_ser_set_stream(struct max9x_common *common, u32 sink_pad, + u32 sink_stream, int enable) +{ + return _max9x_s_remote_stream(common, sink_pad, sink_stream, enable); +} + +static int max9x_streams_mask_to_stream(u64 streams_mask, u32 *stream) +{ + int ret = -EINVAL; + + for (int i = 0; i < 64; i++) { + if (streams_mask & BIT(i)) { + *stream = i; + ret = 0; + break; + } + } + return ret; +} + +static int max9x_get_corresponding_pad_and_stream(struct v4l2_subdev_state *state, u32 pad, + u32 stream, u32 *another_pad, + u32 *another_stream) +{ + struct v4l2_subdev_route *route; + + for_each_active_route(&state->routing, route) { + if (route->sink_pad == pad && route->sink_stream == stream) { + *another_pad = route->source_pad; + *another_stream = route->source_stream; + return 0; + } else if (route->source_pad == pad && + route->source_stream == stream) { + *another_pad = route->sink_pad; + *another_stream = route->sink_stream; + return 0; + } + } + + return -EINVAL; +} + +static int _max9x_set_stream(struct v4l2_subdev 
*subdev, + struct v4l2_subdev_state *state, u32 pad, + u64 streams_mask, int enable) +{ + struct max9x_common *common = max9x_sd_to_common(subdev); + u32 sink_pad; + u32 sink_stream; + u32 source_stream; + int ret = 0; + + ret = max9x_streams_mask_to_stream(streams_mask, &source_stream); + if (ret) { + dev_err(common->dev, "Failed to get source stream!"); + return ret; + } + + ret = max9x_get_corresponding_pad_and_stream(state, pad, source_stream, + &sink_pad, &sink_stream); + if (ret) { + dev_err(common->dev, + "Failed to get sink pad and sink stream for %s %u:%u", + common->v4l.sd.entity.name, pad, source_stream); + return -EINVAL; + } + + if (common->type == MAX9X_DESERIALIZER) + ret = _max9x_des_set_stream(common, sink_pad, sink_stream, + enable); + else + ret = _max9x_ser_set_stream(common, sink_pad, sink_stream, + enable); + + return ret; +} + +static int max9x_enable_streams(struct v4l2_subdev *subdev, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + return _max9x_set_stream(subdev, state, pad, streams_mask, true); +} + +static int max9x_disable_streams(struct v4l2_subdev *subdev, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + return _max9x_set_stream(subdev, state, pad, streams_mask, false); +} + +static struct v4l2_mbus_framefmt *__max9x_get_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *v4l2_state, + struct v4l2_subdev_format *fmt) +{ + struct max9x_common *common = max9x_sd_to_common(sd); + + if (IS_ERR_OR_NULL(fmt)) { + dev_err(common->dev, "Invalid fmt %p", fmt); + return ERR_PTR(-EINVAL); + } + + if (fmt->pad < 0 || fmt->pad >= common->v4l.num_pads) { + dev_err(sd->dev, "%s invalid pad %d", __func__, fmt->pad); + return ERR_PTR(-EINVAL); + } + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_state_get_format(v4l2_state, fmt->pad, fmt->stream); + + if (fmt->pad >= 0 && fmt->pad < common->v4l.num_pads) + return &common->v4l.ffmts[fmt->pad]; + + return ERR_PTR(-EINVAL); +} + +static int 
max9x_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *v4l2_state, + struct v4l2_subdev_format *fmt) +{ + struct max9x_common *common = max9x_sd_to_common(sd); + struct max9x_serdes_v4l *v4l = &common->v4l; + struct v4l2_mbus_framefmt *ffmt; + + mutex_lock(&v4l->lock); + + ffmt = __max9x_get_ffmt(sd, v4l2_state, fmt); + if (IS_ERR_OR_NULL(ffmt)) { + mutex_unlock(&v4l->lock); + return -EINVAL; + } + + fmt->format = *ffmt; + mutex_unlock(&v4l->lock); + + dev_info(sd->dev, "framefmt: width: %d, height: %d, code: 0x%x.", + fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +static int max9x_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *v4l2_state, + struct v4l2_subdev_format *fmt) +{ + struct max9x_common *common = max9x_sd_to_common(sd); + struct max9x_serdes_v4l *v4l = &common->v4l; + struct v4l2_mbus_framefmt *ffmt; + + mutex_lock(&v4l->lock); + + ffmt = __max9x_get_ffmt(sd, v4l2_state, fmt); + if (IS_ERR_OR_NULL(ffmt)) { + mutex_unlock(&v4l->lock); + return -EINVAL; + } + + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = fmt->format.code; + } + fmt->format = *ffmt; + + mutex_unlock(&v4l->lock); + + return 0; +} + +static int max9x_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *desc) +{ + struct max9x_common *common = max9x_sd_to_common(sd); + int ret = -EINVAL; + + if (((common->type != MAX9X_DESERIALIZER) && + (common->type != MAX9X_SERIALIZER)) || + pad < 0 || pad >= common->v4l.num_pads) + return -EINVAL; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + + struct v4l2_subdev_state *state = + v4l2_subdev_lock_and_get_active_state(sd); + struct v4l2_subdev_route *route; + + for_each_active_route(&state->routing, route) { + if (route->source_pad != pad) + continue; + if (route->sink_pad >= common->v4l.num_pads) { + ret = -EINVAL; + dev_err(common->dev, "Found invalid route sink_pad!"); + 
goto out_unlock; + } + + struct media_pad *remote_pad = media_pad_remote_pad_first( + &common->v4l.pads[route->sink_pad]); + struct v4l2_mbus_frame_desc source_desc; + + ret = v4l2_subdev_call( + media_entity_to_v4l2_subdev(remote_pad->entity), pad, + get_frame_desc, remote_pad->index, &source_desc); + if (ret) { + dev_err(common->dev, + "Failed to get sink pad %u remote frame desc!", + route->sink_pad); + goto out_unlock; + } + + struct v4l2_mbus_frame_desc_entry *source_desc_entry; + + for (int i = 0; i < source_desc.num_entries; i++) { + if (source_desc.entry[i].stream == route->sink_stream) { + source_desc_entry = &source_desc.entry[i]; + break; + } + } + if (!source_desc_entry) { + ret = -EPIPE; + dev_err(common->dev, + "Failed to get source desc entry for pad: %u, stream: %u", + route->sink_pad, route->sink_stream); + goto out_unlock; + } + desc->entry[desc->num_entries].flags = source_desc_entry->flags; + desc->entry[desc->num_entries].stream = route->source_stream; + desc->entry[desc->num_entries].pixelcode = + source_desc_entry->pixelcode; + desc->entry[desc->num_entries].length = + source_desc_entry->length; + if (common->type == MAX9X_DESERIALIZER) + desc->entry[desc->num_entries].bus.csi2.vc = + route->sink_pad - common->num_csi_links; + desc->entry[desc->num_entries].bus.csi2.dt = + source_desc_entry->bus.csi2.dt; + desc->num_entries++; + } + +out_unlock: + v4l2_subdev_unlock_state(state); + return ret; +} + +static int max9x_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *v4l2_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (mbus_code_to_csi_dt(code->code) < 0) + return -EINVAL; + return 0; +} + +static int _max9x_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_krouting *routing) +{ + static const struct v4l2_mbus_framefmt format = { + .width = 1920, + .height = 1536, + .code = MEDIA_BUS_FMT_UYVY8_1X16, + }; + int ret; + + /* + * Note: we can only support up to 
V4L2_FRAME_DESC_ENTRY_MAX, until + * frame desc is made dynamically allocated. + */ + + if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX) + return -E2BIG; + + ret = v4l2_subdev_routing_validate(sd, routing, + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 | + V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX); + if (ret) + return ret; + + ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format); + if (ret) + return ret; + return 0; +} + +static int max9x_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + enum v4l2_subdev_format_whence which, + struct v4l2_subdev_krouting *routing) +{ + _max9x_set_routing(sd, state, routing); + + return 0; +} + +static int max9x_init_state(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state) +{ + struct max9x_common *common = max9x_sd_to_common(sd); + + struct v4l2_subdev_route des_routes[] = { + { + .sink_pad = 5, + .sink_stream = 0, + .source_pad = 0, + .source_stream = 0, + .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, + }, + }; + struct v4l2_subdev_route ser_routes[] = { + { + .sink_pad = 0, + .sink_stream = 0, + .source_pad = 2, + .source_stream = 0, + .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, + }, + }; + struct v4l2_subdev_krouting des_routing = { + .num_routes = ARRAY_SIZE(des_routes), + .routes = des_routes, + }; + struct v4l2_subdev_krouting ser_routing = { + .num_routes = ARRAY_SIZE(ser_routes), + .routes = ser_routes, + }; + + if (common->type == MAX9X_DESERIALIZER) + return _max9x_set_routing(sd, state, &des_routing); + else + return _max9x_set_routing(sd, state, &ser_routing); +} + +static const struct v4l2_subdev_pad_ops max9x_sd_pad_ops = { + .get_fmt = max9x_get_fmt, + .set_fmt = max9x_set_fmt, + .get_frame_desc = max9x_get_frame_desc, + .enum_mbus_code = max9x_enum_mbus_code, + .set_routing = max9x_set_routing, + .enable_streams = max9x_enable_streams, + .disable_streams = max9x_disable_streams, +}; + +static struct v4l2_subdev_ops max9x_sd_ops = { + .pad = &max9x_sd_pad_ops, +}; + +static int 
max9x_registered(struct v4l2_subdev *sd) +{ + struct max9x_common *common = max9x_sd_to_common(sd); + struct device *dev = common->dev; + int ret; + + for (unsigned int link_id = 0; link_id < common->num_serial_links; link_id++) { + if (!common->serial_link[link_id].enabled) { + dev_dbg(dev, "Serial-link %d not enabled, skipping subdevs", link_id); + continue; + } + + if (common->type == MAX9X_DESERIALIZER) { + struct max9x_subdev_pdata *subdev_pdata = + common->serial_link[link_id].remote.pdata; + + if (subdev_pdata) { + struct max9x_pdata *ser_pdata = + subdev_pdata->board_info.platform_data; + + WARN_ON(ser_pdata->num_serial_links < 1); + + ser_pdata->serial_links[0].des_client = common->client; + ser_pdata->serial_links[0].des_link_id = link_id; + /* + * Isolate this link until after reset and potential address remapping, + * avoiding a race condition with two serializers resetting same + * physical i2c at the same time + */ + ret = max9x_des_isolate_serial_link(common, link_id); + if (ret) + return ret; + + struct v4l2_subdev *subdev = + v4l2_i2c_new_subdev_board(sd->v4l2_dev, + common->muxc->adapter[link_id], + &subdev_pdata->board_info, NULL); + + ret = max9x_des_deisolate_serial_link(common, link_id); + if (ret) + return ret; + + if (IS_ERR_OR_NULL(subdev)) { + dev_err(dev, "Failure registering serializer %s (0x%02x)", + subdev_pdata->board_info.type, + subdev_pdata->board_info.addr); + return PTR_ERR(subdev); + } + + dev_dbg(dev, "Registered serializer %s (0x%02x)", + subdev_pdata->board_info.type, + subdev_pdata->board_info.addr); + + struct max9x_common *ser_common = max9x_sd_to_common(subdev); + + int remote_pad = max9x_serial_link_to_pad(ser_common, 0); + int local_pad = max9x_serial_link_to_pad(common, link_id); + + dev_dbg(dev, "Create link from ser link 0 (pad %d) -> des link %d (pad %d)", + remote_pad, link_id, local_pad); + + ret = media_create_pad_link(&subdev->entity, remote_pad, + &sd->entity, local_pad, + MEDIA_LNK_FL_IMMUTABLE | + 
MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_err(dev, "Failed creating pad link to serializer"); + return ret; + } + /* + * for suspend/resume sequential + * ser_common->serial_link[0].near.client = common->client; + * common->serial_link[link_id].remote.client = ser_common->client; + */ + } + } else { + struct max9x_pdata *pdata = dev->platform_data; + + for (unsigned int i = 0; i < pdata->num_subdevs; i++) { + struct max9x_subdev_pdata *subdev_pdata = &pdata->subdevs[i]; + + if (subdev_pdata->serial_link_id == link_id) { + char dev_id[I2C_NAME_SIZE]; + + snprintf(dev_id, sizeof(dev_id), "i2c-%s", + subdev_pdata->board_info.dev_name); + + dev_dbg(dev, "Registering sensor %s (%s)...", + subdev_pdata->board_info.type, dev_id); + + static struct gpiod_lookup_table sensor_gpios = { + .dev_id = "", + .table = { + GPIO_LOOKUP("", 0, "reset", + GPIO_ACTIVE_HIGH), + {} + }, + }; + + sensor_gpios.dev_id = dev_id; + sensor_gpios.table[0].key = common->gpio_chip.label; + + gpiod_add_lookup_table(&sensor_gpios); + + struct v4l2_subdev *subdev = + v4l2_i2c_new_subdev_board(sd->v4l2_dev, + common->muxc->adapter[link_id], + &subdev_pdata->board_info, NULL); + + gpiod_remove_lookup_table(&sensor_gpios); + + if (IS_ERR_OR_NULL(subdev)) { + dev_err(dev, + "Failure registering sensor %s (0x%02x)", + subdev_pdata->board_info.type, + subdev_pdata->board_info.addr); + return PTR_ERR(subdev); + } + + dev_dbg(dev, "Registered sensor %s (0x%02x)", + subdev_pdata->board_info.type, + subdev_pdata->board_info.addr); + + int remote_pad = media_get_pad_index(&subdev->entity, + MEDIA_PAD_FL_SOURCE, + PAD_SIGNAL_DEFAULT); + int local_pad = max9x_csi_link_to_pad(common, 0); + + dev_dbg(dev, "Create link from sen pad %d -> ser link %d (pad %d)", + remote_pad, link_id, + local_pad); + + ret = media_create_pad_link(&subdev->entity, remote_pad, + &sd->entity, local_pad, + MEDIA_LNK_FL_IMMUTABLE | + MEDIA_LNK_FL_ENABLED); + if (ret) { + dev_err(dev, "Failed creating pad link to serializer"); + return 
ret; + } + + } + } + } + } + + return 0; +} + +static struct v4l2_subdev_internal_ops max9x_sd_internal_ops = { + .registered = max9x_registered, + .init_state = max9x_init_state, +}; + +static int max9x_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct max9x_serdes_v4l *v4l = container_of(ctrl->handler, struct max9x_serdes_v4l, ctrl_handler); + struct max9x_common *common = container_of(v4l, struct max9x_common, v4l); + struct device *dev = common->dev; + + dev_dbg(dev, "s_ctrl"); + + switch (ctrl->id) { + case V4L2_CID_LINK_FREQ: { + if (ctrl->p_new.p_u8) { + if (*ctrl->p_new.p_u8 <= (ARRAY_SIZE(max9x_op_sys_clock) - 1)) { + unsigned int csi_link_id; + struct v4l2_ctrl *link_freq = v4l->link_freq; + + dev_info(dev, "user-modified %s index val=%d to user-val=%d", + ctrl->name, (unsigned int)link_freq->val, + (unsigned int)*ctrl->p_new.p_u8); + + link_freq->val = (s32) *ctrl->p_new.p_u8; + + for (csi_link_id = 0; csi_link_id < common->num_csi_links; csi_link_id++) { + if (!common->csi_link[csi_link_id].enabled) + continue; + + common->csi_link[csi_link_id].config.freq_mhz = + MAX9X_LINK_FREQ_HZ_TO_MBPS(max9x_op_sys_clock[link_freq->val]); + + if (common->csi_link[csi_link_id].config.freq_mhz > 1500) { + common->csi_link[csi_link_id].config.auto_init_deskew_enabled = true; + common->csi_link[csi_link_id].config.initial_deskew_width = 7; + } + } + } + } + } + break; + + default: + dev_info(dev, "unknown control id: 0x%X", ctrl->id); + } + + return 0; +} + +static const struct v4l2_ctrl_ops max9x_ctrl_ops = { + .s_ctrl = max9x_s_ctrl, +}; + +static struct v4l2_ctrl_config max9x_v4l2_controls[] = { + { + .ops = &max9x_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .min = 0, + .max = ARRAY_SIZE(max9x_op_sys_clock) - 1, + .def = 2, + .menu_skip_mask = 0, + .qmenu_int = max9x_op_sys_clock, + }, +}; + +static int max9x_csi_link_to_pad(struct max9x_common *common, int csi_id) +{ + return (csi_id < 
common->num_csi_links) ? csi_id : -EINVAL; +} + +static int max9x_serial_link_to_pad(struct max9x_common *common, int link_id) +{ + dev_dbg(common->dev, "link_id %d < num_serial_links %d ? num_csi_links %d + link_id %d : -EINVAL", + link_id, common->num_serial_links, common->num_csi_links, link_id); + return (link_id < common->num_serial_links) ? common->num_csi_links + link_id : -EINVAL; +} + +static int max9x_register_v4l_subdev(struct max9x_common *common) +{ + struct i2c_client *client = common->client; + struct device *dev = common->dev; + struct max9x_serdes_v4l *v4l = &common->v4l; + struct v4l2_subdev *sd = &v4l->sd; + struct v4l2_ctrl_handler *ctrl_handler = &v4l->ctrl_handler; + struct max9x_pdata *pdata = dev->platform_data; + int ret; + + mutex_init(&v4l->lock); + + v4l2_i2c_subdev_init(sd, client, &max9x_sd_ops); + snprintf(sd->name, sizeof(sd->name), "%s %s", client->name, pdata->suffix); + + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS; + sd->internal_ops = &max9x_sd_internal_ops; + sd->entity.function = MEDIA_ENT_F_VID_MUX; + + v4l->num_pads = common->num_csi_links + common->num_serial_links; + v4l->pads = devm_kzalloc(dev, v4l->num_pads * sizeof(*v4l->pads), GFP_KERNEL); + v4l->ffmts = devm_kzalloc(dev, v4l->num_pads * sizeof(*v4l->ffmts), GFP_KERNEL); + + /* change sink/source turn */ + for (unsigned int p = 0; p < v4l->num_pads; p++) { + struct media_pad *pad = &v4l->pads[p]; + + if (p < common->num_csi_links) { + unsigned int c = p; + + dev_dbg(dev, "pad %d/%d -> csi %d", p, v4l->num_pads, c); + + pad->flags = (common->type == MAX9X_SERIALIZER) ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; + } else if (p >= common->num_csi_links) { + unsigned int s = p - common->num_csi_links; + + dev_dbg(dev, "pad %d/%d -> serial %d", p, v4l->num_pads, s); + + pad->flags = (common->type == MAX9X_SERIALIZER) ? 
MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; + } + } + + ret = v4l2_ctrl_handler_init(ctrl_handler, ARRAY_SIZE(max9x_v4l2_controls)); + if (ret) { + dev_err(dev, "Failed to init V4L2 controls: %d", ret); + return ret; + } + + for (int i = 0; i < ARRAY_SIZE(max9x_v4l2_controls); i++) { + struct v4l2_ctrl_config *ctrl_config = &max9x_v4l2_controls[i]; + struct v4l2_ctrl *ctrl; + + if (ctrl_config->id == V4L2_CID_LINK_FREQ) { + unsigned int link_freq_n; + unsigned int csi_link_id; + + for (link_freq_n = 0; link_freq_n < ARRAY_SIZE(max9x_op_sys_clock); link_freq_n++) { + unsigned int link_freq = MAX9X_LINK_FREQ_HZ_TO_MBPS(max9x_op_sys_clock[link_freq_n]); + + for (csi_link_id = 0; csi_link_id < common->num_csi_links; csi_link_id++) { + if (!common->csi_link[csi_link_id].enabled) + continue; + if (common->csi_link[csi_link_id].config.freq_mhz == link_freq) { + ctrl_config->def = link_freq_n; + break; + } + } + } + } + + ctrl = v4l2_ctrl_new_custom(ctrl_handler, ctrl_config, common); + if (!ctrl) { + ret = ctrl_handler->error; + dev_err(dev, "Failed to create V4L2 control %s: %d", ctrl_config->name, ret); + goto probe_error_v4l2_ctrl_handler_free; + } + } + + v4l->link_freq = v4l2_ctrl_find(ctrl_handler, V4L2_CID_LINK_FREQ); + v4l->sd.ctrl_handler = ctrl_handler; + + ret = media_entity_pads_init(&v4l->sd.entity, v4l->num_pads, v4l->pads); + if (ret) { + dev_err(dev, "Failed to init media entity: %d", ret); + goto probe_error_v4l2_ctrl_handler_free; + } + + ret = v4l2_subdev_init_finalize(&v4l->sd); + if (ret) { + dev_err(dev, "failed to init v4l2 subdev: %d\n", ret); + media_entity_cleanup(&v4l->sd.entity); + goto probe_error_media_entity_cleanup; + } + + ret = v4l2_async_register_subdev(&v4l->sd); + if (ret) { + dev_err(dev, "v4l register failed: %d", ret); + goto probe_error_media_entity_cleanup; + } + + return 0; + +probe_error_media_entity_cleanup: + media_entity_cleanup(&v4l->sd.entity); +probe_error_v4l2_ctrl_handler_free: + v4l2_ctrl_handler_free(ctrl_handler); + 
return ret; +} + +/* + * max9x_enable_serial_link() - Enable power and logic to link. + */ +int max9x_enable_serial_link(struct max9x_common *common, unsigned int link_id) +{ + struct max9x_serdes_serial_link *serial_link; + struct device *dev = common->dev; + int ret; + + serial_link = &common->serial_link[link_id]; + + if (!serial_link->regulator_enabled) { + if (!IS_ERR_OR_NULL(serial_link->poc_regulator)) { + ret = regulator_enable(serial_link->poc_regulator); + if (ret) { + dev_err(dev, "Failed to enable %s", MAX9X_POC_REGULATOR_NAME); + return ret; + } + } + + serial_link->regulator_enabled = true; + } + + if (common->serial_link_ops && common->serial_link_ops->enable) + return common->serial_link_ops->enable(common, link_id); + + return 0; +} + +/* + * max9x_disable_serial_link() - Disable power and logic to link. + */ +int max9x_disable_serial_link(struct max9x_common *common, unsigned int link_id) +{ + struct max9x_serdes_serial_link *serial_link; + struct device *dev = common->dev; + int ret; + + if (link_id >= common->num_serial_links) + return 0; + + serial_link = &common->serial_link[link_id]; + + if (serial_link->regulator_enabled) { + if (common->serial_link_ops && common->serial_link_ops->disable) + common->serial_link_ops->disable(common, link_id); + + serial_link->regulator_enabled = false; + + if (!IS_ERR_OR_NULL(serial_link->poc_regulator)) { + ret = regulator_disable(serial_link->poc_regulator); + + if (ret) { + dev_err(dev, "Failed to disable %s", MAX9X_POC_REGULATOR_NAME); + return ret; + } + } + } + + return 0; +} + +/* + * max9x_sysfs_create_get_link() - Creates a sysfs virtual file to check link lock status + */ +int max9x_sysfs_create_get_link(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + int ret; + char *attr_name; + + if (common->serial_link_ops && common->serial_link_ops->get_locked) { + struct device_attribute *link_lock_status = + devm_kzalloc(dev, sizeof(struct device_attribute), 
GFP_KERNEL); + + if (!link_lock_status) { + dev_err(dev, "Failed to allocate memory for link lock status"); + return -ENOMEM; + } + + attr_name = (char *)devm_kzalloc(dev, sizeof(char) * ATTR_NAME_LEN, GFP_KERNEL); + if (!attr_name) { + dev_err(dev, "Failed to allocate memory link lock attribute name"); + return -ENOMEM; + } + + ret = snprintf(attr_name, ATTR_NAME_LEN, "link-lock_%d", link_id); + if (ret < 0) + return ret; + + link_lock_status->attr.name = attr_name; + link_lock_status->attr.mode = ATTR_READ_ONLY; + link_lock_status->show = max9x_link_status_show; + + ret = device_create_file(dev, link_lock_status); + if (ret < 0) + return ret; + + common->serial_link[link_id].link_lock_status = link_lock_status; + } + + return 0; +} + +/* + * max9x_sysfs_destroy_get_link() - Destroys the sysfs device attribute for link lock status + */ +static void max9x_sysfs_destroy_get_link(struct max9x_common *common, unsigned int link_id) +{ + struct device *dev = common->dev; + + if (common->serial_link[link_id].link_lock_status) + device_remove_file(dev, common->serial_link[link_id].link_lock_status); +} + +/* + * max9x_enable_line_faults() - Enables all the line fault monitors using the device tree + */ +int max9x_enable_line_faults(struct max9x_common *common) +{ + return 0; +} + +/* + * max9x_disable_line_faults() - Disables all currently stored line fault monitors + */ +int max9x_disable_line_faults(struct max9x_common *common) +{ + struct device *dev = common->dev; + int ret; + int line; + int final_ret = 0; + + if (common->line_fault && + common->line_fault_ops && + common->line_fault_ops->disable) { + + for (line = 0; line < common->num_line_faults; line++) { + ret = common->line_fault_ops->disable(common, line); + + if (ret) { + dev_err(dev, "Failed to disable line fault %d", line); + final_ret = ret; + } + + common->line_fault[line].enabled = false; + } + } + + return final_ret; +} + +static void max9x_sysfs_destroy_line_fault_status(struct max9x_common *common, 
unsigned int line) +{ + struct device *dev = common->dev; + + if (common->line_fault[line].line_fault_status) + device_remove_file(dev, common->line_fault[line].line_fault_status); +} + +static int max9x_parse_pdata(struct max9x_common *common, struct max9x_pdata *pdata) +{ + int ret; + + for (int i = 0; i < pdata->num_serial_links; i++) { + ret = max9x_parse_serial_link_pdata(common, &pdata->serial_links[i]); + if (ret) + goto err; + } + + for (int i = 0; i < pdata->num_video_pipes; i++) { + ret = max9x_parse_video_pipe_pdata(common, &pdata->video_pipes[i]); + if (ret) + goto err; + } + + for (int i = 0; i < pdata->num_csi_links; i++) { + ret = max9x_parse_csi_link_pdata(common, &pdata->csi_links[i]); + if (ret) + goto err; + } + + for (int i = 0; i < pdata->num_subdevs; i++) { + ret = max9x_parse_subdev_pdata(common, &pdata->subdevs[i]); + if (ret) + goto err; + } + + common->external_refclk_enable = pdata->external_refclk_enable; + +err: + return ret; +} + +static int max9x_parse_serial_link_pdata(struct max9x_common *common, + struct max9x_serial_link_pdata *serial_link_pdata) +{ + struct device *dev = common->dev; + unsigned int serial_link_id = serial_link_pdata->link_id; + + if (serial_link_id > common->num_serial_links) { + dev_err(dev, "Serial link pdata: Invalid link id"); + return -EINVAL; + } + + struct max9x_serdes_serial_link *serial_link = &common->serial_link[serial_link_id]; + + serial_link->enabled = true; + + serial_link->config.link_type = serial_link_pdata->link_type; + serial_link->config.rx_freq_mhz = serial_link_pdata->rx_freq_mhz; + serial_link->config.tx_freq_mhz = serial_link_pdata->tx_freq_mhz; + + if (serial_link_pdata->poc_regulator[0] != 0) { + serial_link->poc_regulator = devm_regulator_get_optional(dev, serial_link_pdata->poc_regulator); + + if (PTR_ERR(serial_link->poc_regulator) == -EPROBE_DEFER) { + dev_dbg(dev, "POC regulator not ready deferring..."); + return -EPROBE_DEFER; + } + if (IS_ERR_OR_NULL(serial_link->poc_regulator)) 
+ dev_dbg(dev, "Missing POC regulator"); + } + + return 0; +} + +static int max9x_parse_video_pipe_pdata(struct max9x_common *common, + struct max9x_video_pipe_pdata *video_pipe_pdata) +{ + struct device *dev = common->dev; + unsigned int serial_link_id = video_pipe_pdata->serial_link_id; + unsigned int pipe_id = video_pipe_pdata->pipe_id; + unsigned int max_maps; + unsigned int max_data_types; + + if (serial_link_id > common->num_serial_links) { + dev_err(dev, "Video pdata: Invalid serial link id"); + return -EINVAL; + } + if (pipe_id > common->num_video_pipes) { + dev_err(dev, "Video pdata: Invalid video pipe id"); + return -EINVAL; + } + + struct max9x_serdes_video_pipe *pipe = &common->video_pipe[pipe_id]; + + pipe->enabled = true; + + max_maps = 0; + max_data_types = 0; + if (common->common_ops && common->common_ops->max_elements) { + max_maps = common->common_ops->max_elements(common, MAX9X_MIPI_MAP); + max_data_types = common->common_ops->max_elements(common, MAX9X_DATA_TYPES); + } + + if (common->type == MAX9X_DESERIALIZER) { + if (video_pipe_pdata->num_maps > max_maps) { + dev_err(dev, "Video pdata: Too many maps"); + return -EINVAL; + } + + pipe->config.map = devm_kzalloc(dev, + video_pipe_pdata->num_maps * sizeof(*pipe->config.map), + GFP_KERNEL); + if (!pipe->config.map) { + dev_err(dev, "Video pdata: Failed t oallocate mmeory for maps"); + return -ENOMEM; + } + + pipe->config.src_link = video_pipe_pdata->serial_link_id; + pipe->config.src_pipe = video_pipe_pdata->src_pipe_id; + + for (unsigned int i = 0; i < video_pipe_pdata->num_maps; i++) { + struct max9x_serdes_mipi_map *map = &pipe->config.map[i]; + struct max9x_serdes_mipi_map *map_pdata = &video_pipe_pdata->maps[i]; + + map->src_vc = map_pdata->src_vc; + map->src_dt = map_pdata->src_dt; + map->dst_vc = map_pdata->dst_vc; + map->dst_dt = map_pdata->dst_dt; + map->dst_csi = map_pdata->dst_csi; + } + pipe->config.num_maps = video_pipe_pdata->num_maps; + } else if (common->type == MAX9X_SERIALIZER) { 
+ if (video_pipe_pdata->num_data_types > max_data_types) { + dev_err(dev, "Video pdata: Too many maps"); + return -EINVAL; + } + + pipe->config.data_type = devm_kzalloc(dev, + video_pipe_pdata->num_data_types * sizeof(*pipe->config.map), + GFP_KERNEL); + if (!pipe->config.data_type) { + dev_err(dev, "Video pdata: Failed t oallocate mmeory for data types"); + return -ENOMEM; + } + + pipe->config.src_csi = video_pipe_pdata->src_csi_id; + + for (unsigned int i = 0; i < video_pipe_pdata->num_data_types; i++) { + pipe->config.data_type[i] = video_pipe_pdata->data_types[i]; + } + pipe->config.num_data_types = video_pipe_pdata->num_data_types; + } + + return 0; +} + +static int max9x_parse_csi_link_pdata(struct max9x_common *common, + struct max9x_csi_link_pdata *csi_link_pdata) +{ + unsigned int csi_link_id = csi_link_pdata->link_id; + + if (csi_link_id > common->num_csi_links) { + dev_err(common->dev, "CSI link pdata: Invalid link id"); + return -EINVAL; + } + + struct max9x_serdes_csi_link *csi_link = &common->csi_link[csi_link_id]; + + csi_link->enabled = true; + + csi_link->config.num_maps = csi_link_pdata->num_maps; + csi_link->config.map = + devm_kzalloc(common->dev, + csi_link->config.num_maps * sizeof(*csi_link->config.map), + GFP_KERNEL); + memcpy(csi_link->config.map, csi_link_pdata->maps, + csi_link->config.num_maps * sizeof(*csi_link->config.map)); + csi_link->config.bus_type = csi_link_pdata->bus_type; + csi_link->config.num_lanes = csi_link_pdata->num_lanes; + csi_link->config.freq_mhz = csi_link_pdata->tx_rate_mbps; + csi_link->config.auto_init_deskew_enabled = csi_link_pdata->auto_initial_deskew; + + if (csi_link->config.auto_init_deskew_enabled) + csi_link->config.initial_deskew_width = csi_link_pdata->initial_deskew_width; + csi_link->config.auto_start = csi_link_pdata->auto_start; + + return 0; +} + +static int max9x_parse_subdev_pdata(struct max9x_common *common, + struct max9x_subdev_pdata *subdev_pdata) +{ + unsigned int serial_link_id = 
subdev_pdata->serial_link_id; + struct max9x_serdes_serial_link *serial_link = &common->serial_link[serial_link_id]; + + if (!serial_link->enabled) + return 0; + + serial_link->remote.pdata = subdev_pdata; + + return 0; +} + + +int max9x_select_i2c_chan(struct i2c_mux_core *muxc, u32 chan_id) +{ + struct max9x_common *common = i2c_mux_priv(muxc); + int ret = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(10000); + + if (unlikely(chan_id > common->num_serial_links)) + return -EINVAL; + + do { + mutex_lock(&common->isolate_mutex); + if (common->selected_link < 0 || chan_id == common->selected_link) + break; + + mutex_unlock(&common->isolate_mutex); + + usleep_range(1000, 1050); + + if (time_is_before_jiffies(timeout)) { + dev_dbg(common->dev, "select %d TIMEOUT", chan_id); + return -ETIMEDOUT; + } + } while (1); + + common->selected_link = chan_id; + + if (common->serial_link_ops && common->serial_link_ops->select) + ret = common->serial_link_ops->select(common, chan_id); + + mutex_unlock(&common->isolate_mutex); + + return ret; +} + +int max9x_deselect_i2c_chan(struct i2c_mux_core *muxc, u32 chan_id) +{ + struct max9x_common *common = i2c_mux_priv(muxc); + int ret = 0; + + if (unlikely(chan_id > common->num_serial_links)) + return -EINVAL; + + if (common->serial_link_ops && common->serial_link_ops->deselect) + ret = common->serial_link_ops->deselect(common, chan_id); + + mutex_lock(&common->isolate_mutex); + common->selected_link = -1; + mutex_unlock(&common->isolate_mutex); + + return ret; +} + +int max9x_des_isolate_serial_link(struct max9x_common *common, unsigned int link_id) +{ + int ret = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(10000); + + if (link_id >= common->num_serial_links) { + dev_err(common->dev, "link_id %d outside of num_serial_links %d", link_id, common->num_serial_links); + return -EINVAL; + } + + dev_info(common->dev, "Isolate %d", link_id); + + do { + mutex_lock(&common->isolate_mutex); + if ((common->isolated_link < 0) 
&& (common->selected_link < 0 || link_id == common->selected_link)) + break; + + if (common->isolated_link == link_id) { + dev_warn(common->dev, "Link %d is already isolated", link_id); + mutex_unlock(&common->isolate_mutex); + return -EINVAL; + } + mutex_unlock(&common->isolate_mutex); + + usleep_range(1000, 1050); + + if (time_is_before_jiffies(timeout)) + return -ETIMEDOUT; + } while (1); + + common->isolated_link = link_id; + if (common->serial_link_ops && common->serial_link_ops->isolate) + ret = common->serial_link_ops->isolate(common, link_id); + + mutex_unlock(&common->isolate_mutex); + dev_info(common->dev, "Isolate %d complete", link_id); + + return ret; +} + +int max9x_des_deisolate_serial_link(struct max9x_common *common, unsigned int link_id) +{ + int ret = 0; + + if (link_id >= common->num_serial_links) + return -EINVAL; + + dev_info(common->dev, "Deisolate %d", link_id); + mutex_lock(&common->isolate_mutex); + + if (common->serial_link_ops && common->serial_link_ops->deisolate) + ret = common->serial_link_ops->deisolate(common, link_id); + + common->isolated_link = -1; + mutex_unlock(&common->isolate_mutex); + dev_info(common->dev, "Deisolate %d complete", link_id); + + return ret; +} + +ssize_t max9x_link_status_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct max9x_common *common = dev_get_drvdata(dev); + int link; + int ret; + bool locked; + + ret = sscanf(attr->attr.name, "link-lock_%d", &link); + if (ret < 0) + return ret; + + if (common->serial_link_ops && common->serial_link_ops->get_locked) { + ret = common->serial_link_ops->get_locked(common, link, &locked); + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d", !!locked); + } + + dev_err(dev, "get_locked not defined"); + return -EINVAL; +} + +int max9x_setup_translations(struct max9x_common *common) +{ + int err; + + if (!common->translation_ops) + return 0; + + /* + * Translation is currently only supported on serializer side + * (translating requests 
from SOC to remote sensor module) + */ + if (common->type != MAX9X_SERIALIZER) + return 0; + + struct max9x_pdata *pdata = common->dev->platform_data; + + for (unsigned int i = 0; i < pdata->num_subdevs; i++) { + struct max9x_subdev_pdata *subdev_pdata = &pdata->subdevs[i]; + unsigned int virt_addr = subdev_pdata->board_info.addr; + unsigned int phys_addr = subdev_pdata->phys_addr ? subdev_pdata->phys_addr : virt_addr; + + if (virt_addr == phys_addr || common->translation_ops->add == NULL) + continue; + + /* Only I2C 1 is supported at this time */ + err = common->translation_ops->add(common, 0, virt_addr, phys_addr); + if (err) + dev_warn(common->dev, "Failed to add translation for i2c address 0x%02x -> 0x%02x: %d", + virt_addr, phys_addr, err); + return err; + } + + return 0; +} + +int max9x_disable_translations(struct max9x_common *common) +{ + int err; + + if (!common->translation_ops) + return 0; + + /* + * Translation is currently only supported on serializer side + * (translating requests from SOC to remote sensor module) + */ + if (common->type != MAX9X_SERIALIZER) + return 0; + + struct max9x_pdata *pdata = common->dev->platform_data; + + for (unsigned int i = 0; i < pdata->num_subdevs; i++) { + struct max9x_subdev_pdata *subdev_pdata = &pdata->subdevs[i]; + unsigned int virt_addr = subdev_pdata->board_info.addr; + unsigned int phys_addr = subdev_pdata->phys_addr ? 
subdev_pdata->phys_addr : virt_addr; + + if (virt_addr != phys_addr) { + if (common->translation_ops->add) { + /* Only I2C 1 is supported at this time */ + err = common->translation_ops->remove(common, 0, virt_addr, phys_addr); + if (err) + dev_err(common->dev, "Failed to remove translation for i2c address 0x%02x -> 0x%02x: %d", + virt_addr, phys_addr, err); + return err; + } + } + } + + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0) +static int max9x_des_probe(struct i2c_client *client, const struct i2c_device_id *id) +#else +static int max9x_des_probe(struct i2c_client *client) +#endif +{ + struct device *dev = &client->dev; + struct max9x_common *des = NULL; + int ret = 0; + + dev_info(dev, "Probing"); + + des = devm_kzalloc(dev, sizeof(*des), GFP_KERNEL); + if (!des) { + dev_err(dev, "Failed to allocate memory."); + return -ENOMEM; + } + + TRY(ret, max9x_common_init_i2c_client( + des, + client, + &max9x_regmap_config, + NULL, NULL, NULL, NULL)); + + dev_info(dev, "probe successful"); + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) +static int max9x_des_remove(struct i2c_client *client) +#else +static void max9x_des_remove(struct i2c_client *client) +#endif +{ + struct device *dev = &client->dev; + struct max9x_common *des = NULL; + + dev_info(dev, "Removing"); + + des = max9x_client_to_common(client); + + max9x_destroy(des); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + return 0; +#endif +} + +static int max9x_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max9x_common *common = max9x_client_to_common(client); + /* fixme, temp WA to avoid max9295 hang */ + if (common->type == MAX9X_SERIALIZER) + return 0; + + return max9x_common_resume(common); +} + +static int max9x_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max9x_common *common = max9x_client_to_common(client); + + if (common->type == MAX9X_SERIALIZER) + return 0; + + return 
max9x_common_suspend(common); +} + +static const struct dev_pm_ops max9x_des_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(max9x_suspend, max9x_resume) +}; + +static struct i2c_driver max9x_driver = { + .driver = { + .name = "max9x", + .of_match_table = max9x_of_match, + .pm = &max9x_des_pm_ops, + }, + .probe = max9x_des_probe, + .remove = max9x_des_remove, + .id_table = max9x_id, +}; + +module_i2c_driver(max9x_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Josh Watts "); +MODULE_AUTHOR("Yan, Dongcheng "); +MODULE_DESCRIPTION("Common logic for Maxim GMSL serializers & deserializers"); diff --git a/drivers/media/i2c/max9x/serdes.h b/drivers/media/i2c/max9x/serdes.h new file mode 100644 index 0000000..4de2aeb --- /dev/null +++ b/drivers/media/i2c/max9x/serdes.h @@ -0,0 +1,459 @@ +/* + * serdes.h + * + * Copyright (c) 2018-2020 D3 Engineering. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Intel Corporation. 
+ +#ifndef _SERDES_H_ +#define _SERDES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu7-isys.h" +#include +#include "max9x_pdata.h" + +#define MAX9X_VDD_REGULATOR_NAME "vdd" +#define MAX9X_POC_REGULATOR_NAME "poc" +#define MAX9X_RESET_GPIO_NAME "reset" +#define MAX9X_DEV_ID 0xD +#define MAX9X_DEV_REV_FIELD GENMASK(3, 0) + + +/*Used for device attributes*/ +#define ATTR_NAME_LEN (30) /* arbitrary number used to allocate an attribute */ +#define ATTR_READ_ONLY (0444) + +#define MAX9X_LINK_FREQ_MBPS_TO_HZ(mbps) (((unsigned long long)(mbps)*1000000ULL)/2ULL) +#define MAX9X_LINK_FREQ_HZ_TO_MBPS(hz) (((unsigned long long)(hz)*2ULL)/1000000ULL) +#define MAX9X_LINK_FREQ_MBPS_TO_REG(mbps) ((mbps)/100U) + +#define MAX9X_FIELD_PREP(_mask, _val) \ + ({ \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +#define TRY(err, expr) \ + do {\ + err = expr; \ + if (err) { \ + return err; \ + } \ + } while (0) + +#define TRY_DEV_HERE(err, expr, dev) \ + do { \ + err = expr; \ + if (err) { \ + dev_err((dev), \ + "%s failed (%d) in %s() at %s:%d", #expr, \ + (err), __func__, __FILE__, __LINE__); \ + return err; \ + } \ + } while (0) + +/* dt is defined in soft_dt_x, such as BACKTOP15(0x316).[5:0] */ +#define MIPI_CSI2_TYPE_YUV422_8 0x1e +#define MIPI_CSI2_TYPE_YUV422_10 0x1f +#define MIPI_CSI2_TYPE_RGB565 0x22 +#define MIPI_CSI2_TYPE_RGB888 0x24 +#define MIPI_CSI2_TYPE_RAW8 0x2a +#define MIPI_CSI2_TYPE_RAW10 0x2b +#define MIPI_CSI2_TYPE_RAW12 0x2c + +static inline int mbus_code_to_csi_dt(int code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB565_1X16: + return MIPI_CSI2_TYPE_RGB565; + case MEDIA_BUS_FMT_RGB888_1X24: + return MIPI_CSI2_TYPE_RGB888; + case MEDIA_BUS_FMT_YUYV10_1X20: + return MIPI_CSI2_TYPE_YUV422_10; + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + case 
MEDIA_BUS_FMT_VYUY8_1X16: + return MIPI_CSI2_TYPE_YUV422_8; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return MIPI_CSI2_TYPE_RAW12; + case MEDIA_BUS_FMT_Y10_1X10: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return MIPI_CSI2_TYPE_RAW10; + case MEDIA_BUS_FMT_Y8_1X8: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + return MIPI_CSI2_TYPE_RAW8; + default: + return -EINVAL; + } +} + +static const struct regmap_config max9x_regmap_config = { + .reg_bits = 16, + .val_bits = 8, +}; + +enum max9x_chip_type { + MAX9296 = 0, + MAX96724 = 1, + MAX9295, + MAX96717, + MAX96724F, + MAX96724R, +}; + +enum max9x_serdes_type { + MAX9X_DESERIALIZER = 0, + MAX9X_SERIALIZER = 1, +}; + +struct max9x_serdes_rate_table { + unsigned int val; + unsigned int freq_mhz; +}; + +struct max9x_serdes_csi_config { + struct max9x_serdes_phy_map *map; + unsigned int num_maps; + enum v4l2_mbus_type bus_type; + unsigned int num_lanes; + unsigned int freq_mhz; + unsigned int initial_deskew_width; + bool auto_init_deskew_enabled; + bool auto_start; +}; + +struct max9x_serdes_pipe_config { + // Deserializer + unsigned int src_link; + unsigned int src_pipe; + struct max9x_serdes_mipi_map *map; + unsigned int num_maps; + + // Serializer + //TODO: dst_link? + unsigned int src_csi; + unsigned int *data_type; + unsigned int num_data_types; + //TODO: MIPI VC filter mask + + /* + * Which bpp value to double i.e. dbl_pixel_bpp = 10 will + * cause 10 bit data to be transmitted together. + */ + //TODO: Multiple dbl bpp values + unsigned int dbl_pixel_bpp; + + /* + * Software override for bits per pixel. This is compatible with + * double bpp. These are the min/max values before padding + * and after doubling. Leave either 0 to disable. 
+ */ + unsigned int soft_min_pixel_bpp; + unsigned int soft_max_pixel_bpp; + + //TODO: MIPI DT override +}; + +struct max9x_serdes_serial_config { + enum max9x_serdes_link_type link_type; + unsigned int rx_freq_mhz; // Previously forward_freq_mhz + unsigned int tx_freq_mhz; // Previously back_freq_mhz +}; + +struct max9x_serdes_v4l { + struct mutex lock; + struct max9x_common *common; + struct v4l2_subdev sd; + struct v4l2_ctrl_handler ctrl_handler; + struct media_pad *pads; + int num_pads; + + struct v4l2_ctrl *link_freq; // CSI link frequency, used to determine ISP clock + struct v4l2_mbus_framefmt *ffmts; + int ref_count; +}; + +struct max9x_serdes_csi_link { + bool enabled; + unsigned int usecount; + struct max9x_serdes_csi_config config; +}; + +struct max9x_serdes_video_pipe { + bool enabled; + //TODO: Anything else need to be tracked? + struct max9x_serdes_pipe_config config; +}; + +struct max9x_serdes_serial_link { + bool enabled; + bool detected; + struct regulator *poc_regulator; /* feeds the serializer, imager */ + bool regulator_enabled; + struct { + struct i2c_client *client; + struct max9x_subdev_pdata *pdata; + } remote; + struct regmap *map; + struct max9x_serdes_serial_config config; + struct device_attribute *link_lock_status; +}; + +struct max9x_serdes_line_fault { + bool enabled; + struct device_attribute *line_fault_status; +}; + +struct max9x_common { + struct max9x_desc *des; + struct device *dev; + struct i2c_client *client; + struct regmap *map; + struct i2c_client *phys_client; + struct regmap *phys_map; + struct i2c_mux_core *muxc; + struct gpio_chip gpio_chip; + enum max9x_serdes_type type; + + struct gpio_desc *reset_gpio; + struct regulator *vdd_regulator; + bool regulator_enabled; + + struct max9x_common_ops *common_ops; + struct max9x_serial_link_ops *serial_link_ops; + struct max9x_csi_link_ops *csi_link_ops; + struct max9x_line_fault_ops *line_fault_ops; + struct max9x_translation_ops *translation_ops; + + struct 
max9x_serdes_csi_link *csi_link; + int num_csi_links; + + struct max9x_serdes_video_pipe *video_pipe; + int num_video_pipes; + + struct max9x_serdes_serial_link *serial_link; + int num_serial_links; + + struct max9x_serdes_line_fault *line_fault; + int num_line_faults; + + struct max9x_serdes_v4l v4l; + + struct mutex link_mutex; + struct mutex isolate_mutex; + int isolated_link; + int selected_link; + bool external_refclk_enable; +}; + +int max9x_serdes_mhz_to_rate(struct max9x_serdes_rate_table *table, int len, unsigned int freq_mhz); +struct max9x_common *max9x_phandle_to_common(struct device_node *node, const char *name); +struct max9x_common *max9x_sd_to_common(struct v4l2_subdev *sd); +struct max9x_common *max9x_client_to_common(struct i2c_client *client); + +enum max9x_element_type { + MAX9X_SERIAL_LINK = 0, + MAX9X_VIDEO_PIPE, + MAX9X_MIPI_MAP, + MAX9X_CSI_LINK, + MAX9X_LINE_FAULT, + MAX9X_DATA_TYPES +}; + +struct max9x_common_ops { + int (*soft_reset)(struct max9x_common *common); + int (*enable)(struct max9x_common *common); + int (*disable)(struct max9x_common *common); + int (*max_elements)(struct max9x_common *common, enum max9x_element_type element); + int (*verify_devid)(struct max9x_common *common); + int (*remap_addr)(struct max9x_common *common); + int (*setup_gpio)(struct max9x_common *common); +}; + +struct max9x_serial_link_ops { + int (*enable)(struct max9x_common *common, unsigned int link); + int (*disable)(struct max9x_common *common, unsigned int link); + int (*select)(struct max9x_common *common, unsigned int link); + int (*deselect)(struct max9x_common *common, unsigned int link); + int (*get_locked)(struct max9x_common *common, unsigned int link, bool *locked); + int (*isolate)(struct max9x_common *common, unsigned int link); + int (*deisolate)(struct max9x_common *common, unsigned int link); +}; + +struct max9x_csi_link_ops { + int (*enable)(struct max9x_common *common, unsigned int link); + int (*disable)(struct max9x_common *common, 
unsigned int link); +}; + +struct max9x_translation_ops { + int (*add)(struct max9x_common *common, unsigned int i2c_id, unsigned int src, unsigned int dst); + int (*remove)(struct max9x_common *common, unsigned int i2c_id, unsigned int src, unsigned int dst); +}; + +struct max9x_line_fault_ops { + int (*enable)(struct max9x_common *common, unsigned int line); + int (*disable)(struct max9x_common *common, unsigned int line); + int (*get_status)(struct max9x_common *common, unsigned int line); +}; + +struct max9x_desc { + char dev_id; + char rev_reg; + enum max9x_serdes_type serdes_type; + enum max9x_chip_type chip_type; + int (*get_max9x_ops)(char dev_id, + struct max9x_common_ops **common_ops, + struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, + struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops); +}; + +int max9x_common_init_i2c_client(struct max9x_common *common, + struct i2c_client *client, + const struct regmap_config *regmap_config, + struct max9x_common_ops *common_ops, + struct max9x_serial_link_ops *serial_link_ops, + struct max9x_csi_link_ops *csi_link_ops, + struct max9x_line_fault_ops *lf_ops); +void max9x_destroy(struct max9x_common *common); +int max9x_common_resume(struct max9x_common *common); +int max9x_common_suspend(struct max9x_common *common); +int max9x_get_ops(char dev_id, struct max9x_common_ops **common_ops, + struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, + struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops); +extern int max96724_get_ops(struct max9x_common_ops **common_ops, + struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, + struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops); +extern int max96717_get_ops(struct max9x_common_ops **common_ops, + struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, + struct 
max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops); +extern int max9296_get_ops(struct max9x_common_ops **common_ops, + struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, + struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops); +extern int max9295_get_ops(struct max9x_common_ops **common_ops, + struct max9x_serial_link_ops **serial_ops, + struct max9x_csi_link_ops **csi_ops, + struct max9x_line_fault_ops **lf_ops, + struct max9x_translation_ops **trans_ops); +/* + * Both DES and SER have their own i2c_mux_core: + * 1. SER's muxc does not provide select/deselect and has at most one link + * (which itself has all children of the SER/DES link) + * 2. DER's muxc has driver specific select/deselect, one or more links, + * each of which must have exactly one SER + * 3. SER's probe() performs re-numbering and address translation (if needed: + * i2c-mux means this is no longer strictly required) + * + * des -> i2c-mux -> link@0 -> ser -> sensor, eeprom, etc + * link@n -> ser -> sensor, eeprom, etc + * + * +-----+ + * | des | + * +--+--+ + * | + * | +--------+ + * +--+ link@0 | + * | +------+-+ + * | | +-----+ + * | +----+ ser | + * | +--+--+ + * | | + * | | +--------+ + * | +--+ link@0 | + * | +-----+--+ + * | | + * | | +--------+ + * | +----+ sensor | + * | | +--------+ + * | | + * | | +--------+ + * | +----+ eeprom | + * | +--------+ + * | + * | +--------+ + * +--+ link@n | + * +------+-+ + * | +-----+ + * +----+ ser | + * +--+--+ + * | + * | +--------+ + * +--+ link@0 | + * +-----+--+ + * | + * | +--------+ + * +----+ sensor | + * | +--------+ + * | + * | +--------+ + * +----+ eeprom | + * +--------+ + */ + +// for pdata parse + +#define SET_CSI_MAP(maps, i, _src_vc, _src_dt, _dst_vc, _dst_dt, _dst_csi) do { \ + (maps)[i].src_vc = _src_vc; \ + (maps)[i].src_dt = _src_dt; \ + (maps)[i].dst_vc = _dst_vc; \ + (maps)[i].dst_dt = _dst_dt; \ + (maps)[i].dst_csi = _dst_csi; \ +} while (0) + 
+#define SET_PHY_MAP(maps, i, _int_csi, _phy_ind, _phy_lane) do { \ + (maps)[i].int_csi = _int_csi; \ + (maps)[i].phy_ind = _phy_ind; \ + (maps)[i].phy_lane = _phy_lane; \ +} while (0) + +#endif // _SERDES_H_ From a5788c426284fd49a33fa05d70ceeecd90fbe66f Mon Sep 17 00:00:00 2001 From: florent pirou Date: Thu, 11 Sep 2025 08:26:58 -0700 Subject: [PATCH 09/14] max9x: align linux 6.17 gpio_chip ops Signed-off-by: florent pirou --- drivers/media/i2c/max9x/max9295.c | 15 +++++++++++++++ drivers/media/i2c/max9x/max96717.c | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/drivers/media/i2c/max9x/max9295.c b/drivers/media/i2c/max9x/max9295.c index a7ccbdf..4a75d79 100644 --- a/drivers/media/i2c/max9x/max9295.c +++ b/drivers/media/i2c/max9x/max9295.c @@ -19,6 +19,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2025 Intel Corporation. +#include #include #include #include @@ -74,7 +75,11 @@ static int max9295_gpio_get_direction(struct gpio_chip *chip, unsigned int offse static int max9295_gpio_direction_input(struct gpio_chip *chip, unsigned int offset); static int max9295_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value); static int max9295_gpio_get(struct gpio_chip *chip, unsigned int offset); +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) static void max9295_gpio_set(struct gpio_chip *chip, unsigned int offset, int value); +#else +static int max9295_gpio_set(struct gpio_chip *chip, unsigned int offset, int value); +#endif static int max9295_setup_gpio(struct max9x_common *common); /* max9295 gpio */ @@ -160,14 +165,24 @@ static int max9295_gpio_get(struct gpio_chip *chip, unsigned int offset) return FIELD_GET(MAX9295_GPIO_A_IN_FIELD, val); } +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) static void max9295_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +#else +static int max9295_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +#endif { struct max9x_common *common = 
from_gpio_chip(chip); struct regmap *map = common->map; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) regmap_update_bits(map, MAX9295_GPIO_A(offset), MAX9295_GPIO_A_OUT_FIELD, MAX9X_FIELD_PREP(MAX9295_GPIO_A_OUT_FIELD, (value == 0 ? 0U : 1U))); +#else + return regmap_update_bits(map, MAX9295_GPIO_A(offset), + MAX9295_GPIO_A_OUT_FIELD, + MAX9X_FIELD_PREP(MAX9295_GPIO_A_OUT_FIELD, (value == 0 ? 0U : 1U))); +#endif } static int max9295_setup_gpio(struct max9x_common *common) diff --git a/drivers/media/i2c/max9x/max96717.c b/drivers/media/i2c/max9x/max96717.c index 178ad1c..4c586b3 100644 --- a/drivers/media/i2c/max9x/max96717.c +++ b/drivers/media/i2c/max9x/max96717.c @@ -19,6 +19,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2025 Intel Corporation. +#include #include #include #include @@ -53,7 +54,11 @@ static int max96717_gpio_get_direction(struct gpio_chip *chip, unsigned int offs static int max96717_gpio_direction_input(struct gpio_chip *chip, unsigned int offset); static int max96717_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value); static int max96717_gpio_get(struct gpio_chip *chip, unsigned int offset); +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) static void max96717_gpio_set(struct gpio_chip *chip, unsigned int offset, int value); +#else +static int max96717_gpio_set(struct gpio_chip *chip, unsigned int offset, int value); +#endif static int max96717_setup_gpio(struct max9x_common *common); static struct max9x_common_ops max96717_common_ops = { @@ -133,14 +138,24 @@ static int max96717_gpio_get(struct gpio_chip *chip, unsigned int offset) return FIELD_GET(MAX96717_GPIO_A_IN_FIELD, val); } +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) static void max96717_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +#else +static int max96717_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +#endif { struct max9x_common *common = from_gpio_chip(chip); struct regmap *map = 
common->map; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) regmap_update_bits(map, MAX96717_GPIO_A(offset), MAX96717_GPIO_A_OUT_FIELD, MAX9X_FIELD_PREP(MAX96717_GPIO_A_OUT_FIELD, (value == 0 ? 0U : 1U))); +#else + return regmap_update_bits(map, MAX96717_GPIO_A(offset), + MAX96717_GPIO_A_OUT_FIELD, + MAX9X_FIELD_PREP(MAX96717_GPIO_A_OUT_FIELD, (value == 0 ? 0U : 1U))); +#endif } static int max96717_setup_gpio(struct max9x_common *common) From cba98015c5f8fc6589c3e4ddfd7a26aab0d09425 Mon Sep 17 00:00:00 2001 From: florent pirou Date: Thu, 11 Sep 2025 08:28:50 -0700 Subject: [PATCH 10/14] media, i2c : build max9x dkms modules Signed-off-by: florent pirou --- Makefile | 2 ++ dkms.conf | 8 ++++++-- drivers/media/i2c/Makefile | 8 ++++++-- drivers/media/i2c/max9x/Makefile | 7 +++++-- drivers/media/i2c/max9x/serdes.c | 3 ++- 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index e65edb1..cee8e2e 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,7 @@ subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" # Define config macros for conditional compilation in ipu6-acpi.c # IS_ENABLED() checks for CONFIG_XXX or CONFIG_XXX_MODULE +subdir-ccflags-y += -DCONFIG_VIDEO_MAX9X_MODULE=1 subdir-ccflags-y += -DCONFIG_VIDEO_ISX031_MODULE=1 subdir-ccflags-y += -DCONFIG_VIDEO_AR0820_MODULE=1 subdir-ccflags-y += -DCONFIG_VIDEO_AR0234_MODULE=1 @@ -22,6 +23,7 @@ export CONFIG_IPU_BRIDGE=m export CONFIG_VIDEO_AR0820=m export CONFIG_VIDEO_AR0234=m export CONFIG_VIDEO_ISX031=m +export CONFIG_VIDEO_MAX9X=m obj-m += drivers/media/pci/intel/ obj-m += drivers/media/i2c/ diff --git a/dkms.conf b/dkms.conf index 31a1864..208b146 100644 --- a/dkms.conf +++ b/dkms.conf @@ -1,6 +1,5 @@ PACKAGE_NAME="intel-mipi-gmsl-dkms" -PACKAGE_VERSION="20260112-0eci1" -#PACKAGE_VERSION="#MODULE_VERSION#" +PACKAGE_VERSION="#MODULE_VERSION#" BUILD_EXCLUSIVE_KERNEL="^(6\.(1[278])\.)" VERSION_SUFFIX="${PACKAGE_VERSION}" @@ -249,3 +248,8 @@ 
BUILT_MODULE_NAME[$((++i))]="isx031" BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no + +BUILT_MODULE_NAME[$((++i))]="max9x" +BUILT_MODULE_LOCATION[$i]="drivers/media/i2c/max9x" +DEST_MODULE_LOCATION[$i]="/updates" +STRIP[$i]=no diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index ba17411..bb6ee2a 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -3,10 +3,14 @@ KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') -CC := ${CC} -I ${M}/$(KERNEL_VERSION)/include-overrides +ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + +ccflags-y += -I$(src)/../../../ipu7-drivers/drivers/media/pci/intel/ipu7 +ccflags-y += -I$(src)/../../../$(KERNEL_VERSION)/include-overrides obj-$(CONFIG_VIDEO_AR0234) += ar0234.o obj-$(CONFIG_VIDEO_AR0820) += ar0820.o obj-$(CONFIG_VIDEO_ISX031) += isx031.o -ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" +subdir-ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" +obj-$(CONFIG_VIDEO_MAX9X) += max9x/ diff --git a/drivers/media/i2c/max9x/Makefile b/drivers/media/i2c/max9x/Makefile index fa2c006..fad9b2d 100644 --- a/drivers/media/i2c/max9x/Makefile +++ b/drivers/media/i2c/max9x/Makefile @@ -1,4 +1,7 @@ -ccflags-y += -I$(src)/../../pci/intel/ipu7/ +KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') + +ccflags-y += -I$(src)/../../../../ipu7-drivers/drivers/media/pci/intel/ipu7 +ccflags-y += -I$(src)/../../../../$(KERNEL_VERSION)/include-overrides obj-m += max9x.o -max9x-y += serdes.o max9296.o max96724.o max9295.o max96717.o \ No newline at end of file +max9x-y += serdes.o max9296.o max96724.o max9295.o max96717.o diff --git a/drivers/media/i2c/max9x/serdes.c b/drivers/media/i2c/max9x/serdes.c index 38d844c..7f17526 100644 --- a/drivers/media/i2c/max9x/serdes.c +++ b/drivers/media/i2c/max9x/serdes.c @@ -292,7 +292,7 @@ static 
void parse_sensor_pdata(struct device *dev, const char *sensor_name, char snprintf(dev_name, I2C_NAME_SIZE, "%s %s", sensor_name, suffix); ser_pdata->subdevs[0].board_info.dev_name = dev_name; - struct isx031_platform_data *sen_pdata = devm_kzalloc(dev, sizeof(*sen_pdata), GFP_KERNEL); + struct sensor_platform_data *sen_pdata = devm_kzalloc(dev, sizeof(*sen_pdata), GFP_KERNEL); if (!sen_pdata) return; @@ -2698,3 +2698,4 @@ MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Josh Watts "); MODULE_AUTHOR("Yan, Dongcheng "); MODULE_DESCRIPTION("Common logic for Maxim GMSL serializers & deserializers"); +MODULE_VERSION(DRIVER_VERSION_SUFFIX); From 0802e40ad39a0efc3a1daec54b830b09aaf56118 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sun, 18 Jan 2026 16:43:52 -0700 Subject: [PATCH 11/14] ar0234 : fix v4l2-cci linux 6.12 build Signed-off-by: Florent Pirou --- Makefile | 7 ++- dkms.conf | 16 +++---- ...a-v4l2-core-set-v4l2-cci-i2c-support.patch | 45 +++++++++++++++++++ ...odules-suffix-versioning-same-as-dkm.patch | 21 +++++---- 4 files changed, 70 insertions(+), 19 deletions(-) create mode 100644 patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch diff --git a/Makefile b/Makefile index cee8e2e..c795d53 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ KERNELRELEASE ?= $(shell uname -r) KERNEL_SRC ?= /lib/modules/$(KERNELRELEASE)/build -KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') +KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') BUILD_EXCLUSIVE_KERNEL="^(6\.(1[278])\.)" MODSRC := $(shell pwd) @@ -25,6 +25,9 @@ export CONFIG_VIDEO_AR0234=m export CONFIG_VIDEO_ISX031=m export CONFIG_VIDEO_MAX9X=m +# Path to v4l2-core module symbols +KBUILD_EXTRA_SYMBOLS = $(M)/$(KERNEL_VERSION)/drivers/media/v4l2-core/Module.symvers + obj-m += drivers/media/pci/intel/ obj-m += drivers/media/i2c/ @@ -33,6 +36,8 @@ obj-y += drivers/media/platform/intel/ subdir-ccflags-$(CONFIG_INTEL_IPU_ACPI) += \ 
-DCONFIG_INTEL_IPU_ACPI +subdir-ccflags-$(CONFIG_VIDEO_AR0234) += \ + -DCONFIG_V4L2_CCI_I2C subdir-ccflags-y += $(subdir-ccflags-m) diff --git a/dkms.conf b/dkms.conf index 208b146..99e96b8 100644 --- a/dkms.conf +++ b/dkms.conf @@ -16,10 +16,10 @@ SUBLEVEL=$(echo $kernelver | sed -ne 's/\([0-9]\+\)\.\([0-9]\+\)\.\([0-9]\+\)\-\ srctree=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build #We will invoke multiple make directives to build all the required kernel modules, then manually copy them to the staged directory for dkms install rule -MAKE="make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' KERNEL_SRC=$kernel_source_dir && \ - if [ -d ${KBASE}/drivers/media/v4l2-core ]; then rmdir ${KBASE}/drivers/media/v4l2-core ; fi &&\ +MAKE=" if [ -d ${KBASE}/drivers/media/v4l2-core ]; then rmdir ${KBASE}/drivers/media/v4l2-core ; fi &&\ make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core videodev.ko DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' &&\ mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/ && \ + make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' CFLAGS_MODULE=\"$(echo $MAKE_OPTS)\" KERNEL_SRC=$kernel_source_dir && \ if [ -d ${KBASE}/drivers/media/pci/intel/ipu6 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu6 ; fi &&\ make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/ && \ @@ -35,6 +35,7 @@ CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && make -C 
${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 clean" AUTOINSTALL="yes" +BUILD_EXCLUSIVE_CONFIG="CONFIG_VIDEO_V4L2_I2C" # Add non-upstreamed ISYS patches for Kernel 6.12 PATCH[0]="0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch" @@ -92,9 +93,11 @@ PATCH[24]="0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch" PATCH[25]="0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" PATCH_MATCH[24]="^(6.1[2])" PATCH_MATCH[25]="^(6.1[2])" +# Make IPU V4L2-CIC out-of-tree +PATCH[26]="0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch" +PATCH_MATCH[26]="^(6.1[2])" - -j=25 +j=26 # Linux non-upstreamed patches for Linux 6.17 to 6.18 PATCH[$((++j))]="0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch" @@ -229,11 +232,6 @@ BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" DEST_MODULE_LOCATION[$i]="/updates" STRIP[$i]=no -BUILT_MODULE_NAME[$((++i))]="ar0234" -BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" -DEST_MODULE_LOCATION[$i]="/updates" -STRIP[$i]=no - BUILT_MODULE_NAME[$((++i))]="ar0820" BUILT_MODULE_LOCATION[$i]="drivers/media/i2c" DEST_MODULE_LOCATION[$i]="/updates" diff --git a/patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch b/patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch new file mode 100644 index 0000000..ff7cc69 --- /dev/null +++ b/patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch @@ -0,0 +1,45 @@ +From 7f7bf005bc0108b96e9705f617fd1b391cf7ff2b Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Sun, 18 Jan 2026 13:46:44 -0700 +Subject: [PATCH 27/27] media: v4l2-core : set v4l2-cci i2c support + +Signed-off-by: Florent Pirou +--- + 6.12.0/drivers/media/v4l2-core/Makefile | 4 +++- + 6.12.0/drivers/media/v4l2-core/v4l2-cci.c | 1 + + 2 files changed, 4 insertions(+), 1 deletion(-) + +diff --git a/6.12.0/drivers/media/v4l2-core/Makefile b/6.12.0/drivers/media/v4l2-core/Makefile +index 82c235f..9128583 100644 +--- a/6.12.0/drivers/media/v4l2-core/Makefile ++++ 
b/6.12.0/drivers/media/v4l2-core/Makefile +@@ -5,6 +5,7 @@ + # ipu7-dkms appendix + CC := ${CC} -I ${M}/../../../include-overrides -I ${M}/../tuners -I ${M}/../dvb-core -I ${M}/../dvb-frontends + ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ccflags-y += -DCONFIG_V4L2_CCI_I2C + + ccflags-y += -I$(srctree)/drivers/media/dvb-frontends + ccflags-y += -I$(srctree)/drivers/media/tuners +@@ -14,7 +15,8 @@ tuner-objs := tuner-core.o + videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \ + v4l2-event.o v4l2-subdev.o v4l2-common.o \ + v4l2-ctrls-core.o v4l2-ctrls-api.o \ +- v4l2-ctrls-request.o v4l2-ctrls-defs.o ++ v4l2-ctrls-request.o v4l2-ctrls-defs.o \ ++ v4l2-cci.o + + # Please keep it alphabetically sorted by Kconfig name + # (e. g. LC_ALL=C sort Makefile) +diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-cci.c b/6.12.0/drivers/media/v4l2-core/v4l2-cci.c +index e9ecf47..65e138a 100644 +--- a/6.12.0/drivers/media/v4l2-core/v4l2-cci.c ++++ b/6.12.0/drivers/media/v4l2-core/v4l2-cci.c +@@ -201,3 +201,4 @@ EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Hans de Goede "); + MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + diff --git a/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch b/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch index b0eb6b2..aaaf24b 100644 --- a/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch +++ b/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch @@ -1,4 +1,4 @@ -From 15e1f9949d2934c4b417b25b17fb1c51ce1f3341 Mon Sep 17 00:00:00 2001 +From a2d5fa26b49e6f515cd4122a6874ca5b4f98030e Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sat, 17 Jan 2026 02:10:39 -0700 Subject: [PATCH 104/104] ipu7-isys: make modules suffix versioning same as @@ -6,17 +6,17 @@ Subject: [PATCH 104/104] ipu7-isys: make modules suffix 
versioning same as Signed-off-by: Florent Pirou --- - ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile | 9 +++++++++ - ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c | 1 + - ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c | 1 + - .../drivers/media/pci/intel/ipu7/psys/ipu-psys.c | 1 + - 4 files changed, 12 insertions(+) + ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile | 12 ++++++++++++ + .../drivers/media/pci/intel/ipu7/ipu7-isys.c | 1 + + ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c | 1 + + .../drivers/media/pci/intel/ipu7/psys/ipu-psys.c | 1 + + 4 files changed, 15 insertions(+) diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile -index 5041701..14d46ff 100644 +index 5041701..7497dc7 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile -@@ -1,5 +1,12 @@ +@@ -1,5 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2017 - 2025 Intel Corporation. 
+KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') @@ -26,10 +26,13 @@ index 5041701..14d46ff 100644 +KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers + +ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ ++export EXTERNAL_BUILD = 1 ++export CONFIG_VIDEO_INTEL_IPU7=m is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ifeq ($(is_kernel_lt_6_10), 1) -@@ -32,6 +39,8 @@ intel-ipu7-isys-objs += ipu7-isys-tpg.o +@@ -32,6 +42,8 @@ intel-ipu7-isys-objs += ipu7-isys-tpg.o endif obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-isys.o From 5c7d6446d6605def579faaae73f23aadaee3dbb0 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Mon, 19 Jan 2026 03:43:47 -0700 Subject: [PATCH 12/14] dkms: switch to ipu6-drivers git-submodule, upgrade ipu7-drivers depends * ipu6-drivers - baseline PTL release for iot on 2025-12-30 * ipu7-drivers - baseline PTL release for iot on 2025-12-30 Signed-off-by: Florent Pirou --- .gitmodules | 4 + 6.12.0/drivers/media/pci/intel/ipu6/Kconfig | 18 - 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 23 - .../drivers/media/pci/intel/ipu6/ipu6-bus.c | 159 - .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 58 - .../media/pci/intel/ipu6/ipu6-buttress.c | 933 -- .../media/pci/intel/ipu6/ipu6-buttress.h | 92 - .../drivers/media/pci/intel/ipu6/ipu6-cpd.c | 362 - .../drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 - .../drivers/media/pci/intel/ipu6/ipu6-dma.c | 492 - .../drivers/media/pci/intel/ipu6/ipu6-dma.h | 49 - .../media/pci/intel/ipu6/ipu6-fw-com.c | 413 - .../media/pci/intel/ipu6/ipu6-fw-com.h | 47 - .../media/pci/intel/ipu6/ipu6-fw-isys.c | 487 - .../media/pci/intel/ipu6/ipu6-fw-isys.h | 596 -- .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 649 -- .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 80 - .../media/pci/intel/ipu6/ipu6-isys-dwc-phy.c | 536 -- .../media/pci/intel/ipu6/ipu6-isys-jsl-phy.c | 242 - 
.../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 720 -- .../media/pci/intel/ipu6/ipu6-isys-queue.c | 847 -- .../media/pci/intel/ipu6/ipu6-isys-queue.h | 79 - .../media/pci/intel/ipu6/ipu6-isys-subdev.c | 403 - .../media/pci/intel/ipu6/ipu6-isys-subdev.h | 59 - .../media/pci/intel/ipu6/ipu6-isys-video.c | 1392 --- .../media/pci/intel/ipu6/ipu6-isys-video.h | 141 - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 1382 --- .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 206 - .../drivers/media/pci/intel/ipu6/ipu6-mmu.c | 852 -- .../drivers/media/pci/intel/ipu6/ipu6-mmu.h | 73 - .../intel/ipu6/ipu6-platform-buttress-regs.h | 226 - .../intel/ipu6/ipu6-platform-isys-csi2-reg.h | 172 - .../media/pci/intel/ipu6/ipu6-platform-regs.h | 179 - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 850 -- 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 342 - 6.18.0/drivers/media/pci/intel/ipu6/Kconfig | 18 - 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 23 - .../drivers/media/pci/intel/ipu6/ipu6-bus.c | 159 - .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 55 - .../media/pci/intel/ipu6/ipu6-buttress.c | 910 -- .../media/pci/intel/ipu6/ipu6-buttress.h | 85 - .../drivers/media/pci/intel/ipu6/ipu6-cpd.c | 362 - .../drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 - .../drivers/media/pci/intel/ipu6/ipu6-dma.c | 459 - .../drivers/media/pci/intel/ipu6/ipu6-dma.h | 43 - .../media/pci/intel/ipu6/ipu6-fw-com.c | 413 - .../media/pci/intel/ipu6/ipu6-fw-com.h | 47 - .../media/pci/intel/ipu6/ipu6-fw-isys.c | 487 - .../media/pci/intel/ipu6/ipu6-fw-isys.h | 596 -- .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 643 -- .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 78 - .../media/pci/intel/ipu6/ipu6-isys-dwc-phy.c | 536 -- .../media/pci/intel/ipu6/ipu6-isys-jsl-phy.c | 242 - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 720 -- .../media/pci/intel/ipu6/ipu6-isys-queue.c | 850 -- .../media/pci/intel/ipu6/ipu6-isys-queue.h | 71 - .../media/pci/intel/ipu6/ipu6-isys-subdev.c | 403 - .../media/pci/intel/ipu6/ipu6-isys-subdev.h 
| 55 - .../media/pci/intel/ipu6/ipu6-isys-video.c | 1391 --- .../media/pci/intel/ipu6/ipu6-isys-video.h | 135 - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 1380 --- .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 202 - .../drivers/media/pci/intel/ipu6/ipu6-mmu.c | 806 -- .../drivers/media/pci/intel/ipu6/ipu6-mmu.h | 73 - .../intel/ipu6/ipu6-platform-buttress-regs.h | 224 - .../intel/ipu6/ipu6-platform-isys-csi2-reg.h | 172 - .../media/pci/intel/ipu6/ipu6-platform-regs.h | 179 - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 846 -- 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 342 - dkms.conf | 164 +- ipu6-drivers | 1 + ipu7-drivers | 2 +- ...-v4l2-core-makefile-adaptation-6.12.patch} | 4 +- ...s-v4l2-core-makefile-adaptation-6.17.patch | 37 - ...pu6-Fix-dma-mask-for-non-secure-mode.patch | 49 - ...t-v4l2_subdev_enable_streams_api-tru.patch | 18 +- ...emove-workaround-for-Meteor-Lake-ES2.patch | 45 - ...ms-add-isys-makefile-adaptation-6.17.patch | 48 - ...l-ipu6-remove-buttress-ish-structure.patch | 125 - ...-v4l2-core-set-v4l2-cci-i2c-support.patch} | 4 +- ...-v4l2-core-makefile-adaptation-6.17.patch} | 4 +- ...se-module-parameter-to-set-isys-freq.patch | 45 - ...-v4l2_subdev_enable_streams_api-tru.patch} | 4 +- .../0005-media-pci-Enable-ISYS-reset.patch | 627 -- ...-v4l2-core-makefile-adaptation-6.18.patch} | 4 +- ...l-ipu6-remove-buttress-ish-structure.patch | 38 - ...-v4l2_subdev_enable_streams_api-tru.patch} | 4 +- ...tel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch | 42 - ...-subdev-Make-struct-v4l2_subdev_str.patch} | 4 +- ...nc-at-buffer_prepare-callback-as-DMA.patch | 42 - ...-support-IPU6-ISYS-FW-trace-dump-for.patch | 1432 --- ...der-of-return-buffers-should-be-FIFO.patch | 100 - ...-Modify-enble-disable-stream-in-CSI2.patch | 338 - ...the-correct-SOF-for-different-stream.patch | 24 - ...l-ipu6-support-imx390-for-6.11.0-rc3.patch | 981 -- ...-media-intel-ipu6-fix-coverity-issue.patch | 31 - ...-move-ipu-acpi-module-to-linux-drive.patch | 253 - 
...ster-i2c-device-to-complete-ext_subd.patch | 58 - ...7-media-pci-add-missing-if-for-PDATA.patch | 116 - ...edia-pci-refine-PDATA-related-config.patch | 526 -- ...-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch | 385 - ...se-module-parameter-to-set-psys-freq.patch | 43 - ...21-media-pci-update-IPU6-PSYS-driver.patch | 8335 ----------------- ...-support-IPU6-PSYS-FW-trace-dump-for.patch | 134 - ...t-v4l2_subdev_enable_streams_api-tru.patch | 28 - ...ms-add-isys-makefile-adaptation-6.12.patch | 65 - ...ipu7-acpi-pdata-device-parser-on-6.1.patch | 80 - ...ms-add-isys-makefile-adaptation-6.12.patch | 76 + ...Use-v4l2_ctrl_subdev_subscribe_event.patch | 34 - ...-lt6911uxc-2-pads-linked-to-ipu-2-po.patch | 74 + ...Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch | 32 - ...Set-embedded-data-type-correctly-for.patch | 35 - ...t-symbol-namespace-to-string-literal.patch | 389 + ...se-module-parameter-to-set-isys-freq.patch | 45 - .../0054-media-pci-Enable-ISYS-reset.patch | 626 -- ...tel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch | 41 - ...nc-at-buffer_prepare-callback-as-DMA.patch | 42 - ...-Support-IPU6-ISYS-FW-trace-dump-for.patch | 1432 --- ...der-of-return-buffers-should-be-FIFO.patch | 100 - ...-Modify-enble-disable-stream-in-CSI2.patch | 338 - ...the-correct-SOF-for-different-stream.patch | 29 - ...ia-pci-support-imx390-for-6.11.0-rc3.patch | 981 -- patches/0062-i2c-media-fix-cov-issue.patch | 31 - ...add-ipu-acpi-module-to-linux-drivers.patch | 252 - ...ster-i2c-device-to-complete-ext_subd.patch | 56 - ...5-media-pci-add-missing-if-for-PDATA.patch | 114 - ...edia-pci-refine-PDATA-related-config.patch | 527 -- ...I-PDATA-and-ACPI-fwnode-build-for-EC.patch | 382 - ...se-module-parameter-to-set-psys-freq.patch | 43 - .../0069-media-pci-add-IPU6-PSYS-driver.patch | 8335 ----------------- ...-support-IPU6-PSYS-FW-trace-dump-for.patch | 134 - ...el-ipu6-Constify-ipu6_buttress_ctrl-.patch | 84 - ...ms-add-isys-makefile-adaptation-6.17.patch | 48 - 
...ipu7-acpi-pdata-device-parser-on-6.1.patch | 89 - ...e-the-PLATFORM-ACPI-driver-and-IPU-d.patch | 649 -- ...dules-suffix-versioning-same-as-dkm.patch} | 20 +- ...virtual-channels-on-16-csi2-input-vi.patch | 2 +- ...7-isys-add-d4xx-pixel-format-support.patch | 12 +- ...deo-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch | 12 +- ...-dummy-patch-to-void-last-match-miss.patch | 39 + 140 files changed, 666 insertions(+), 55055 deletions(-) delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/Kconfig delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/Makefile delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c delete mode 100644 
6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c delete mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/Kconfig delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/Makefile delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c 
delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c delete mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h create mode 160000 ipu6-drivers rename patches/{0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch => 0001-ipu6-dkms-v4l2-core-makefile-adaptation-6.12.patch} (91%) delete mode 100644 patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch delete mode 100644 patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch delete mode 100644 patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch delete mode 100644 patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch delete mode 100644 patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch rename patches/{0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch => 0003-media-v4l2-core-set-v4l2-cci-i2c-support.patch} (93%) rename 
patches/{0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch => 0004-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch} (91%) delete mode 100644 patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch rename patches/{0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch => 0005-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch} (83%) delete mode 100644 patches/0005-media-pci-Enable-ISYS-reset.patch rename patches/{0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch => 0006-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch} (85%) delete mode 100644 patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch rename patches/{0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch => 0007-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch} (83%) delete mode 100644 patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch rename patches/{0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch => 0008-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch} (95%) delete mode 100644 patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch delete mode 100644 patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch delete mode 100644 patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch delete mode 100644 patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch delete mode 100644 patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch delete mode 100644 patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch delete mode 100644 patches/0014-media-intel-ipu6-fix-coverity-issue.patch delete mode 100644 patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch delete mode 100644 patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch delete mode 100644 patches/0017-media-pci-add-missing-if-for-PDATA.patch delete mode 100644 patches/0018-media-pci-refine-PDATA-related-config.patch delete mode 
100644 patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch delete mode 100644 patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch delete mode 100644 patches/0021-media-pci-update-IPU6-PSYS-driver.patch delete mode 100644 patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch delete mode 100644 patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch delete mode 100644 patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch delete mode 100644 patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch create mode 100644 patches/0050-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch delete mode 100644 patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch create mode 100644 patches/0051-Revert-ipu6-isys-lt6911uxc-2-pads-linked-to-ipu-2-po.patch delete mode 100644 patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch delete mode 100644 patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch create mode 100644 patches/0052-module-Convert-symbol-namespace-to-string-literal.patch delete mode 100644 patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch delete mode 100644 patches/0054-media-pci-Enable-ISYS-reset.patch delete mode 100644 patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch delete mode 100644 patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch delete mode 100644 patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch delete mode 100644 patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch delete mode 100644 patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch delete mode 100644 patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch delete mode 100644 patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch delete mode 100644 patches/0062-i2c-media-fix-cov-issue.patch delete mode 100644 
patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch delete mode 100644 patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch delete mode 100644 patches/0065-media-pci-add-missing-if-for-PDATA.patch delete mode 100644 patches/0066-media-pci-refine-PDATA-related-config.patch delete mode 100644 patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch delete mode 100644 patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch delete mode 100644 patches/0069-media-pci-add-IPU6-PSYS-driver.patch delete mode 100644 patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch delete mode 100644 patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch delete mode 100644 patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch delete mode 100644 patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch delete mode 100644 patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch rename patches/{0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch => 0100-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch} (86%) create mode 100644 patches/0104-ipu7-dkms-dummy-patch-to-void-last-match-miss.patch diff --git a/.gitmodules b/.gitmodules index 0092983..aa9dba2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,7 @@ [submodule "ipu7-drivers"] path = ipu7-drivers url = https://github.com/intel/ipu7-drivers +[submodule "ipu6-drivers"] + path = ipu6-drivers + url = https://github.com/intel/ipu6-drivers.git + branch = iotg_ipu6 diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig deleted file mode 100644 index cd1c545..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig +++ /dev/null @@ -1,18 +0,0 @@ -config VIDEO_INTEL_IPU6 - tristate "Intel IPU6 driver" - depends on ACPI || COMPILE_TEST - depends on VIDEO_DEV - depends on X86 && X86_64 && HAS_DMA - depends on IPU_BRIDGE || !IPU_BRIDGE - select 
AUXILIARY_BUS - select IOMMU_IOVA - select VIDEO_V4L2_SUBDEV_API - select MEDIA_CONTROLLER - select VIDEOBUF2_DMA_SG - select V4L2_FWNODE - help - This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs - and used for capturing images and video from camera sensors. - - To compile this driver, say Y here! It contains 2 modules - - intel_ipu6 and intel_ipu6_isys. diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile deleted file mode 100644 index a821b0a..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -intel-ipu6-y := ipu6.o \ - ipu6-bus.o \ - ipu6-dma.o \ - ipu6-mmu.o \ - ipu6-buttress.o \ - ipu6-cpd.o \ - ipu6-fw-com.o - -obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o - -intel-ipu6-isys-y := ipu6-isys.o \ - ipu6-isys-csi2.o \ - ipu6-fw-isys.o \ - ipu6-isys-video.o \ - ipu6-isys-queue.o \ - ipu6-isys-subdev.o \ - ipu6-isys-mcd-phy.o \ - ipu6-isys-jsl-phy.o \ - ipu6-isys-dwc-phy.o - -obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c deleted file mode 100644 index 37d88dd..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.c +++ /dev/null @@ -1,159 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 - 2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-buttress.h" -#include "ipu6-dma.h" - -static int bus_pm_runtime_suspend(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - int ret; - - ret = pm_generic_runtime_suspend(dev); - if (ret) - return ret; - - ret = ipu6_buttress_power(dev, adev->ctrl, false); - if (!ret) - return 0; - - dev_err(dev, "power down failed!\n"); - - /* Powering down failed, attempt to resume 
device now */ - ret = pm_generic_runtime_resume(dev); - if (!ret) - return -EBUSY; - - return -EIO; -} - -static int bus_pm_runtime_resume(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - int ret; - - ret = ipu6_buttress_power(dev, adev->ctrl, true); - if (ret) - return ret; - - ret = pm_generic_runtime_resume(dev); - if (ret) - goto out_err; - - return 0; - -out_err: - ipu6_buttress_power(dev, adev->ctrl, false); - - return -EBUSY; -} - -static struct dev_pm_domain ipu6_bus_pm_domain = { - .ops = { - .runtime_suspend = bus_pm_runtime_suspend, - .runtime_resume = bus_pm_runtime_resume, - }, -}; - -static DEFINE_MUTEX(ipu6_bus_mutex); - -static void ipu6_bus_release(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - - kfree(adev->pdata); - kfree(adev); -} - -struct ipu6_bus_device * -ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, - void *pdata, struct ipu6_buttress_ctrl *ctrl, - char *name) -{ - struct auxiliary_device *auxdev; - struct ipu6_bus_device *adev; - struct ipu6_device *isp = pci_get_drvdata(pdev); - int ret; - - adev = kzalloc(sizeof(*adev), GFP_KERNEL); - if (!adev) - return ERR_PTR(-ENOMEM); - - adev->isp = isp; - adev->ctrl = ctrl; - adev->pdata = pdata; - auxdev = &adev->auxdev; - auxdev->name = name; - auxdev->id = (pci_domain_nr(pdev->bus) << 16) | - PCI_DEVID(pdev->bus->number, pdev->devfn); - - auxdev->dev.parent = parent; - auxdev->dev.release = ipu6_bus_release; - - ret = auxiliary_device_init(auxdev); - if (ret < 0) { - dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n", - ret); - kfree(adev); - return ERR_PTR(ret); - } - - dev_pm_domain_set(&auxdev->dev, &ipu6_bus_pm_domain); - - pm_runtime_forbid(&adev->auxdev.dev); - pm_runtime_enable(&adev->auxdev.dev); - - return adev; -} - -int ipu6_bus_add_device(struct ipu6_bus_device *adev) -{ - struct auxiliary_device *auxdev = &adev->auxdev; - int ret; - - ret = auxiliary_device_add(auxdev); - if 
(ret) { - auxiliary_device_uninit(auxdev); - return ret; - } - - mutex_lock(&ipu6_bus_mutex); - list_add(&adev->list, &adev->isp->devices); - mutex_unlock(&ipu6_bus_mutex); - - pm_runtime_allow(&auxdev->dev); - - return 0; -} - -void ipu6_bus_del_devices(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - struct ipu6_bus_device *adev, *save; - - mutex_lock(&ipu6_bus_mutex); - - list_for_each_entry_safe(adev, save, &isp->devices, list) { - pm_runtime_disable(&adev->auxdev.dev); - list_del(&adev->list); - auxiliary_device_delete(&adev->auxdev); - auxiliary_device_uninit(&adev->auxdev); - } - - mutex_unlock(&ipu6_bus_mutex); -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h deleted file mode 100644 index bb4926d..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013 - 2024 Intel Corporation */ - -#ifndef IPU6_BUS_H -#define IPU6_BUS_H - -#include -#include -#include -#include -#include -#include -#include - -struct firmware; -struct pci_dev; - -#define IPU6_BUS_NAME IPU6_NAME "-bus" - -struct ipu6_buttress_ctrl; - -struct ipu6_bus_device { - struct auxiliary_device auxdev; - const struct auxiliary_driver *auxdrv; - const struct ipu6_auxdrv_data *auxdrv_data; - struct list_head list; - void *pdata; - struct ipu6_mmu *mmu; - struct ipu6_device *isp; - struct ipu6_buttress_ctrl *ctrl; - u64 dma_mask; - const struct firmware *fw; - struct sg_table fw_sgt; - u64 *pkg_dir; - dma_addr_t pkg_dir_dma_addr; - unsigned int pkg_dir_size; -}; - -struct ipu6_auxdrv_data { - irqreturn_t (*isr)(struct ipu6_bus_device *adev); - irqreturn_t (*isr_threaded)(struct ipu6_bus_device *adev); - bool wake_isr_thread; -}; - -#define to_ipu6_bus_device(_dev) \ - container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev) -#define auxdev_to_adev(_auxdev) \ - container_of(_auxdev, struct 
ipu6_bus_device, auxdev) -#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) - -struct ipu6_bus_device * -ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, - void *pdata, struct ipu6_buttress_ctrl *ctrl, - char *name); -int ipu6_bus_add_device(struct ipu6_bus_device *adev); -void ipu6_bus_del_devices(struct pci_dev *pdev); - -#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c deleted file mode 100644 index 1ee63ef..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +++ /dev/null @@ -1,933 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-buttress.h" -#include "ipu6-platform-buttress-regs.h" - -#define BOOTLOADER_STATUS_OFFSET 0x15c - -#define BOOTLOADER_MAGIC_KEY 0xb00710ad - -#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 -#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 -#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE - -#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 - -#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC) - -#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC) -#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC) -#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC) - -#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC -#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC -#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC) -#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC) - -#define BUTTRESS_IPC_RESET_RETRY 2000 -#define BUTTRESS_CSE_IPC_RESET_RETRY 4 -#define BUTTRESS_IPC_CMD_SEND_RETRY 1 - -#define 
BUTTRESS_MAX_CONSECUTIVE_IRQS 100 - -static const u32 ipu6_adev_irq_mask[2] = { - BUTTRESS_ISR_IS_IRQ, - BUTTRESS_ISR_PS_IRQ -}; - -int ipu6_buttress_ipc_reset(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc) -{ - unsigned int retries = BUTTRESS_IPC_RESET_RETRY; - struct ipu6_buttress *b = &isp->buttress; - u32 val = 0, csr_in_clr; - - if (!isp->secure_mode) { - dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode"); - return 0; - } - - mutex_lock(&b->ipc_mutex); - - /* Clear-by-1 CSR (all bits), corresponding internal states. */ - val = readl(isp->base + ipc->csr_in); - writel(val, isp->base + ipc->csr_in); - - /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ - writel(ENTRY, isp->base + ipc->csr_out); - /* - * Clear-by-1 all CSR bits EXCEPT following - * bits: - * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. - * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. - * C. Possibly custom bits, depending on - * their role. - */ - csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | - BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | - BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; - - do { - usleep_range(400, 500); - val = readl(isp->base + ipc->csr_in); - switch (val) { - case ENTRY | EXIT: - case ENTRY | EXIT | QUERY: - /* - * 1) Clear-by-1 CSR bits - * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, - * IPC_PEER_COMP_ACTIONS_RST_PHASE2). - * 2) Set peer CSR bit - * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. - */ - writel(ENTRY | EXIT, isp->base + ipc->csr_in); - writel(QUERY, isp->base + ipc->csr_out); - break; - case ENTRY: - case ENTRY | QUERY: - /* - * 1) Clear-by-1 CSR bits - * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, - * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). - * 2) Set peer CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE1. - */ - writel(ENTRY | QUERY, isp->base + ipc->csr_in); - writel(ENTRY, isp->base + ipc->csr_out); - break; - case EXIT: - case EXIT | QUERY: - /* - * Clear-by-1 CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE2. 
- * 1) Clear incoming doorbell. - * 2) Clear-by-1 all CSR bits EXCEPT following - * bits: - * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. - * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. - * C. Possibly custom bits, depending on - * their role. - * 3) Set peer CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE2. - */ - writel(EXIT, isp->base + ipc->csr_in); - writel(0, isp->base + ipc->db0_in); - writel(csr_in_clr, isp->base + ipc->csr_in); - writel(EXIT, isp->base + ipc->csr_out); - - /* - * Read csr_in again to make sure if RST_PHASE2 is done. - * If csr_in is QUERY, it should be handled again. - */ - usleep_range(200, 300); - val = readl(isp->base + ipc->csr_in); - if (val & QUERY) { - dev_dbg(&isp->pdev->dev, - "RST_PHASE2 retry csr_in = %x\n", val); - break; - } - mutex_unlock(&b->ipc_mutex); - return 0; - case QUERY: - /* - * 1) Clear-by-1 CSR bit - * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. - * 2) Set peer CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE1 - */ - writel(QUERY, isp->base + ipc->csr_in); - writel(ENTRY, isp->base + ipc->csr_out); - break; - default: - dev_dbg_ratelimited(&isp->pdev->dev, - "Unexpected CSR 0x%x\n", val); - break; - } - } while (retries--); - - mutex_unlock(&b->ipc_mutex); - dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n"); - - return -ETIMEDOUT; -} - -static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc) -{ - writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, - isp->base + ipc->csr_out); -} - -static int -ipu6_buttress_ipc_validity_open(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc) -{ - unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; - void __iomem *addr; - int ret; - u32 val; - - writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, - isp->base + ipc->csr_out); - - addr = isp->base + ipc->csr_in; - ret = readl_poll_timeout(addr, val, val & mask, 200, - BUTTRESS_IPC_VALIDITY_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "CSE validity timeout 
0x%x\n", val); - ipu6_buttress_ipc_validity_close(isp, ipc); - } - - return ret; -} - -static void ipu6_buttress_ipc_recv(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc, u32 *ipc_msg) -{ - if (ipc_msg) - *ipc_msg = readl(isp->base + ipc->data0_in); - writel(0, isp->base + ipc->db0_in); -} - -static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp, - enum ipu6_buttress_ipc_domain ipc_domain, - struct ipu6_ipc_buttress_bulk_msg *msgs, - u32 size) -{ - unsigned long tx_timeout_jiffies, rx_timeout_jiffies; - unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; - struct ipu6_buttress *b = &isp->buttress; - struct ipu6_buttress_ipc *ipc; - u32 val; - int ret; - int tout; - - ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? &b->cse : &b->ish; - - mutex_lock(&b->ipc_mutex); - - ret = ipu6_buttress_ipc_validity_open(isp, ipc); - if (ret) { - dev_err(&isp->pdev->dev, "IPC validity open failed\n"); - goto out; - } - - tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS); - rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS); - - for (i = 0; i < size; i++) { - reinit_completion(&ipc->send_complete); - if (msgs[i].require_resp) - reinit_completion(&ipc->recv_complete); - - dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", - msgs[i].cmd); - writel(msgs[i].cmd, isp->base + ipc->data0_out); - val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size; - writel(val, isp->base + ipc->db0_out); - - tout = wait_for_completion_timeout(&ipc->send_complete, - tx_timeout_jiffies); - if (!tout) { - dev_err(&isp->pdev->dev, "send IPC response timeout\n"); - if (!retry--) { - ret = -ETIMEDOUT; - goto out; - } - - /* Try again if CSE is not responding on first try */ - writel(0, isp->base + ipc->db0_out); - i--; - continue; - } - - retry = BUTTRESS_IPC_CMD_SEND_RETRY; - - if (!msgs[i].require_resp) - continue; - - tout = wait_for_completion_timeout(&ipc->recv_complete, - rx_timeout_jiffies); - if (!tout) { - dev_err(&isp->pdev->dev, "recv IPC response 
timeout\n"); - ret = -ETIMEDOUT; - goto out; - } - - if (ipc->nack_mask && - (ipc->recv_data & ipc->nack_mask) == ipc->nack) { - dev_err(&isp->pdev->dev, - "IPC NACK for cmd 0x%x\n", msgs[i].cmd); - ret = -EIO; - goto out; - } - - if (ipc->recv_data != msgs[i].expected_resp) { - dev_err(&isp->pdev->dev, - "expected resp: 0x%x, IPC response: 0x%x ", - msgs[i].expected_resp, ipc->recv_data); - ret = -EIO; - goto out; - } - } - - dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n"); - -out: - ipu6_buttress_ipc_validity_close(isp, ipc); - mutex_unlock(&b->ipc_mutex); - return ret; -} - -static int -ipu6_buttress_ipc_send(struct ipu6_device *isp, - enum ipu6_buttress_ipc_domain ipc_domain, - u32 ipc_msg, u32 size, bool require_resp, - u32 expected_resp) -{ - struct ipu6_ipc_buttress_bulk_msg msg = { - .cmd = ipc_msg, - .cmd_size = size, - .require_resp = require_resp, - .expected_resp = expected_resp, - }; - - return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); -} - -static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev) -{ - irqreturn_t ret = IRQ_WAKE_THREAD; - - if (!adev || !adev->auxdrv || !adev->auxdrv_data) - return IRQ_NONE; - - if (adev->auxdrv_data->isr) - ret = adev->auxdrv_data->isr(adev); - - if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded) - ret = IRQ_NONE; - - return ret; -} - -irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr) -{ - struct ipu6_device *isp = isp_ptr; - struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; - struct ipu6_buttress *b = &isp->buttress; - u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; - irqreturn_t ret = IRQ_NONE; - u32 disable_irqs = 0; - u32 irq_status; - u32 i, count = 0; - int active; - - active = pm_runtime_get_if_active(&isp->pdev->dev); - if (!active) - return IRQ_NONE; - - irq_status = readl(isp->base + reg_irq_sts); - if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) { - if (active > 0) - pm_runtime_put_noidle(&isp->pdev->dev); - return IRQ_NONE; - } - - do { 
- writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); - - for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) { - irqreturn_t r = ipu6_buttress_call_isr(adev[i]); - - if (!(irq_status & ipu6_adev_irq_mask[i])) - continue; - - if (r == IRQ_WAKE_THREAD) { - ret = IRQ_WAKE_THREAD; - disable_irqs |= ipu6_adev_irq_mask[i]; - } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { - ret = IRQ_HANDLED; - } - } - - if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE) - ret = IRQ_HANDLED; - - if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n"); - ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); - complete(&b->cse.recv_complete); - } - - if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n"); - ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data); - complete(&b->ish.recv_complete); - } - - if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); - complete(&b->cse.send_complete); - } - - if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); - complete(&b->ish.send_complete); - } - - if (irq_status & BUTTRESS_ISR_SAI_VIOLATION && - ipu6_buttress_get_secure_mode(isp)) - dev_err(&isp->pdev->dev, - "BUTTRESS_ISR_SAI_VIOLATION\n"); - - if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR | - BUTTRESS_ISR_PS_FATAL_MEM_ERR)) - dev_err(&isp->pdev->dev, - "BUTTRESS_ISR_FATAL_MEM_ERR\n"); - - if (irq_status & BUTTRESS_ISR_UFI_ERROR) - dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n"); - - if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) { - dev_err(&isp->pdev->dev, "too many consecutive IRQs\n"); - ret = IRQ_NONE; - break; - } - - irq_status = readl(isp->base + reg_irq_sts); - } while (irq_status); - - if (disable_irqs) - writel(BUTTRESS_IRQS & ~disable_irqs, - isp->base + BUTTRESS_REG_ISR_ENABLE); - 
- if (active > 0) - pm_runtime_put(&isp->pdev->dev); - - return ret; -} - -irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) -{ - struct ipu6_device *isp = isp_ptr; - struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; - const struct ipu6_auxdrv_data *drv_data = NULL; - irqreturn_t ret = IRQ_NONE; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) { - drv_data = adev[i]->auxdrv_data; - if (!drv_data) - continue; - - if (drv_data->wake_isr_thread && - drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) - ret = IRQ_HANDLED; - } - - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); - - return ret; -} - -int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, - bool on) -{ - struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; - u32 pwr_sts, val; - int ret; - - if (!ctrl) - return 0; - - mutex_lock(&isp->buttress.power_mutex); - - if (!on) { - val = 0; - pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; - } else { - val = BUTTRESS_FREQ_CTL_START | - FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK, - ctrl->ratio) | - FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK, - ctrl->qos_floor) | - BUTTRESS_FREQ_CTL_ICCMAX_LEVEL; - - pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; - } - - writel(val, isp->base + ctrl->freq_ctl); - - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, - val, (val & ctrl->pwr_sts_mask) == pwr_sts, - 100, BUTTRESS_POWER_TIMEOUT_US); - if (ret) - dev_err(&isp->pdev->dev, - "Change power status timeout with 0x%x\n", val); - - ctrl->started = !ret && on; - - mutex_unlock(&isp->buttress.power_mutex); - - return ret; -} - -bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp) -{ - u32 val; - - val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); - - return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; -} - -bool ipu6_buttress_auth_done(struct ipu6_device *isp) -{ - u32 val; - - if (!isp->secure_mode) - return true; - - val = readl(isp->base + 
BUTTRESS_REG_SECURITY_CTL); - val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); - - return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6); - -int ipu6_buttress_reset_authentication(struct ipu6_device *isp) -{ - int ret; - u32 val; - - if (!isp->secure_mode) { - dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); - return 0; - } - - writel(BUTTRESS_FW_RESET_CTL_START, isp->base + - BUTTRESS_REG_FW_RESET_CTL); - - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, - val & BUTTRESS_FW_RESET_CTL_DONE, 500, - BUTTRESS_CSE_FWRESET_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, - "Time out while resetting authentication state\n"); - return ret; - } - - dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n"); - writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); - /* leave some time for HW restore */ - usleep_range(800, 1000); - - return 0; -} - -int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, - const struct firmware *fw, struct sg_table *sgt) -{ - bool is_vmalloc = is_vmalloc_addr(fw->data); - struct pci_dev *pdev = sys->isp->pdev; - struct page **pages; - const void *addr; - unsigned long n_pages; - unsigned int i; - int ret; - - if (!is_vmalloc && !virt_addr_valid(fw->data)) - return -EDOM; - - n_pages = PHYS_PFN(PAGE_ALIGN(fw->size)); - - pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; - - addr = fw->data; - for (i = 0; i < n_pages; i++) { - struct page *p = is_vmalloc ? 
- vmalloc_to_page(addr) : virt_to_page(addr); - - if (!p) { - ret = -ENOMEM; - goto out; - } - pages[i] = p; - addr += PAGE_SIZE; - } - - ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, - GFP_KERNEL); - if (ret) { - ret = -ENOMEM; - goto out; - } - - ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); - if (ret) { - sg_free_table(sgt); - goto out; - } - - ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0); - if (ret) { - dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); - sg_free_table(sgt); - goto out; - } - - ipu6_dma_sync_sgtable(sys, sgt); - -out: - kfree(pages); - - return ret; -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6); - -void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, - struct sg_table *sgt) -{ - struct pci_dev *pdev = sys->isp->pdev; - - ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0); - dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); - sg_free_table(sgt); -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6); - -int ipu6_buttress_authenticate(struct ipu6_device *isp) -{ - struct ipu6_buttress *b = &isp->buttress; - struct ipu6_psys_pdata *psys_pdata; - u32 data, mask, done, fail; - int ret; - - if (!isp->secure_mode) { - dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); - return 0; - } - - psys_pdata = isp->psys->pdata; - - mutex_lock(&b->auth_mutex); - - if (ipu6_buttress_auth_done(isp)) { - ret = 0; - goto out_unlock; - } - - /* - * Write address of FIT table to FW_SOURCE register - * Let's use fw address. I.e. 
not using FIT table yet - */ - data = lower_32_bits(isp->psys->pkg_dir_dma_addr); - writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); - - data = upper_32_bits(isp->psys->pkg_dir_dma_addr); - writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); - - /* - * Write boot_load into IU2CSEDATA0 - * Write sizeof(boot_load) | 0x2 << CLIENT_ID to - * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as - */ - dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); - - ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE, - BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, - 1, true, - BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); - if (ret) { - dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); - goto out_unlock; - } - - mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; - done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; - fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, - ((data & mask) == done || - (data & mask) == fail), 500, - BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); - goto out_unlock; - } - - if ((data & mask) == fail) { - dev_err(&isp->pdev->dev, "CSE auth failed\n"); - ret = -EINVAL; - goto out_unlock; - } - - ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET, - data, data == BOOTLOADER_MAGIC_KEY, 500, - BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n", - data); - goto out_unlock; - } - - /* - * Write authenticate_run into IU2CSEDATA0 - * Write sizeof(boot_load) | 0x2 << CLIENT_ID to - * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as - */ - dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); - ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE, - BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, - 1, true, - BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); - if (ret) { - dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); - goto out_unlock; - } - - done = 
BUTTRESS_SECURITY_CTL_AUTH_DONE; - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, - ((data & mask) == done || - (data & mask) == fail), 500, - BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); - goto out_unlock; - } - - if ((data & mask) == fail) { - dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); - ret = -EINVAL; - goto out_unlock; - } - - dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); - -out_unlock: - mutex_unlock(&b->auth_mutex); - - return ret; -} - -static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp) -{ - u32 val, mask, done; - int ret; - - mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK; - - writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, - isp->base + BUTTRESS_REG_FABRIC_CMD); - - val = readl(isp->base + BUTTRESS_REG_PWR_STATE); - val = FIELD_GET(mask, val); - if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) { - dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); - return -EINVAL; - } - - done = BUTTRESS_PWR_STATE_HH_STATE_DONE; - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, - FIELD_GET(mask, val) == done, 500, - BUTTRESS_TSC_SYNC_TIMEOUT_US); - if (ret) - dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); - - return ret; -} - -int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) -{ - unsigned int i; - - for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { - u32 val; - int ret; - - ret = ipu6_buttress_send_tsc_request(isp); - if (ret != -ETIMEDOUT) - return ret; - - val = readl(isp->base + BUTTRESS_REG_TSW_CTL); - val = val | BUTTRESS_TSW_CTL_SOFT_RESET; - writel(val, isp->base + BUTTRESS_REG_TSW_CTL); - val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET; - writel(val, isp->base + BUTTRESS_REG_TSW_CTL); - } - - dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); - - return -ETIMEDOUT; -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); - -void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) -{ - u32 tsc_hi_1, 
tsc_hi_2, tsc_lo; - unsigned long flags; - - local_irq_save(flags); - tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI); - tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); - tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI); - if (tsc_hi_1 == tsc_hi_2) { - *val = (u64)tsc_hi_1 << 32 | tsc_lo; - } else { - /* Check if TSC has rolled over */ - if (tsc_lo & BIT(31)) - *val = (u64)tsc_hi_1 << 32 | tsc_lo; - else - *val = (u64)tsc_hi_2 << 32 | tsc_lo; - } - local_irq_restore(flags); -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6); - -u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) -{ - u64 ns = ticks * 10000; - - /* - * converting TSC tick count to ns is calculated by: - * Example (TSC clock frequency is 19.2MHz): - * ns = ticks * 1000 000 000 / 19.2Mhz - * = ticks * 1000 000 000 / 19200000Hz - * = ticks * 10000 / 192 ns - */ - return div_u64(ns, isp->buttress.ref_clk); -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6); - -void ipu6_buttress_restore(struct ipu6_device *isp) -{ - struct ipu6_buttress *b = &isp->buttress; - - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); - writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); -} - -int ipu6_buttress_init(struct ipu6_device *isp) -{ - int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; - struct ipu6_buttress *b = &isp->buttress; - u32 val; - - mutex_init(&b->power_mutex); - mutex_init(&b->auth_mutex); - mutex_init(&b->cons_mutex); - mutex_init(&b->ipc_mutex); - init_completion(&b->ish.send_complete); - init_completion(&b->cse.send_complete); - init_completion(&b->ish.recv_complete); - init_completion(&b->cse.recv_complete); - - b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; - b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; - b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; - b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; - b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; - b->cse.db0_out = 
BUTTRESS_REG_IU2CSEDB0; - b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; - b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; - - /* no ISH on IPU6 */ - memset(&b->ish, 0, sizeof(b->ish)); - INIT_LIST_HEAD(&b->constraints); - - isp->secure_mode = ipu6_buttress_get_secure_mode(isp); - dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n", - isp->secure_mode ? "secure" : "non-secure", - readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH), - readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); - - b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); - - /* get ref_clk frequency by reading the indication in btrs control */ - val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); - val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val); - - switch (val) { - case 0x0: - b->ref_clk = 240; - break; - case 0x1: - b->ref_clk = 192; - break; - case 0x2: - b->ref_clk = 384; - break; - default: - dev_warn(&isp->pdev->dev, - "Unsupported ref clock, use 19.2Mhz by default.\n"); - b->ref_clk = 192; - break; - } - - /* Retry couple of times in case of CSE initialization is delayed */ - do { - ret = ipu6_buttress_ipc_reset(isp, &b->cse); - if (ret) { - dev_warn(&isp->pdev->dev, - "IPC reset protocol failed, retrying\n"); - } else { - dev_dbg(&isp->pdev->dev, "IPC reset done\n"); - return 0; - } - } while (ipc_reset_retry--); - - dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); - - mutex_destroy(&b->power_mutex); - mutex_destroy(&b->auth_mutex); - mutex_destroy(&b->cons_mutex); - mutex_destroy(&b->ipc_mutex); - - return ret; -} - -void ipu6_buttress_exit(struct ipu6_device *isp) -{ - struct ipu6_buttress *b = &isp->buttress; - - writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); - - mutex_destroy(&b->power_mutex); - mutex_destroy(&b->auth_mutex); - mutex_destroy(&b->cons_mutex); - mutex_destroy(&b->ipc_mutex); -} diff --git 
a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h deleted file mode 100644 index 9b6f569..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +++ /dev/null @@ -1,92 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_BUTTRESS_H -#define IPU6_BUTTRESS_H - -#include -#include -#include -#include - -struct device; -struct firmware; -struct ipu6_device; -struct ipu6_bus_device; - -#define BUTTRESS_PS_FREQ_STEP 25U -#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) -#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32) - -#define BUTTRESS_IS_FREQ_STEP 25U -#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8) -#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22) - -struct ipu6_buttress_ctrl { - u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; - unsigned int ratio; - unsigned int qos_floor; - bool started; -}; - -struct ipu6_buttress_ipc { - struct completion send_complete; - struct completion recv_complete; - u32 nack; - u32 nack_mask; - u32 recv_data; - u32 csr_out; - u32 csr_in; - u32 db0_in; - u32 db0_out; - u32 data0_out; - u32 data0_in; -}; - -struct ipu6_buttress { - struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; - struct ipu6_buttress_ipc cse; - struct ipu6_buttress_ipc ish; - struct list_head constraints; - u32 wdt_cached_value; - bool force_suspend; - u32 ref_clk; -}; - -enum ipu6_buttress_ipc_domain { - IPU6_BUTTRESS_IPC_CSE, - IPU6_BUTTRESS_IPC_ISH, -}; - -struct ipu6_ipc_buttress_bulk_msg { - u32 cmd; - u32 expected_resp; - bool require_resp; - u8 cmd_size; -}; - -int ipu6_buttress_ipc_reset(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc); -int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, - const struct firmware *fw, - struct sg_table *sgt); -void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, - struct sg_table 
*sgt); -int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, - bool on); -bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp); -int ipu6_buttress_authenticate(struct ipu6_device *isp); -int ipu6_buttress_reset_authentication(struct ipu6_device *isp); -bool ipu6_buttress_auth_done(struct ipu6_device *isp); -int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp); -void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val); -u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp); - -irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr); -irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr); -int ipu6_buttress_init(struct ipu6_device *isp); -void ipu6_buttress_exit(struct ipu6_device *isp); -void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); -void ipu6_buttress_restore(struct ipu6_device *isp); -#endif /* IPU6_BUTTRESS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c deleted file mode 100644 index 21c1c12..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-cpd.h" -#include "ipu6-dma.h" - -/* 15 entries + header*/ -#define MAX_PKG_DIR_ENT_CNT 16 -/* 2 qword per entry/header */ -#define PKG_DIR_ENT_LEN 2 -/* PKG_DIR size in bytes */ -#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ - (PKG_DIR_ENT_LEN) * sizeof(u64)) -/* _IUPKDR_ */ -#define PKG_DIR_HDR_MARK 0x5f4955504b44525fULL - -/* $CPD */ -#define CPD_HDR_MARK 0x44504324 - -#define MAX_MANIFEST_SIZE (SZ_2K * sizeof(u32)) -#define MAX_METADATA_SIZE SZ_64K - -#define MAX_COMPONENT_ID 127 -#define MAX_COMPONENT_VERSION 0xffff - -#define MANIFEST_IDX 0 -#define METADATA_IDX 1 
-#define MODULEDATA_IDX 2 -/* - * PKG_DIR Entry (type == id) - * 63:56 55 54:48 47:32 31:24 23:0 - * Rsvd Rsvd Type Version Rsvd Size - */ -#define PKG_DIR_SIZE_MASK GENMASK(23, 0) -#define PKG_DIR_VERSION_MASK GENMASK(47, 32) -#define PKG_DIR_TYPE_MASK GENMASK(54, 48) - -static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd, - u8 idx) -{ - const struct ipu6_cpd_hdr *cpd_hdr = cpd; - const struct ipu6_cpd_ent *ent; - - ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len); - return ent + idx; -} - -#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX) -#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX) -#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX) - -static const struct ipu6_cpd_metadata_cmpnt_hdr * -ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata, - unsigned int metadata_size, u8 idx) -{ - size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn); - size_t cmpnt_count = metadata_size - extn_size; - - cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size); - - if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { - dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", - idx); - return ERR_PTR(-EINVAL); - } - - return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size; -} - -static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp, - const void *metadata, - unsigned int metadata_size, u8 idx) -{ - const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; - - cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); - if (IS_ERR(cmpnt)) - return PTR_ERR(cmpnt); - - return cmpnt->ver; -} - -static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp, - const void *metadata, - unsigned int metadata_size, u8 idx) -{ - const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; - - cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); - if (IS_ERR(cmpnt)) - return PTR_ERR(cmpnt); - - 
return cmpnt->id; -} - -static int ipu6_cpd_parse_module_data(struct ipu6_device *isp, - const void *module_data, - unsigned int module_data_size, - dma_addr_t dma_addr_module_data, - u64 *pkg_dir, const void *metadata, - unsigned int metadata_size) -{ - const struct ipu6_cpd_module_data_hdr *module_data_hdr; - const struct ipu6_cpd_hdr *dir_hdr; - const struct ipu6_cpd_ent *dir_ent; - unsigned int i; - u8 len; - - if (!module_data) - return -EINVAL; - - module_data_hdr = module_data; - dir_hdr = module_data + module_data_hdr->hdr_len; - len = dir_hdr->hdr_len; - dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len); - - pkg_dir[0] = PKG_DIR_HDR_MARK; - /* pkg_dir entry count = component count + pkg_dir header */ - pkg_dir[1] = dir_hdr->ent_cnt + 1; - - for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { - u64 *p = &pkg_dir[PKG_DIR_ENT_LEN * (1 + i)]; - int ver, id; - - *p++ = dma_addr_module_data + dir_ent->offset; - id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata, - metadata_size, i); - if (id < 0 || id > MAX_COMPONENT_ID) { - dev_err(&isp->pdev->dev, "Invalid CPD component id\n"); - return -EINVAL; - } - - ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata, - metadata_size, i); - if (ver < 0 || ver > MAX_COMPONENT_VERSION) { - dev_err(&isp->pdev->dev, - "Invalid CPD component version\n"); - return -EINVAL; - } - - *p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) | - FIELD_PREP(PKG_DIR_TYPE_MASK, id) | - FIELD_PREP(PKG_DIR_VERSION_MASK, ver); - } - - return 0; -} - -int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src) -{ - dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl); - const struct ipu6_cpd_ent *ent, *man_ent, *met_ent; - struct ipu6_device *isp = adev->isp; - unsigned int man_sz, met_sz; - void *pkg_dir_pos; - int ret; - - man_ent = ipu6_cpd_get_manifest(src); - man_sz = man_ent->len; - - met_ent = ipu6_cpd_get_metadata(src); - met_sz = met_ent->len; - - adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; - 
adev->pkg_dir = ipu6_dma_alloc(adev, adev->pkg_dir_size, - &adev->pkg_dir_dma_addr, GFP_KERNEL, 0); - if (!adev->pkg_dir) - return -ENOMEM; - - /* - * pkg_dir entry/header: - * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 - * N Address/Offset/"_IUPKDR_" - * N + 1 | rsvd | rsvd | type | ver | rsvd | size - * - * We can ignore other fields that size in N + 1 qword as they - * are 0 anyway. Just setting size for now. - */ - - ent = ipu6_cpd_get_moduledata(src); - - ret = ipu6_cpd_parse_module_data(isp, src + ent->offset, - ent->len, dma_addr_src + ent->offset, - adev->pkg_dir, src + met_ent->offset, - met_ent->len); - if (ret) { - dev_err(&isp->pdev->dev, "Failed to parse module data\n"); - ipu6_dma_free(adev, adev->pkg_dir_size, - adev->pkg_dir, adev->pkg_dir_dma_addr, 0); - return ret; - } - - /* Copy manifest after pkg_dir */ - pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; - memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); - - /* Copy metadata after manifest */ - pkg_dir_pos += man_sz; - memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); - - ipu6_dma_sync_single(adev, adev->pkg_dir_dma_addr, - adev->pkg_dir_size); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, INTEL_IPU6); - -void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev) -{ - ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir, - adev->pkg_dir_dma_addr, 0); -} -EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, INTEL_IPU6); - -static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd, - unsigned long cpd_size, - unsigned long data_size) -{ - const struct ipu6_cpd_hdr *cpd_hdr = cpd; - const struct ipu6_cpd_ent *ent; - unsigned int i; - u8 len; - - len = cpd_hdr->hdr_len; - - /* Ensure cpd hdr is within moduledata */ - if (cpd_size < len) { - dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); - return -EINVAL; - } - - /* Sanity check for CPD header */ - if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { - dev_err(&isp->pdev->dev, 
"Invalid CPD header\n"); - return -EINVAL; - } - - /* Ensure that all entries are within moduledata */ - ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len); - for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { - if (data_size < ent->offset || - data_size - ent->offset < ent->len) { - dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); - return -EINVAL; - } - } - - return 0; -} - -static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp, - const void *moduledata, - u32 moduledata_size) -{ - const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata; - int ret; - - /* Ensure moduledata hdr is within moduledata */ - if (moduledata_size < sizeof(*mod_hdr) || - moduledata_size < mod_hdr->hdr_len) { - dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); - return -EINVAL; - } - - dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date); - ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len, - moduledata_size - mod_hdr->hdr_len, - moduledata_size); - if (ret) { - dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n"); - return ret; - } - - return 0; -} - -static int ipu6_cpd_validate_metadata(struct ipu6_device *isp, - const void *metadata, u32 meta_size) -{ - const struct ipu6_cpd_metadata_extn *extn = metadata; - - /* Sanity check for metadata size */ - if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { - dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); - return -EINVAL; - } - - /* Validate extension and image types */ - if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT || - extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { - dev_err(&isp->pdev->dev, - "Invalid CPD metadata descriptor img_type (%d)\n", - extn->img_type); - return -EINVAL; - } - - /* Validate metadata size multiple of metadata components */ - if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) { - dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n"); - return -EINVAL; - } - - return 0; -} - -int 
ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, - unsigned long cpd_file_size) -{ - const struct ipu6_cpd_hdr *hdr = cpd_file; - const struct ipu6_cpd_ent *ent; - int ret; - - ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size, - cpd_file_size); - if (ret) { - dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); - return ret; - } - - /* Check for CPD file marker */ - if (hdr->hdr_mark != CPD_HDR_MARK) { - dev_err(&isp->pdev->dev, "Invalid CPD header\n"); - return -EINVAL; - } - - /* Sanity check for manifest size */ - ent = ipu6_cpd_get_manifest(cpd_file); - if (ent->len > MAX_MANIFEST_SIZE) { - dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n"); - return -EINVAL; - } - - /* Validate metadata */ - ent = ipu6_cpd_get_metadata(cpd_file); - ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); - if (ret) { - dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); - return ret; - } - - /* Validate moduledata */ - ent = ipu6_cpd_get_moduledata(cpd_file); - ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset, - ent->len); - if (ret) - dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n"); - - return ret; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h deleted file mode 100644 index e0e4fde..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2015--2024 Intel Corporation */ - -#ifndef IPU6_CPD_H -#define IPU6_CPD_H - -struct ipu6_device; -struct ipu6_bus_device; - -#define IPU6_CPD_SIZE_OF_FW_ARCH_VERSION 7 -#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION 11 -#define IPU6_CPD_SIZE_OF_COMPONENT_NAME 12 - -#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 - -#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED 0 -#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 -#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 - -#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX 0 
-#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX 1 - -#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE 3 - -#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48 -#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE 32 - -struct ipu6_cpd_module_data_hdr { - u32 hdr_len; - u32 endian; - u32 fw_pkg_date; - u32 hive_sdk_date; - u32 compiler_date; - u32 target_platform_type; - u8 sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION]; - u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION]; - u8 rsvd[2]; -} __packed; - -/* - * ipu6_cpd_hdr structure updated as the chksum and - * sub_partition_name is unused on host side - * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10) - * CSE layout version 1.7 for IPU6 (hdr_len = 0x14) - */ -struct ipu6_cpd_hdr { - u32 hdr_mark; - u32 ent_cnt; - u8 hdr_ver; - u8 ent_ver; - u8 hdr_len; -} __packed; - -struct ipu6_cpd_ent { - u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME]; - u32 offset; - u32 len; - u8 rsvd[4]; -} __packed; - -struct ipu6_cpd_metadata_cmpnt_hdr { - u32 id; - u32 size; - u32 ver; -} __packed; - -struct ipu6_cpd_metadata_cmpnt { - struct ipu6_cpd_metadata_cmpnt_hdr hdr; - u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE]; - u32 entry_point; - u32 icache_base_offs; - u8 attrs[16]; -} __packed; - -struct ipu6se_cpd_metadata_cmpnt { - struct ipu6_cpd_metadata_cmpnt_hdr hdr; - u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE]; - u32 entry_point; - u32 icache_base_offs; - u8 attrs[16]; -} __packed; - -struct ipu6_cpd_metadata_extn { - u32 extn_type; - u32 len; - u32 img_type; - u8 rsvd[16]; -} __packed; - -struct ipu6_cpd_client_pkg_hdr { - u32 prog_list_offs; - u32 prog_list_size; - u32 prog_desc_offs; - u32 prog_desc_size; - u32 pg_manifest_offs; - u32 pg_manifest_size; - u32 prog_bin_offs; - u32 prog_bin_size; -} __packed; - -int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src); -void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev); -int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, - unsigned long cpd_file_size); -#endif /* 
IPU6_CPD_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c deleted file mode 100644 index b71f66b..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c +++ /dev/null @@ -1,492 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-mmu.h" - -struct vm_info { - struct list_head list; - struct page **pages; - dma_addr_t ipu6_iova; - void *vaddr; - unsigned long size; -}; - -static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova) -{ - struct vm_info *info, *save; - - list_for_each_entry_safe(info, save, &mmu->vma_list, list) { - if (iova >= info->ipu6_iova && - iova < (info->ipu6_iova + info->size)) - return info; - } - - return NULL; -} - -static void __clear_buffer(struct page *page, size_t size, unsigned long attrs) -{ - void *ptr; - - if (!page) - return; - /* - * Ensure that the allocated pages are zeroed, and that any data - * lurking in the kernel direct-mapped region is invalidated. 
- */ - ptr = page_address(page); - memset(ptr, 0, size); - if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - clflush_cache_range(ptr, size); -} - -static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs) -{ - int count = PHYS_PFN(size); - int array_size = count * sizeof(struct page *); - struct page **pages; - int i = 0; - - pages = kvzalloc(array_size, GFP_KERNEL); - if (!pages) - return NULL; - - gfp |= __GFP_NOWARN; - - while (count) { - int j, order = __fls(count); - - pages[i] = alloc_pages(gfp, order); - while (!pages[i] && order) - pages[i] = alloc_pages(gfp, --order); - if (!pages[i]) - goto error; - - if (order) { - split_page(pages[i], order); - j = 1 << order; - while (j--) - pages[i + j] = pages[i] + j; - } - - __clear_buffer(pages[i], PAGE_SIZE << order, attrs); - i += 1 << order; - count -= 1 << order; - } - - return pages; -error: - while (i--) - if (pages[i]) - __free_pages(pages[i], 0); - kvfree(pages); - return NULL; -} - -static void __free_buffer(struct page **pages, size_t size, unsigned long attrs) -{ - int count = PHYS_PFN(size); - unsigned int i; - - for (i = 0; i < count && pages[i]; i++) { - __clear_buffer(pages[i], PAGE_SIZE, attrs); - __free_pages(pages[i], 0); - } - - kvfree(pages); -} - -void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, - size_t size) -{ - void *vaddr; - u32 offset; - struct vm_info *info; - struct ipu6_mmu *mmu = sys->mmu; - - info = get_vm_info(mmu, dma_handle); - if (WARN_ON(!info)) - return; - - offset = dma_handle - info->ipu6_iova; - if (WARN_ON(size > (info->size - offset))) - return; - - vaddr = info->vaddr + offset; - clflush_cache_range(vaddr, size); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, INTEL_IPU6); - -void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nents, i) - clflush_cache_range(page_to_virt(sg_page(sg)), sg->length); -} 
-EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, INTEL_IPU6); - -void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt) -{ - ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, INTEL_IPU6); - -void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct pci_dev *pdev = sys->isp->pdev; - dma_addr_t pci_dma_addr, ipu6_iova; - struct ipu6_mmu *mmu = sys->mmu; - struct vm_info *info; - unsigned long count; - struct page **pages; - struct iova *iova; - unsigned int i; - int ret; - - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return NULL; - - size = PAGE_ALIGN(size); - count = PHYS_PFN(size); - - iova = alloc_iova(&mmu->dmap->iovad, count, - PHYS_PFN(dma_get_mask(dev)), 0); - if (!iova) - goto out_kfree; - - pages = __alloc_buffer(size, gfp, attrs); - if (!pages) - goto out_free_iova; - - dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n", - size, iova->pfn_lo, iova->pfn_hi); - for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { - pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, - PAGE_SIZE, DMA_BIDIRECTIONAL, - attrs); - dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n", - &pci_dma_addr); - if (dma_mapping_error(&pdev->dev, pci_dma_addr)) { - dev_err(dev, "pci_dma_mapping for page[%d] failed", i); - goto out_unmap; - } - - ret = ipu6_mmu_map(mmu->dmap->mmu_info, - PFN_PHYS(iova->pfn_lo + i), pci_dma_addr, - PAGE_SIZE); - if (ret) { - dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed", - i, &pci_dma_addr); - dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, - PAGE_SIZE, DMA_BIDIRECTIONAL, - attrs); - goto out_unmap; - } - } - - info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL); - if (!info->vaddr) - goto out_unmap; - - *dma_handle = PFN_PHYS(iova->pfn_lo); - - info->pages = pages; - info->ipu6_iova = *dma_handle; - info->size = size; - 
list_add(&info->list, &mmu->vma_list); - - return info->vaddr; - -out_unmap: - while (i--) { - ipu6_iova = PFN_PHYS(iova->pfn_lo + i); - pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, - ipu6_iova); - dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL, attrs); - - ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE); - } - - __free_buffer(pages, size, attrs); - -out_free_iova: - __free_iova(&mmu->dmap->iovad, iova); -out_kfree: - kfree(info); - - return NULL; -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, INTEL_IPU6); - -void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - struct ipu6_mmu *mmu = sys->mmu; - struct pci_dev *pdev = sys->isp->pdev; - struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle)); - dma_addr_t pci_dma_addr, ipu6_iova; - struct vm_info *info; - struct page **pages; - unsigned int i; - - if (WARN_ON(!iova)) - return; - - info = get_vm_info(mmu, dma_handle); - if (WARN_ON(!info)) - return; - - if (WARN_ON(!info->vaddr)) - return; - - if (WARN_ON(!info->pages)) - return; - - list_del(&info->list); - - size = PAGE_ALIGN(size); - - pages = info->pages; - - vunmap(vaddr); - - for (i = 0; i < PHYS_PFN(size); i++) { - ipu6_iova = PFN_PHYS(iova->pfn_lo + i); - pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, - ipu6_iova); - dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL, attrs); - } - - ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - - __free_buffer(pages, size, attrs); - - mmu->tlb_invalidate(mmu); - - __free_iova(&mmu->dmap->iovad, iova); - - kfree(info); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, INTEL_IPU6); - -int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, - void *addr, dma_addr_t iova, size_t size, - unsigned long attrs) -{ - struct ipu6_mmu *mmu = sys->mmu; - size_t count = PFN_UP(size); - struct vm_info *info; - 
size_t i; - int ret; - - info = get_vm_info(mmu, iova); - if (!info) - return -EFAULT; - - if (!info->vaddr) - return -EFAULT; - - if (vma->vm_start & ~PAGE_MASK) - return -EINVAL; - - if (size > info->size) - return -EFAULT; - - for (i = 0; i < count; i++) { - ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i), - info->pages[i]); - if (ret < 0) - return ret; - } - - return 0; -} - -void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct ipu6_mmu *mmu = sys->mmu; - struct iova *iova = find_iova(&mmu->dmap->iovad, - PHYS_PFN(sg_dma_address(sglist))); - struct scatterlist *sg; - dma_addr_t pci_dma_addr; - unsigned int i; - - if (!nents) - return; - - if (WARN_ON(!iova)) - return; - - /* - * Before IPU6 mmu unmap, return the pci dma address back to sg - * assume the nents is less than orig_nents as the least granule - * is 1 SZ_4K page - */ - dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents); - for_each_sg(sglist, sg, nents, i) { - dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i, - &sg_dma_address(sg), sg_dma_len(sg)); - pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, - sg_dma_address(sg)); - dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n", - &pci_dma_addr, i); - sg_dma_address(sg) = pci_dma_addr; - } - - dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n", - iova->pfn_lo, iova->pfn_hi); - ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - - mmu->tlb_invalidate(mmu); - __free_iova(&mmu->dmap->iovad, iova); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, INTEL_IPU6); - -int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct ipu6_mmu *mmu = sys->mmu; - struct scatterlist *sg; - struct iova *iova; - size_t npages = 0; - 
unsigned long iova_addr; - int i; - - for_each_sg(sglist, sg, nents, i) { - if (sg->offset) { - dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n", - i, sg->offset); - return -EFAULT; - } - } - - for_each_sg(sglist, sg, nents, i) - npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg))); - - dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n", - nents, npages); - - iova = alloc_iova(&mmu->dmap->iovad, npages, - PHYS_PFN(dma_get_mask(dev)), 0); - if (!iova) - return 0; - - dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, - iova->pfn_hi); - - iova_addr = iova->pfn_lo; - for_each_sg(sglist, sg, nents, i) { - phys_addr_t iova_pa; - int ret; - - iova_pa = PFN_PHYS(iova_addr); - dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n", - i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg)); - - ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), - sg_dma_address(sg), - PAGE_ALIGN(sg_dma_len(sg))); - if (ret) - goto out_fail; - - sg_dma_address(sg) = PFN_PHYS(iova_addr); - - iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg))); - } - - dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages); - - return nents; - -out_fail: - ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, INTEL_IPU6); - -int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - int nents; - - nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs); - if (nents < 0) - return nents; - - sgt->nents = nents; - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, INTEL_IPU6); - -void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, INTEL_IPU6); - -/* - * Create scatter-list for the already allocated DMA buffer - */ -int ipu6_dma_get_sgtable(struct 
ipu6_bus_device *sys, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct ipu6_mmu *mmu = sys->mmu; - struct vm_info *info; - int n_pages; - int ret = 0; - - info = get_vm_info(mmu, handle); - if (!info) - return -EFAULT; - - if (!info->vaddr) - return -EFAULT; - - if (WARN_ON(!info->pages)) - return -ENOMEM; - - n_pages = PHYS_PFN(PAGE_ALIGN(size)); - - ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size, - GFP_KERNEL); - if (ret) - dev_warn(dev, "get sgt table failed\n"); - - return ret; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h deleted file mode 100644 index b51244a..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_DMA_H -#define IPU6_DMA_H - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" - -struct ipu6_mmu_info; - -struct ipu6_dma_mapping { - struct ipu6_mmu_info *mmu_info; - struct iova_domain iovad; -}; - -void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, - size_t size); -void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents); -void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt); -void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs); -void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs); -int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, - void *addr, dma_addr_t iova, size_t size, - unsigned long attrs); -int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs); 
-void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs); -int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs); -void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs); -int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs); -#endif /* IPU6_DMA_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c deleted file mode 100644 index 7d3d931..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +++ /dev/null @@ -1,413 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-fw-com.h" - -/* - * FWCOM layer is a shared resource between FW and driver. It consist - * of token queues to both send and receive directions. Queue is simply - * an array of structures with read and write indexes to the queue. - * There are 1...n queues to both directions. Queues locates in - * system RAM and are mapped to ISP MMU so that both CPU and ISP can - * see the same buffer. Indexes are located in ISP DMEM so that FW code - * can poll those with very low latency and cost. CPU access to indexes is - * more costly but that happens only at message sending time and - * interrupt triggered message handling. CPU doesn't need to poll indexes. - * wr_reg / rd_reg are offsets to those dmem location. They are not - * the indexes itself. 
- */ - -/* Shared structure between driver and FW - do not modify */ -struct ipu6_fw_sys_queue { - u64 host_address; - u32 vied_address; - u32 size; - u32 token_size; - u32 wr_reg; /* reg number in subsystem's regmem */ - u32 rd_reg; - u32 _align; -} __packed; - -struct ipu6_fw_sys_queue_res { - u64 host_address; - u32 vied_address; - u32 reg; -} __packed; - -enum syscom_state { - /* Program load or explicit host setting should init to this */ - SYSCOM_STATE_UNINIT = 0x57a7e000, - /* SP Syscom sets this when it is ready for use */ - SYSCOM_STATE_READY = 0x57a7e001, - /* SP Syscom sets this when no more syscom accesses will happen */ - SYSCOM_STATE_INACTIVE = 0x57a7e002, -}; - -enum syscom_cmd { - /* Program load or explicit host setting should init to this */ - SYSCOM_COMMAND_UNINIT = 0x57a7f000, - /* Host Syscom requests syscom to become inactive */ - SYSCOM_COMMAND_INACTIVE = 0x57a7f001, -}; - -/* firmware config: data that sent from the host to SP via DDR */ -/* Cell copies data into a context */ - -struct ipu6_fw_syscom_config { - u32 firmware_address; - - u32 num_input_queues; - u32 num_output_queues; - - /* ISP pointers to an array of ipu6_fw_sys_queue structures */ - u32 input_queue; - u32 output_queue; - - /* ISYS / PSYS private data */ - u32 specific_addr; - u32 specific_size; -}; - -struct ipu6_fw_com_context { - struct ipu6_bus_device *adev; - void __iomem *dmem_addr; - int (*cell_ready)(struct ipu6_bus_device *adev); - void (*cell_start)(struct ipu6_bus_device *adev); - - void *dma_buffer; - dma_addr_t dma_addr; - unsigned int dma_size; - - struct ipu6_fw_sys_queue *input_queue; /* array of host to SP queues */ - struct ipu6_fw_sys_queue *output_queue; /* array of SP to host */ - - u32 config_vied_addr; - - unsigned int buttress_boot_offset; - void __iomem *base_addr; -}; - -#define FW_COM_WR_REG 0 -#define FW_COM_RD_REG 4 - -#define REGMEM_OFFSET 0 -#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a - -enum regmem_id { - /* pass pkg_dir address to SPC in 
non-secure mode */ - PKG_DIR_ADDR_REG = 0, - /* Tunit CFG blob for secure - provided by host.*/ - TUNIT_CFG_DWR_REG = 1, - /* syscom commands - modified by the host */ - SYSCOM_COMMAND_REG = 2, - /* Store interrupt status - updated by SP */ - SYSCOM_IRQ_REG = 3, - /* first syscom queue pointer register */ - SYSCOM_QPR_BASE_REG = 4 -}; - -#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000 -#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) \ - ((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4) - -enum buttress_syscom_id { - /* pass syscom configuration to SPC */ - SYSCOM_CONFIG_ID = 0, - /* syscom state - modified by SP */ - SYSCOM_STATE_ID = 1, - /* syscom vtl0 addr mask */ - SYSCOM_VTL0_ADDR_MASK_ID = 2, - SYSCOM_ID_MAX -}; - -static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size, - unsigned int token_size, - struct ipu6_fw_sys_queue_res *res) -{ - unsigned int buf_size = (size + 1) * token_size; - - q->size = size + 1; - q->token_size = token_size; - - /* acquire the shared buffer space */ - q->host_address = res->host_address; - res->host_address += buf_size; - q->vied_address = res->vied_address; - res->vied_address += buf_size; - - /* acquire the shared read and writer pointers */ - q->wr_reg = res->reg; - res->reg++; - q->rd_reg = res->reg; - res->reg++; -} - -void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, - struct ipu6_bus_device *adev, void __iomem *base) -{ - size_t conf_size, inq_size, outq_size, specific_size; - struct ipu6_fw_syscom_config *config_host_addr; - unsigned int sizeinput = 0, sizeoutput = 0; - struct ipu6_fw_sys_queue_res res; - struct ipu6_fw_com_context *ctx; - struct device *dev = &adev->auxdev.dev; - size_t sizeall, offset; - void *specific_host_addr; - unsigned int i; - - if (!cfg || !cfg->cell_start || !cfg->cell_ready) - return NULL; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return NULL; - ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; - ctx->adev = adev; - ctx->cell_start = 
cfg->cell_start; - ctx->cell_ready = cfg->cell_ready; - ctx->buttress_boot_offset = cfg->buttress_boot_offset; - ctx->base_addr = base; - - /* - * Allocate DMA mapped memory. Allocate one big chunk. - */ - /* Base cfg for FW */ - conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8); - /* Descriptions of the queues */ - inq_size = size_mul(cfg->num_input_queues, - sizeof(struct ipu6_fw_sys_queue)); - outq_size = size_mul(cfg->num_output_queues, - sizeof(struct ipu6_fw_sys_queue)); - /* FW specific information structure */ - specific_size = roundup(cfg->specific_size, 8); - - sizeall = conf_size + inq_size + outq_size + specific_size; - - for (i = 0; i < cfg->num_input_queues; i++) - sizeinput += size_mul(cfg->input[i].queue_size + 1, - cfg->input[i].token_size); - - for (i = 0; i < cfg->num_output_queues; i++) - sizeoutput += size_mul(cfg->output[i].queue_size + 1, - cfg->output[i].token_size); - - sizeall += sizeinput + sizeoutput; - - ctx->dma_buffer = ipu6_dma_alloc(adev, sizeall, &ctx->dma_addr, - GFP_KERNEL, 0); - if (!ctx->dma_buffer) { - dev_err(dev, "failed to allocate dma memory\n"); - kfree(ctx); - return NULL; - } - - ctx->dma_size = sizeall; - - config_host_addr = ctx->dma_buffer; - ctx->config_vied_addr = ctx->dma_addr; - - offset = conf_size; - ctx->input_queue = ctx->dma_buffer + offset; - config_host_addr->input_queue = ctx->dma_addr + offset; - config_host_addr->num_input_queues = cfg->num_input_queues; - - offset += inq_size; - ctx->output_queue = ctx->dma_buffer + offset; - config_host_addr->output_queue = ctx->dma_addr + offset; - config_host_addr->num_output_queues = cfg->num_output_queues; - - /* copy firmware specific data */ - offset += outq_size; - specific_host_addr = ctx->dma_buffer + offset; - config_host_addr->specific_addr = ctx->dma_addr + offset; - config_host_addr->specific_size = cfg->specific_size; - if (cfg->specific_addr && cfg->specific_size) - memcpy(specific_host_addr, cfg->specific_addr, - cfg->specific_size); - - 
ipu6_dma_sync_single(adev, ctx->config_vied_addr, sizeall); - - /* initialize input queues */ - offset += specific_size; - res.reg = SYSCOM_QPR_BASE_REG; - res.host_address = (u64)(ctx->dma_buffer + offset); - res.vied_address = ctx->dma_addr + offset; - for (i = 0; i < cfg->num_input_queues; i++) - ipu6_sys_queue_init(ctx->input_queue + i, - cfg->input[i].queue_size, - cfg->input[i].token_size, &res); - - /* initialize output queues */ - offset += sizeinput; - res.host_address = (u64)(ctx->dma_buffer + offset); - res.vied_address = ctx->dma_addr + offset; - for (i = 0; i < cfg->num_output_queues; i++) { - ipu6_sys_queue_init(ctx->output_queue + i, - cfg->output[i].queue_size, - cfg->output[i].token_size, &res); - } - - return ctx; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6); - -int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) -{ - /* write magic pattern to disable the tunit trace */ - writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); - /* Check if SP is in valid state */ - if (!ctx->cell_ready(ctx->adev)) - return -EIO; - - /* store syscom uninitialized command */ - writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); - - /* store syscom uninitialized state */ - writel(SYSCOM_STATE_UNINIT, - BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_STATE_ID)); - - /* store firmware configuration address */ - writel(ctx->config_vied_addr, - BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_CONFIG_ID)); - ctx->cell_start(ctx->adev); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6); - -int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx) -{ - int state; - - state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_STATE_ID)); - if (state != SYSCOM_STATE_READY) - return -EBUSY; - - /* set close request flag */ - writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + - SYSCOM_COMMAND_REG * 4); - - return 0; 
-} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6); - -int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force) -{ - /* check if release is forced, an verify cell state if it is not */ - if (!force && !ctx->cell_ready(ctx->adev)) - return -EBUSY; - - ipu6_dma_free(ctx->adev, ctx->dma_size, - ctx->dma_buffer, ctx->dma_addr, 0); - kfree(ctx); - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6); - -bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx) -{ - int state; - - state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_STATE_ID)); - - return state == SYSCOM_STATE_READY; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6); - -void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int wr, rd; - unsigned int packets; - unsigned int index; - - wr = readl(q_dmem + FW_COM_WR_REG); - rd = readl(q_dmem + FW_COM_RD_REG); - - if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) - return NULL; - - if (wr < rd) - packets = rd - wr - 1; - else - packets = q->size - (wr - rd + 1); - - if (!packets) - return NULL; - - index = readl(q_dmem + FW_COM_WR_REG); - - return (void *)(q->host_address + index * q->token_size); -} -EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6); - -void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1; - - if (wr >= q->size) - wr = 0; - - writel(wr, q_dmem + FW_COM_WR_REG); -} -EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6); - -void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int wr, 
rd; - unsigned int packets; - - wr = readl(q_dmem + FW_COM_WR_REG); - rd = readl(q_dmem + FW_COM_RD_REG); - - if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) - return NULL; - - if (wr < rd) - wr += q->size; - - packets = wr - rd; - if (!packets) - return NULL; - - return (void *)(q->host_address + rd * q->token_size); -} -EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6); - -void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1; - - if (rd >= q->size) - rd = 0; - - writel(rd, q_dmem + FW_COM_RD_REG); -} -EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6); diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h deleted file mode 100644 index b02285a..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_FW_COM_H -#define IPU6_FW_COM_H - -struct ipu6_fw_com_context; -struct ipu6_bus_device; - -struct ipu6_fw_syscom_queue_config { - unsigned int queue_size; /* tokens per queue */ - unsigned int token_size; /* bytes per token */ -}; - -#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0 - -struct ipu6_fw_com_cfg { - unsigned int num_input_queues; - unsigned int num_output_queues; - struct ipu6_fw_syscom_queue_config *input; - struct ipu6_fw_syscom_queue_config *output; - - unsigned int dmem_addr; - - /* firmware-specific configuration data */ - void *specific_addr; - unsigned int specific_size; - int (*cell_ready)(struct ipu6_bus_device *adev); - void (*cell_start)(struct ipu6_bus_device *adev); - - unsigned int buttress_boot_offset; -}; - -void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, - struct ipu6_bus_device *adev, void __iomem *base); - -int 
ipu6_fw_com_open(struct ipu6_fw_com_context *ctx); -bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx); -int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx); -int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force); - -void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); -void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); -void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); -void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); - -#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c deleted file mode 100644 index 62ed92f..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c +++ /dev/null @@ -1,487 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-fw-com.h" -#include "ipu6-isys.h" -#include "ipu6-platform-isys-csi2-reg.h" -#include "ipu6-platform-regs.h" - -static const char send_msg_types[N_IPU6_FW_ISYS_SEND_TYPE][32] = { - "STREAM_OPEN", - "STREAM_START", - "STREAM_START_AND_CAPTURE", - "STREAM_CAPTURE", - "STREAM_STOP", - "STREAM_FLUSH", - "STREAM_CLOSE" -}; - -static int handle_proxy_response(struct ipu6_isys *isys, unsigned int req_id) -{ - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_fw_isys_proxy_resp_info_abi *resp; - int ret; - - resp = ipu6_recv_get_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); - if (!resp) - return 1; - - dev_dbg(dev, "Proxy response: id %u, error %u, details %u\n", - resp->request_id, resp->error_info.error, - resp->error_info.error_details); - - ret = req_id == resp->request_id ? 
0 : -EIO; - - ipu6_recv_put_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); - - return ret; -} - -int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, - unsigned int req_id, - unsigned int index, - unsigned int offset, u32 value) -{ - struct ipu6_fw_com_context *ctx = isys->fwcom; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_fw_proxy_send_queue_token *token; - unsigned int timeout = 1000; - int ret; - - dev_dbg(dev, - "proxy send: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n", - req_id, index, offset, value); - - token = ipu6_send_get_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); - if (!token) - return -EBUSY; - - token->request_id = req_id; - token->region_index = index; - token->offset = offset; - token->value = value; - ipu6_send_put_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); - - do { - usleep_range(100, 110); - ret = handle_proxy_response(isys, req_id); - if (!ret) - break; - if (ret == -EIO) { - dev_err(dev, "Proxy respond with unexpected id\n"); - break; - } - timeout--; - } while (ret && timeout); - - if (!timeout) - dev_err(dev, "Proxy response timed out\n"); - - return ret; -} - -int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, - void *cpu_mapped_buf, - dma_addr_t dma_mapped_buf, - size_t size, u16 send_type) -{ - struct ipu6_fw_com_context *ctx = isys->fwcom; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_fw_send_queue_token *token; - - if (send_type >= N_IPU6_FW_ISYS_SEND_TYPE) - return -EINVAL; - - dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]); - - /* - * Time to flush cache in case we have some payload. 
Not all messages - * have that - */ - if (cpu_mapped_buf) - clflush_cache_range(cpu_mapped_buf, size); - - token = ipu6_send_get_token(ctx, - stream_handle + IPU6_BASE_MSG_SEND_QUEUES); - if (!token) - return -EBUSY; - - token->payload = dma_mapped_buf; - token->buf_handle = (unsigned long)cpu_mapped_buf; - token->send_type = send_type; - - ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES); - - return 0; -} - -int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, u16 send_type) -{ - return ipu6_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, - send_type); -} - -int ipu6_fw_isys_close(struct ipu6_isys *isys) -{ - struct device *dev = &isys->adev->auxdev.dev; - int retry = IPU6_ISYS_CLOSE_RETRY; - unsigned long flags; - void *fwcom; - int ret; - - /* - * Stop the isys fw. Actual close takes - * some time as the FW must stop its actions including code fetch - * to SP icache. - * spinlock to wait the interrupt handler to be finished - */ - spin_lock_irqsave(&isys->power_lock, flags); - ret = ipu6_fw_com_close(isys->fwcom); - fwcom = isys->fwcom; - isys->fwcom = NULL; - spin_unlock_irqrestore(&isys->power_lock, flags); - if (ret) - dev_err(dev, "Device close failure: %d\n", ret); - - /* release probably fails if the close failed. 
Let's try still */ - do { - usleep_range(400, 500); - ret = ipu6_fw_com_release(fwcom, 0); - retry--; - } while (ret && retry); - - if (ret) { - dev_err(dev, "Device release time out %d\n", ret); - spin_lock_irqsave(&isys->power_lock, flags); - isys->fwcom = fwcom; - spin_unlock_irqrestore(&isys->power_lock, flags); - } - - return ret; -} - -void ipu6_fw_isys_cleanup(struct ipu6_isys *isys) -{ - int ret; - - ret = ipu6_fw_com_release(isys->fwcom, 1); - if (ret < 0) - dev_warn(&isys->adev->auxdev.dev, - "Device busy, fw_com release failed."); - isys->fwcom = NULL; -} - -static void start_sp(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - void __iomem *spc_regs_base = isys->pdata->base + - isys->pdata->ipdata->hw_variant.spc_offset; - u32 val = IPU6_ISYS_SPC_STATUS_START | - IPU6_ISYS_SPC_STATUS_RUN | - IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; - - val |= isys->icache_prefetch ? IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; - - writel(val, spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); -} - -static int query_sp(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - void __iomem *spc_regs_base = isys->pdata->base + - isys->pdata->ipdata->hw_variant.spc_offset; - u32 val; - - val = readl(spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); - /* return true when READY == 1, START == 0 */ - val &= IPU6_ISYS_SPC_STATUS_READY | IPU6_ISYS_SPC_STATUS_START; - - return val == IPU6_ISYS_SPC_STATUS_READY; -} - -static int ipu6_isys_fwcom_cfg_init(struct ipu6_isys *isys, - struct ipu6_fw_com_cfg *fwcom, - unsigned int num_streams) -{ - unsigned int max_send_queues, max_sram_blocks, max_devq_size; - struct ipu6_fw_syscom_queue_config *input_queue_cfg; - struct ipu6_fw_syscom_queue_config *output_queue_cfg; - struct device *dev = &isys->adev->auxdev.dev; - int type_proxy = IPU6_FW_ISYS_QUEUE_TYPE_PROXY; - int type_dev = IPU6_FW_ISYS_QUEUE_TYPE_DEV; - int type_msg = IPU6_FW_ISYS_QUEUE_TYPE_MSG; - int 
base_dev_send = IPU6_BASE_DEV_SEND_QUEUES; - int base_msg_send = IPU6_BASE_MSG_SEND_QUEUES; - int base_msg_recv = IPU6_BASE_MSG_RECV_QUEUES; - struct ipu6_fw_isys_fw_config *isys_fw_cfg; - u32 num_in_message_queues; - unsigned int max_streams; - unsigned int size; - unsigned int i; - - max_streams = isys->pdata->ipdata->max_streams; - max_send_queues = isys->pdata->ipdata->max_send_queues; - max_sram_blocks = isys->pdata->ipdata->max_sram_blocks; - max_devq_size = isys->pdata->ipdata->max_devq_size; - num_in_message_queues = clamp(num_streams, 1U, max_streams); - isys_fw_cfg = devm_kzalloc(dev, sizeof(*isys_fw_cfg), GFP_KERNEL); - if (!isys_fw_cfg) - return -ENOMEM; - - isys_fw_cfg->num_send_queues[type_proxy] = IPU6_N_MAX_PROXY_SEND_QUEUES; - isys_fw_cfg->num_send_queues[type_dev] = IPU6_N_MAX_DEV_SEND_QUEUES; - isys_fw_cfg->num_send_queues[type_msg] = num_in_message_queues; - isys_fw_cfg->num_recv_queues[type_proxy] = IPU6_N_MAX_PROXY_RECV_QUEUES; - /* Common msg/dev return queue */ - isys_fw_cfg->num_recv_queues[type_dev] = 0; - isys_fw_cfg->num_recv_queues[type_msg] = 1; - - size = sizeof(*input_queue_cfg) * max_send_queues; - input_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); - if (!input_queue_cfg) - return -ENOMEM; - - size = sizeof(*output_queue_cfg) * IPU6_N_MAX_RECV_QUEUES; - output_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); - if (!output_queue_cfg) - return -ENOMEM; - - fwcom->input = input_queue_cfg; - fwcom->output = output_queue_cfg; - - fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] + - isys_fw_cfg->num_send_queues[type_dev] + - isys_fw_cfg->num_send_queues[type_msg]; - - fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] + - isys_fw_cfg->num_recv_queues[type_dev] + - isys_fw_cfg->num_recv_queues[type_msg]; - - /* SRAM partitioning. Equal partitioning is set. 
*/ - for (i = 0; i < max_sram_blocks; i++) { - if (i < num_in_message_queues) - isys_fw_cfg->buffer_partition.num_gda_pages[i] = - (IPU6_DEVICE_GDA_NR_PAGES * - IPU6_DEVICE_GDA_VIRT_FACTOR) / - num_in_message_queues; - else - isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0; - } - - /* FW assumes proxy interface at fwcom queue 0 */ - for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) { - input_queue_cfg[i].token_size = - sizeof(struct ipu6_fw_proxy_send_queue_token); - input_queue_cfg[i].queue_size = IPU6_ISYS_SIZE_PROXY_SEND_QUEUE; - } - - for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) { - input_queue_cfg[base_dev_send + i].token_size = - sizeof(struct ipu6_fw_send_queue_token); - input_queue_cfg[base_dev_send + i].queue_size = max_devq_size; - } - - for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) { - input_queue_cfg[base_msg_send + i].token_size = - sizeof(struct ipu6_fw_send_queue_token); - input_queue_cfg[base_msg_send + i].queue_size = - IPU6_ISYS_SIZE_SEND_QUEUE; - } - - for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) { - output_queue_cfg[i].token_size = - sizeof(struct ipu6_fw_proxy_resp_queue_token); - output_queue_cfg[i].queue_size = - IPU6_ISYS_SIZE_PROXY_RECV_QUEUE; - } - /* There is no recv DEV queue */ - for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) { - output_queue_cfg[base_msg_recv + i].token_size = - sizeof(struct ipu6_fw_resp_queue_token); - output_queue_cfg[base_msg_recv + i].queue_size = - IPU6_ISYS_SIZE_RECV_QUEUE; - } - - fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset; - fwcom->specific_addr = isys_fw_cfg; - fwcom->specific_size = sizeof(*isys_fw_cfg); - - return 0; -} - -int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams) -{ - struct device *dev = &isys->adev->auxdev.dev; - int retry = IPU6_ISYS_OPEN_RETRY; - struct ipu6_fw_com_cfg fwcom = { - .cell_start = start_sp, - .cell_ready = query_sp, - .buttress_boot_offset = 
SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET, - }; - int ret; - - ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams); - - isys->fwcom = ipu6_fw_com_prepare(&fwcom, isys->adev, - isys->pdata->base); - if (!isys->fwcom) { - dev_err(dev, "isys fw com prepare failed\n"); - return -EIO; - } - - ret = ipu6_fw_com_open(isys->fwcom); - if (ret) { - dev_err(dev, "isys fw com open failed %d\n", ret); - return ret; - } - - do { - usleep_range(400, 500); - if (ipu6_fw_com_ready(isys->fwcom)) - break; - retry--; - } while (retry > 0); - - if (!retry) { - dev_err(dev, "isys port open ready failed %d\n", ret); - ipu6_fw_isys_close(isys); - ret = -EIO; - } - - return ret; -} - -struct ipu6_fw_isys_resp_info_abi * -ipu6_fw_isys_get_resp(void *context, unsigned int queue) -{ - return ipu6_recv_get_token(context, queue); -} - -void ipu6_fw_isys_put_resp(void *context, unsigned int queue) -{ - ipu6_recv_put_token(context, queue); -} - -void ipu6_fw_isys_dump_stream_cfg(struct device *dev, - struct ipu6_fw_isys_stream_cfg_data_abi *cfg) -{ - unsigned int i; - - dev_dbg(dev, "-----------------------------------------------------\n"); - dev_dbg(dev, "IPU6_FW_ISYS_STREAM_CFG_DATA\n"); - - dev_dbg(dev, "compfmt = %d\n", cfg->vc); - dev_dbg(dev, "src = %d\n", cfg->src); - dev_dbg(dev, "vc = %d\n", cfg->vc); - dev_dbg(dev, "isl_use = %d\n", cfg->isl_use); - dev_dbg(dev, "sensor_type = %d\n", cfg->sensor_type); - - dev_dbg(dev, "send_irq_sof_discarded = %d\n", - cfg->send_irq_sof_discarded); - dev_dbg(dev, "send_irq_eof_discarded = %d\n", - cfg->send_irq_eof_discarded); - dev_dbg(dev, "send_resp_sof_discarded = %d\n", - cfg->send_resp_sof_discarded); - dev_dbg(dev, "send_resp_eof_discarded = %d\n", - cfg->send_resp_eof_discarded); - - dev_dbg(dev, "crop:\n"); - dev_dbg(dev, "\t.left_top = [%d, %d]\n", cfg->crop.left_offset, - cfg->crop.top_offset); - dev_dbg(dev, "\t.right_bottom = [%d, %d]\n", cfg->crop.right_offset, - cfg->crop.bottom_offset); - - dev_dbg(dev, "nof_input_pins = %d\n", 
cfg->nof_input_pins); - for (i = 0; i < cfg->nof_input_pins; i++) { - dev_dbg(dev, "input pin[%d]:\n", i); - dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt); - dev_dbg(dev, "\t.mipi_store_mode = %d\n", - cfg->input_pins[i].mipi_store_mode); - dev_dbg(dev, "\t.bits_per_pix = %d\n", - cfg->input_pins[i].bits_per_pix); - dev_dbg(dev, "\t.mapped_dt = 0x%0x\n", - cfg->input_pins[i].mapped_dt); - dev_dbg(dev, "\t.input_res = %dx%d\n", - cfg->input_pins[i].input_res.width, - cfg->input_pins[i].input_res.height); - dev_dbg(dev, "\t.mipi_decompression = %d\n", - cfg->input_pins[i].mipi_decompression); - dev_dbg(dev, "\t.capture_mode = %d\n", - cfg->input_pins[i].capture_mode); - } - - dev_dbg(dev, "nof_output_pins = %d\n", cfg->nof_output_pins); - for (i = 0; i < cfg->nof_output_pins; i++) { - dev_dbg(dev, "output_pin[%d]:\n", i); - dev_dbg(dev, "\t.input_pin_id = %d\n", - cfg->output_pins[i].input_pin_id); - dev_dbg(dev, "\t.output_res = %dx%d\n", - cfg->output_pins[i].output_res.width, - cfg->output_pins[i].output_res.height); - dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride); - dev_dbg(dev, "\t.pt = %d\n", cfg->output_pins[i].pt); - dev_dbg(dev, "\t.payload_buf_size = %d\n", - cfg->output_pins[i].payload_buf_size); - dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft); - dev_dbg(dev, "\t.watermark_in_lines = %d\n", - cfg->output_pins[i].watermark_in_lines); - dev_dbg(dev, "\t.send_irq = %d\n", - cfg->output_pins[i].send_irq); - dev_dbg(dev, "\t.reserve_compression = %d\n", - cfg->output_pins[i].reserve_compression); - dev_dbg(dev, "\t.snoopable = %d\n", - cfg->output_pins[i].snoopable); - dev_dbg(dev, "\t.error_handling_enable = %d\n", - cfg->output_pins[i].error_handling_enable); - dev_dbg(dev, "\t.sensor_type = %d\n", - cfg->output_pins[i].sensor_type); - } - dev_dbg(dev, "-----------------------------------------------------\n"); -} - -void -ipu6_fw_isys_dump_frame_buff_set(struct device *dev, - struct ipu6_fw_isys_frame_buff_set_abi *buf, - 
unsigned int outputs) -{ - unsigned int i; - - dev_dbg(dev, "-----------------------------------------------------\n"); - dev_dbg(dev, "IPU6_FW_ISYS_FRAME_BUFF_SET\n"); - - for (i = 0; i < outputs; i++) { - dev_dbg(dev, "output_pin[%d]:\n", i); - dev_dbg(dev, "\t.out_buf_id = %llu\n", - buf->output_pins[i].out_buf_id); - dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr); - dev_dbg(dev, "\t.compress = %d\n", - buf->output_pins[i].compress); - } - - dev_dbg(dev, "send_irq_sof = 0x%x\n", buf->send_irq_sof); - dev_dbg(dev, "send_irq_eof = 0x%x\n", buf->send_irq_eof); - dev_dbg(dev, "send_resp_sof = 0x%x\n", buf->send_resp_sof); - dev_dbg(dev, "send_resp_eof = 0x%x\n", buf->send_resp_eof); - dev_dbg(dev, "send_irq_capture_ack = 0x%x\n", - buf->send_irq_capture_ack); - dev_dbg(dev, "send_irq_capture_done = 0x%x\n", - buf->send_irq_capture_done); - dev_dbg(dev, "send_resp_capture_ack = 0x%x\n", - buf->send_resp_capture_ack); - dev_dbg(dev, "send_resp_capture_done = 0x%x\n", - buf->send_resp_capture_done); - - dev_dbg(dev, "-----------------------------------------------------\n"); -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h deleted file mode 100644 index b60f020..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +++ /dev/null @@ -1,596 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_FW_ISYS_H -#define IPU6_FW_ISYS_H - -#include - -struct device; -struct ipu6_isys; - -/* Max number of Input/Output Pins */ -#define IPU6_MAX_IPINS 4 - -#define IPU6_MAX_OPINS ((IPU6_MAX_IPINS) + 1) - -#define IPU6_STREAM_ID_MAX 16 -#define IPU6_NONSECURE_STREAM_ID_MAX 12 -#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX) -#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX) -#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX) -#define IPU6SE_STREAM_ID_MAX 8 -#define IPU6SE_NONSECURE_STREAM_ID_MAX 4 -#define 
IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX) -#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX) -#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX) - -/* Single return queue for all streams/commands type */ -#define IPU6_N_MAX_MSG_RECV_QUEUES 1 -/* Single device queue for high priority commands (bypass in-order queue) */ -#define IPU6_N_MAX_DEV_SEND_QUEUES 1 -/* Single dedicated send queue for proxy interface */ -#define IPU6_N_MAX_PROXY_SEND_QUEUES 1 -/* Single dedicated recv queue for proxy interface */ -#define IPU6_N_MAX_PROXY_RECV_QUEUES 1 -/* Send queues layout */ -#define IPU6_BASE_PROXY_SEND_QUEUES 0 -#define IPU6_BASE_DEV_SEND_QUEUES \ - (IPU6_BASE_PROXY_SEND_QUEUES + IPU6_N_MAX_PROXY_SEND_QUEUES) -#define IPU6_BASE_MSG_SEND_QUEUES \ - (IPU6_BASE_DEV_SEND_QUEUES + IPU6_N_MAX_DEV_SEND_QUEUES) -/* Recv queues layout */ -#define IPU6_BASE_PROXY_RECV_QUEUES 0 -#define IPU6_BASE_MSG_RECV_QUEUES \ - (IPU6_BASE_PROXY_RECV_QUEUES + IPU6_N_MAX_PROXY_RECV_QUEUES) -#define IPU6_N_MAX_RECV_QUEUES \ - (IPU6_BASE_MSG_RECV_QUEUES + IPU6_N_MAX_MSG_RECV_QUEUES) - -#define IPU6_N_MAX_SEND_QUEUES \ - (IPU6_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES) -#define IPU6SE_N_MAX_SEND_QUEUES \ - (IPU6_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES) - -/* Max number of planes for frame formats supported by the FW */ -#define IPU6_PIN_PLANES_MAX 4 - -#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 -#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 -#define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 -#define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 -/* - * Device close takes some time from last ack message to actual stopping - * of the SP processor. As long as the SP processor runs we can't proceed with - * clean up of resources. 
- */ -#define IPU6_ISYS_OPEN_RETRY 2000 -#define IPU6_ISYS_CLOSE_RETRY 2000 -#define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) - -enum ipu6_fw_isys_resp_type { - IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, - IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, - IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, - IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, - IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, - IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, - IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, - IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, - IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, - IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, - IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, - N_IPU6_FW_ISYS_RESP_TYPE -}; - -enum ipu6_fw_isys_send_type { - IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, - IPU6_FW_ISYS_SEND_TYPE_STREAM_START, - IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, - IPU6_FW_ISYS_SEND_TYPE_STREAM_STOP, - IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE, - N_IPU6_FW_ISYS_SEND_TYPE -}; - -enum ipu6_fw_isys_queue_type { - IPU6_FW_ISYS_QUEUE_TYPE_PROXY = 0, - IPU6_FW_ISYS_QUEUE_TYPE_DEV, - IPU6_FW_ISYS_QUEUE_TYPE_MSG, - N_IPU6_FW_ISYS_QUEUE_TYPE -}; - -enum ipu6_fw_isys_stream_source { - IPU6_FW_ISYS_STREAM_SRC_PORT_0 = 0, - IPU6_FW_ISYS_STREAM_SRC_PORT_1, - IPU6_FW_ISYS_STREAM_SRC_PORT_2, - IPU6_FW_ISYS_STREAM_SRC_PORT_3, - IPU6_FW_ISYS_STREAM_SRC_PORT_4, - IPU6_FW_ISYS_STREAM_SRC_PORT_5, - IPU6_FW_ISYS_STREAM_SRC_PORT_6, - IPU6_FW_ISYS_STREAM_SRC_PORT_7, - IPU6_FW_ISYS_STREAM_SRC_PORT_8, - IPU6_FW_ISYS_STREAM_SRC_PORT_9, - IPU6_FW_ISYS_STREAM_SRC_PORT_10, - IPU6_FW_ISYS_STREAM_SRC_PORT_11, - 
IPU6_FW_ISYS_STREAM_SRC_PORT_12, - IPU6_FW_ISYS_STREAM_SRC_PORT_13, - IPU6_FW_ISYS_STREAM_SRC_PORT_14, - IPU6_FW_ISYS_STREAM_SRC_PORT_15, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_2, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_3, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_4, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_5, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_6, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_7, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_8, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_9, - N_IPU6_FW_ISYS_STREAM_SRC -}; - -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU6_FW_ISYS_STREAM_SRC_PORT_0 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU6_FW_ISYS_STREAM_SRC_PORT_1 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU6_FW_ISYS_STREAM_SRC_PORT_2 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU6_FW_ISYS_STREAM_SRC_PORT_3 - -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU6_FW_ISYS_STREAM_SRC_PORT_4 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU6_FW_ISYS_STREAM_SRC_PORT_5 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_6 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_7 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_8 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_9 - -#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0 -#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1 - -/* - * enum ipu6_fw_isys_mipi_vc: MIPI csi2 spec - * supports up to 4 virtual per physical channel - */ -enum ipu6_fw_isys_mipi_vc { - IPU6_FW_ISYS_MIPI_VC_0 = 0, - IPU6_FW_ISYS_MIPI_VC_1, - IPU6_FW_ISYS_MIPI_VC_2, - IPU6_FW_ISYS_MIPI_VC_3, - N_IPU6_FW_ISYS_MIPI_VC -}; - -enum ipu6_fw_isys_frame_format_type { - IPU6_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ - IPU6_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ - 
IPU6_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ - /* 12 bit YUV 420, Intel proprietary tiled format */ - IPU6_FW_ISYS_FRAME_FORMAT_NV12_TILEY, - - IPU6_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ - IPU6_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ - IPU6_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ - IPU6_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ - /* Internal format, 2 y lines followed by a uvinterleaved line */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV_LINE, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ - /** - * 16 bit RGB, 1 plane. Each 3 sub pixels are packed into one 16 bit - * value, 5 bits for R, 6 bits for G and 5 bits for B. 
- */ - IPU6_FW_ISYS_FRAME_FORMAT_RGB565, - IPU6_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ - IPU6_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha */ - IPU6_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ - IPU6_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ - N_IPU6_FW_ISYS_FRAME_FORMAT -}; - -enum ipu6_fw_isys_pin_type { - /* captured as MIPI packets */ - IPU6_FW_ISYS_PIN_TYPE_MIPI = 0, - /* captured through the SoC path */ - IPU6_FW_ISYS_PIN_TYPE_RAW_SOC = 3, -}; - -/* - * enum ipu6_fw_isys_mipi_store_mode. Describes if long MIPI packets reach - * MIPI SRAM with the long packet header or - * if not, then only option is to capture it with pin type MIPI. - */ -enum ipu6_fw_isys_mipi_store_mode { - IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, - IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, - N_IPU6_FW_ISYS_MIPI_STORE_MODE -}; - -enum ipu6_fw_isys_capture_mode { - IPU6_FW_ISYS_CAPTURE_MODE_REGULAR = 0, - IPU6_FW_ISYS_CAPTURE_MODE_BURST, - N_IPU6_FW_ISYS_CAPTURE_MODE, -}; - -enum ipu6_fw_isys_sensor_mode { - IPU6_FW_ISYS_SENSOR_MODE_NORMAL = 0, - IPU6_FW_ISYS_SENSOR_MODE_TOBII, - N_IPU6_FW_ISYS_SENSOR_MODE, -}; - -enum ipu6_fw_isys_error { - IPU6_FW_ISYS_ERROR_NONE = 0, - IPU6_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, - IPU6_FW_ISYS_ERROR_HW_CONSISTENCY, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, - IPU6_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, - IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, - IPU6_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, - IPU6_FW_ISYS_ERROR_SENSOR_FW_SYNC, - IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, - IPU6_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, - N_IPU6_FW_ISYS_ERROR -}; - -enum ipu6_fw_proxy_error { - IPU6_FW_PROXY_ERROR_NONE = 0, - IPU6_FW_PROXY_ERROR_INVALID_WRITE_REGION, - IPU6_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, - 
N_IPU6_FW_PROXY_ERROR -}; - -/* firmware ABI structure below are aligned in firmware, no need pack */ -struct ipu6_fw_isys_buffer_partition_abi { - u32 num_gda_pages[IPU6_STREAM_ID_MAX]; -}; - -struct ipu6_fw_isys_fw_config { - struct ipu6_fw_isys_buffer_partition_abi buffer_partition; - u32 num_send_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; - u32 num_recv_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; -}; - -/* - * struct ipu6_fw_isys_resolution_abi: Generic resolution structure. - */ -struct ipu6_fw_isys_resolution_abi { - u32 width; - u32 height; -}; - -/** - * struct ipu6_fw_isys_output_pin_payload_abi - ISYS output pin buffer - * @out_buf_id: Points to output pin buffer - buffer identifier - * @addr: Points to output pin buffer - CSS Virtual Address - * @compress: Request frame compression (1), or not (0) - */ -struct ipu6_fw_isys_output_pin_payload_abi { - u64 out_buf_id; - u32 addr; - u32 compress; -}; - -/** - * struct ipu6_fw_isys_output_pin_info_abi - ISYS output pin info - * @output_res: output pin resolution - * @stride: output stride in Bytes (not valid for statistics) - * @watermark_in_lines: pin watermark level in lines - * @payload_buf_size: minimum size in Bytes of all buffers that will be - * supplied for capture on this pin - * @ts_offsets: ts_offsets - * @s2m_pixel_soc_pixel_remapping: pixel soc remapping (see the definition of - * S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING) - * @csi_be_soc_pixel_remapping: see s2m_pixel_soc_pixel_remapping - * @send_irq: assert if pin event should trigger irq - * @input_pin_id: related input pin id - * @pt: pin type -real format "enum ipu6_fw_isys_pin_type" - * @ft: frame format type -real format "enum ipu6_fw_isys_frame_format_type" - * @reserved: a reserved field - * @reserve_compression: reserve compression resources for pin - * @snoopable: snoopable - * @error_handling_enable: enable error handling - * @sensor_type: sensor_type - */ -struct ipu6_fw_isys_output_pin_info_abi { - struct ipu6_fw_isys_resolution_abi 
output_res; - u32 stride; - u32 watermark_in_lines; - u32 payload_buf_size; - u32 ts_offsets[IPU6_PIN_PLANES_MAX]; - u32 s2m_pixel_soc_pixel_remapping; - u32 csi_be_soc_pixel_remapping; - u8 send_irq; - u8 input_pin_id; - u8 pt; - u8 ft; - u8 reserved; - u8 reserve_compression; - u8 snoopable; - u8 error_handling_enable; - u32 sensor_type; -}; - -/** - * struct ipu6_fw_isys_input_pin_info_abi - ISYS input pin info - * @input_res: input resolution - * @dt: mipi data type ((enum ipu6_fw_isys_mipi_data_type) - * @mipi_store_mode: defines if legacy long packet header will be stored or - * discarded if discarded, output pin type for this - * input pin can only be MIPI - * (enum ipu6_fw_isys_mipi_store_mode) - * @bits_per_pix: native bits per pixel - * @mapped_dt: actual data type from sensor - * @mipi_decompression: defines which compression will be in mipi backend - * @crop_first_and_last_lines: Control whether to crop the first and last line - * of the input image. Crop done by HW device. - * @capture_mode: mode of capture, regular or burst, default value is regular - * @reserved: a reserved field - */ -struct ipu6_fw_isys_input_pin_info_abi { - struct ipu6_fw_isys_resolution_abi input_res; - u8 dt; - u8 mipi_store_mode; - u8 bits_per_pix; - u8 mapped_dt; - u8 mipi_decompression; - u8 crop_first_and_last_lines; - u8 capture_mode; - u8 reserved; -}; - -/** - * struct ipu6_fw_isys_cropping_abi - ISYS cropping coordinates - * @top_offset: Top offset - * @left_offset: Left offset - * @bottom_offset: Bottom offset - * @right_offset: Right offset - */ -struct ipu6_fw_isys_cropping_abi { - s32 top_offset; - s32 left_offset; - s32 bottom_offset; - s32 right_offset; -}; - -/** - * struct ipu6_fw_isys_stream_cfg_data_abi - ISYS stream configuration data - * ISYS stream configuration data structure - * @crop: for extended use and is not used in FW currently - * @input_pins: input pin descriptors - * @output_pins: output pin descriptors - * @compfmt: de-compression setting for 
User Defined Data - * @nof_input_pins: number of input pins - * @nof_output_pins: number of output pins - * @send_irq_sof_discarded: send irq on discarded frame sof response - * - if '1' it will override the send_resp_sof_discarded - * and send the response - * - if '0' the send_resp_sof_discarded will determine - * whether to send the response - * @send_irq_eof_discarded: send irq on discarded frame eof response - * - if '1' it will override the send_resp_eof_discarded - * and send the response - * - if '0' the send_resp_eof_discarded will determine - * whether to send the response - * @send_resp_sof_discarded: send response for discarded frame sof detected, - * used only when send_irq_sof_discarded is '0' - * @send_resp_eof_discarded: send response for discarded frame eof detected, - * used only when send_irq_eof_discarded is '0' - * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1 - * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) - * @isl_use: indicates whether stream requires ISL and how - * @sensor_type: type of connected sensor, tobii or others, default is 0 - * @reserved: a reserved field - * @reserved2: a reserved field - */ -struct ipu6_fw_isys_stream_cfg_data_abi { - struct ipu6_fw_isys_cropping_abi crop; - struct ipu6_fw_isys_input_pin_info_abi input_pins[IPU6_MAX_IPINS]; - struct ipu6_fw_isys_output_pin_info_abi output_pins[IPU6_MAX_OPINS]; - u32 compfmt; - u8 nof_input_pins; - u8 nof_output_pins; - u8 send_irq_sof_discarded; - u8 send_irq_eof_discarded; - u8 send_resp_sof_discarded; - u8 send_resp_eof_discarded; - u8 src; - u8 vc; - u8 isl_use; - u8 sensor_type; - u8 reserved; - u8 reserved2; -}; - -/** - * struct ipu6_fw_isys_frame_buff_set_abi - ISYS frame buffer set (request) - * @output_pins: output pin addresses - * @send_irq_sof: send irq on frame sof response - * - if '1' it will override the send_resp_sof and - * send the response - * - if '0' the send_resp_sof will determine whether to - * send the response - * 
@send_irq_eof: send irq on frame eof response - * - if '1' it will override the send_resp_eof and - * send the response - * - if '0' the send_resp_eof will determine whether to - * send the response - * @send_irq_capture_ack: send irq on capture ack - * @send_irq_capture_done: send irq on capture done - * @send_resp_sof: send response for frame sof detected, - * used only when send_irq_sof is '0' - * @send_resp_eof: send response for frame eof detected, - * used only when send_irq_eof is '0' - * @send_resp_capture_ack: send response for capture ack event - * @send_resp_capture_done: send response for capture done event - * @reserved: a reserved field - */ -struct ipu6_fw_isys_frame_buff_set_abi { - struct ipu6_fw_isys_output_pin_payload_abi output_pins[IPU6_MAX_OPINS]; - u8 send_irq_sof; - u8 send_irq_eof; - u8 send_irq_capture_ack; - u8 send_irq_capture_done; - u8 send_resp_sof; - u8 send_resp_eof; - u8 send_resp_capture_ack; - u8 send_resp_capture_done; - u8 reserved[8]; -}; - -/** - * struct ipu6_fw_isys_error_info_abi - ISYS error information - * @error: error code if something went wrong - * @error_details: depending on error code, it may contain additional error info - */ -struct ipu6_fw_isys_error_info_abi { - u32 error; - u32 error_details; -}; - -/** - * struct ipu6_fw_isys_resp_info_abi - ISYS firmware response - * @buf_id: buffer ID - * @pin: this var is only valid for pin event related responses, - * contains pin addresses - * @error_info: error information from the FW - * @timestamp: Time information for event if available - * @stream_handle: stream id the response corresponds to - * @type: response type (enum ipu6_fw_isys_resp_type) - * @pin_id: pin id that the pin payload corresponds to - * @reserved: a reserved field - * @reserved2: a reserved field - */ -struct ipu6_fw_isys_resp_info_abi { - u64 buf_id; - struct ipu6_fw_isys_output_pin_payload_abi pin; - struct ipu6_fw_isys_error_info_abi error_info; - u32 timestamp[2]; - u8 stream_handle; - u8 
type; - u8 pin_id; - u8 reserved; - u32 reserved2; -}; - -/** - * struct ipu6_fw_isys_proxy_error_info_abi - ISYS proxy error - * @error: error code if something went wrong - * @error_details: depending on error code, it may contain additional error info - */ -struct ipu6_fw_isys_proxy_error_info_abi { - u32 error; - u32 error_details; -}; - -struct ipu6_fw_isys_proxy_resp_info_abi { - u32 request_id; - struct ipu6_fw_isys_proxy_error_info_abi error_info; -}; - -/** - * struct ipu6_fw_proxy_write_queue_token - ISYS proxy write queue token - * @request_id: update id for the specific proxy write request - * @region_index: Region id for the proxy write request - * @offset: Offset of the write request according to the base address - * of the region - * @value: Value that is requested to be written with the proxy write request - */ -struct ipu6_fw_proxy_write_queue_token { - u32 request_id; - u32 region_index; - u32 offset; - u32 value; -}; - -/** - * struct ipu6_fw_resp_queue_token - ISYS response queue token - * @resp_info: response info - */ -struct ipu6_fw_resp_queue_token { - struct ipu6_fw_isys_resp_info_abi resp_info; -}; - -/** - * struct ipu6_fw_send_queue_token - ISYS send queue token - * @buf_handle: buffer handle - * @payload: payload - * @send_type: send_type - * @stream_id: stream_id - */ -struct ipu6_fw_send_queue_token { - u64 buf_handle; - u32 payload; - u16 send_type; - u16 stream_id; -}; - -/** - * struct ipu6_fw_proxy_resp_queue_token - ISYS proxy response queue token - * @proxy_resp_info: proxy response info - */ -struct ipu6_fw_proxy_resp_queue_token { - struct ipu6_fw_isys_proxy_resp_info_abi proxy_resp_info; -}; - -/** - * struct ipu6_fw_proxy_send_queue_token - SYS proxy send queue token - * @request_id: request_id - * @region_index: region_index - * @offset: offset - * @value: value - */ -struct ipu6_fw_proxy_send_queue_token { - u32 request_id; - u32 region_index; - u32 offset; - u32 value; -}; - -void -ipu6_fw_isys_dump_stream_cfg(struct 
device *dev, - struct ipu6_fw_isys_stream_cfg_data_abi *cfg); -void -ipu6_fw_isys_dump_frame_buff_set(struct device *dev, - struct ipu6_fw_isys_frame_buff_set_abi *buf, - unsigned int outputs); -int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams); -int ipu6_fw_isys_close(struct ipu6_isys *isys); -int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, u16 send_type); -int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, - void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, - size_t size, u16 send_type); -int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, - unsigned int req_id, - unsigned int index, - unsigned int offset, u32 value); -void ipu6_fw_isys_cleanup(struct ipu6_isys *isys); -struct ipu6_fw_isys_resp_info_abi * -ipu6_fw_isys_get_resp(void *context, unsigned int queue); -void ipu6_fw_isys_put_resp(void *context, unsigned int queue); -#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c deleted file mode 100644 index 051898c..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +++ /dev/null @@ -1,649 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-isys-subdev.h" -#include "ipu6-platform-isys-csi2-reg.h" - -static const u32 csi2_supported_codes[] = { - MEDIA_BUS_FMT_RGB565_1X16, - MEDIA_BUS_FMT_RGB888_1X24, - MEDIA_BUS_FMT_UYVY8_1X16, - MEDIA_BUS_FMT_YUYV8_1X16, - MEDIA_BUS_FMT_SBGGR10_1X10, - MEDIA_BUS_FMT_SGBRG10_1X10, - MEDIA_BUS_FMT_SGRBG10_1X10, - MEDIA_BUS_FMT_SRGGB10_1X10, - MEDIA_BUS_FMT_SBGGR12_1X12, - MEDIA_BUS_FMT_SGBRG12_1X12, - MEDIA_BUS_FMT_SGRBG12_1X12, - 
MEDIA_BUS_FMT_SRGGB12_1X12, - MEDIA_BUS_FMT_SBGGR8_1X8, - MEDIA_BUS_FMT_SGBRG8_1X8, - MEDIA_BUS_FMT_SGRBG8_1X8, - MEDIA_BUS_FMT_SRGGB8_1X8, - MEDIA_BUS_FMT_META_8, - MEDIA_BUS_FMT_META_10, - MEDIA_BUS_FMT_META_12, - MEDIA_BUS_FMT_META_16, - MEDIA_BUS_FMT_META_24, - 0 -}; - -/* - * Strings corresponding to CSI-2 receiver errors are here. - * Corresponding macros are defined in the header file. - */ -static const struct ipu6_csi2_error dphy_rx_errors[] = { - { "Single packet header error corrected", true }, - { "Multiple packet header errors detected", true }, - { "Payload checksum (CRC) error", true }, - { "Transfer FIFO overflow", false }, - { "Reserved short packet data type detected", true }, - { "Reserved long packet data type detected", true }, - { "Incomplete long packet detected", false }, - { "Frame sync error", false }, - { "Line sync error", false }, - { "DPHY recoverable synchronization error", true }, - { "DPHY fatal error", false }, - { "DPHY elastic FIFO overflow", false }, - { "Inter-frame short packet discarded", true }, - { "Inter-frame long packet discarded", true }, - { "MIPI pktgen overflow", false }, - { "MIPI pktgen data loss", false }, - { "FIFO overflow", false }, - { "Lane deskew", false }, - { "SOT sync error", false }, - { "HSIDLE detected", false } -}; - -s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2) -{ - struct media_pad *src_pad; - struct v4l2_subdev *ext_sd; - struct device *dev; - - if (!csi2) - return -EINVAL; - - dev = &csi2->isys->adev->auxdev.dev; - src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); - if (IS_ERR(src_pad)) { - dev_err(dev, "can't get source pad of %s (%ld)\n", - csi2->asd.sd.name, PTR_ERR(src_pad)); - return PTR_ERR(src_pad); - } - - ext_sd = media_entity_to_v4l2_subdev(src_pad->entity); - if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name)) - return -ENODEV; - - return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0); -} - -static int csi2_subscribe_event(struct 
v4l2_subdev *sd, struct v4l2_fh *fh, - struct v4l2_event_subscription *sub) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct device *dev = &csi2->isys->adev->auxdev.dev; - - dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n", - sub->type, sub->id); - - switch (sub->type) { - case V4L2_EVENT_FRAME_SYNC: - return v4l2_event_subscribe(fh, sub, 10, NULL); - case V4L2_EVENT_CTRL: - return v4l2_ctrl_subscribe_event(fh, sub); - default: - return -EINVAL; - } -} - -static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { - .subscribe_event = csi2_subscribe_event, - .unsubscribe_event = v4l2_event_subdev_unsubscribe, -}; - -/* - * The input system CSI2+ receiver has several - * parameters affecting the receiver timings. These depend - * on the MIPI bus frequency F in Hz (sensor transmitter rate) - * as follows: - * register value = (A/1e9 + B * UI) / COUNT_ACC - * where - * UI = 1 / (2 * F) in seconds - * COUNT_ACC = counter accuracy in seconds - * COUNT_ACC = 0.125 ns = 1 / 8 ns, ACCINV = 8. - * - * A and B are coefficients from the table below, - * depending whether the register minimum or maximum value is - * calculated. - * Minimum Maximum - * Clock lane A B A B - * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 - * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 - * Data lanes - * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 - * - * We use the minimum values of both A and B. 
- */ - -#define DIV_SHIFT 8 -#define CSI2_ACCINV 8 - -static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv) -{ - return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) - / (s32)(link_freq >> DIV_SHIFT)); -} - -static int -ipu6_isys_csi2_calc_timing(struct ipu6_isys_csi2 *csi2, - struct ipu6_isys_csi2_timing *timing, s32 accinv) -{ - struct device *dev = &csi2->isys->adev->auxdev.dev; - s64 link_freq; - - link_freq = ipu6_isys_csi2_get_link_freq(csi2); - if (link_freq < 0) - return link_freq; - - timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, - CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, - link_freq, accinv); - timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, - CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, - link_freq, accinv); - timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, - CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, - link_freq, accinv); - timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, - CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, - link_freq, accinv); - - dev_dbg(dev, "ctermen %u csettle %u dtermen %u dsettle %u\n", - timing->ctermen, timing->csettle, - timing->dtermen, timing->dsettle); - - return 0; -} - -void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2) -{ - u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - struct ipu6_isys *isys = csi2->isys; - u32 mask; - - mask = isys->pdata->ipdata->csi2.irq_mask; - writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - csi2->receiver_errors |= irq & mask; -} - -void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2) -{ - struct device *dev = &csi2->isys->adev->auxdev.dev; - const struct ipu6_csi2_error *errors; - u32 status; - u32 i; - - /* register errors once more in case of interrupts are disabled */ - ipu6_isys_register_errors(csi2); - status = csi2->receiver_errors; - csi2->receiver_errors = 0; - errors = dphy_rx_errors; - - for (i = 0; i < 
CSI_RX_NUM_ERRORS_IN_IRQ; i++) { - if (status & BIT(i)) - dev_err_ratelimited(dev, "csi2-%i error: %s\n", - csi2->port, errors[i].error_string); - } -} - -static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, - const struct ipu6_isys_csi2_timing *timing, - unsigned int nlanes, int enable) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct ipu6_isys *isys = csi2->isys; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_isys_csi2_config cfg; - unsigned int nports; - int ret = 0; - u32 mask = 0; - u32 i; - - dev_dbg(dev, "stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off", - csi2->port, nlanes); - - cfg.port = csi2->port; - cfg.nlanes = nlanes; - - mask = isys->pdata->ipdata->csi2.irq_mask; - nports = isys->pdata->ipdata->csi2.nports; - - if (!enable) { - writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE); - writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE); - - writel(0, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - writel(0, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - writel(0xffffffff, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - - isys->phy_set_power(isys, &cfg, timing, false); - - writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT - (isys->pdata->ipdata->csi2.fw_access_port_ofs, - csi2->port)); - writel(0, isys->pdata->base + - CSI_REG_HUB_DRV_ACCESS_PORT(csi2->port)); - - return ret; - } - - /* reset port reset */ - writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST); - usleep_range(100, 200); - writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST); - - /* enable port clock */ - for (i = 0; i < nports; i++) { - writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i)); - writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT - 
(isys->pdata->ipdata->csi2.fw_access_port_ofs, i)); - } - - /* enable all error related irq */ - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - - /* - * Using event from firmware instead of irq to handle CSI2 sync event - * which can reduce system wakeups. If CSI2 sync irq enabled, we need - * disable the firmware CSI2 sync event to avoid duplicate handling. - */ - writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); - writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); - writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - - /* configure to enable FE and PPI2CSI */ - writel(0, csi2->base + CSI_REG_CSI_FE_MODE); - writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL); - writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID, - csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL); - writel(FIELD_PREP(PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK, nlanes - 1), - csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF); - - writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE); - writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE); - - ret = isys->phy_set_power(isys, &cfg, timing, true); - if (ret) - dev_err(dev, "csi-%d phy power up failed %d\n", csi2->port, - ret); - - return ret; -} - -static int 
ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct ipu6_isys_csi2_timing timing = { }; - struct v4l2_subdev *remote_sd; - struct media_pad *remote_pad; - u64 sink_streams; - int ret; - - remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); - remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); - - sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, - CSI2_PAD_SINK, - &streams_mask); - - ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); - if (ret) - return ret; - - ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); - if (ret) - return ret; - - ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, - sink_streams); - if (ret) { - ipu6_isys_csi2_set_stream(sd, NULL, 0, false); - return ret; - } - - return 0; -} - -static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) -{ - struct v4l2_subdev *remote_sd; - struct media_pad *remote_pad; - u64 sink_streams; - - sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, - CSI2_PAD_SINK, - &streams_mask); - - remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); - remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); - - ipu6_isys_csi2_set_stream(sd, NULL, 0, false); - - v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); - - return 0; -} - -static int ipu6_isys_csi2_set_sel(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_selection *sel) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct device *dev = &asd->isys->adev->auxdev.dev; - struct v4l2_mbus_framefmt *sink_ffmt; - struct v4l2_mbus_framefmt *src_ffmt; - struct v4l2_rect *crop; - - if (sel->pad == CSI2_PAD_SINK || 
sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, - sel->pad, - sel->stream); - if (!sink_ffmt) - return -EINVAL; - - src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream); - if (!src_ffmt) - return -EINVAL; - - crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); - if (!crop) - return -EINVAL; - - /* Only vertical cropping is supported */ - sel->r.left = 0; - sel->r.width = sink_ffmt->width; - /* Non-bayer formats can't be single line cropped */ - if (!ipu6_isys_is_bayer_format(sink_ffmt->code)) - sel->r.top &= ~1; - sel->r.height = clamp(sel->r.height & ~1, IPU6_ISYS_MIN_HEIGHT, - sink_ffmt->height - sel->r.top); - *crop = sel->r; - - /* update source pad format */ - src_ffmt->width = sel->r.width; - src_ffmt->height = sel->r.height; - if (ipu6_isys_is_bayer_format(sink_ffmt->code)) - src_ffmt->code = ipu6_isys_convert_bayer_order(sink_ffmt->code, - sel->r.left, - sel->r.top); - dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n", - sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height, - src_ffmt->code); - - return 0; -} - -static int ipu6_isys_csi2_get_sel(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_selection *sel) -{ - struct v4l2_mbus_framefmt *sink_ffmt; - struct v4l2_rect *crop; - int ret = 0; - - if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK) - return -EINVAL; - - sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, - sel->pad, - sel->stream); - if (!sink_ffmt) - return -EINVAL; - - crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); - if (!crop) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_DEFAULT: - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = sink_ffmt->width; - sel->r.height = sink_ffmt->height; - break; - case V4L2_SEL_TGT_CROP: - sel->r = *crop; - break; - default: - ret = -EINVAL; - } - - return ret; -} 
- -static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { - .get_fmt = v4l2_subdev_get_fmt, - .set_fmt = ipu6_isys_subdev_set_fmt, - .get_selection = ipu6_isys_csi2_get_sel, - .set_selection = ipu6_isys_csi2_set_sel, - .enum_mbus_code = ipu6_isys_subdev_enum_mbus_code, - .set_routing = ipu6_isys_subdev_set_routing, - .enable_streams = ipu6_isys_csi2_enable_streams, - .disable_streams = ipu6_isys_csi2_disable_streams, -}; - -static const struct v4l2_subdev_ops csi2_sd_ops = { - .core = &csi2_sd_core_ops, - .pad = &csi2_sd_pad_ops, -}; - -static const struct media_entity_operations csi2_entity_ops = { - .link_validate = v4l2_subdev_link_validate, - .has_pad_interdep = v4l2_subdev_has_pad_interdep, -}; - -void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2) -{ - if (!csi2->isys) - return; - - v4l2_device_unregister_subdev(&csi2->asd.sd); - v4l2_subdev_cleanup(&csi2->asd.sd); - ipu6_isys_subdev_cleanup(&csi2->asd); - csi2->isys = NULL; -} - -int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, - struct ipu6_isys *isys, - void __iomem *base, unsigned int index) -{ - struct device *dev = &isys->adev->auxdev.dev; - int ret; - - csi2->isys = isys; - csi2->base = base; - csi2->port = index; - - csi2->asd.sd.entity.ops = &csi2_entity_ops; - csi2->asd.isys = isys; - ret = ipu6_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, - NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS); - if (ret) - goto fail; - - csi2->asd.source = IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index; - csi2->asd.supported_codes = csi2_supported_codes; - snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), - IPU6_ISYS_ENTITY_PREFIX " CSI2 %u", index); - v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); - ret = v4l2_subdev_init_finalize(&csi2->asd.sd); - if (ret) { - dev_err(dev, "failed to init v4l2 subdev\n"); - goto fail; - } - - ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); - if (ret) { - dev_err(dev, "failed to register v4l2 subdev\n"); - goto fail; - } - - return 0; - -fail: - 
ipu6_isys_csi2_cleanup(csi2); - - return ret; -} - -void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) -{ - struct video_device *vdev = stream->asd->sd.devnode; - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - struct v4l2_event ev = { - .type = V4L2_EVENT_FRAME_SYNC, - }; - - ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); - v4l2_event_queue(vdev, &ev); - - dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n", - csi2->port, ev.u.frame_sync.frame_sequence, stream->vc); -} - -void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) -{ - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - u32 frame_sequence = atomic_read(&stream->sequence); - - dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n", - csi2->port, frame_sequence); -} - -int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, - struct v4l2_mbus_frame_desc_entry *entry) -{ - struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; - struct device *dev = &csi2->isys->adev->auxdev.dev; - struct v4l2_mbus_frame_desc desc; - struct v4l2_subdev *source; - struct media_pad *pad; - unsigned int i; - int ret; - - source = media_entity_to_v4l2_subdev(source_entity); - if (!source) - return -EPIPE; - - pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]); - if (!pad) - return -EPIPE; - - ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc); - if (ret) - return ret; - - if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { - dev_err(dev, "Unsupported frame descriptor type\n"); - return -EINVAL; - } - - for (i = 0; i < desc.num_entries; i++) { - if (source_stream == desc.entry[i].stream) { - desc_entry = &desc.entry[i]; - break; - } - } - - if (!desc_entry) { - dev_err(dev, "Failed to find stream %u from remote 
subdev\n", - source_stream); - return -EINVAL; - } - - if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) { - dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc); - return -EINVAL; - } - - *entry = *desc_entry; - - return 0; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h deleted file mode 100644 index bc8594c..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_CSI2_H -#define IPU6_ISYS_CSI2_H - -#include - -#include "ipu6-isys-subdev.h" -#include "ipu6-isys-video.h" - -struct media_entity; -struct v4l2_mbus_frame_desc_entry; - -struct ipu6_isys_video; -struct ipu6_isys; -struct ipu6_isys_csi2_pdata; -struct ipu6_isys_stream; - -#define NR_OF_CSI2_VC 16 -#define INVALID_VC_ID -1 -#define NR_OF_CSI2_SINK_PADS 1 -#define CSI2_PAD_SINK 0 -#define NR_OF_CSI2_SRC_PADS 8 -#define CSI2_PAD_SRC 1 -#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS) - -#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 -#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 - -#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 -#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 - -struct ipu6_isys_csi2 { - struct ipu6_isys_subdev asd; - struct ipu6_isys_csi2_pdata *pdata; - struct ipu6_isys *isys; - struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS]; - - void __iomem *base; - u32 receiver_errors; - unsigned int nlanes; - unsigned int port; -}; - -struct ipu6_isys_csi2_timing { - u32 ctermen; - u32 csettle; - u32 dtermen; - u32 dsettle; -}; - -struct ipu6_csi2_error { - const char *error_string; - bool is_info_only; -}; - -#define ipu6_isys_subdev_to_csi2(__sd) \ - container_of(__sd, 
struct ipu6_isys_csi2, asd) - -#define to_ipu6_isys_csi2(__asd) container_of(__asd, struct ipu6_isys_csi2, asd) - -s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2); -int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, struct ipu6_isys *isys, - void __iomem *base, unsigned int index); -void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2); -void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream); -void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream); -void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2); -void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); -int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, - struct v4l2_mbus_frame_desc_entry *entry); - -#endif /* IPU6_ISYS_CSI2_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c deleted file mode 100644 index 1715275..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c +++ /dev/null @@ -1,536 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-platform-isys-csi2-reg.h" - -#define IPU6_DWC_DPHY_BASE(i) (0x238038 + 0x34 * (i)) -#define IPU6_DWC_DPHY_RSTZ 0x00 -#define IPU6_DWC_DPHY_SHUTDOWNZ 0x04 -#define IPU6_DWC_DPHY_HSFREQRANGE 0x08 -#define IPU6_DWC_DPHY_CFGCLKFREQRANGE 0x0c -#define IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE 0x10 -#define IPU6_DWC_DPHY_TEST_IFC_REQ 0x14 -#define IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION 0x18 -#define IPU6_DWC_DPHY_DFT_CTRL0 0x28 -#define IPU6_DWC_DPHY_DFT_CTRL1 0x2c -#define IPU6_DWC_DPHY_DFT_CTRL2 0x30 - -/* - * test IFC request definition: - * - req: 0 for read, 1 for write - * - 12 bits address - * - 8bits data (will ignore for read) - * --24----16------4-----0 - * 
--|-data-|-addr-|-req-| - */ -#define IFC_REQ(req, addr, data) (FIELD_PREP(GENMASK(23, 16), data) | \ - FIELD_PREP(GENMASK(15, 4), addr) | \ - FIELD_PREP(GENMASK(1, 0), req)) - -#define TEST_IFC_REQ_READ 0 -#define TEST_IFC_REQ_WRITE 1 -#define TEST_IFC_REQ_RESET 2 - -#define TEST_IFC_ACCESS_MODE_FSM 0 -#define TEST_IFC_ACCESS_MODE_IFC_CTL 1 - -enum phy_fsm_state { - PHY_FSM_STATE_POWERON = 0, - PHY_FSM_STATE_BGPON = 1, - PHY_FSM_STATE_CAL_TYPE = 2, - PHY_FSM_STATE_BURNIN_CAL = 3, - PHY_FSM_STATE_TERMCAL = 4, - PHY_FSM_STATE_OFFSETCAL = 5, - PHY_FSM_STATE_OFFSET_LANE = 6, - PHY_FSM_STATE_IDLE = 7, - PHY_FSM_STATE_ULP = 8, - PHY_FSM_STATE_DDLTUNNING = 9, - PHY_FSM_STATE_SKEW_BACKWARD = 10, - PHY_FSM_STATE_INVALID, -}; - -static void dwc_dphy_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 data) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - - dev_dbg(dev, "write: reg 0x%lx = data 0x%x", base + addr - isys_base, - data); - writel(data, base + addr); -} - -static u32 dwc_dphy_read(struct ipu6_isys *isys, u32 phy_id, u32 addr) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - u32 data; - - data = readl(base + addr); - dev_dbg(dev, "read: reg 0x%lx = data 0x%x", base + addr - isys_base, - data); - - return data; -} - -static void dwc_dphy_write_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 data, u8 shift, u8 width) -{ - u32 temp; - u32 mask; - - mask = (1 << width) - 1; - temp = dwc_dphy_read(isys, phy_id, addr); - temp &= ~(mask << shift); - temp |= (data & mask) << shift; - dwc_dphy_write(isys, phy_id, addr, temp); -} - -static u32 __maybe_unused dwc_dphy_read_mask(struct ipu6_isys *isys, u32 phy_id, - u32 addr, u8 shift, u8 width) -{ - u32 val; - - val = dwc_dphy_read(isys, phy_id, addr) >> shift; - return val & ((1 << 
width) - 1); -} - -#define DWC_DPHY_TIMEOUT (5 * USEC_PER_SEC) -static int dwc_dphy_ifc_read(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 *val) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - void __iomem *reg; - u32 completion; - int ret; - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, - IFC_REQ(TEST_IFC_REQ_READ, addr, 0)); - reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; - ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), - 10, DWC_DPHY_TIMEOUT); - if (ret) { - dev_err(dev, "DWC ifc request read timeout\n"); - return ret; - } - - *val = completion >> 8 & 0xff; - *val = FIELD_GET(GENMASK(15, 8), completion); - dev_dbg(dev, "DWC ifc read 0x%x = 0x%x", addr, *val); - - return 0; -} - -static int dwc_dphy_ifc_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 data) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - void __iomem *reg; - u32 completion; - int ret; - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, - IFC_REQ(TEST_IFC_REQ_WRITE, addr, data)); - completion = readl(base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION); - reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; - ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), - 10, DWC_DPHY_TIMEOUT); - if (ret) - dev_err(dev, "DWC ifc request write timeout\n"); - - return ret; -} - -static void dwc_dphy_ifc_write_mask(struct ipu6_isys *isys, u32 phy_id, - u32 addr, u32 data, u8 shift, u8 width) -{ - u32 temp, mask; - int ret; - - ret = dwc_dphy_ifc_read(isys, phy_id, addr, &temp); - if (ret) - return; - - mask = (1 << width) - 1; - temp &= ~(mask << shift); - temp |= (data & mask) << shift; - dwc_dphy_ifc_write(isys, phy_id, addr, temp); -} - -static u32 dwc_dphy_ifc_read_mask(struct ipu6_isys *isys, u32 phy_id, u32 
addr, - u8 shift, u8 width) -{ - int ret; - u32 val; - - ret = dwc_dphy_ifc_read(isys, phy_id, addr, &val); - if (ret) - return 0; - - return ((val >> shift) & ((1 << width) - 1)); -} - -static int dwc_dphy_pwr_up(struct ipu6_isys *isys, u32 phy_id) -{ - struct device *dev = &isys->adev->auxdev.dev; - u32 fsm_state; - int ret; - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 1); - usleep_range(10, 20); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 1); - - ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state, - (fsm_state == PHY_FSM_STATE_IDLE || - fsm_state == PHY_FSM_STATE_ULP), - 100, DWC_DPHY_TIMEOUT, false, isys, - phy_id, 0x1e, 0, 4); - - if (ret) - dev_err(dev, "Dphy %d power up failed, state 0x%x", phy_id, - fsm_state); - - return ret; -} - -struct dwc_dphy_freq_range { - u8 hsfreq; - u16 min; - u16 max; - u16 default_mbps; - u16 osc_freq_target; -}; - -#define DPHY_FREQ_RANGE_NUM (63) -#define DPHY_FREQ_RANGE_INVALID_INDEX (0xff) -static const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = { - {0x00, 80, 97, 80, 335}, - {0x10, 80, 107, 90, 335}, - {0x20, 84, 118, 100, 335}, - {0x30, 93, 128, 110, 335}, - {0x01, 103, 139, 120, 335}, - {0x11, 112, 149, 130, 335}, - {0x21, 122, 160, 140, 335}, - {0x31, 131, 170, 150, 335}, - {0x02, 141, 181, 160, 335}, - {0x12, 150, 191, 170, 335}, - {0x22, 160, 202, 180, 335}, - {0x32, 169, 212, 190, 335}, - {0x03, 183, 228, 205, 335}, - {0x13, 198, 244, 220, 335}, - {0x23, 212, 259, 235, 335}, - {0x33, 226, 275, 250, 335}, - {0x04, 250, 301, 275, 335}, - {0x14, 274, 328, 300, 335}, - {0x25, 297, 354, 325, 335}, - {0x35, 321, 380, 350, 335}, - {0x05, 369, 433, 400, 335}, - {0x16, 416, 485, 450, 335}, - {0x26, 464, 538, 500, 335}, - {0x37, 511, 590, 550, 335}, - {0x07, 559, 643, 600, 335}, - {0x18, 606, 695, 650, 335}, - {0x28, 654, 748, 700, 335}, - {0x39, 701, 800, 750, 335}, - {0x09, 749, 853, 800, 335}, - {0x19, 796, 905, 850, 335}, - {0x29, 844, 958, 900, 335}, - {0x3a, 891, 1010, 950, 
335}, - {0x0a, 939, 1063, 1000, 335}, - {0x1a, 986, 1115, 1050, 335}, - {0x2a, 1034, 1168, 1100, 335}, - {0x3b, 1081, 1220, 1150, 335}, - {0x0b, 1129, 1273, 1200, 335}, - {0x1b, 1176, 1325, 1250, 335}, - {0x2b, 1224, 1378, 1300, 335}, - {0x3c, 1271, 1430, 1350, 335}, - {0x0c, 1319, 1483, 1400, 335}, - {0x1c, 1366, 1535, 1450, 335}, - {0x2c, 1414, 1588, 1500, 335}, - {0x3d, 1461, 1640, 1550, 208}, - {0x0d, 1509, 1693, 1600, 214}, - {0x1d, 1556, 1745, 1650, 221}, - {0x2e, 1604, 1798, 1700, 228}, - {0x3e, 1651, 1850, 1750, 234}, - {0x0e, 1699, 1903, 1800, 241}, - {0x1e, 1746, 1955, 1850, 248}, - {0x2f, 1794, 2008, 1900, 255}, - {0x3f, 1841, 2060, 1950, 261}, - {0x0f, 1889, 2113, 2000, 268}, - {0x40, 1936, 2165, 2050, 275}, - {0x41, 1984, 2218, 2100, 281}, - {0x42, 2031, 2270, 2150, 288}, - {0x43, 2079, 2323, 2200, 294}, - {0x44, 2126, 2375, 2250, 302}, - {0x45, 2174, 2428, 2300, 308}, - {0x46, 2221, 2480, 2350, 315}, - {0x47, 2269, 2500, 2400, 321}, - {0x48, 2316, 2500, 2450, 328}, - {0x49, 2364, 2500, 2500, 335} -}; - -static u16 get_hsfreq_by_mbps(u32 mbps) -{ - unsigned int i = DPHY_FREQ_RANGE_NUM; - - while (i--) { - if (freqranges[i].default_mbps == mbps || - (mbps >= freqranges[i].min && mbps <= freqranges[i].max)) - return i; - } - - return DPHY_FREQ_RANGE_INVALID_INDEX; -} - -static int ipu6_isys_dwc_phy_config(struct ipu6_isys *isys, - u32 phy_id, u32 mbps) -{ - struct ipu6_bus_device *adev = isys->adev; - struct device *dev = &adev->auxdev.dev; - struct ipu6_device *isp = adev->isp; - u32 cfg_clk_freqrange; - u32 osc_freq_target; - u32 index; - - dev_dbg(dev, "config Dphy %u with %u mbps", phy_id, mbps); - - index = get_hsfreq_by_mbps(mbps); - if (index == DPHY_FREQ_RANGE_INVALID_INDEX) { - dev_err(dev, "link freq not found for mbps %u", mbps); - return -EINVAL; - } - - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_HSFREQRANGE, - freqranges[index].hsfreq, 0, 7); - - /* Force termination Calibration */ - if (isys->phy_termcal_val) { - 
dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 1); - dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2); - dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, - isys->phy_termcal_val, 4, 4); - } - - /* - * Enable override to configure the DDL target oscillation - * frequency on bit 0 of register 0xe4 - */ - dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1); - /* - * configure registers 0xe2, 0xe3 with the - * appropriate DDL target oscillation frequency - * 0x1cc(460) - */ - osc_freq_target = freqranges[index].osc_freq_target; - dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2, - osc_freq_target & 0xff, 0, 8); - dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3, - (osc_freq_target >> 8) & 0xf, 0, 4); - - if (mbps < 1500) { - /* deskew_polarity_rw, for < 1.5Gbps */ - dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1); - } - - /* - * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4] - * (38.4 - 17) * 4 = ~85 (0x55) - */ - cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10; - dev_dbg(dev, "ref_clk = %u clk_freqrange = %u", - isp->buttress.ref_clk, cfg_clk_freqrange); - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_CFGCLKFREQRANGE, - cfg_clk_freqrange, 0, 8); - - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1); - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1); - - return 0; -} - -static void ipu6_isys_dwc_phy_aggr_setup(struct ipu6_isys *isys, u32 master, - u32 slave, u32 mbps) -{ - /* Config mastermacro */ - dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1); - - /* Config master PHY clk lane to drive long channel clk */ - dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1); - - /* Config both PHYs data lanes to get clk from long channel */ - dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1); - dwc_dphy_ifc_write_mask(isys, 
master, 0x708, 0x1, 5, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x708, 0x1, 5, 1); - - /* Config slave PHY clk lane to bypass long channel clk to DDR clk */ - dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1); - - /* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */ - dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2); - - /* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */ - dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1); - - /* Turn off slave PHY LP-RX clk lane */ - dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5); -} - -#define PHY_E 4 -static int ipu6_isys_dwc_phy_powerup_ack(struct ipu6_isys *isys, u32 phy_id) -{ - struct device *dev = &isys->adev->auxdev.dev; - u32 rescal_done; - int ret; - - ret = dwc_dphy_pwr_up(isys, phy_id); - if (ret != 0) { - dev_err(dev, "Dphy %u power up failed(%d)", phy_id, ret); - return ret; - } - - /* reset forcerxmode */ - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 4, 1); - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 8, 1); - - dev_dbg(dev, "Dphy %u is ready!", phy_id); - - if (phy_id != PHY_E || isys->phy_termcal_val) - return 0; - - usleep_range(100, 200); - rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1); - if (rescal_done) { - isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id, - 0x220, 2, 4); - dev_dbg(dev, "termcal done with value = %u", - isys->phy_termcal_val); - } - - return 0; -} - -static void ipu6_isys_dwc_phy_reset(struct ipu6_isys *isys, u32 phy_id) -{ - dev_dbg(&isys->adev->auxdev.dev, "Reset phy %u", phy_id); - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 0); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 0); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE, - TEST_IFC_ACCESS_MODE_FSM); - 
dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, - TEST_IFC_REQ_RESET); -} - -int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 phy_id, primary, secondary; - u32 nlanes, port, mbps; - s64 link_freq; - int ret; - - port = cfg->port; - - if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { - dev_warn(dev, "invalid port ID %d\n", port); - return -EINVAL; - } - - nlanes = cfg->nlanes; - /* only port 0, 2 and 4 support 4 lanes */ - if (nlanes == 4 && port % 2) { - dev_err(dev, "invalid csi-port %u with %u lanes\n", port, - nlanes); - return -EINVAL; - } - - link_freq = ipu6_isys_csi2_get_link_freq(&isys->csi2[port]); - if (link_freq < 0) { - dev_err(dev, "get link freq failed(%lld).\n", link_freq); - return link_freq; - } - - mbps = div_u64(link_freq, 500000); - - phy_id = port; - primary = port & ~1; - secondary = primary + 1; - if (on) { - if (nlanes == 4) { - dev_dbg(dev, "config phy %u and %u in aggr mode\n", - primary, secondary); - - ipu6_isys_dwc_phy_reset(isys, primary); - ipu6_isys_dwc_phy_reset(isys, secondary); - ipu6_isys_dwc_phy_aggr_setup(isys, primary, - secondary, mbps); - - ret = ipu6_isys_dwc_phy_config(isys, primary, mbps); - if (ret) - return ret; - ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps); - if (ret) - return ret; - - ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary); - if (ret) - return ret; - - ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary); - return ret; - } - - dev_dbg(dev, "config phy %u with %u lanes in non-aggr mode\n", - phy_id, nlanes); - - ipu6_isys_dwc_phy_reset(isys, phy_id); - ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps); - if (ret) - return ret; - - ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id); - return ret; - } - - if (nlanes == 4) { - dev_dbg(dev, "Power down phy %u and phy %u for port 
%u\n", - primary, secondary, port); - ipu6_isys_dwc_phy_reset(isys, secondary); - ipu6_isys_dwc_phy_reset(isys, primary); - - return 0; - } - - dev_dbg(dev, "Powerdown phy %u with %u lanes\n", phy_id, nlanes); - - ipu6_isys_dwc_phy_reset(isys, phy_id); - - return 0; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c deleted file mode 100644 index c804291..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c +++ /dev/null @@ -1,242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-platform-isys-csi2-reg.h" - -/* only use BB0, BB2, BB4, and BB6 on PHY0 */ -#define IPU6SE_ISYS_PHY_BB_NUM 4 -#define IPU6SE_ISYS_PHY_0_BASE 0x10000 - -#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) -#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) -#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) -#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) - -/* - * use port_cfg to configure that which data lanes used - * +---------+ +------+ +-----+ - * | port0 x4<-----| | | | - * | | | port | | | - * | port1 x2<-----| | | | - * | | | <-| PHY | - * | port2 x4<-----| | | | - * | | |config| | | - * | port3 x2<-----| | | | - * +---------+ +------+ +-----+ - */ -static const unsigned int csi2_port_cfg[][3] = { - {0, 0, 0x1f}, /* no link */ - {4, 0, 0x10}, /* x4 + x4 config */ - {2, 0, 0x12}, /* x2 + x2 config */ - {1, 0, 0x13}, /* x1 + x1 config */ - {2, 1, 0x15}, /* x2x1 + x2x1 config */ - {1, 1, 0x16}, /* x1x1 + x1x1 config */ - {2, 2, 0x18}, /* x2x2 + x2x2 config */ - {1, 2, 0x19} /* x1x2 + x1x2 config */ -}; - -/* port, nlanes, bbindex, portcfg */ -static const unsigned int phy_port_cfg[][4] = { - /* sip0 */ - {0, 1, 0, 0x15}, - {0, 2, 0, 0x15}, - {0, 4, 0, 0x15}, - {0, 4, 2, 0x22}, - /* sip1 */ - {2, 1, 4, 
0x15}, - {2, 2, 4, 0x15}, - {2, 4, 4, 0x15}, - {2, 4, 6, 0x22} -}; - -static void ipu6_isys_csi2_phy_config_by_port(struct ipu6_isys *isys, - unsigned int port, - unsigned int nlanes) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *base = isys->adev->isp->base; - unsigned int bbnum; - u32 val, reg, i; - - dev_dbg(dev, "port %u with %u lanes", port, nlanes); - - /* only support <1.5Gbps */ - for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) { - /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); - val = readl(base + reg); - val |= FIELD_PREP(GENMASK(6, 1), 13); - writel(val, base + reg); - - /* cphy_rx_control1.en_crc1 = 1 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i); - val = readl(base + reg); - val |= BIT(31); - writel(val, base + reg); - - /* dphy_cfg.reserved = 1, .lden_from_dll_ovrd_0 = 1 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i); - val = readl(base + reg); - val |= BIT(25) | BIT(26); - writel(val, base + reg); - - /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); - val = readl(base + reg); - val |= BIT(0); - writel(val, base + reg); - } - - /* Front end config, use minimal channel loss */ - for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) { - if (phy_port_cfg[i][0] == port && - phy_port_cfg[i][1] == nlanes) { - bbnum = phy_port_cfg[i][2] / 2; - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum); - val = readl(base + reg); - val |= phy_port_cfg[i][3]; - writel(val, base + reg); - } - } -} - -static void ipu6_isys_csi2_rx_control(struct ipu6_isys *isys) -{ - void __iomem *base = isys->adev->isp->base; - u32 val, reg; - - reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL); - - reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL); - - reg = 
CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL); - - reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL); -} - -static int ipu6_isys_csi2_set_port_cfg(struct ipu6_isys *isys, - unsigned int port, unsigned int nlanes) -{ - struct device *dev = &isys->adev->auxdev.dev; - unsigned int sip = port / 2; - unsigned int index; - - switch (nlanes) { - case 1: - index = 5; - break; - case 2: - index = 6; - break; - case 4: - index = 1; - break; - default: - dev_err(dev, "lanes nr %u is unsupported\n", nlanes); - return -EINVAL; - } - - dev_dbg(dev, "port config for port %u with %u lanes\n", port, nlanes); - - writel(csi2_port_cfg[index][2], - isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)); - - return 0; -} - -static void -ipu6_isys_csi2_set_timing(struct ipu6_isys *isys, - const struct ipu6_isys_csi2_timing *timing, - unsigned int port, unsigned int nlanes) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *reg; - u32 port_base; - u32 i; - - port_base = (port % 2) ? 
CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) : - CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port); - - dev_dbg(dev, "set timing for port %u with %u lanes\n", port, nlanes); - - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE; - - writel(timing->ctermen, reg); - - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE; - writel(timing->csettle, reg); - - for (i = 0; i < nlanes; i++) { - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i); - writel(timing->dtermen, reg); - - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i); - writel(timing->dsettle, reg); - } -} - -#define DPHY_TIMER_INCR 0x28 -int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - int ret = 0; - u32 nlanes; - u32 port; - - if (!on) - return 0; - - port = cfg->port; - nlanes = cfg->nlanes; - - if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { - dev_warn(dev, "invalid port ID %d\n", port); - return -EINVAL; - } - - ipu6_isys_csi2_phy_config_by_port(isys, port, nlanes); - - writel(DPHY_TIMER_INCR, - isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR); - - /* set port cfg and rx timing */ - ipu6_isys_csi2_set_timing(isys, timing, port, nlanes); - - ret = ipu6_isys_csi2_set_port_cfg(isys, port, nlanes); - if (ret) - return ret; - - ipu6_isys_csi2_rx_control(isys); - - return 0; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c deleted file mode 100644 index 71aa500..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +++ /dev/null @@ -1,720 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include 
-#include -#include -#include -#include - -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-platform-isys-csi2-reg.h" - -#define CSI_REG_HUB_GPREG_PHY_CTL(id) (CSI_REG_BASE + 0x18008 + (id) * 0x8) -#define CSI_REG_HUB_GPREG_PHY_CTL_RESET BIT(4) -#define CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN BIT(0) -#define CSI_REG_HUB_GPREG_PHY_STATUS(id) (CSI_REG_BASE + 0x1800c + (id) * 0x8) -#define CSI_REG_HUB_GPREG_PHY_POWER_ACK BIT(0) -#define CSI_REG_HUB_GPREG_PHY_READY BIT(4) - -#define MCD_PHY_POWER_STATUS_TIMEOUT (200 * USEC_PER_MSEC) - -/* - * bridge to phy in buttress reg map, each phy has 16 kbytes - * only 2 phys for TGL U and Y - */ -#define IPU6_ISYS_MCD_PHY_BASE(i) (0x10000 + (i) * 0x4000) - -/* - * There are 2 MCD DPHY instances on TGL and 1 MCD DPHY instance on ADL. - * Each MCD PHY has 12-lanes which has 8 data lanes and 4 clock lanes. - * CSI port 1, 3 (5, 7) can support max 2 data lanes. - * CSI port 0, 2 (4, 6) can support max 4 data lanes. - * PHY configurations are PPI based instead of port. 
- * Left: - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | PPI | PPI5 | PPI4 | PPI3 | PPI2 | PPI1 | PPI0 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x4 | unused | D3 | D2 | C0 | D0 | D1 | - * |---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x2 | C1 | D0 | D1 | C0 | D0 | D1 | - * ----------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x1 | C1 | D0 | unused | C0 | D0 | D1 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x1 | C1 | D0 | unused | C0 | D0 | unused | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x2 | C1 | D0 | D1 | C0 | D0 | unused | - * +---------+---------+---------+---------+--------+---------+----------+ - * - * Right: - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | PPI | PPI6 | PPI7 | PPI8 | PPI9 | PPI10 | PPI11 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x4 | D1 | D0 | C2 | D2 | D3 | unused | - * |---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x2 | D1 | D0 | C2 | D1 | D0 | C3 | - * ----------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x1 | D1 | D0 | C2 | unused | D0 | C3 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x1 | unused | D0 | C2 | unused | D0 | C3 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x2 | unused | D0 | C2 | D1 | D0 | C3 | - * +---------+---------+---------+---------+--------+---------+----------+ - * - * ppi mapping per phy : - * - * x4 + x4: - * Left : port0 - PPI range {0, 1, 2, 3, 4} - 
* Right: port2 - PPI range {6, 7, 8, 9, 10} - * - * x4 + x2x2: - * Left: port0 - PPI range {0, 1, 2, 3, 4} - * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} - * - * x2x2 + x4: - * Left: port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} - * Right: port2 - PPI range {6, 7, 8, 9, 10} - * - * x2x2 + x2x2: - * Left : port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} - * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} - */ - -struct phy_reg { - u32 reg; - u32 val; -}; - -static const struct phy_reg common_init_regs[] = { - /* for TGL-U, use 0x80000000 */ - {0x00000040, 0x80000000}, - {0x00000044, 0x00a80880}, - {0x00000044, 0x00b80880}, - {0x00000010, 0x0000078c}, - {0x00000344, 0x2f4401e2}, - {0x00000544, 0x924401e2}, - {0x00000744, 0x594401e2}, - {0x00000944, 0x624401e2}, - {0x00000b44, 0xfc4401e2}, - {0x00000d44, 0xc54401e2}, - {0x00000f44, 0x034401e2}, - {0x00001144, 0x8f4401e2}, - {0x00001344, 0x754401e2}, - {0x00001544, 0xe94401e2}, - {0x00001744, 0xcb4401e2}, - {0x00001944, 0xfa4401e2} -}; - -static const struct phy_reg x1_port0_config_regs[] = { - {0x00000694, 0xc80060fa}, - {0x00000680, 0x3d4f78ea}, - {0x00000690, 0x10a0140b}, - {0x000006a8, 0xdf04010a}, - {0x00000700, 0x57050060}, - {0x00000710, 0x0030001c}, - {0x00000738, 0x5f004444}, - {0x0000073c, 0x78464204}, - {0x00000748, 0x7821f940}, - {0x0000074c, 0xb2000433}, - {0x00000494, 0xfe6030fa}, - {0x00000480, 0x29ef5ed0}, - {0x00000490, 0x10a0540b}, - {0x000004a8, 0x7a01010a}, - {0x00000500, 0xef053460}, - {0x00000510, 0xe030101c}, - {0x00000538, 0xdf808444}, - {0x0000053c, 0xc8422204}, - {0x00000540, 0x0180088c}, - {0x00000574, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x1_port1_config_regs[] = { - {0x00000c94, 0xc80060fa}, - {0x00000c80, 0xcf47abea}, - {0x00000c90, 0x10a0840b}, - {0x00000ca8, 0xdf04010a}, - {0x00000d00, 0x57050060}, - {0x00000d10, 0x0030001c}, - {0x00000d38, 0x5f004444}, - {0x00000d3c, 0x78464204}, - 
{0x00000d48, 0x7821f940}, - {0x00000d4c, 0xb2000433}, - {0x00000a94, 0xc91030fa}, - {0x00000a80, 0x5a166ed0}, - {0x00000a90, 0x10a0540b}, - {0x00000aa8, 0x5d060100}, - {0x00000b00, 0xef053460}, - {0x00000b10, 0xa030101c}, - {0x00000b38, 0xdf808444}, - {0x00000b3c, 0xc8422204}, - {0x00000b40, 0x0180088c}, - {0x00000b74, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x1_port2_config_regs[] = { - {0x00001294, 0x28f000fa}, - {0x00001280, 0x08130cea}, - {0x00001290, 0x10a0140b}, - {0x000012a8, 0xd704010a}, - {0x00001300, 0x8d050060}, - {0x00001310, 0x0030001c}, - {0x00001338, 0xdf008444}, - {0x0000133c, 0x78422204}, - {0x00001348, 0x7821f940}, - {0x0000134c, 0x5a000433}, - {0x00001094, 0x2d20b0fa}, - {0x00001080, 0xade75dd0}, - {0x00001090, 0x10a0540b}, - {0x000010a8, 0xb101010a}, - {0x00001100, 0x33053460}, - {0x00001110, 0x0030101c}, - {0x00001138, 0xdf808444}, - {0x0000113c, 0xc8422204}, - {0x00001140, 0x8180088c}, - {0x00001174, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x1_port3_config_regs[] = { - {0x00001894, 0xc80060fa}, - {0x00001880, 0x0f90fd6a}, - {0x00001890, 0x10a0840b}, - {0x000018a8, 0xdf04010a}, - {0x00001900, 0x57050060}, - {0x00001910, 0x0030001c}, - {0x00001938, 0x5f004444}, - {0x0000193c, 0x78464204}, - {0x00001948, 0x7821f940}, - {0x0000194c, 0xb2000433}, - {0x00001694, 0x3050d0fa}, - {0x00001680, 0x0ef6d050}, - {0x00001690, 0x10a0540b}, - {0x000016a8, 0xe301010a}, - {0x00001700, 0x69053460}, - {0x00001710, 0xa030101c}, - {0x00001738, 0xdf808444}, - {0x0000173c, 0xc8422204}, - {0x00001740, 0x0180088c}, - {0x00001774, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port0_config_regs[] = { - {0x00000694, 0xc80060fa}, - {0x00000680, 0x3d4f78ea}, - {0x00000690, 0x10a0140b}, - {0x000006a8, 0xdf04010a}, - {0x00000700, 0x57050060}, - {0x00000710, 0x0030001c}, - {0x00000738, 0x5f004444}, - {0x0000073c, 0x78464204}, - {0x00000748, 0x7821f940}, - {0x0000074c, 
0xb2000433}, - {0x00000494, 0xc80060fa}, - {0x00000480, 0x29ef5ed8}, - {0x00000490, 0x10a0540b}, - {0x000004a8, 0x7a01010a}, - {0x00000500, 0xef053460}, - {0x00000510, 0xe030101c}, - {0x00000538, 0xdf808444}, - {0x0000053c, 0xc8422204}, - {0x00000540, 0x0180088c}, - {0x00000574, 0x00000000}, - {0x00000294, 0xc80060fa}, - {0x00000280, 0xcb45b950}, - {0x00000290, 0x10a0540b}, - {0x000002a8, 0x8c01010a}, - {0x00000300, 0xef053460}, - {0x00000310, 0x8030101c}, - {0x00000338, 0x41808444}, - {0x0000033c, 0x32422204}, - {0x00000340, 0x0180088c}, - {0x00000374, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port1_config_regs[] = { - {0x00000c94, 0xc80060fa}, - {0x00000c80, 0xcf47abea}, - {0x00000c90, 0x10a0840b}, - {0x00000ca8, 0xdf04010a}, - {0x00000d00, 0x57050060}, - {0x00000d10, 0x0030001c}, - {0x00000d38, 0x5f004444}, - {0x00000d3c, 0x78464204}, - {0x00000d48, 0x7821f940}, - {0x00000d4c, 0xb2000433}, - {0x00000a94, 0xc80060fa}, - {0x00000a80, 0x5a166ed8}, - {0x00000a90, 0x10a0540b}, - {0x00000aa8, 0x7a01010a}, - {0x00000b00, 0xef053460}, - {0x00000b10, 0xa030101c}, - {0x00000b38, 0xdf808444}, - {0x00000b3c, 0xc8422204}, - {0x00000b40, 0x0180088c}, - {0x00000b74, 0x00000000}, - {0x00000894, 0xc80060fa}, - {0x00000880, 0x4d4f21d0}, - {0x00000890, 0x10a0540b}, - {0x000008a8, 0x5601010a}, - {0x00000900, 0xef053460}, - {0x00000910, 0x8030101c}, - {0x00000938, 0xdf808444}, - {0x0000093c, 0xc8422204}, - {0x00000940, 0x0180088c}, - {0x00000974, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port2_config_regs[] = { - {0x00001294, 0xc80060fa}, - {0x00001280, 0x08130cea}, - {0x00001290, 0x10a0140b}, - {0x000012a8, 0xd704010a}, - {0x00001300, 0x8d050060}, - {0x00001310, 0x0030001c}, - {0x00001338, 0xdf008444}, - {0x0000133c, 0x78422204}, - {0x00001348, 0x7821f940}, - {0x0000134c, 0x5a000433}, - {0x00001094, 0xc80060fa}, - {0x00001080, 0xade75dd8}, - {0x00001090, 0x10a0540b}, - {0x000010a8, 0xb101010a}, - {0x00001100, 
0x33053460}, - {0x00001110, 0x0030101c}, - {0x00001138, 0xdf808444}, - {0x0000113c, 0xc8422204}, - {0x00001140, 0x8180088c}, - {0x00001174, 0x00000000}, - {0x00000e94, 0xc80060fa}, - {0x00000e80, 0x0fbf16d0}, - {0x00000e90, 0x10a0540b}, - {0x00000ea8, 0x7a01010a}, - {0x00000f00, 0xf5053460}, - {0x00000f10, 0xc030101c}, - {0x00000f38, 0xdf808444}, - {0x00000f3c, 0xc8422204}, - {0x00000f40, 0x8180088c}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port3_config_regs[] = { - {0x00001894, 0xc80060fa}, - {0x00001880, 0x0f90fd6a}, - {0x00001890, 0x10a0840b}, - {0x000018a8, 0xdf04010a}, - {0x00001900, 0x57050060}, - {0x00001910, 0x0030001c}, - {0x00001938, 0x5f004444}, - {0x0000193c, 0x78464204}, - {0x00001948, 0x7821f940}, - {0x0000194c, 0xb2000433}, - {0x00001694, 0xc80060fa}, - {0x00001680, 0x0ef6d058}, - {0x00001690, 0x10a0540b}, - {0x000016a8, 0x7a01010a}, - {0x00001700, 0x69053460}, - {0x00001710, 0xa030101c}, - {0x00001738, 0xdf808444}, - {0x0000173c, 0xc8422204}, - {0x00001740, 0x0180088c}, - {0x00001774, 0x00000000}, - {0x00001494, 0xc80060fa}, - {0x00001480, 0xf9d34bd0}, - {0x00001490, 0x10a0540b}, - {0x000014a8, 0x7a01010a}, - {0x00001500, 0x1b053460}, - {0x00001510, 0x0030101c}, - {0x00001538, 0xdf808444}, - {0x0000153c, 0xc8422204}, - {0x00001540, 0x8180088c}, - {0x00001574, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port0_config_regs[] = { - {0x00000694, 0xc80060fa}, - {0x00000680, 0x3d4f78fa}, - {0x00000690, 0x10a0140b}, - {0x000006a8, 0xdf04010a}, - {0x00000700, 0x57050060}, - {0x00000710, 0x0030001c}, - {0x00000738, 0x5f004444}, - {0x0000073c, 0x78464204}, - {0x00000748, 0x7821f940}, - {0x0000074c, 0xb2000433}, - {0x00000494, 0xfe6030fa}, - {0x00000480, 0x29ef5ed8}, - {0x00000490, 0x10a0540b}, - {0x000004a8, 0x7a01010a}, - {0x00000500, 0xef053460}, - {0x00000510, 0xe030101c}, - {0x00000538, 0xdf808444}, - {0x0000053c, 0xc8422204}, - {0x00000540, 0x0180088c}, - {0x00000574, 0x00000004}, - {0x00000294, 
0x23e030fa}, - {0x00000280, 0xcb45b950}, - {0x00000290, 0x10a0540b}, - {0x000002a8, 0x8c01010a}, - {0x00000300, 0xef053460}, - {0x00000310, 0x8030101c}, - {0x00000338, 0x41808444}, - {0x0000033c, 0x32422204}, - {0x00000340, 0x0180088c}, - {0x00000374, 0x00000004}, - {0x00000894, 0x5620b0fa}, - {0x00000880, 0x4d4f21dc}, - {0x00000890, 0x10a0540b}, - {0x000008a8, 0x5601010a}, - {0x00000900, 0xef053460}, - {0x00000910, 0x8030101c}, - {0x00000938, 0xdf808444}, - {0x0000093c, 0xc8422204}, - {0x00000940, 0x0180088c}, - {0x00000974, 0x00000004}, - {0x00000a94, 0xc91030fa}, - {0x00000a80, 0x5a166ecc}, - {0x00000a90, 0x10a0540b}, - {0x00000aa8, 0x5d01010a}, - {0x00000b00, 0xef053460}, - {0x00000b10, 0xa030101c}, - {0x00000b38, 0xdf808444}, - {0x00000b3c, 0xc8422204}, - {0x00000b40, 0x0180088c}, - {0x00000b74, 0x00000004}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port1_config_regs[] = { - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port2_config_regs[] = { - {0x00001294, 0x28f000fa}, - {0x00001280, 0x08130cfa}, - {0x00001290, 0x10c0140b}, - {0x000012a8, 0xd704010a}, - {0x00001300, 0x8d050060}, - {0x00001310, 0x0030001c}, - {0x00001338, 0xdf008444}, - {0x0000133c, 0x78422204}, - {0x00001348, 0x7821f940}, - {0x0000134c, 0x5a000433}, - {0x00001094, 0x2d20b0fa}, - {0x00001080, 0xade75dd8}, - {0x00001090, 0x10a0540b}, - {0x000010a8, 0xb101010a}, - {0x00001100, 0x33053460}, - {0x00001110, 0x0030101c}, - {0x00001138, 0xdf808444}, - {0x0000113c, 0xc8422204}, - {0x00001140, 0x8180088c}, - {0x00001174, 0x00000004}, - {0x00000e94, 0xd308d0fa}, - {0x00000e80, 0x0fbf16d0}, - {0x00000e90, 0x10a0540b}, - {0x00000ea8, 0x2c01010a}, - {0x00000f00, 0xf5053460}, - {0x00000f10, 0xc030101c}, - {0x00000f38, 0xdf808444}, - {0x00000f3c, 0xc8422204}, - {0x00000f40, 0x8180088c}, - {0x00000f74, 0x00000004}, - {0x00001494, 0x136850fa}, - {0x00001480, 0xf9d34bdc}, - {0x00001490, 0x10a0540b}, - {0x000014a8, 0x5a01010a}, - {0x00001500, 0x1b053460}, - {0x00001510, 
0x0030101c}, - {0x00001538, 0xdf808444}, - {0x0000153c, 0xc8422204}, - {0x00001540, 0x8180088c}, - {0x00001574, 0x00000004}, - {0x00001694, 0x3050d0fa}, - {0x00001680, 0x0ef6d04c}, - {0x00001690, 0x10a0540b}, - {0x000016a8, 0xe301010a}, - {0x00001700, 0x69053460}, - {0x00001710, 0xa030101c}, - {0x00001738, 0xdf808444}, - {0x0000173c, 0xc8422204}, - {0x00001740, 0x0180088c}, - {0x00001774, 0x00000004}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port3_config_regs[] = { - {0x00000000, 0x00000000} -}; - -static const struct phy_reg *x1_config_regs[4] = { - x1_port0_config_regs, - x1_port1_config_regs, - x1_port2_config_regs, - x1_port3_config_regs -}; - -static const struct phy_reg *x2_config_regs[4] = { - x2_port0_config_regs, - x2_port1_config_regs, - x2_port2_config_regs, - x2_port3_config_regs -}; - -static const struct phy_reg *x4_config_regs[4] = { - x4_port0_config_regs, - x4_port1_config_regs, - x4_port2_config_regs, - x4_port3_config_regs -}; - -static const struct phy_reg **config_regs[3] = { - x1_config_regs, - x2_config_regs, - x4_config_regs -}; - -static int ipu6_isys_mcd_phy_powerup_ack(struct ipu6_isys *isys, u8 id) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 val; - int ret; - - val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - val |= CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN; - writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - - ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), - val, val & CSI_REG_HUB_GPREG_PHY_POWER_ACK, - 200, MCD_PHY_POWER_STATUS_TIMEOUT); - if (ret) - dev_err(dev, "PHY%d powerup ack timeout", id); - - return ret; -} - -static int ipu6_isys_mcd_phy_powerdown_ack(struct ipu6_isys *isys, u8 id) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 val; - int ret; - - writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - ret = readl_poll_timeout(isys_base + 
CSI_REG_HUB_GPREG_PHY_STATUS(id), - val, !(val & CSI_REG_HUB_GPREG_PHY_POWER_ACK), - 200, MCD_PHY_POWER_STATUS_TIMEOUT); - if (ret) - dev_err(dev, "PHY%d powerdown ack timeout", id); - - return ret; -} - -static void ipu6_isys_mcd_phy_reset(struct ipu6_isys *isys, u8 id, bool assert) -{ - void __iomem *isys_base = isys->pdata->base; - u32 val; - - val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - if (assert) - val |= CSI_REG_HUB_GPREG_PHY_CTL_RESET; - else - val &= ~(CSI_REG_HUB_GPREG_PHY_CTL_RESET); - - writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); -} - -static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 val; - int ret; - - ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), - val, val & CSI_REG_HUB_GPREG_PHY_READY, - 200, MCD_PHY_POWER_STATUS_TIMEOUT); - if (ret) - dev_err(dev, "PHY%d ready ack timeout", id); - - return ret; -} - -static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) -{ - struct ipu6_bus_device *adev = isys->adev; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct v4l2_async_connection *asc; - void __iomem *phy_base; - unsigned int i; - u8 phy_id; - - list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { - s_asd = container_of(asc, struct sensor_async_sd, asc); - phy_id = s_asd->csi2.port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); - - for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) - writel(common_init_regs[i].val, - phy_base + common_init_regs[i].reg); - } -} - -static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) -{ - int phy_port; - int ret; - - if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1)) - return -EINVAL; - - /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C4 */ - /* normalize driver port number */ - phy_port = cfg->port % 4; - - /* swap 
port number only for A and B */ - if (phy_port == 0) - phy_port = 1; - else if (phy_port == 1) - phy_port = 0; - - ret = phy_port; - - /* check validity per lane configuration */ - if (cfg->nlanes == 4 && !(phy_port == 0 || phy_port == 2)) - ret = -EINVAL; - else if ((cfg->nlanes == 2 || cfg->nlanes == 1) && - !(phy_port >= 0 && phy_port <= 3)) - ret = -EINVAL; - - return ret; -} - -static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) -{ - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_bus_device *adev = isys->adev; - const struct phy_reg **phy_config_regs; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct ipu6_isys_csi2_config cfg; - struct v4l2_async_connection *asc; - int phy_port, phy_id; - unsigned int i; - void __iomem *phy_base; - - list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { - s_asd = container_of(asc, struct sensor_async_sd, asc); - cfg.port = s_asd->csi2.port; - cfg.nlanes = s_asd->csi2.nlanes; - phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); - if (phy_port < 0) { - dev_err(dev, "invalid port %d for lane %d", cfg.port, - cfg.nlanes); - return -ENXIO; - } - - phy_id = cfg.port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); - dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, - cfg.nlanes); - - phy_config_regs = config_regs[cfg.nlanes / 2]; - cfg.port = phy_port; - for (i = 0; phy_config_regs[cfg.port][i].reg; i++) - writel(phy_config_regs[cfg.port][i].val, - phy_base + phy_config_regs[cfg.port][i].reg); - } - - return 0; -} - -#define CSI_MCD_PHY_NUM 2 -static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; - -int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u8 port, phy_id; - refcount_t *ref; - int ret; - - port = 
cfg->port; - phy_id = port / 4; - - ref = &phy_power_ref_count[phy_id]; - - dev_dbg(dev, "for phy %d port %d, lanes: %d\n", phy_id, port, - cfg->nlanes); - - if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { - dev_warn(dev, "invalid port ID %d\n", port); - return -EINVAL; - } - - if (on) { - if (refcount_read(ref)) { - dev_dbg(dev, "for phy %d is already UP", phy_id); - refcount_inc(ref); - return 0; - } - - ret = ipu6_isys_mcd_phy_powerup_ack(isys, phy_id); - if (ret) - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 0); - ipu6_isys_mcd_phy_common_init(isys); - - ret = ipu6_isys_mcd_phy_config(isys); - if (ret) - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 1); - ret = ipu6_isys_mcd_phy_ready(isys, phy_id); - if (ret) - return ret; - - refcount_set(ref, 1); - return 0; - } - - if (!refcount_dec_and_test(ref)) - return 0; - - return ipu6_isys_mcd_phy_powerdown_ack(isys, phy_id); -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c deleted file mode 100644 index bbb66b5..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +++ /dev/null @@ -1,847 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-fw-isys.h" -#include "ipu6-isys.h" -#include "ipu6-isys-video.h" - -static int ipu6_isys_buf_init(struct vb2_buffer *vb) -{ - struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); - struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - int ret; - - ret = ipu6_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); - if (ret) - return ret; - - ivb->dma_addr = sg_dma_address(sg->sgl); - - 
return 0; -} - -static void ipu6_isys_buf_cleanup(struct vb2_buffer *vb) -{ - struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); - struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - - ivb->dma_addr = 0; - ipu6_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); -} - -static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, - unsigned int *num_planes, unsigned int sizes[], - struct device *alloc_devs[]) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - u32 size = ipu6_isys_get_data_size(av); - - /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ - if (!*num_planes) { - sizes[0] = size; - } else if (sizes[0] < size) { - dev_dbg(dev, "%s: queue setup: size %u < %u\n", - av->vdev.name, sizes[0], size); - return -EINVAL; - } - - *num_planes = 1; - - return 0; -} - -static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - u32 bytesperline = ipu6_isys_get_bytes_per_line(av); - u32 height = ipu6_isys_get_frame_height(av); - u32 size = ipu6_isys_get_data_size(av); - - dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n", - av->vdev.name, size, vb2_plane_size(vb, 0)); - - if (size > vb2_plane_size(vb, 0)) - return -EINVAL; - - vb2_set_plane_payload(vb, 0, bytesperline * height); - - return 0; -} - -/* - * Queue a buffer list back to incoming or active queues. The buffers - * are removed from the buffer list. 
- */ -void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, - unsigned long op_flags, - enum vb2_buffer_state state) -{ - struct ipu6_isys_buffer *ib, *ib_safe; - unsigned long flags; - bool first = true; - - if (!bl) - return; - - WARN_ON_ONCE(!bl->nbufs); - WARN_ON_ONCE(op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE && - op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING); - - list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { - struct ipu6_isys_video *av; - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - struct ipu6_isys_queue *aq = - vb2_queue_to_isys_queue(vb->vb2_queue); - struct device *dev; - - av = ipu6_isys_queue_to_video(aq); - dev = &av->isys->adev->auxdev.dev; - spin_lock_irqsave(&aq->lock, flags); - list_del(&ib->head); - if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE) - list_add(&ib->head, &aq->active); - else if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING) - list_add_tail(&ib->head, &aq->incoming); - spin_unlock_irqrestore(&aq->lock, flags); - - if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_SET_STATE) - vb2_buffer_done(vb, state); - - if (first) { - dev_dbg(dev, - "queue buf list %p flags %lx, s %d, %d bufs\n", - bl, op_flags, state, bl->nbufs); - first = false; - } - - bl->nbufs--; - } - - WARN_ON(bl->nbufs); -} - -/* - * flush_firmware_streamon_fail() - Flush in cases where requests may - * have been queued to firmware and the *firmware streamon fails for a - * reason or another. 
- */ -static void flush_firmware_streamon_fail(struct ipu6_isys_stream *stream) -{ - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_queue *aq; - unsigned long flags; - - lockdep_assert_held(&stream->mutex); - - list_for_each_entry(aq, &stream->queues, node) { - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_buffer *ib, *ib_safe; - - spin_lock_irqsave(&aq->lock, flags); - list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { - struct vb2_buffer *vb = - ipu6_isys_buffer_to_vb2_buffer(ib); - - list_del(&ib->head); - if (av->streaming) { - dev_dbg(dev, - "%s: queue buffer %u back to incoming\n", - av->vdev.name, vb->index); - /* Queue already streaming, return to driver. */ - list_add(&ib->head, &aq->incoming); - continue; - } - /* Queue not yet streaming, return to user. */ - dev_dbg(dev, "%s: return %u back to videobuf2\n", - av->vdev.name, vb->index); - vb2_buffer_done(ipu6_isys_buffer_to_vb2_buffer(ib), - VB2_BUF_STATE_QUEUED); - } - spin_unlock_irqrestore(&aq->lock, flags); - } -} - -/* - * Attempt obtaining a buffer list from the incoming queues, a list of buffers - * that contains one entry from each video buffer queue. If a buffer can't be - * obtained from every queue, the buffers are returned back to the queue. 
- */ -static int buffer_list_get(struct ipu6_isys_stream *stream, - struct ipu6_isys_buffer_list *bl) -{ - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_queue *aq; - unsigned long flags; - unsigned long buf_flag = IPU6_ISYS_BUFFER_LIST_FL_INCOMING; - - bl->nbufs = 0; - INIT_LIST_HEAD(&bl->head); - - list_for_each_entry(aq, &stream->queues, node) { - struct ipu6_isys_buffer *ib; - - spin_lock_irqsave(&aq->lock, flags); - if (list_empty(&aq->incoming)) { - spin_unlock_irqrestore(&aq->lock, flags); - if (!list_empty(&bl->head)) - ipu6_isys_buffer_list_queue(bl, buf_flag, 0); - return -ENODATA; - } - - ib = list_last_entry(&aq->incoming, - struct ipu6_isys_buffer, head); - - dev_dbg(dev, "buffer: %s: buffer %u\n", - ipu6_isys_queue_to_video(aq)->vdev.name, - ipu6_isys_buffer_to_vb2_buffer(ib)->index); - list_del(&ib->head); - list_add(&ib->head, &bl->head); - spin_unlock_irqrestore(&aq->lock, flags); - - bl->nbufs++; - } - - dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); - - return 0; -} - -static void -ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb, - struct ipu6_fw_isys_frame_buff_set_abi *set) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - - set->output_pins[aq->fw_output].addr = ivb->dma_addr; - set->output_pins[aq->fw_output].out_buf_id = vb->index + 1; -} - -/* - * Convert a buffer list to a isys fw ABI framebuffer set. The - * buffer list is not modified. 
- */ -#define IPU6_ISYS_FRAME_NUM_THRESHOLD (30) -void -ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, - struct ipu6_isys_stream *stream, - struct ipu6_isys_buffer_list *bl) -{ - struct ipu6_isys_buffer *ib; - - WARN_ON(!bl->nbufs); - - set->send_irq_sof = 1; - set->send_resp_sof = 1; - set->send_irq_eof = 0; - set->send_resp_eof = 0; - - if (stream->streaming) - set->send_irq_capture_ack = 0; - else - set->send_irq_capture_ack = 1; - set->send_irq_capture_done = 0; - - set->send_resp_capture_ack = 1; - set->send_resp_capture_done = 1; - if (atomic_read(&stream->sequence) >= IPU6_ISYS_FRAME_NUM_THRESHOLD) { - set->send_resp_capture_ack = 0; - set->send_resp_capture_done = 0; - } - - list_for_each_entry(ib, &bl->head, head) { - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - - ipu6_isys_buf_to_fw_frame_buf_pin(vb, set); - } -} - -/* Start streaming for real. The buffer list must be available. */ -static int ipu6_isys_stream_start(struct ipu6_isys_video *av, - struct ipu6_isys_buffer_list *bl, bool error) -{ - struct ipu6_isys_stream *stream = av->stream; - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_buffer_list __bl; - int ret; - - mutex_lock(&stream->isys->stream_mutex); - ret = ipu6_isys_video_set_streaming(av, 1, bl); - mutex_unlock(&stream->isys->stream_mutex); - if (ret) - goto out_requeue; - - stream->streaming = 1; - - bl = &__bl; - - do { - struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; - struct isys_fw_msgs *msg; - u16 send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; - - ret = buffer_list_get(stream, bl); - if (ret < 0) - break; - - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) - return -ENOMEM; - - buf = &msg->fw_msg.frame; - ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); - ipu6_fw_isys_dump_frame_buff_set(dev, buf, - stream->nr_output_pins); - ipu6_isys_buffer_list_queue(bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, - 0); - ret = ipu6_fw_isys_complex_cmd(stream->isys, - 
stream->stream_handle, buf, - msg->dma_addr, sizeof(*buf), - send_type); - } while (!WARN_ON(ret)); - - return 0; - -out_requeue: - if (bl && bl->nbufs) - ipu6_isys_buffer_list_queue(bl, - IPU6_ISYS_BUFFER_LIST_FL_INCOMING | - (error ? - IPU6_ISYS_BUFFER_LIST_FL_SET_STATE : - 0), error ? VB2_BUF_STATE_ERROR : - VB2_BUF_STATE_QUEUED); - flush_firmware_streamon_fail(stream); - - return ret; -} - -static void buf_queue(struct vb2_buffer *vb) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - struct ipu6_isys_buffer *ib = &ivb->ib; - struct device *dev = &av->isys->adev->auxdev.dev; - struct media_pipeline *media_pipe = - media_entity_pipeline(&av->vdev.entity); - struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; - struct ipu6_isys_stream *stream = av->stream; - struct ipu6_isys_buffer_list bl; - struct isys_fw_msgs *msg; - unsigned long flags; - dma_addr_t dma; - int ret; - - dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name); - - dma = ivb->dma_addr; - dev_dbg(dev, "iova: iova %pad\n", &dma); - - spin_lock_irqsave(&aq->lock, flags); - list_add(&ib->head, &aq->incoming); - spin_unlock_irqrestore(&aq->lock, flags); - - if (!media_pipe || !vb->vb2_queue->start_streaming_called) { - dev_dbg(dev, "media pipeline is not ready for %s\n", - av->vdev.name); - return; - } - - mutex_lock(&stream->mutex); - - if (stream->nr_streaming != stream->nr_queues) { - dev_dbg(dev, "not streaming yet, adding to incoming\n"); - goto out; - } - - /* - * We just put one buffer to the incoming list of this queue - * (above). Let's see whether all queues in the pipeline would - * have a buffer. 
- */ - ret = buffer_list_get(stream, &bl); - if (ret < 0) { - dev_dbg(dev, "No buffers available\n"); - goto out; - } - - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) { - ret = -ENOMEM; - goto out; - } - - buf = &msg->fw_msg.frame; - ipu6_isys_buf_to_fw_frame_buf(buf, stream, &bl); - ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); - - if (!stream->streaming) { - ret = ipu6_isys_stream_start(av, &bl, true); - if (ret) - dev_err(dev, "stream start failed.\n"); - goto out; - } - - /* - * We must queue the buffers in the buffer list to the - * appropriate video buffer queues BEFORE passing them to the - * firmware since we could get a buffer event back before we - * have queued them ourselves to the active queue. - */ - ipu6_isys_buffer_list_queue(&bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); - - ret = ipu6_fw_isys_complex_cmd(stream->isys, stream->stream_handle, - buf, msg->dma_addr, sizeof(*buf), - IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); - if (ret < 0) - dev_err(dev, "send stream capture failed\n"); - -out: - mutex_unlock(&stream->mutex); -} - -static int ipu6_isys_link_fmt_validate(struct ipu6_isys_queue *aq) -{ - struct v4l2_mbus_framefmt format; - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - struct media_pad *remote_pad = - media_pad_remote_pad_first(av->vdev.entity.pads); - struct v4l2_subdev *sd; - u32 r_stream, code; - int ret; - - if (!remote_pad) - return -ENOTCONN; - - sd = media_entity_to_v4l2_subdev(remote_pad->entity); - r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, remote_pad->index); - - ret = ipu6_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream, - &format); - - if (ret) { - dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n", - sd->entity.name, remote_pad->index, r_stream); - return ret; - } - - if (format.width != ipu6_isys_get_frame_width(av) || - format.height != ipu6_isys_get_frame_height(av)) { - dev_dbg(dev, "wrong width or height %ux%u 
(%ux%u expected)\n", - ipu6_isys_get_frame_width(av), - ipu6_isys_get_frame_height(av), format.width, - format.height); - return -EINVAL; - } - - code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; - if (format.code != code) { - dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n", - code, format.code); - return -EINVAL; - } - - return 0; -} - -static void return_buffers(struct ipu6_isys_queue *aq, - enum vb2_buffer_state state) -{ - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_buffer *ib; - bool need_reset = false; - unsigned long flags; - - spin_lock_irqsave(&aq->lock, flags); - while (!list_empty(&aq->incoming)) { - struct vb2_buffer *vb; - - ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, - head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); - } - - /* - * Something went wrong (FW crash / HW hang / not all buffers - * returned from isys) if there are still buffers queued in active - * queue. We have to clean up places a bit. 
- */ - while (!list_empty(&aq->active)) { - struct vb2_buffer *vb; - - ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, - head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); - - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); - need_reset = true; - } - - spin_unlock_irqrestore(&aq->lock, flags); - - if (need_reset) { - mutex_lock(&av->isys->mutex); - av->isys->need_reset = true; - mutex_unlock(&av->isys->mutex); - } -} - -static void ipu6_isys_stream_cleanup(struct ipu6_isys_video *av) -{ - video_device_pipeline_stop(&av->vdev); - ipu6_isys_put_stream(av->stream); - av->stream = NULL; -} - -static int start_streaming(struct vb2_queue *q, unsigned int count) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - struct ipu6_isys_buffer_list __bl, *bl = NULL; - struct ipu6_isys_stream *stream; - struct media_entity *source_entity = NULL; - int nr_queues, ret; - - dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n", - av->vdev.name, ipu6_isys_get_frame_width(av), - ipu6_isys_get_frame_height(av), pfmt->css_pixelformat); - - ret = ipu6_isys_setup_video(av, &source_entity, &nr_queues); - if (ret < 0) { - dev_dbg(dev, "failed to setup video\n"); - goto out_return_buffers; - } - - ret = ipu6_isys_link_fmt_validate(aq); - if (ret) { - dev_dbg(dev, - "%s: link format validation failed (%d)\n", - av->vdev.name, ret); - goto out_pipeline_stop; - } - - ret = ipu6_isys_fw_open(av->isys); - if (ret) - goto out_pipeline_stop; - - stream = av->stream; - mutex_lock(&stream->mutex); - if (!stream->nr_streaming) { - ret = ipu6_isys_video_prepare_stream(av, source_entity, - nr_queues); - if (ret) - goto out_fw_close; - } - - stream->nr_streaming++; - 
dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming, - stream->nr_queues); - - list_add(&aq->node, &stream->queues); - ipu6_isys_configure_stream_watermark(av, true); - ipu6_isys_update_stream_watermark(av, true); - - if (stream->nr_streaming != stream->nr_queues) - goto out; - - bl = &__bl; - ret = buffer_list_get(stream, bl); - if (ret < 0) { - dev_warn(dev, "no buffer available, DRIVER BUG?\n"); - goto out; - } - - ret = ipu6_isys_stream_start(av, bl, false); - if (ret) - goto out_stream_start; - -out: - mutex_unlock(&stream->mutex); - - return 0; - -out_stream_start: - ipu6_isys_update_stream_watermark(av, false); - list_del(&aq->node); - stream->nr_streaming--; - -out_fw_close: - mutex_unlock(&stream->mutex); - ipu6_isys_fw_close(av->isys); - -out_pipeline_stop: - ipu6_isys_stream_cleanup(av); - -out_return_buffers: - return_buffers(aq, VB2_BUF_STATE_QUEUED); - - return ret; -} - -static void stop_streaming(struct vb2_queue *q) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_stream *stream = av->stream; - - mutex_lock(&stream->mutex); - - ipu6_isys_update_stream_watermark(av, false); - - mutex_lock(&av->isys->stream_mutex); - if (stream->nr_streaming == stream->nr_queues && stream->streaming) - ipu6_isys_video_set_streaming(av, 0, NULL); - mutex_unlock(&av->isys->stream_mutex); - - stream->nr_streaming--; - list_del(&aq->node); - stream->streaming = 0; - mutex_unlock(&stream->mutex); - - ipu6_isys_stream_cleanup(av); - - return_buffers(aq, VB2_BUF_STATE_ERROR); - - ipu6_isys_fw_close(av->isys); -} - -static unsigned int -get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream, - struct ipu6_fw_isys_resp_info_abi *info) -{ - u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0]; - struct ipu6_isys *isys = stream->isys; - struct device *dev = &isys->adev->auxdev.dev; - unsigned int i; - - /* - * The timestamp is invalid as no TSC in some FPGA platform, - 
* so get the sequence from pipeline directly in this case. - */ - if (time == 0) - return atomic_read(&stream->sequence) - 1; - - for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) - if (time == stream->seq[i].timestamp) { - dev_dbg(dev, "sof: using seq nr %u for ts %llu\n", - stream->seq[i].sequence, time); - return stream->seq[i].sequence; - } - - for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) - dev_dbg(dev, "sof: sequence %u, timestamp value %llu\n", - stream->seq[i].sequence, stream->seq[i].timestamp); - - return 0; -} - -static u64 get_sof_ns_delta(struct ipu6_isys_video *av, - struct ipu6_fw_isys_resp_info_abi *info) -{ - struct ipu6_bus_device *adev = av->isys->adev; - struct ipu6_device *isp = adev->isp; - u64 delta, tsc_now; - - ipu6_buttress_tsc_read(isp, &tsc_now); - if (!tsc_now) - return 0; - - delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]); - - return ipu6_buttress_tsc_ticks_to_ns(delta, isp); -} - -void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, - struct ipu6_fw_isys_resp_info_abi *info) -{ - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - struct ipu6_isys_stream *stream = av->stream; - u64 ns; - u32 sequence; - - ns = ktime_get_ns() - get_sof_ns_delta(av, info); - sequence = get_sof_sequence_by_timestamp(stream, info); - - vbuf->vb2_buf.timestamp = ns; - vbuf->sequence = sequence; - - dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", - av->vdev.name, ktime_get_ns(), sequence); - dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index, - vbuf->vb2_buf.timestamp); -} - -void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) -{ - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - - if (atomic_read(&ib->str2mmio_flag)) { - 
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); - /* - * Operation on buffer is ended with error and will be reported - * to the userspace when it is de-queued - */ - atomic_set(&ib->str2mmio_flag, 0); - } else { - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - } -} - -void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, - struct ipu6_fw_isys_resp_info_abi *info) -{ - struct ipu6_isys_queue *aq = stream->output_pins[info->pin_id].aq; - struct ipu6_isys *isys = stream->isys; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_isys_buffer *ib; - struct vb2_buffer *vb; - unsigned long flags; - bool first = true; - struct vb2_v4l2_buffer *buf; - - spin_lock_irqsave(&aq->lock, flags); - if (list_empty(&aq->active)) { - spin_unlock_irqrestore(&aq->lock, flags); - dev_err(dev, "active queue empty\n"); - return; - } - - list_for_each_entry_reverse(ib, &aq->active, head) { - struct ipu6_isys_video_buffer *ivb; - struct vb2_v4l2_buffer *vvb; - dma_addr_t addr; - - vb = ipu6_isys_buffer_to_vb2_buffer(ib); - vvb = to_vb2_v4l2_buffer(vb); - ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb); - addr = ivb->dma_addr; - - if (info->pin.addr != addr) { - if (first) - dev_err(dev, "Unexpected buffer address %pad\n", - &addr); - first = false; - continue; - } - - if (info->error_info.error == - IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) { - /* - * Check for error message: - * 'IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' - */ - atomic_set(&ib->str2mmio_flag, 1); - } - dev_dbg(dev, "buffer: found buffer %pad\n", &addr); - - buf = to_vb2_v4l2_buffer(vb); - buf->field = V4L2_FIELD_NONE; - - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - ipu6_isys_buf_calc_sequence_time(ib, info); - - ipu6_isys_queue_buf_done(ib); - - return; - } - - dev_err(dev, "Failed to find a matching video buffer"); - - spin_unlock_irqrestore(&aq->lock, flags); -} - -static const struct vb2_ops ipu6_isys_queue_ops = { - .queue_setup = ipu6_isys_queue_setup, - .wait_prepare = 
vb2_ops_wait_prepare, - .wait_finish = vb2_ops_wait_finish, - .buf_init = ipu6_isys_buf_init, - .buf_prepare = ipu6_isys_buf_prepare, - .buf_cleanup = ipu6_isys_buf_cleanup, - .start_streaming = start_streaming, - .stop_streaming = stop_streaming, - .buf_queue = buf_queue, -}; - -int ipu6_isys_queue_init(struct ipu6_isys_queue *aq) -{ - struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys; - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_bus_device *adev = isys->adev; - int ret; - - /* no support for userptr */ - if (!aq->vbq.io_modes) - aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF; - - aq->vbq.drv_priv = isys; - aq->vbq.ops = &ipu6_isys_queue_ops; - aq->vbq.lock = &av->mutex; - aq->vbq.mem_ops = &vb2_dma_sg_memops; - aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - aq->vbq.min_queued_buffers = 1; - aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - - ret = vb2_queue_init(&aq->vbq); - if (ret) - return ret; - - aq->dev = &adev->auxdev.dev; - aq->vbq.dev = &adev->isp->pdev->dev; - spin_lock_init(&aq->lock); - INIT_LIST_HEAD(&aq->active); - INIT_LIST_HEAD(&aq->incoming); - - return 0; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h deleted file mode 100644 index fe8fc79..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_QUEUE_H -#define IPU6_ISYS_QUEUE_H - -#include -#include -#include -#include -#include - -#include - -#include "ipu6-fw-isys.h" -#include "ipu6-isys-video.h" - -struct ipu6_isys_stream; - -struct ipu6_isys_queue { - struct vb2_queue vbq; - struct list_head node; - struct device *dev; - /* - * @lock: serialise access to queued and pre_streamon_queued - */ - spinlock_t lock; - struct list_head active; - struct list_head incoming; - unsigned int fw_output; -}; - -struct 
ipu6_isys_buffer { - struct list_head head; - atomic_t str2mmio_flag; -}; - -struct ipu6_isys_video_buffer { - struct vb2_v4l2_buffer vb_v4l2; - struct ipu6_isys_buffer ib; - dma_addr_t dma_addr; -}; - -#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) -#define IPU6_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) -#define IPU6_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) - -struct ipu6_isys_buffer_list { - struct list_head head; - unsigned int nbufs; -}; - -#define vb2_queue_to_isys_queue(__vb2) \ - container_of(__vb2, struct ipu6_isys_queue, vbq) - -#define ipu6_isys_to_isys_video_buffer(__ib) \ - container_of(__ib, struct ipu6_isys_video_buffer, ib) - -#define vb2_buffer_to_ipu6_isys_video_buffer(__vvb) \ - container_of(__vvb, struct ipu6_isys_video_buffer, vb_v4l2) - -#define ipu6_isys_buffer_to_vb2_buffer(__ib) \ - (&ipu6_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) - -void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, - unsigned long op_flags, - enum vb2_buffer_state state); -void -ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, - struct ipu6_isys_stream *stream, - struct ipu6_isys_buffer_list *bl); -void -ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, - struct ipu6_fw_isys_resp_info_abi *info); -void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib); -void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, - struct ipu6_fw_isys_resp_info_abi *info); -int ipu6_isys_queue_init(struct ipu6_isys_queue *aq); -#endif /* IPU6_ISYS_QUEUE_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c deleted file mode 100644 index 0a06de5..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c +++ /dev/null @@ -1,403 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include - -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include 
"ipu6-isys.h" -#include "ipu6-isys-subdev.h" - -unsigned int ipu6_isys_mbus_code_to_bpp(u32 code) -{ - switch (code) { - case MEDIA_BUS_FMT_RGB888_1X24: - case MEDIA_BUS_FMT_META_24: - return 24; - case MEDIA_BUS_FMT_RGB565_1X16: - case MEDIA_BUS_FMT_UYVY8_1X16: - case MEDIA_BUS_FMT_YUYV8_1X16: - case MEDIA_BUS_FMT_META_16: - return 16; - case MEDIA_BUS_FMT_SBGGR12_1X12: - case MEDIA_BUS_FMT_SGBRG12_1X12: - case MEDIA_BUS_FMT_SGRBG12_1X12: - case MEDIA_BUS_FMT_SRGGB12_1X12: - case MEDIA_BUS_FMT_META_12: - return 12; - case MEDIA_BUS_FMT_SBGGR10_1X10: - case MEDIA_BUS_FMT_SGBRG10_1X10: - case MEDIA_BUS_FMT_SGRBG10_1X10: - case MEDIA_BUS_FMT_SRGGB10_1X10: - case MEDIA_BUS_FMT_META_10: - return 10; - case MEDIA_BUS_FMT_SBGGR8_1X8: - case MEDIA_BUS_FMT_SGBRG8_1X8: - case MEDIA_BUS_FMT_SGRBG8_1X8: - case MEDIA_BUS_FMT_SRGGB8_1X8: - case MEDIA_BUS_FMT_META_8: - return 8; - default: - WARN_ON(1); - return 8; - } -} - -unsigned int ipu6_isys_mbus_code_to_mipi(u32 code) -{ - switch (code) { - case MEDIA_BUS_FMT_RGB565_1X16: - return MIPI_CSI2_DT_RGB565; - case MEDIA_BUS_FMT_RGB888_1X24: - return MIPI_CSI2_DT_RGB888; - case MEDIA_BUS_FMT_UYVY8_1X16: - case MEDIA_BUS_FMT_YUYV8_1X16: - return MIPI_CSI2_DT_YUV422_8B; - case MEDIA_BUS_FMT_SBGGR16_1X16: - case MEDIA_BUS_FMT_SGBRG16_1X16: - case MEDIA_BUS_FMT_SGRBG16_1X16: - case MEDIA_BUS_FMT_SRGGB16_1X16: - return MIPI_CSI2_DT_RAW16; - case MEDIA_BUS_FMT_SBGGR12_1X12: - case MEDIA_BUS_FMT_SGBRG12_1X12: - case MEDIA_BUS_FMT_SGRBG12_1X12: - case MEDIA_BUS_FMT_SRGGB12_1X12: - return MIPI_CSI2_DT_RAW12; - case MEDIA_BUS_FMT_SBGGR10_1X10: - case MEDIA_BUS_FMT_SGBRG10_1X10: - case MEDIA_BUS_FMT_SGRBG10_1X10: - case MEDIA_BUS_FMT_SRGGB10_1X10: - return MIPI_CSI2_DT_RAW10; - case MEDIA_BUS_FMT_SBGGR8_1X8: - case MEDIA_BUS_FMT_SGBRG8_1X8: - case MEDIA_BUS_FMT_SGRBG8_1X8: - case MEDIA_BUS_FMT_SRGGB8_1X8: - return MIPI_CSI2_DT_RAW8; - default: - /* return unavailable MIPI data type - 0x3f */ - WARN_ON(1); - return 0x3f; - } -} - -bool 
ipu6_isys_is_bayer_format(u32 code) -{ - switch (ipu6_isys_mbus_code_to_mipi(code)) { - case MIPI_CSI2_DT_RAW8: - case MIPI_CSI2_DT_RAW10: - case MIPI_CSI2_DT_RAW12: - case MIPI_CSI2_DT_RAW14: - case MIPI_CSI2_DT_RAW16: - case MIPI_CSI2_DT_RAW20: - case MIPI_CSI2_DT_RAW24: - case MIPI_CSI2_DT_RAW28: - return true; - default: - return false; - } -} - -u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y) -{ - static const u32 code_map[] = { - MEDIA_BUS_FMT_SRGGB8_1X8, - MEDIA_BUS_FMT_SGRBG8_1X8, - MEDIA_BUS_FMT_SGBRG8_1X8, - MEDIA_BUS_FMT_SBGGR8_1X8, - MEDIA_BUS_FMT_SRGGB10_1X10, - MEDIA_BUS_FMT_SGRBG10_1X10, - MEDIA_BUS_FMT_SGBRG10_1X10, - MEDIA_BUS_FMT_SBGGR10_1X10, - MEDIA_BUS_FMT_SRGGB12_1X12, - MEDIA_BUS_FMT_SGRBG12_1X12, - MEDIA_BUS_FMT_SGBRG12_1X12, - MEDIA_BUS_FMT_SBGGR12_1X12, - MEDIA_BUS_FMT_SRGGB16_1X16, - MEDIA_BUS_FMT_SGRBG16_1X16, - MEDIA_BUS_FMT_SGBRG16_1X16, - MEDIA_BUS_FMT_SBGGR16_1X16, - }; - u32 i; - - for (i = 0; i < ARRAY_SIZE(code_map); i++) - if (code_map[i] == code) - break; - - if (WARN_ON(i == ARRAY_SIZE(code_map))) - return code; - - return code_map[i ^ (((y & 1) << 1) | (x & 1))]; -} - -int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_format *format) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct v4l2_mbus_framefmt *fmt; - struct v4l2_rect *crop; - u32 code = asd->supported_codes[0]; - u32 other_pad, other_stream; - unsigned int i; - int ret; - - /* No transcoding, source and sink formats must match. 
*/ - if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) && - sd->entity.num_pads > 1) - return v4l2_subdev_get_fmt(sd, state, format); - - format->format.width = clamp(format->format.width, IPU6_ISYS_MIN_WIDTH, - IPU6_ISYS_MAX_WIDTH); - format->format.height = clamp(format->format.height, - IPU6_ISYS_MIN_HEIGHT, - IPU6_ISYS_MAX_HEIGHT); - - for (i = 0; asd->supported_codes[i]; i++) { - if (asd->supported_codes[i] == format->format.code) { - code = asd->supported_codes[i]; - break; - } - } - format->format.code = code; - format->format.field = V4L2_FIELD_NONE; - - /* Store the format and propagate it to the source pad. */ - fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); - if (!fmt) - return -EINVAL; - - *fmt = format->format; - - if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK)) - return 0; - - /* propagate format to following source pad */ - fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad, - format->stream); - if (!fmt) - return -EINVAL; - - *fmt = format->format; - - ret = v4l2_subdev_routing_find_opposite_end(&state->routing, - format->pad, - format->stream, - &other_pad, - &other_stream); - if (ret) - return -EINVAL; - - crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream); - /* reset crop */ - crop->left = 0; - crop->top = 0; - crop->width = fmt->width; - crop->height = fmt->height; - - return 0; -} - -int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_mbus_code_enum *code) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - const u32 *supported_codes = asd->supported_codes; - u32 index; - - for (index = 0; supported_codes[index]; index++) { - if (index == code->index) { - code->code = supported_codes[index]; - return 0; - } - } - - return -EINVAL; -} - -static int subdev_set_routing(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_krouting *routing) -{ - static const 
struct v4l2_mbus_framefmt format = { - .width = 4096, - .height = 3072, - .code = MEDIA_BUS_FMT_SGRBG10_1X10, - .field = V4L2_FIELD_NONE, - }; - int ret; - - ret = v4l2_subdev_routing_validate(sd, routing, - V4L2_SUBDEV_ROUTING_ONLY_1_TO_1); - if (ret) - return ret; - - return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format); -} - -int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_mbus_framefmt *format) -{ - struct v4l2_mbus_framefmt *fmt; - struct v4l2_subdev_state *state; - - if (!sd || !format) - return -EINVAL; - - state = v4l2_subdev_lock_and_get_active_state(sd); - fmt = v4l2_subdev_state_get_format(state, pad, stream); - if (fmt) - *format = *fmt; - v4l2_subdev_unlock_state(state); - - return fmt ? 0 : -EINVAL; -} - -int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_rect *crop) -{ - struct v4l2_subdev_state *state; - struct v4l2_rect *rect; - - if (!sd || !crop) - return -EINVAL; - - state = v4l2_subdev_lock_and_get_active_state(sd); - rect = v4l2_subdev_state_get_crop(state, pad, stream); - if (rect) - *crop = *rect; - v4l2_subdev_unlock_state(state); - - return rect ? 
0 : -EINVAL; -} - -u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad) -{ - struct v4l2_subdev_state *state; - struct v4l2_subdev_route *routes; - unsigned int i; - u32 source_stream = 0; - - state = v4l2_subdev_lock_and_get_active_state(sd); - if (!state) - return 0; - - routes = state->routing.routes; - for (i = 0; i < state->routing.num_routes; i++) { - if (routes[i].source_pad == pad) { - source_stream = routes[i].source_stream; - break; - } - } - - v4l2_subdev_unlock_state(state); - - return source_stream; -} - -static int ipu6_isys_subdev_init_state(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state) -{ - struct v4l2_subdev_route route = { - .sink_pad = 0, - .sink_stream = 0, - .source_pad = 1, - .source_stream = 0, - .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, - }; - struct v4l2_subdev_krouting routing = { - .num_routes = 1, - .routes = &route, - }; - - return subdev_set_routing(sd, state, &routing); -} - -int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - enum v4l2_subdev_format_whence which, - struct v4l2_subdev_krouting *routing) -{ - return subdev_set_routing(sd, state, routing); -} - -static const struct v4l2_subdev_internal_ops ipu6_isys_subdev_internal_ops = { - .init_state = ipu6_isys_subdev_init_state, -}; - -int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, - const struct v4l2_subdev_ops *ops, - unsigned int nr_ctrls, - unsigned int num_sink_pads, - unsigned int num_source_pads) -{ - unsigned int num_pads = num_sink_pads + num_source_pads; - unsigned int i; - int ret; - - v4l2_subdev_init(&asd->sd, ops); - - asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | - V4L2_SUBDEV_FL_HAS_EVENTS | - V4L2_SUBDEV_FL_STREAMS; - asd->sd.owner = THIS_MODULE; - asd->sd.dev = &asd->isys->adev->auxdev.dev; - asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; - asd->sd.internal_ops = &ipu6_isys_subdev_internal_ops; - - asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, - 
sizeof(*asd->pad), GFP_KERNEL); - if (!asd->pad) - return -ENOMEM; - - for (i = 0; i < num_sink_pads; i++) - asd->pad[i].flags = MEDIA_PAD_FL_SINK | - MEDIA_PAD_FL_MUST_CONNECT; - - for (i = num_sink_pads; i < num_pads; i++) - asd->pad[i].flags = MEDIA_PAD_FL_SOURCE; - - ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); - if (ret) - return ret; - - if (asd->ctrl_init) { - ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); - if (ret) - goto out_media_entity_cleanup; - - asd->ctrl_init(&asd->sd); - if (asd->ctrl_handler.error) { - ret = asd->ctrl_handler.error; - goto out_v4l2_ctrl_handler_free; - } - - asd->sd.ctrl_handler = &asd->ctrl_handler; - } - - asd->source = -1; - - return 0; - -out_v4l2_ctrl_handler_free: - v4l2_ctrl_handler_free(&asd->ctrl_handler); - -out_media_entity_cleanup: - media_entity_cleanup(&asd->sd.entity); - - return ret; -} - -void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd) -{ - media_entity_cleanup(&asd->sd.entity); - v4l2_ctrl_handler_free(&asd->ctrl_handler); -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h deleted file mode 100644 index 9ef8d95..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_SUBDEV_H -#define IPU6_ISYS_SUBDEV_H - -#include - -#include -#include -#include - -struct ipu6_isys; - -struct ipu6_isys_subdev { - struct v4l2_subdev sd; - struct ipu6_isys *isys; - u32 const *supported_codes; - struct media_pad *pad; - struct v4l2_ctrl_handler ctrl_handler; - void (*ctrl_init)(struct v4l2_subdev *sd); - int source; /* SSI stream source; -1 if unset */ -}; - -#define to_ipu6_isys_subdev(__sd) \ - container_of(__sd, struct ipu6_isys_subdev, sd) - -unsigned int ipu6_isys_mbus_code_to_bpp(u32 code); -unsigned int ipu6_isys_mbus_code_to_mipi(u32 code); 
-bool ipu6_isys_is_bayer_format(u32 code); -u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y); - -int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_format *fmt); -int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_mbus_code_enum - *code); -int ipu6_isys_subdev_link_validate(struct v4l2_subdev *sd, - struct media_link *link, - struct v4l2_subdev_format *source_fmt, - struct v4l2_subdev_format *sink_fmt); -u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad); -int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_mbus_framefmt *format); -int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_rect *crop); -int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - enum v4l2_subdev_format_whence which, - struct v4l2_subdev_krouting *routing); -int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, - const struct v4l2_subdev_ops *ops, - unsigned int nr_ctrls, - unsigned int num_sink_pads, - unsigned int num_source_pads); -void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd); -#endif /* IPU6_ISYS_SUBDEV_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c deleted file mode 100644 index 48388c0..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +++ /dev/null @@ -1,1392 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-cpd.h" -#include "ipu6-fw-isys.h" -#include "ipu6-isys.h" -#include 
"ipu6-isys-csi2.h" -#include "ipu6-isys-queue.h" -#include "ipu6-isys-video.h" -#include "ipu6-platform-regs.h" - -const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = { - { V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10, 
- IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, - IPU6_FW_ISYS_FRAME_FORMAT_UYVY}, - { V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, - IPU6_FW_ISYS_FRAME_FORMAT_YUYV}, - { V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, - IPU6_FW_ISYS_FRAME_FORMAT_RGB565 }, - { V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, - IPU6_FW_ISYS_FRAME_FORMAT_RGBA888 }, - { V4L2_META_FMT_GENERIC_8, 8, 8, MEDIA_BUS_FMT_META_8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8, true }, - { V4L2_META_FMT_GENERIC_CSI2_10, 10, 10, MEDIA_BUS_FMT_META_10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10, true }, - { V4L2_META_FMT_GENERIC_CSI2_12, 12, 12, MEDIA_BUS_FMT_META_12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12, true }, - { V4L2_META_FMT_GENERIC_CSI2_16, 16, 16, MEDIA_BUS_FMT_META_16, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16, true }, -}; - -static int video_open(struct file *file) -{ - struct ipu6_isys_video *av = video_drvdata(file); - struct ipu6_isys *isys = av->isys; - struct ipu6_bus_device *adev = isys->adev; - - mutex_lock(&isys->mutex); - if (isys->need_reset) { - mutex_unlock(&isys->mutex); - dev_warn(&adev->auxdev.dev, "isys power cycle required\n"); - return -EIO; - } - mutex_unlock(&isys->mutex); - - return v4l2_fh_open(file); -} - -const struct ipu6_isys_pixelformat * -ipu6_isys_get_isys_format(u32 pixelformat, u32 type) -{ - const struct ipu6_isys_pixelformat *default_pfmt = NULL; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { - const struct ipu6_isys_pixelformat *pfmt = &ipu6_isys_pfmts[i]; - - if (type && ((!pfmt->is_meta && - type != V4L2_BUF_TYPE_VIDEO_CAPTURE) || - (pfmt->is_meta && - type != V4L2_BUF_TYPE_META_CAPTURE))) - continue; - - if (!default_pfmt) - default_pfmt = pfmt; - - if (pfmt->pixelformat != pixelformat) - continue; - - return pfmt; - } - - return default_pfmt; -} - -static int 
ipu6_isys_vidioc_querycap(struct file *file, void *fh, - struct v4l2_capability *cap) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - strscpy(cap->driver, IPU6_ISYS_NAME, sizeof(cap->driver)); - strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); - - return 0; -} - -static int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh, - struct v4l2_fmtdesc *f) -{ - unsigned int i, num_found; - - for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { - if ((ipu6_isys_pfmts[i].is_meta && - f->type != V4L2_BUF_TYPE_META_CAPTURE) || - (!ipu6_isys_pfmts[i].is_meta && - f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) - continue; - - if (f->mbus_code && f->mbus_code != ipu6_isys_pfmts[i].code) - continue; - - if (num_found < f->index) { - num_found++; - continue; - } - - f->flags = 0; - f->pixelformat = ipu6_isys_pfmts[i].pixelformat; - - return 0; - } - - return -EINVAL; -} - -static int ipu6_isys_vidioc_enum_framesizes(struct file *file, void *fh, - struct v4l2_frmsizeenum *fsize) -{ - unsigned int i; - - if (fsize->index > 0) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { - if (fsize->pixel_format != ipu6_isys_pfmts[i].pixelformat) - continue; - - fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; - fsize->stepwise.min_width = IPU6_ISYS_MIN_WIDTH; - fsize->stepwise.max_width = IPU6_ISYS_MAX_WIDTH; - fsize->stepwise.min_height = IPU6_ISYS_MIN_HEIGHT; - fsize->stepwise.max_height = IPU6_ISYS_MAX_HEIGHT; - fsize->stepwise.step_width = 2; - fsize->stepwise.step_height = 2; - - return 0; - } - - return -EINVAL; -} - -static int ipu6_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - f->fmt.pix = av->pix_fmt; - - return 0; -} - -static int ipu6_isys_vidioc_g_fmt_meta_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - f->fmt.meta = av->meta_fmt; - - return 0; -} - -static void 
ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type, - u32 *format, u32 *width, u32 *height, - u32 *bytesperline, u32 *sizeimage) -{ - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(*format, type); - - *format = pfmt->pixelformat; - *width = clamp(*width, IPU6_ISYS_MIN_WIDTH, IPU6_ISYS_MAX_WIDTH); - *height = clamp(*height, IPU6_ISYS_MIN_HEIGHT, IPU6_ISYS_MAX_HEIGHT); - - if (pfmt->bpp != pfmt->bpp_packed) - *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE); - else - *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE); - - *bytesperline = ALIGN(*bytesperline, av->isys->line_align); - - /* - * (height + 1) * bytesperline due to a hardware issue: the DMA unit - * is a power of two, and a line should be transferred as few units - * as possible. The result is that up to line length more data than - * the image size may be transferred to memory after the image. - * Another limitation is the GDA allocation unit size. For low - * resolution it gives a bigger number. Use larger one to avoid - * memory corruption. 
- */ - *sizeimage = *bytesperline * *height + - max(*bytesperline, - av->isys->pdata->ipdata->isys_dma_overshoot); -} - -static void __ipu6_isys_vidioc_try_fmt_vid_cap(struct ipu6_isys_video *av, - struct v4l2_format *f) -{ - ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat, - &f->fmt.pix.width, &f->fmt.pix.height, - &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage); - - f->fmt.pix.field = V4L2_FIELD_NONE; - f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; - f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; - f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT; - f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT; -} - -static int ipu6_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - if (vb2_is_busy(&av->aq.vbq)) - return -EBUSY; - - __ipu6_isys_vidioc_try_fmt_vid_cap(av, f); - - return 0; -} - -static int __ipu6_isys_vidioc_try_fmt_meta_cap(struct ipu6_isys_video *av, - struct v4l2_format *f) -{ - ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.meta.dataformat, - &f->fmt.meta.width, &f->fmt.meta.height, - &f->fmt.meta.bytesperline, - &f->fmt.meta.buffersize); - - return 0; -} - -static int ipu6_isys_vidioc_try_fmt_meta_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - __ipu6_isys_vidioc_try_fmt_meta_cap(av, f); - - return 0; -} - -static int ipu6_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - ipu6_isys_vidioc_try_fmt_vid_cap(file, fh, f); - av->pix_fmt = f->fmt.pix; - - return 0; -} - -static int ipu6_isys_vidioc_s_fmt_meta_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - if (vb2_is_busy(&av->aq.vbq)) - return -EBUSY; - - ipu6_isys_vidioc_try_fmt_meta_cap(file, fh, f); - av->meta_fmt = f->fmt.meta; - - return 0; -} - -static int 
ipu6_isys_vidioc_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *p) -{ - struct ipu6_isys_video *av = video_drvdata(file); - int ret; - - av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type); - av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type); - - ret = vb2_queue_change_type(&av->aq.vbq, p->type); - if (ret) - return ret; - - return vb2_ioctl_reqbufs(file, priv, p); -} - -static int ipu6_isys_vidioc_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *p) -{ - struct ipu6_isys_video *av = video_drvdata(file); - int ret; - - av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type); - av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type); - - ret = vb2_queue_change_type(&av->aq.vbq, p->format.type); - if (ret) - return ret; - - return vb2_ioctl_create_bufs(file, priv, p); -} - -static int link_validate(struct media_link *link) -{ - struct ipu6_isys_video *av = - container_of(link->sink, struct ipu6_isys_video, pad); - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_subdev_state *s_state; - struct v4l2_subdev *s_sd; - struct v4l2_mbus_framefmt *s_fmt; - struct media_pad *s_pad; - u32 s_stream, code; - int ret = -EPIPE; - - if (!link->source->entity) - return ret; - - s_sd = media_entity_to_v4l2_subdev(link->source->entity); - s_state = v4l2_subdev_get_unlocked_active_state(s_sd); - if (!s_state) - return ret; - - dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n", - link->source->entity->name, link->source->index, - link->sink->entity->name); - - s_pad = media_pad_remote_pad_first(&av->pad); - s_stream = ipu6_isys_get_src_stream_by_src_pad(s_sd, s_pad->index); - - v4l2_subdev_lock_state(s_state); - - s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream); - if (!s_fmt) { - dev_err(dev, "failed to get source pad format\n"); - goto unlock; - } - - code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; - - if (s_fmt->width != ipu6_isys_get_frame_width(av) 
|| - s_fmt->height != ipu6_isys_get_frame_height(av) || - s_fmt->code != code) { - dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n", - s_fmt->width, s_fmt->height, s_fmt->code, - ipu6_isys_get_frame_width(av), - ipu6_isys_get_frame_height(av), code); - goto unlock; - } - - v4l2_subdev_unlock_state(s_state); - - return 0; -unlock: - v4l2_subdev_unlock_state(s_state); - - return ret; -} - -static void get_stream_opened(struct ipu6_isys_video *av) -{ - unsigned long flags; - - spin_lock_irqsave(&av->isys->streams_lock, flags); - av->isys->stream_opened++; - spin_unlock_irqrestore(&av->isys->streams_lock, flags); -} - -static void put_stream_opened(struct ipu6_isys_video *av) -{ - unsigned long flags; - - spin_lock_irqsave(&av->isys->streams_lock, flags); - av->isys->stream_opened--; - spin_unlock_irqrestore(&av->isys->streams_lock, flags); -} - -static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, - struct ipu6_fw_isys_stream_cfg_data_abi *cfg) -{ - struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad); - struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity); - struct ipu6_fw_isys_input_pin_info_abi *input_pin; - struct ipu6_fw_isys_output_pin_info_abi *output_pin; - struct ipu6_isys_stream *stream = av->stream; - struct ipu6_isys_queue *aq = &av->aq; - struct v4l2_mbus_framefmt fmt; - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - struct v4l2_rect v4l2_crop; - struct ipu6_isys *isys = av->isys; - struct device *dev = &isys->adev->auxdev.dev; - int input_pins = cfg->nof_input_pins++; - int output_pins; - u32 src_stream; - int ret; - - src_stream = ipu6_isys_get_src_stream_by_src_pad(sd, src_pad->index); - ret = ipu6_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream, - &fmt); - if (ret < 0) { - dev_err(dev, "can't get stream format (%d)\n", ret); - return ret; - } - - ret = ipu6_isys_get_stream_pad_crop(sd, src_pad->index, src_stream, - &v4l2_crop); - if (ret < 0) { - 
dev_err(dev, "can't get stream crop (%d)\n", ret); - return ret; - } - - input_pin = &cfg->input_pins[input_pins]; - input_pin->input_res.width = fmt.width; - input_pin->input_res.height = fmt.height; - input_pin->dt = av->dt; - input_pin->bits_per_pix = pfmt->bpp_packed; - input_pin->mapped_dt = 0x40; /* invalid mipi data type */ - input_pin->mipi_decompression = 0; - input_pin->capture_mode = IPU6_FW_ISYS_CAPTURE_MODE_REGULAR; - input_pin->mipi_store_mode = pfmt->bpp == pfmt->bpp_packed ? - IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER : - IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL; - input_pin->crop_first_and_last_lines = v4l2_crop.top & 1; - - output_pins = cfg->nof_output_pins++; - aq->fw_output = output_pins; - stream->output_pins[output_pins].pin_ready = ipu6_isys_queue_buf_ready; - stream->output_pins[output_pins].aq = aq; - - output_pin = &cfg->output_pins[output_pins]; - output_pin->input_pin_id = input_pins; - output_pin->output_res.width = ipu6_isys_get_frame_width(av); - output_pin->output_res.height = ipu6_isys_get_frame_height(av); - - output_pin->stride = ipu6_isys_get_bytes_per_line(av); - if (pfmt->bpp != pfmt->bpp_packed) - output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_RAW_SOC; - else - output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_MIPI; - output_pin->ft = pfmt->css_pixelformat; - output_pin->send_irq = 1; - memset(output_pin->ts_offsets, 0, sizeof(output_pin->ts_offsets)); - output_pin->s2m_pixel_soc_pixel_remapping = - S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; - output_pin->csi_be_soc_pixel_remapping = - CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; - - output_pin->snoopable = true; - output_pin->error_handling_enable = false; - output_pin->sensor_type = isys->sensor_type++; - if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) - isys->sensor_type = isys->pdata->ipdata->sensor_type_start; - - return 0; -} - -static int start_stream_firmware(struct ipu6_isys_video *av, - struct ipu6_isys_buffer_list *bl) -{ - struct 
ipu6_fw_isys_stream_cfg_data_abi *stream_cfg; - struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; - struct ipu6_isys_stream *stream = av->stream; - struct device *dev = &av->isys->adev->auxdev.dev; - struct isys_fw_msgs *msg = NULL; - struct ipu6_isys_queue *aq; - int ret, retout, tout; - u16 send_type; - - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) - return -ENOMEM; - - stream_cfg = &msg->fw_msg.stream; - stream_cfg->src = stream->stream_source; - stream_cfg->vc = stream->vc; - stream_cfg->isl_use = 0; - stream_cfg->sensor_type = IPU6_FW_ISYS_SENSOR_MODE_NORMAL; - - list_for_each_entry(aq, &stream->queues, node) { - struct ipu6_isys_video *__av = ipu6_isys_queue_to_video(aq); - - ret = ipu6_isys_fw_pin_cfg(__av, stream_cfg); - if (ret < 0) { - ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg); - return ret; - } - } - - ipu6_fw_isys_dump_stream_cfg(dev, stream_cfg); - - stream->nr_output_pins = stream_cfg->nof_output_pins; - - reinit_completion(&stream->stream_open_completion); - - ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, - stream_cfg, msg->dma_addr, - sizeof(*stream_cfg), - IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN); - if (ret < 0) { - dev_err(dev, "can't open stream (%d)\n", ret); - ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg); - return ret; - } - - get_stream_opened(av); - - tout = wait_for_completion_timeout(&stream->stream_open_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - - ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg); - - if (!tout) { - dev_err(dev, "stream open time out\n"); - ret = -ETIMEDOUT; - goto out_put_stream_opened; - } - if (stream->error) { - dev_err(dev, "stream open error: %d\n", stream->error); - ret = -EIO; - goto out_put_stream_opened; - } - dev_dbg(dev, "start stream: open complete\n"); - - if (bl) { - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) { - ret = -ENOMEM; - goto out_put_stream_opened; - } - buf = &msg->fw_msg.frame; - ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); - ipu6_isys_buffer_list_queue(bl, - 
IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); - } - - reinit_completion(&stream->stream_start_completion); - - if (bl) { - send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; - ipu6_fw_isys_dump_frame_buff_set(dev, buf, - stream_cfg->nof_output_pins); - ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, - buf, msg->dma_addr, - sizeof(*buf), send_type); - } else { - send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - send_type); - } - - if (ret < 0) { - dev_err(dev, "can't start streaming (%d)\n", ret); - goto out_stream_close; - } - - tout = wait_for_completion_timeout(&stream->stream_start_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) { - dev_err(dev, "stream start time out\n"); - ret = -ETIMEDOUT; - goto out_stream_close; - } - if (stream->error) { - dev_err(dev, "stream start error: %d\n", stream->error); - ret = -EIO; - goto out_stream_close; - } - dev_dbg(dev, "start stream: complete\n"); - - return 0; - -out_stream_close: - reinit_completion(&stream->stream_close_completion); - - retout = ipu6_fw_isys_simple_cmd(av->isys, - stream->stream_handle, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); - if (retout < 0) { - dev_dbg(dev, "can't close stream (%d)\n", retout); - goto out_put_stream_opened; - } - - tout = wait_for_completion_timeout(&stream->stream_close_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) - dev_err(dev, "stream close time out\n"); - else if (stream->error) - dev_err(dev, "stream close error: %d\n", stream->error); - else - dev_dbg(dev, "stream close complete\n"); - -out_put_stream_opened: - put_stream_opened(av); - - return ret; -} - -static void stop_streaming_firmware(struct ipu6_isys_video *av) -{ - struct device *dev = &av->isys->adev->auxdev.dev; - struct ipu6_isys_stream *stream = av->stream; - int ret, tout; - - reinit_completion(&stream->stream_stop_completion); - - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - 
IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH); - - if (ret < 0) { - dev_err(dev, "can't stop stream (%d)\n", ret); - return; - } - - tout = wait_for_completion_timeout(&stream->stream_stop_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) - dev_warn(dev, "stream stop time out\n"); - else if (stream->error) - dev_warn(dev, "stream stop error: %d\n", stream->error); - else - dev_dbg(dev, "stop stream: complete\n"); -} - -static void close_streaming_firmware(struct ipu6_isys_video *av) -{ - struct ipu6_isys_stream *stream = av->stream; - struct device *dev = &av->isys->adev->auxdev.dev; - int ret, tout; - - reinit_completion(&stream->stream_close_completion); - - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); - if (ret < 0) { - dev_err(dev, "can't close stream (%d)\n", ret); - return; - } - - tout = wait_for_completion_timeout(&stream->stream_close_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) - dev_warn(dev, "stream close time out\n"); - else if (stream->error) - dev_warn(dev, "stream close error: %d\n", stream->error); - else - dev_dbg(dev, "close stream: complete\n"); - - put_stream_opened(av); -} - -int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, - struct media_entity *source_entity, - int nr_queues) -{ - struct ipu6_isys_stream *stream = av->stream; - struct ipu6_isys_csi2 *csi2; - - if (WARN_ON(stream->nr_streaming)) - return -EINVAL; - - stream->nr_queues = nr_queues; - atomic_set(&stream->sequence, 0); - - stream->seq_index = 0; - memset(stream->seq, 0, sizeof(stream->seq)); - - if (WARN_ON(!list_empty(&stream->queues))) - return -EINVAL; - - stream->stream_source = stream->asd->source; - csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - csi2->receiver_errors = 0; - stream->source_entity = source_entity; - - dev_dbg(&av->isys->adev->auxdev.dev, - "prepare stream: external entity %s\n", - stream->source_entity->name); - - return 0; -} - -void 
ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, - bool state) -{ - struct ipu6_isys *isys = av->isys; - struct ipu6_isys_csi2 *csi2 = NULL; - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - struct device *dev = &isys->adev->auxdev.dev; - struct v4l2_mbus_framefmt format; - struct v4l2_subdev *esd; - struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 }; - unsigned int bpp, lanes; - s64 link_freq = 0; - u64 pixel_rate = 0; - int ret; - - if (!state) - return; - - esd = media_entity_to_v4l2_subdev(av->stream->source_entity); - - av->watermark.width = ipu6_isys_get_frame_width(av); - av->watermark.height = ipu6_isys_get_frame_height(av); - av->watermark.sram_gran_shift = isys->pdata->ipdata->sram_gran_shift; - av->watermark.sram_gran_size = isys->pdata->ipdata->sram_gran_size; - - ret = v4l2_g_ctrl(esd->ctrl_handler, &hb); - if (!ret && hb.value >= 0) - av->watermark.hblank = hb.value; - else - av->watermark.hblank = 0; - - csi2 = ipu6_isys_subdev_to_csi2(av->stream->asd); - link_freq = ipu6_isys_csi2_get_link_freq(csi2); - if (link_freq > 0) { - lanes = csi2->nlanes; - ret = ipu6_isys_get_stream_pad_fmt(&csi2->asd.sd, 0, - av->source_stream, &format); - if (!ret) { - bpp = ipu6_isys_mbus_code_to_bpp(format.code); - pixel_rate = mul_u64_u32_div(link_freq, lanes * 2, bpp); - } - } - - av->watermark.pixel_rate = pixel_rate; - - if (!pixel_rate) { - mutex_lock(&iwake_watermark->mutex); - iwake_watermark->force_iwake_disable = true; - mutex_unlock(&iwake_watermark->mutex); - dev_warn(dev, "unexpected pixel_rate from %s, disable iwake.\n", - av->stream->source_entity->name); - } -} - -static void calculate_stream_datarate(struct ipu6_isys_video *av) -{ - struct video_stream_watermark *watermark = &av->watermark; - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - u32 pages_per_line, pb_bytes_per_line, pixels_per_line, bytes_per_line; - u64 line_time_ns, stream_data_rate; 
- u16 shift, size; - - shift = watermark->sram_gran_shift; - size = watermark->sram_gran_size; - - pixels_per_line = watermark->width + watermark->hblank; - line_time_ns = div_u64(pixels_per_line * NSEC_PER_SEC, - watermark->pixel_rate); - bytes_per_line = watermark->width * pfmt->bpp / 8; - pages_per_line = DIV_ROUND_UP(bytes_per_line, size); - pb_bytes_per_line = pages_per_line << shift; - stream_data_rate = div64_u64(pb_bytes_per_line * 1000, line_time_ns); - - watermark->stream_data_rate = stream_data_rate; -} - -void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state) -{ - struct isys_iwake_watermark *iwake_watermark = - &av->isys->iwake_watermark; - - if (!av->watermark.pixel_rate) - return; - - if (state) { - calculate_stream_datarate(av); - mutex_lock(&iwake_watermark->mutex); - list_add(&av->watermark.stream_node, - &iwake_watermark->video_list); - mutex_unlock(&iwake_watermark->mutex); - } else { - av->watermark.stream_data_rate = 0; - mutex_lock(&iwake_watermark->mutex); - list_del(&av->watermark.stream_node); - mutex_unlock(&iwake_watermark->mutex); - } - - update_watermark_setting(av->isys); -} - -void ipu6_isys_put_stream(struct ipu6_isys_stream *stream) -{ - struct device *dev; - unsigned int i; - unsigned long flags; - - if (!stream) { - pr_err("ipu6-isys: no available stream\n"); - return; - } - - dev = &stream->isys->adev->auxdev.dev; - - spin_lock_irqsave(&stream->isys->streams_lock, flags); - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (&stream->isys->streams[i] == stream) { - if (stream->isys->streams_ref_count[i] > 0) - stream->isys->streams_ref_count[i]--; - else - dev_warn(dev, "invalid stream %d\n", i); - - break; - } - } - spin_unlock_irqrestore(&stream->isys->streams_lock, flags); -} - -static struct ipu6_isys_stream * -ipu6_isys_get_stream(struct ipu6_isys_video *av, struct ipu6_isys_subdev *asd) -{ - struct ipu6_isys_stream *stream = NULL; - struct ipu6_isys *isys = av->isys; - unsigned long flags; - 
unsigned int i; - u8 vc = av->vc; - - if (!isys) - return NULL; - - spin_lock_irqsave(&isys->streams_lock, flags); - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (isys->streams_ref_count[i] && isys->streams[i].vc == vc && - isys->streams[i].asd == asd) { - isys->streams_ref_count[i]++; - stream = &isys->streams[i]; - break; - } - } - - if (!stream) { - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (!isys->streams_ref_count[i]) { - isys->streams_ref_count[i]++; - stream = &isys->streams[i]; - stream->vc = vc; - stream->asd = asd; - break; - } - } - } - spin_unlock_irqrestore(&isys->streams_lock, flags); - - return stream; -} - -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle) -{ - unsigned long flags; - struct ipu6_isys_stream *stream = NULL; - - if (!isys) - return NULL; - - if (stream_handle >= IPU6_ISYS_MAX_STREAMS) { - dev_err(&isys->adev->auxdev.dev, - "stream_handle %d is invalid\n", stream_handle); - return NULL; - } - - spin_lock_irqsave(&isys->streams_lock, flags); - if (isys->streams_ref_count[stream_handle] > 0) { - isys->streams_ref_count[stream_handle]++; - stream = &isys->streams[stream_handle]; - } - spin_unlock_irqrestore(&isys->streams_lock, flags); - - return stream; -} - -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) -{ - struct ipu6_isys_stream *stream = NULL; - unsigned long flags; - unsigned int i; - - if (!isys) - return NULL; - - if (source < 0) { - dev_err(&isys->adev->auxdev.dev, - "query stream with invalid port number\n"); - return NULL; - } - - spin_lock_irqsave(&isys->streams_lock, flags); - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (!isys->streams_ref_count[i]) - continue; - - if (isys->streams[i].stream_source == source && - isys->streams[i].vc == vc) { - stream = &isys->streams[i]; - isys->streams_ref_count[i]++; - break; - } - } - spin_unlock_irqrestore(&isys->streams_lock, flags); - - return stream; -} - 
-static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av) -{ - struct media_pipeline *pipeline = - media_entity_pipeline(&__av->vdev.entity); - unsigned int i; - u64 stream_mask = 0; - - for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { - struct ipu6_isys_video *av = &__av->csi2->av[i]; - - if (pipeline == media_entity_pipeline(&av->vdev.entity)) - stream_mask |= BIT_ULL(av->source_stream); - } - - return stream_mask; -} - -int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - struct ipu6_isys_buffer_list *bl) -{ - struct v4l2_subdev_krouting *routing; - struct ipu6_isys_stream *stream = av->stream; - struct v4l2_subdev_state *subdev_state; - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_subdev *sd; - struct media_pad *r_pad; - u32 sink_pad, sink_stream; - u64 r_stream; - u64 stream_mask = 0; - int ret = 0; - - dev_dbg(dev, "set stream: %d\n", state); - - if (WARN(!stream->source_entity, "No source entity for stream\n")) - return -ENODEV; - - sd = &stream->asd->sd; - r_pad = media_pad_remote_pad_first(&av->pad); - r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); - - subdev_state = v4l2_subdev_lock_and_get_active_state(sd); - routing = &subdev_state->routing; - ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, - r_stream, &sink_pad, - &sink_stream); - v4l2_subdev_unlock_state(subdev_state); - if (ret) - return ret; - - stream_mask = get_stream_mask_by_pipeline(av); - if (!state) { - stop_streaming_firmware(av); - - /* stop sub-device which connects with video */ - dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", - sd->name, r_pad->index, stream_mask); - ret = v4l2_subdev_disable_streams(sd, r_pad->index, - stream_mask); - if (ret) { - dev_err(dev, "stream off %s failed with %d\n", sd->name, - ret); - return ret; - } - close_streaming_firmware(av); - } else { - ret = start_stream_firmware(av, bl); - if (ret) { - dev_err(dev, "start stream of firmware failed\n"); - return ret; - } 
- - /* start sub-device which connects with video */ - dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, - r_pad->index, stream_mask); - ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); - if (ret) { - dev_err(dev, "stream on %s failed with %d\n", sd->name, - ret); - goto out_media_entity_stop_streaming_firmware; - } - } - - av->streaming = state; - - return 0; - -out_media_entity_stop_streaming_firmware: - stop_streaming_firmware(av); - - return ret; -} - -static const struct v4l2_ioctl_ops ipu6_v4l2_ioctl_ops = { - .vidioc_querycap = ipu6_isys_vidioc_querycap, - .vidioc_enum_fmt_vid_cap = ipu6_isys_vidioc_enum_fmt, - .vidioc_enum_fmt_meta_cap = ipu6_isys_vidioc_enum_fmt, - .vidioc_enum_framesizes = ipu6_isys_vidioc_enum_framesizes, - .vidioc_g_fmt_vid_cap = ipu6_isys_vidioc_g_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = ipu6_isys_vidioc_s_fmt_vid_cap, - .vidioc_try_fmt_vid_cap = ipu6_isys_vidioc_try_fmt_vid_cap, - .vidioc_g_fmt_meta_cap = ipu6_isys_vidioc_g_fmt_meta_cap, - .vidioc_s_fmt_meta_cap = ipu6_isys_vidioc_s_fmt_meta_cap, - .vidioc_try_fmt_meta_cap = ipu6_isys_vidioc_try_fmt_meta_cap, - .vidioc_reqbufs = ipu6_isys_vidioc_reqbufs, - .vidioc_create_bufs = ipu6_isys_vidioc_create_bufs, - .vidioc_prepare_buf = vb2_ioctl_prepare_buf, - .vidioc_querybuf = vb2_ioctl_querybuf, - .vidioc_qbuf = vb2_ioctl_qbuf, - .vidioc_dqbuf = vb2_ioctl_dqbuf, - .vidioc_streamon = vb2_ioctl_streamon, - .vidioc_streamoff = vb2_ioctl_streamoff, - .vidioc_expbuf = vb2_ioctl_expbuf, -}; - -static const struct media_entity_operations entity_ops = { - .link_validate = link_validate, -}; - -static const struct v4l2_file_operations isys_fops = { - .owner = THIS_MODULE, - .poll = vb2_fop_poll, - .unlocked_ioctl = video_ioctl2, - .mmap = vb2_fop_mmap, - .open = video_open, - .release = vb2_fop_release, -}; - -int ipu6_isys_fw_open(struct ipu6_isys *isys) -{ - struct ipu6_bus_device *adev = isys->adev; - const struct ipu6_isys_internal_pdata *ipdata = isys->pdata->ipdata; 
- int ret; - - ret = pm_runtime_resume_and_get(&adev->auxdev.dev); - if (ret < 0) - return ret; - - mutex_lock(&isys->mutex); - - if (isys->ref_count++) - goto unlock; - - ipu6_configure_spc(adev->isp, &ipdata->hw_variant, - IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base, - adev->pkg_dir, adev->pkg_dir_dma_addr); - - /* - * Buffers could have been left to wrong queue at last closure. - * Move them now back to empty buffer queue. - */ - ipu6_cleanup_fw_msg_bufs(isys); - - if (isys->fwcom) { - /* - * Something went wrong in previous shutdown. As we are now - * restarting isys we can safely delete old context. - */ - dev_warn(&adev->auxdev.dev, "clearing old context\n"); - ipu6_fw_isys_cleanup(isys); - } - - ret = ipu6_fw_isys_init(isys, ipdata->num_parallel_streams); - if (ret < 0) - goto out; - -unlock: - mutex_unlock(&isys->mutex); - - return 0; - -out: - isys->ref_count--; - mutex_unlock(&isys->mutex); - pm_runtime_put(&adev->auxdev.dev); - - return ret; -} - -void ipu6_isys_fw_close(struct ipu6_isys *isys) -{ - mutex_lock(&isys->mutex); - - isys->ref_count--; - if (!isys->ref_count) { - ipu6_fw_isys_close(isys); - if (isys->fwcom) { - isys->need_reset = true; - dev_warn(&isys->adev->auxdev.dev, - "failed to close fw isys\n"); - } - } - - mutex_unlock(&isys->mutex); - - if (isys->need_reset) - pm_runtime_put_sync(&isys->adev->auxdev.dev); - else - pm_runtime_put(&isys->adev->auxdev.dev); -} - -int ipu6_isys_setup_video(struct ipu6_isys_video *av, - struct media_entity **source_entity, int *nr_queues) -{ - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_mbus_frame_desc_entry entry; - struct v4l2_subdev_route *route = NULL; - struct v4l2_subdev_route *r; - struct v4l2_subdev_state *state; - struct ipu6_isys_subdev *asd; - struct v4l2_subdev *remote_sd; - struct media_pipeline *pipeline; - struct media_pad *source_pad, *remote_pad; - int ret = 
-EINVAL; - - *nr_queues = 0; - - remote_pad = media_pad_remote_pad_unique(&av->pad); - if (IS_ERR(remote_pad)) { - dev_dbg(dev, "failed to get remote pad\n"); - return PTR_ERR(remote_pad); - } - - remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); - asd = to_ipu6_isys_subdev(remote_sd); - source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]); - if (!source_pad) { - dev_dbg(dev, "No external source entity\n"); - return -ENODEV; - } - - *source_entity = source_pad->entity; - - /* Find the root */ - state = v4l2_subdev_lock_and_get_active_state(remote_sd); - for_each_active_route(&state->routing, r) { - (*nr_queues)++; - - if (r->source_pad == remote_pad->index) - route = r; - } - - if (!route) { - v4l2_subdev_unlock_state(state); - dev_dbg(dev, "Failed to find route\n"); - return -ENODEV; - } - av->source_stream = route->sink_stream; - v4l2_subdev_unlock_state(state); - - ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, - to_ipu6_isys_csi2(asd), - *source_entity, &entry); - if (ret == -ENOIOCTLCMD) { - av->vc = 0; - av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); - } else if (!ret) { - dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", - entry.stream, entry.length, entry.bus.csi2.vc, - entry.bus.csi2.dt); - - av->vc = entry.bus.csi2.vc; - av->dt = entry.bus.csi2.dt; - } else { - dev_err(dev, "failed to get remote frame desc\n"); - return ret; - } - - pipeline = media_entity_pipeline(&av->vdev.entity); - if (!pipeline) - ret = video_device_pipeline_alloc_start(&av->vdev); - else - ret = video_device_pipeline_start(&av->vdev, pipeline); - if (ret < 0) { - dev_dbg(dev, "media pipeline start failed\n"); - return ret; - } - - av->stream = ipu6_isys_get_stream(av, asd); - if (!av->stream) { - video_device_pipeline_stop(&av->vdev); - dev_err(dev, "no available stream for firmware\n"); - return -EINVAL; - } - - return 0; -} - -/* - * Do everything that's needed to initialise things related to video - * buffer queue, video node, 
and the related media entity. The caller - * is expected to assign isys field and set the name of the video - * device. - */ -int ipu6_isys_video_init(struct ipu6_isys_video *av) -{ - struct v4l2_format format = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .fmt.pix = { - .width = 1920, - .height = 1080, - }, - }; - struct v4l2_format format_meta = { - .type = V4L2_BUF_TYPE_META_CAPTURE, - .fmt.meta = { - .width = 1920, - .height = 4, - }, - }; - int ret; - - mutex_init(&av->mutex); - av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | - V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE; - av->vdev.vfl_dir = VFL_DIR_RX; - - ret = ipu6_isys_queue_init(&av->aq); - if (ret) - goto out_free_watermark; - - av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; - ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); - if (ret) - goto out_vb2_queue_release; - - av->vdev.entity.ops = &entity_ops; - av->vdev.release = video_device_release_empty; - av->vdev.fops = &isys_fops; - av->vdev.v4l2_dev = &av->isys->v4l2_dev; - av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev; - if (!av->vdev.ioctl_ops) - av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops; - av->vdev.queue = &av->aq.vbq; - av->vdev.lock = &av->mutex; - - __ipu6_isys_vidioc_try_fmt_vid_cap(av, &format); - av->pix_fmt = format.fmt.pix; - __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); - av->meta_fmt = format_meta.fmt.meta; - - set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); - video_set_drvdata(&av->vdev, av); - - ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); - if (ret) - goto out_media_entity_cleanup; - - return ret; - -out_media_entity_cleanup: - vb2_video_unregister_device(&av->vdev); - media_entity_cleanup(&av->vdev.entity); - -out_vb2_queue_release: - vb2_queue_release(&av->aq.vbq); - -out_free_watermark: - mutex_destroy(&av->mutex); - - return ret; -} - -void ipu6_isys_video_cleanup(struct ipu6_isys_video *av) -{ - vb2_video_unregister_device(&av->vdev); - 
media_entity_cleanup(&av->vdev.entity); - mutex_destroy(&av->mutex); -} - -u32 ipu6_isys_get_format(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.pixelformat; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.dataformat; - - return 0; -} - -u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.sizeimage; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.buffersize; - - return 0; -} - -u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.bytesperline; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.bytesperline; - - return 0; -} - -u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.width; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.width; - - return 0; -} - -u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.height; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.height; - - return 0; -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h deleted file mode 100644 index 1d945be..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h +++ /dev/null @@ -1,141 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_VIDEO_H -#define IPU6_ISYS_VIDEO_H - -#include -#include -#include -#include -#include - -#include -#include - -#include "ipu6-isys-queue.h" - -#define IPU6_ISYS_OUTPUT_PINS 11 -#define IPU6_ISYS_MAX_PARALLEL_SOF 2 - -struct file; -struct ipu6_isys; -struct ipu6_isys_csi2; 
-struct ipu6_isys_subdev; - -struct ipu6_isys_pixelformat { - u32 pixelformat; - u32 bpp; - u32 bpp_packed; - u32 code; - u32 css_pixelformat; - bool is_meta; -}; - -struct sequence_info { - unsigned int sequence; - u64 timestamp; -}; - -struct output_pin_data { - void (*pin_ready)(struct ipu6_isys_stream *stream, - struct ipu6_fw_isys_resp_info_abi *info); - struct ipu6_isys_queue *aq; -}; - -/* - * Align with firmware stream. Each stream represents a CSI virtual channel. - * May map to multiple video devices - */ -struct ipu6_isys_stream { - struct mutex mutex; - struct media_entity *source_entity; - atomic_t sequence; - unsigned int seq_index; - struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; - int stream_source; - int stream_handle; - unsigned int nr_output_pins; - struct ipu6_isys_subdev *asd; - - int nr_queues; /* Number of capture queues */ - int nr_streaming; - int streaming; /* Has streaming been really started? */ - struct list_head queues; - struct completion stream_open_completion; - struct completion stream_close_completion; - struct completion stream_start_completion; - struct completion stream_stop_completion; - struct ipu6_isys *isys; - - struct output_pin_data output_pins[IPU6_ISYS_OUTPUT_PINS]; - int error; - u8 vc; -}; - -struct video_stream_watermark { - u32 width; - u32 height; - u32 hblank; - u32 frame_rate; - u64 pixel_rate; - u64 stream_data_rate; - u16 sram_gran_shift; - u16 sram_gran_size; - struct list_head stream_node; -}; - -struct ipu6_isys_video { - struct ipu6_isys_queue aq; - /* Serialise access to other fields in the struct. 
*/ - struct mutex mutex; - struct media_pad pad; - struct video_device vdev; - struct v4l2_pix_format pix_fmt; - struct v4l2_meta_format meta_fmt; - struct ipu6_isys *isys; - struct ipu6_isys_csi2 *csi2; - struct ipu6_isys_stream *stream; - unsigned int streaming; - struct video_stream_watermark watermark; - u32 source_stream; - u8 vc; - u8 dt; -}; - -#define ipu6_isys_queue_to_video(__aq) \ - container_of(__aq, struct ipu6_isys_video, aq) - -extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts[]; -extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[]; - -const struct ipu6_isys_pixelformat * -ipu6_isys_get_isys_format(u32 pixelformat, u32 code); -int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, - struct media_entity *source_entity, - int nr_queues); -int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - struct ipu6_isys_buffer_list *bl); -int ipu6_isys_fw_open(struct ipu6_isys *isys); -void ipu6_isys_fw_close(struct ipu6_isys *isys); -int ipu6_isys_setup_video(struct ipu6_isys_video *av, - struct media_entity **source_entity, int *nr_queues); -int ipu6_isys_video_init(struct ipu6_isys_video *av); -void ipu6_isys_video_cleanup(struct ipu6_isys_video *av); -void ipu6_isys_put_stream(struct ipu6_isys_stream *stream); -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle); -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc); - -void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, - bool state); -void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state); - -u32 ipu6_isys_get_format(struct ipu6_isys_video *av); -u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av); -u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av); -u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av); -u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av); - -#endif /* IPU6_ISYS_VIDEO_H */ 
diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c deleted file mode 100644 index 17bc8ca..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ /dev/null @@ -1,1382 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-cpd.h" -#include "ipu6-dma.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-mmu.h" -#include "ipu6-platform-buttress-regs.h" -#include "ipu6-platform-isys-csi2-reg.h" -#include "ipu6-platform-regs.h" - -#define IPU6_BUTTRESS_FABIC_CONTROL 0x68 -#define GDA_ENABLE_IWAKE_INDEX 2 -#define GDA_IWAKE_THRESHOLD_INDEX 1 -#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0 -#define GDA_MEMOPEN_THRESHOLD_INDEX 3 -#define DEFAULT_DID_RATIO 90 -#define DEFAULT_IWAKE_THRESHOLD 0x42 -#define DEFAULT_MEM_OPEN_TIME 10 -#define ONE_THOUSAND_MICROSECOND 1000 -/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */ -#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE 0x800 - -/* LTR & DID value are 10 bit at most */ -#define LTR_DID_VAL_MAX 1023 -#define LTR_DEFAULT_VALUE 0x70503c19 -#define FILL_TIME_DEFAULT_VALUE 0xfff0783c -#define LTR_DID_PKGC_2R 20 -#define LTR_SCALE_DEFAULT 5 -#define LTR_SCALE_1024NS 2 -#define DID_SCALE_1US 2 -#define DID_SCALE_32US 3 -#define REG_PKGC_PMON_CFG 0xb00 - -#define VAL_PKGC_PMON_CFG_RESET 0x38 -#define VAL_PKGC_PMON_CFG_START 0x7 - -#define IS_PIXEL_BUFFER_PAGES 0x80 -/* - * when iwake mode is disabled, the critical threshold is statically set - * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4 - */ -#define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4) - -union 
fabric_ctrl { - struct { - u16 ltr_val : 10; - u16 ltr_scale : 3; - u16 reserved : 3; - u16 did_val : 10; - u16 did_scale : 3; - u16 reserved2 : 1; - u16 keep_power_in_D0 : 1; - u16 keep_power_override : 1; - } bits; - u32 value; -}; - -enum ltr_did_type { - LTR_IWAKE_ON, - LTR_IWAKE_OFF, - LTR_ISYS_ON, - LTR_ISYS_OFF, - LTR_ENHANNCE_IWAKE, - LTR_TYPE_MAX -}; - -#define ISYS_PM_QOS_VALUE 300 - -static int isys_isr_one(struct ipu6_bus_device *adev); - -static int -isys_complete_ext_device_registration(struct ipu6_isys *isys, - struct v4l2_subdev *sd, - struct ipu6_isys_csi2_config *csi2) -{ - struct device *dev = &isys->adev->auxdev.dev; - unsigned int i; - int ret; - - for (i = 0; i < sd->entity.num_pads; i++) { - if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) - break; - } - - if (i == sd->entity.num_pads) { - dev_warn(dev, "no src pad in external entity\n"); - ret = -ENOENT; - goto unregister_subdev; - } - - ret = media_create_pad_link(&sd->entity, i, - &isys->csi2[csi2->port].asd.sd.entity, - 0, MEDIA_LNK_FL_ENABLED | - MEDIA_LNK_FL_IMMUTABLE); - if (ret) { - dev_warn(dev, "can't create link\n"); - goto unregister_subdev; - } - - isys->csi2[csi2->port].nlanes = csi2->nlanes; - - return 0; - -unregister_subdev: - v4l2_device_unregister_subdev(sd); - - return ret; -} - -static void isys_stream_init(struct ipu6_isys *isys) -{ - u32 i; - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - mutex_init(&isys->streams[i].mutex); - init_completion(&isys->streams[i].stream_open_completion); - init_completion(&isys->streams[i].stream_close_completion); - init_completion(&isys->streams[i].stream_start_completion); - init_completion(&isys->streams[i].stream_stop_completion); - INIT_LIST_HEAD(&isys->streams[i].queues); - isys->streams[i].isys = isys; - isys->streams[i].stream_handle = i; - isys->streams[i].vc = INVALID_VC_ID; - } -} - -static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2 = - 
&isys->pdata->ipdata->csi2; - unsigned int i; - - for (i = 0; i < csi2->nports; i++) - ipu6_isys_csi2_cleanup(&isys->csi2[i]); -} - -static int isys_csi2_register_subdevices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - unsigned int i; - int ret; - - for (i = 0; i < csi2_pdata->nports; i++) { - ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, - isys->pdata->base + - CSI_REG_PORT_BASE(i), i); - if (ret) - goto fail; - - isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); - } - - return 0; - -fail: - while (i--) - ipu6_isys_csi2_cleanup(&isys->csi2[i]); - - return ret; -} - -static int isys_csi2_create_media_links(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; - unsigned int i, j; - int ret; - - for (i = 0; i < csi2_pdata->nports; i++) { - struct media_entity *sd = &isys->csi2[i].asd.sd.entity; - - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { - struct ipu6_isys_video *av = &isys->csi2[i].av[j]; - - ret = media_create_pad_link(sd, CSI2_PAD_SRC + j, - &av->vdev.entity, 0, 0); - if (ret) { - dev_err(dev, "CSI2 can't create link\n"); - return ret; - } - - av->csi2 = &isys->csi2[i]; - } - } - - return 0; -} - -static void isys_unregister_video_devices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - unsigned int i, j; - - for (i = 0; i < csi2_pdata->nports; i++) - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) - ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); -} - -static int isys_register_video_devices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - unsigned int i, j; - int ret; - - for (i = 0; i < csi2_pdata->nports; i++) { - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { - struct ipu6_isys_video *av = &isys->csi2[i].av[j]; - - snprintf(av->vdev.name, 
sizeof(av->vdev.name), - IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u", - i * NR_OF_CSI2_SRC_PADS + j); - av->isys = isys; - av->aq.vbq.buf_struct_size = - sizeof(struct ipu6_isys_video_buffer); - - ret = ipu6_isys_video_init(av); - if (ret) - goto fail; - } - } - - return 0; - -fail: - while (i--) { - while (j--) - ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); - j = NR_OF_CSI2_SRC_PADS; - } - - return ret; -} - -void isys_setup_hw(struct ipu6_isys *isys) -{ - void __iomem *base = isys->pdata->base; - const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; - u32 irqs = 0; - unsigned int i, nports; - - nports = isys->pdata->ipdata->csi2.nports; - - /* Enable irqs for all MIPI ports */ - for (i = 0; i < nports; i++) - irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); - - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge); - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp); - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask); - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable); - writel(GENMASK(19, 0), - base + isys->pdata->ipdata->csi2.ctrl0_irq_clear); - - irqs = ISYS_UNISPART_IRQS; - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE); - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); - writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE); - - writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); - writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); - - /* Write CDC FIFO threshold values for isys */ - for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) - writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i)); -} - -static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2) -{ - struct ipu6_isys_stream *stream; - unsigned int i; - u32 status; - int source; - - ipu6_isys_register_errors(csi2); - - status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - 
CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - - writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - - source = csi2->asd.source; - for (i = 0; i < NR_OF_CSI2_VC; i++) { - if (status & IPU_CSI_RX_IRQ_FS_VC(i)) { - stream = ipu6_isys_query_stream_by_source(csi2->isys, - source, i); - if (stream) { - ipu6_isys_csi2_sof_event_by_stream(stream); - ipu6_isys_put_stream(stream); - } - } - - if (status & IPU_CSI_RX_IRQ_FE_VC(i)) { - stream = ipu6_isys_query_stream_by_source(csi2->isys, - source, i); - if (stream) { - ipu6_isys_csi2_eof_event_by_stream(stream); - ipu6_isys_put_stream(stream); - } - } - } -} - -irqreturn_t isys_isr(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - void __iomem *base = isys->pdata->base; - u32 status_sw, status_csi; - u32 ctrl0_status, ctrl0_clear; - - spin_lock(&isys->power_lock); - if (!isys->power) { - spin_unlock(&isys->power_lock); - return IRQ_NONE; - } - - ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status; - ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear; - - status_csi = readl(isys->pdata->base + ctrl0_status); - status_sw = readl(isys->pdata->base + - IPU6_REG_ISYS_UNISPART_IRQ_STATUS); - - writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW, - base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); - - do { - writel(status_csi, isys->pdata->base + ctrl0_clear); - - writel(status_sw, isys->pdata->base + - IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); - - if (isys->isr_csi2_bits & status_csi) { - unsigned int i; - - for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { - /* irq from not enabled port */ - if (!isys->csi2[i].base) - continue; - if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i)) - ipu6_isys_csi2_isr(&isys->csi2[i]); - } - } - - writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); - - if (!isys_isr_one(adev)) - status_sw = IPU6_ISYS_UNISPART_IRQ_SW; - else - status_sw = 0; - - status_csi = readl(isys->pdata->base + ctrl0_status); - status_sw |= 
readl(isys->pdata->base + - IPU6_REG_ISYS_UNISPART_IRQ_STATUS); - } while ((status_csi & isys->isr_csi2_bits) || - (status_sw & IPU6_ISYS_UNISPART_IRQ_SW)); - - writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); - - spin_unlock(&isys->power_lock); - - return IRQ_HANDLED; -} - -static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - struct ltr_did ltrdid_default; - - ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE; - ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE; - - if (iwake_watermark->ltrdid.lut_ltr.value) - *pltr_did = iwake_watermark->ltrdid; - else - *pltr_did = ltrdid_default; -} - -static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value) -{ - struct device *dev = &isys->adev->auxdev.dev; - u32 req_id = index; - u32 offset = 0; - int ret; - - ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value); - if (ret) - dev_err(dev, "write %d failed %d", index, ret); - - return ret; -} - -/* - * When input system is powered up and before enabling any new sensor capture, - * or after disabling any sensor capture the following values need to be set: - * LTR_value = LTR(usec) from calculation; - * LTR_scale = 2; - * DID_value = DID(usec) from calculation; - * DID_scale = 2; - * - * When input system is powered down, the LTR and DID values - * must be returned to the default values: - * LTR_value = 1023; - * LTR_scale = 5; - * DID_value = 1023; - * DID_scale = 2; - */ -static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did, - enum ltr_did_type use) -{ - struct device *dev = &isys->adev->auxdev.dev; - u16 ltr_val, ltr_scale = LTR_SCALE_1024NS; - u16 did_val, did_scale = DID_SCALE_1US; - struct ipu6_device *isp = isys->adev->isp; - union fabric_ctrl fc; - - switch (use) { - case LTR_IWAKE_ON: - ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX); - did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX); - 
ltr_scale = (ltr == LTR_DID_VAL_MAX && - did == LTR_DID_VAL_MAX) ? - LTR_SCALE_DEFAULT : LTR_SCALE_1024NS; - break; - case LTR_ISYS_ON: - case LTR_IWAKE_OFF: - ltr_val = LTR_DID_PKGC_2R; - did_val = LTR_DID_PKGC_2R; - break; - case LTR_ISYS_OFF: - ltr_val = LTR_DID_VAL_MAX; - did_val = LTR_DID_VAL_MAX; - ltr_scale = LTR_SCALE_DEFAULT; - break; - case LTR_ENHANNCE_IWAKE: - if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) { - ltr_val = LTR_DID_VAL_MAX; - did_val = LTR_DID_VAL_MAX; - ltr_scale = LTR_SCALE_DEFAULT; - } else if (did < ONE_THOUSAND_MICROSECOND) { - ltr_val = ltr; - did_val = did; - } else { - ltr_val = ltr; - /* div 90% value by 32 to account for scale change */ - did_val = did / 32; - did_scale = DID_SCALE_32US; - } - break; - default: - ltr_val = LTR_DID_VAL_MAX; - did_val = LTR_DID_VAL_MAX; - ltr_scale = LTR_SCALE_DEFAULT; - break; - } - - fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL); - fc.bits.ltr_val = ltr_val; - fc.bits.ltr_scale = ltr_scale; - fc.bits.did_val = did_val; - fc.bits.did_scale = did_scale; - - dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n", - ltr_val, ltr_scale, did_val, did_scale); - writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL); -} - -/* - * Driver may clear register GDA_ENABLE_IWAKE before FW configures the - * stream for debug purpose. Otherwise driver should not access this register. 
- */ -static void enable_iwake(struct ipu6_isys *isys, bool enable) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - int ret; - - mutex_lock(&iwake_watermark->mutex); - - if (iwake_watermark->iwake_enabled == enable) { - mutex_unlock(&iwake_watermark->mutex); - return; - } - - ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable); - if (!ret) - iwake_watermark->iwake_enabled = enable; - - mutex_unlock(&iwake_watermark->mutex); -} - -void update_watermark_setting(struct ipu6_isys *isys) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - u32 iwake_threshold, iwake_critical_threshold, page_num; - struct device *dev = &isys->adev->auxdev.dev; - u32 calc_fill_time_us = 0, ltr = 0, did = 0; - struct video_stream_watermark *p_watermark; - enum ltr_did_type ltr_did_type; - struct list_head *stream_node; - u64 isys_pb_datarate_mbs = 0; - u32 mem_open_threshold = 0; - struct ltr_did ltrdid; - u64 threshold_bytes; - u32 max_sram_size; - u32 shift; - - shift = isys->pdata->ipdata->sram_gran_shift; - max_sram_size = isys->pdata->ipdata->max_sram_size; - - mutex_lock(&iwake_watermark->mutex); - if (iwake_watermark->force_iwake_disable) { - set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); - set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, - CRITICAL_THRESHOLD_IWAKE_DISABLE); - goto unlock_exit; - } - - if (list_empty(&iwake_watermark->video_list)) { - isys_pb_datarate_mbs = 0; - } else { - list_for_each(stream_node, &iwake_watermark->video_list) { - p_watermark = list_entry(stream_node, - struct video_stream_watermark, - stream_node); - isys_pb_datarate_mbs += p_watermark->stream_data_rate; - } - } - mutex_unlock(&iwake_watermark->mutex); - - if (!isys_pb_datarate_mbs) { - enable_iwake(isys, false); - set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); - mutex_lock(&iwake_watermark->mutex); - set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, - CRITICAL_THRESHOLD_IWAKE_DISABLE); - goto unlock_exit; - } - - 
enable_iwake(isys, true); - calc_fill_time_us = max_sram_size / isys_pb_datarate_mbs; - - if (isys->pdata->ipdata->enhanced_iwake) { - ltr = isys->pdata->ipdata->ltr; - did = calc_fill_time_us * DEFAULT_DID_RATIO / 100; - ltr_did_type = LTR_ENHANNCE_IWAKE; - } else { - get_lut_ltrdid(isys, <rdid); - - if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0) - ltr = 0; - else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1) - ltr = ltrdid.lut_ltr.bits.val0; - else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2) - ltr = ltrdid.lut_ltr.bits.val1; - else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3) - ltr = ltrdid.lut_ltr.bits.val2; - else - ltr = ltrdid.lut_ltr.bits.val3; - - did = calc_fill_time_us - ltr; - ltr_did_type = LTR_IWAKE_ON; - } - - set_iwake_ltrdid(isys, ltr, did, ltr_did_type); - - /* calculate iwake threshold with 2KB granularity pages */ - threshold_bytes = did * isys_pb_datarate_mbs; - iwake_threshold = max_t(u32, 1, threshold_bytes >> shift); - iwake_threshold = min_t(u32, iwake_threshold, max_sram_size); - - mutex_lock(&iwake_watermark->mutex); - if (isys->pdata->ipdata->enhanced_iwake) { - set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, - DEFAULT_IWAKE_THRESHOLD); - /* calculate number of pages that will be filled in 10 usec */ - page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) / - ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE; - page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) % - ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 
1 : 0; - mem_open_threshold = isys->pdata->ipdata->memopen_threshold; - mem_open_threshold = max_t(u32, mem_open_threshold, page_num); - dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold); - set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX, - mem_open_threshold); - } else { - set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, - iwake_threshold); - } - - iwake_critical_threshold = iwake_threshold + - (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2; - - dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold, - iwake_critical_threshold); - - set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, - iwake_critical_threshold); - - writel(VAL_PKGC_PMON_CFG_RESET, - isys->adev->isp->base + REG_PKGC_PMON_CFG); - writel(VAL_PKGC_PMON_CFG_START, - isys->adev->isp->base + REG_PKGC_PMON_CFG); -unlock_exit: - mutex_unlock(&iwake_watermark->mutex); -} - -static void isys_iwake_watermark_init(struct ipu6_isys *isys) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - - INIT_LIST_HEAD(&iwake_watermark->video_list); - mutex_init(&iwake_watermark->mutex); - - iwake_watermark->ltrdid.lut_ltr.value = 0; - iwake_watermark->isys = isys; - iwake_watermark->iwake_enabled = false; - iwake_watermark->force_iwake_disable = false; -} - -static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - - mutex_lock(&iwake_watermark->mutex); - list_del(&iwake_watermark->video_list); - mutex_unlock(&iwake_watermark->mutex); - - mutex_destroy(&iwake_watermark->mutex); -} - -/* The .bound() notifier callback when a match is found */ -static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_connection *asc) -{ - struct ipu6_isys *isys = - container_of(notifier, struct ipu6_isys, notifier); - struct sensor_async_sd *s_asd = - container_of(asc, struct sensor_async_sd, asc); - int ret; - - if (s_asd->csi2.port >= 
isys->pdata->ipdata->csi2.nports) { - dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", - s_asd->csi2.port); - return -EINVAL; - } - - ret = ipu_bridge_instantiate_vcm(sd->dev); - if (ret) { - dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n"); - return ret; - } - - dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n", - sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); - ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); - if (ret) - return ret; - - return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -} - -static int isys_notifier_complete(struct v4l2_async_notifier *notifier) -{ - struct ipu6_isys *isys = - container_of(notifier, struct ipu6_isys, notifier); - - return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -} - -static const struct v4l2_async_notifier_operations isys_async_ops = { - .bound = isys_notifier_bound, - .complete = isys_notifier_complete, -}; - -#define ISYS_MAX_PORTS 8 -static int isys_notifier_init(struct ipu6_isys *isys) -{ - struct ipu6_device *isp = isys->adev->isp; - struct device *dev = &isp->pdev->dev; - unsigned int i; - int ret; - - v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); - - for (i = 0; i < ISYS_MAX_PORTS; i++) { - struct v4l2_fwnode_endpoint vep = { - .bus_type = V4L2_MBUS_CSI2_DPHY - }; - struct sensor_async_sd *s_asd; - struct fwnode_handle *ep; - - ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, - FWNODE_GRAPH_ENDPOINT_NEXT); - if (!ep) - continue; - - ret = v4l2_fwnode_endpoint_parse(ep, &vep); - if (ret) { - dev_err(dev, "fwnode endpoint parse failed: %d\n", ret); - goto err_parse; - } - - s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, - struct sensor_async_sd); - if (IS_ERR(s_asd)) { - ret = PTR_ERR(s_asd); - dev_err(dev, "add remove fwnode failed: %d\n", ret); - goto err_parse; - } - - s_asd->csi2.port = vep.base.port; - s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; - - dev_dbg(dev, "remote endpoint port %d with 
%d lanes added\n", - s_asd->csi2.port, s_asd->csi2.nlanes); - - fwnode_handle_put(ep); - - continue; - -err_parse: - fwnode_handle_put(ep); - return ret; - } - - isys->notifier.ops = &isys_async_ops; - ret = v4l2_async_nf_register(&isys->notifier); - if (ret) { - dev_err(dev, "failed to register async notifier : %d\n", ret); - v4l2_async_nf_cleanup(&isys->notifier); - } - - return ret; -} - -static void isys_notifier_cleanup(struct ipu6_isys *isys) -{ - v4l2_async_nf_unregister(&isys->notifier); - v4l2_async_nf_cleanup(&isys->notifier); -} - -static int isys_register_devices(struct ipu6_isys *isys) -{ - struct device *dev = &isys->adev->auxdev.dev; - struct pci_dev *pdev = isys->adev->isp->pdev; - int ret; - - isys->media_dev.dev = dev; - media_device_pci_init(&isys->media_dev, - pdev, IPU6_MEDIA_DEV_MODEL_NAME); - - strscpy(isys->v4l2_dev.name, isys->media_dev.model, - sizeof(isys->v4l2_dev.name)); - - ret = media_device_register(&isys->media_dev); - if (ret < 0) - goto out_media_device_unregister; - - isys->v4l2_dev.mdev = &isys->media_dev; - isys->v4l2_dev.ctrl_handler = NULL; - - ret = v4l2_device_register(dev, &isys->v4l2_dev); - if (ret < 0) - goto out_media_device_unregister; - - ret = isys_register_video_devices(isys); - if (ret) - goto out_v4l2_device_unregister; - - ret = isys_csi2_register_subdevices(isys); - if (ret) - goto out_isys_unregister_video_device; - - ret = isys_csi2_create_media_links(isys); - if (ret) - goto out_isys_unregister_subdevices; - - ret = isys_notifier_init(isys); - if (ret) - goto out_isys_unregister_subdevices; - - return 0; - -out_isys_unregister_subdevices: - isys_csi2_unregister_subdevices(isys); - -out_isys_unregister_video_device: - isys_unregister_video_devices(isys); - -out_v4l2_device_unregister: - v4l2_device_unregister(&isys->v4l2_dev); - -out_media_device_unregister: - media_device_unregister(&isys->media_dev); - media_device_cleanup(&isys->media_dev); - - dev_err(dev, "failed to register isys devices\n"); - - return 
ret; -} - -static void isys_unregister_devices(struct ipu6_isys *isys) -{ - isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); - v4l2_device_unregister(&isys->v4l2_dev); - media_device_unregister(&isys->media_dev); - media_device_cleanup(&isys->media_dev); -} - -static int isys_runtime_pm_resume(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - struct ipu6_device *isp = adev->isp; - unsigned long flags; - int ret; - - if (!isys) - return 0; - - ret = ipu6_mmu_hw_init(adev->mmu); - if (ret) - return ret; - - cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); - - ret = ipu6_buttress_start_tsc_sync(isp); - if (ret) - return ret; - - spin_lock_irqsave(&isys->power_lock, flags); - isys->power = 1; - spin_unlock_irqrestore(&isys->power_lock, flags); - - isys_setup_hw(isys); - - set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON); - - return 0; -} - -static int isys_runtime_pm_suspend(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - struct ipu6_isys *isys; - unsigned long flags; - - isys = dev_get_drvdata(dev); - if (!isys) - return 0; - - spin_lock_irqsave(&isys->power_lock, flags); - isys->power = 0; - spin_unlock_irqrestore(&isys->power_lock, flags); - - mutex_lock(&isys->mutex); - isys->need_reset = false; - mutex_unlock(&isys->mutex); - - isys->phy_termcal_val = 0; - cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); - - set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF); - - ipu6_mmu_hw_cleanup(adev->mmu); - - return 0; -} - -static int isys_suspend(struct device *dev) -{ - struct ipu6_isys *isys = dev_get_drvdata(dev); - - /* If stream is open, refuse to suspend */ - if (isys->stream_opened) - return -EBUSY; - - return 0; -} - -static int isys_resume(struct device *dev) -{ - return 0; -} - -static const struct dev_pm_ops isys_pm_ops = { - .runtime_suspend = isys_runtime_pm_suspend, - .runtime_resume = 
isys_runtime_pm_resume, - .suspend = isys_suspend, - .resume = isys_resume, -}; - -static void free_fw_msg_bufs(struct ipu6_isys *isys) -{ - struct isys_fw_msgs *fwmsg, *safe; - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) - ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, - fwmsg->dma_addr, 0); - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) - ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, - fwmsg->dma_addr, 0); -} - -static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) -{ - struct isys_fw_msgs *addr; - dma_addr_t dma_addr; - unsigned long flags; - unsigned int i; - - for (i = 0; i < amount; i++) { - addr = ipu6_dma_alloc(isys->adev, sizeof(*addr), - &dma_addr, GFP_KERNEL, 0); - if (!addr) - break; - addr->dma_addr = dma_addr; - - spin_lock_irqsave(&isys->listlock, flags); - list_add(&addr->head, &isys->framebuflist); - spin_unlock_irqrestore(&isys->listlock, flags); - } - - if (i == amount) - return 0; - - spin_lock_irqsave(&isys->listlock, flags); - while (!list_empty(&isys->framebuflist)) { - addr = list_first_entry(&isys->framebuflist, - struct isys_fw_msgs, head); - list_del(&addr->head); - spin_unlock_irqrestore(&isys->listlock, flags); - ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), addr, - addr->dma_addr, 0); - spin_lock_irqsave(&isys->listlock, flags); - } - spin_unlock_irqrestore(&isys->listlock, flags); - - return -ENOMEM; -} - -struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream) -{ - struct ipu6_isys *isys = stream->isys; - struct device *dev = &isys->adev->auxdev.dev; - struct isys_fw_msgs *msg; - unsigned long flags; - int ret; - - spin_lock_irqsave(&isys->listlock, flags); - if (list_empty(&isys->framebuflist)) { - spin_unlock_irqrestore(&isys->listlock, flags); - dev_dbg(dev, "Frame list empty\n"); - - ret = alloc_fw_msg_bufs(isys, 5); - if (ret < 0) - return NULL; - - spin_lock_irqsave(&isys->listlock, flags); - if 
(list_empty(&isys->framebuflist)) { - spin_unlock_irqrestore(&isys->listlock, flags); - dev_err(dev, "Frame list empty\n"); - return NULL; - } - } - msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); - list_move(&msg->head, &isys->framebuflist_fw); - spin_unlock_irqrestore(&isys->listlock, flags); - memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); - - return msg; -} - -void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys) -{ - struct isys_fw_msgs *fwmsg, *fwmsg0; - unsigned long flags; - - spin_lock_irqsave(&isys->listlock, flags); - list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) - list_move(&fwmsg->head, &isys->framebuflist); - spin_unlock_irqrestore(&isys->listlock, flags); -} - -void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data) -{ - struct isys_fw_msgs *msg; - unsigned long flags; - u64 *ptr = (u64 *)data; - - if (!ptr) - return; - - spin_lock_irqsave(&isys->listlock, flags); - msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); - list_move(&msg->head, &isys->framebuflist); - spin_unlock_irqrestore(&isys->listlock, flags); -} - -static int isys_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *auxdev_id) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata; - struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); - struct ipu6_device *isp = adev->isp; - const struct firmware *fw; - struct ipu6_isys *isys; - unsigned int i; - int ret; - - if (!isp->bus_ready_to_probe) - return -EPROBE_DEFER; - - isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL); - if (!isys) - return -ENOMEM; - - adev->auxdrv_data = - (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; - adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver); - isys->adev = adev; - isys->pdata = adev->pdata; - csi2_pdata = &isys->pdata->ipdata->csi2; - - isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports, - sizeof(*isys->csi2), GFP_KERNEL); - if (!isys->csi2) - return -ENOMEM; - - ret = 
ipu6_mmu_hw_init(adev->mmu); - if (ret) - return ret; - - /* initial sensor type */ - isys->sensor_type = isys->pdata->ipdata->sensor_type_start; - - spin_lock_init(&isys->streams_lock); - spin_lock_init(&isys->power_lock); - isys->power = 0; - isys->phy_termcal_val = 0; - - mutex_init(&isys->mutex); - mutex_init(&isys->stream_mutex); - - spin_lock_init(&isys->listlock); - INIT_LIST_HEAD(&isys->framebuflist); - INIT_LIST_HEAD(&isys->framebuflist_fw); - - isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN; - isys->icache_prefetch = 0; - - dev_set_drvdata(&auxdev->dev, isys); - - isys_stream_init(isys); - - if (!isp->secure_mode) { - fw = isp->cpd_fw; - ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt); - if (ret) - goto release_firmware; - - ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data); - if (ret) - goto remove_shared_buffer; - } - - cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); - - ret = alloc_fw_msg_bufs(isys, 20); - if (ret < 0) - goto out_remove_pkg_dir_shared_buffer; - - isys_iwake_watermark_init(isys); - - if (is_ipu6se(adev->isp->hw_ver)) - isys->phy_set_power = ipu6_isys_jsl_phy_set_power; - else if (is_ipu6ep_mtl(adev->isp->hw_ver)) - isys->phy_set_power = ipu6_isys_dwc_phy_set_power; - else - isys->phy_set_power = ipu6_isys_mcd_phy_set_power; - - ret = isys_register_devices(isys); - if (ret) - goto free_fw_msg_bufs; - - ipu6_mmu_hw_cleanup(adev->mmu); - - return 0; - -free_fw_msg_bufs: - free_fw_msg_bufs(isys); -out_remove_pkg_dir_shared_buffer: - cpu_latency_qos_remove_request(&isys->pm_qos); - if (!isp->secure_mode) - ipu6_cpd_free_pkg_dir(adev); -remove_shared_buffer: - if (!isp->secure_mode) - ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); -release_firmware: - if (!isp->secure_mode) - release_firmware(adev->fw); - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) - mutex_destroy(&isys->streams[i].mutex); - - mutex_destroy(&isys->mutex); - mutex_destroy(&isys->stream_mutex); - - ipu6_mmu_hw_cleanup(adev->mmu); - - 
return ret; -} - -static void isys_remove(struct auxiliary_device *auxdev) -{ - struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); - struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); - struct ipu6_device *isp = adev->isp; - unsigned int i; - - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); - isys_notifier_cleanup(isys); - - cpu_latency_qos_remove_request(&isys->pm_qos); - - if (!isp->secure_mode) { - ipu6_cpd_free_pkg_dir(adev); - ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); - release_firmware(adev->fw); - } - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) - mutex_destroy(&isys->streams[i].mutex); - - isys_iwake_watermark_cleanup(isys); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); -} - -struct fwmsg { - int type; - char *msg; - bool valid_ts; -}; - -static const struct fwmsg fw_msg[] = { - {IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, - "STREAM_START_AND_CAPTURE_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, - "STREAM_START_AND_CAPTURE_DONE", 1}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, - {IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, - {IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, - {IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, - {-1, "UNKNOWN MESSAGE", 0} -}; - -static u32 resp_type_to_index(int type) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(fw_msg); i++) - if (fw_msg[i].type == type) - return i; - - return ARRAY_SIZE(fw_msg) - 1; -} - -static int 
isys_isr_one(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - struct ipu6_fw_isys_resp_info_abi *resp; - struct ipu6_isys_stream *stream; - struct ipu6_isys_csi2 *csi2 = NULL; - u32 index; - u64 ts; - - if (!isys->fwcom) - return 1; - - resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); - if (!resp) - return 1; - - ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; - - index = resp_type_to_index(resp->type); - dev_dbg(&adev->auxdev.dev, - "FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n", - resp->type, fw_msg[index].msg, resp->stream_handle, - fw_msg[index].valid_ts ? ts : 0, resp->pin_id); - - if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) - /* Suspension is kind of special case: not enough buffers */ - dev_dbg(&adev->auxdev.dev, - "FW error resp SUSPENSION, details %d\n", - resp->error_info.error_details); - else if (resp->error_info.error) - dev_dbg(&adev->auxdev.dev, - "FW error resp error %d, details %d\n", - resp->error_info.error, resp->error_info.error_details); - - if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) { - dev_err(&adev->auxdev.dev, "bad stream handle %u\n", - resp->stream_handle); - goto leave; - } - - stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle); - if (!stream) { - dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n", - resp->stream_handle); - goto leave; - } - stream->error = resp->error_info.error; - - csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - - switch (resp->type) { - case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: - complete(&stream->stream_open_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: - complete(&stream->stream_close_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK: - complete(&stream->stream_start_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: - complete(&stream->stream_start_completion); - break; - case 
IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: - complete(&stream->stream_stop_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: - complete(&stream->stream_stop_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY: - /* - * firmware only release the capture msg until software - * get pin_data_ready event - */ - ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id); - if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS && - stream->output_pins[resp->pin_id].pin_ready) - stream->output_pins[resp->pin_id].pin_ready(stream, - resp); - else - dev_warn(&adev->auxdev.dev, - "%d:No data pin ready handler for pin id %d\n", - resp->stream_handle, resp->pin_id); - if (csi2) - ipu6_isys_csi2_error(csi2); - - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: - case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: - break; - case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF: - - ipu6_isys_csi2_sof_event_by_stream(stream); - stream->seq[stream->seq_index].sequence = - atomic_read(&stream->sequence) - 1; - stream->seq[stream->seq_index].timestamp = ts; - dev_dbg(&adev->auxdev.dev, - "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", - resp->stream_handle, - stream->seq[stream->seq_index].sequence, ts); - stream->seq_index = (stream->seq_index + 1) - % IPU6_ISYS_MAX_PARALLEL_SOF; - break; - case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF: - ipu6_isys_csi2_eof_event_by_stream(stream); - dev_dbg(&adev->auxdev.dev, - "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", - resp->stream_handle, - stream->seq[stream->seq_index].sequence, ts); - break; - case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY: - break; - default: - dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n", - resp->stream_handle, resp->type); - break; - } - - ipu6_isys_put_stream(stream); -leave: - ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); - return 0; -} - -static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = 
{ - .isr = isys_isr, - .isr_threaded = NULL, - .wake_isr_thread = false, -}; - -static const struct auxiliary_device_id ipu6_isys_id_table[] = { - { - .name = "intel_ipu6.isys", - .driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data, - }, - { } -}; -MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table); - -static struct auxiliary_driver isys_driver = { - .name = IPU6_ISYS_NAME, - .probe = isys_probe, - .remove = isys_remove, - .id_table = ipu6_isys_id_table, - .driver = { - .pm = &isys_pm_ops, - }, -}; - -module_auxiliary_driver(isys_driver); - -MODULE_AUTHOR("Sakari Ailus "); -MODULE_AUTHOR("Tianshu Qiu "); -MODULE_AUTHOR("Bingbu Cao "); -MODULE_AUTHOR("Yunliang Ding "); -MODULE_AUTHOR("Hongju Wang "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel IPU6 input system driver"); -MODULE_IMPORT_NS(INTEL_IPU6); -MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h deleted file mode 100644 index 86dfc2e..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +++ /dev/null @@ -1,206 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_H -#define IPU6_ISYS_H - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-fw-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-isys-video.h" - -struct ipu6_bus_device; - -#define IPU6_ISYS_ENTITY_PREFIX "Intel IPU6" -/* FW support max 16 streams */ -#define IPU6_ISYS_MAX_STREAMS 16 -#define ISYS_UNISPART_IRQS (IPU6_ISYS_UNISPART_IRQ_SW | \ - IPU6_ISYS_UNISPART_IRQ_CSI0 | \ - IPU6_ISYS_UNISPART_IRQ_CSI1) - -#define IPU6_ISYS_2600_MEM_LINE_ALIGN 64 - -/* - * Current message queue configuration. These must be big enough - * so that they never gets full. 
Queues are located in system memory - */ -#define IPU6_ISYS_SIZE_RECV_QUEUE 40 -#define IPU6_ISYS_SIZE_SEND_QUEUE 40 -#define IPU6_ISYS_SIZE_PROXY_RECV_QUEUE 5 -#define IPU6_ISYS_SIZE_PROXY_SEND_QUEUE 5 -#define IPU6_ISYS_NUM_RECV_QUEUE 1 - -#define IPU6_ISYS_MIN_WIDTH 2U -#define IPU6_ISYS_MIN_HEIGHT 2U -#define IPU6_ISYS_MAX_WIDTH 4672U -#define IPU6_ISYS_MAX_HEIGHT 3416U - -/* the threshold granularity is 2KB on IPU6 */ -#define IPU6_SRAM_GRANULARITY_SHIFT 11 -#define IPU6_SRAM_GRANULARITY_SIZE 2048 -/* the threshold granularity is 1KB on IPU6SE */ -#define IPU6SE_SRAM_GRANULARITY_SHIFT 10 -#define IPU6SE_SRAM_GRANULARITY_SIZE 1024 -/* IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6 */ -#define IPU6_MAX_SRAM_SIZE (200 << 10) -/* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE */ -#define IPU6SE_MAX_SRAM_SIZE (96 << 10) - -#define IPU6EP_LTR_VALUE 200 -#define IPU6EP_MIN_MEMOPEN_TH 0x4 -#define IPU6EP_MTL_LTR_VALUE 1023 -#define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc - -struct ltr_did { - union { - u32 value; - struct { - u8 val0; - u8 val1; - u8 val2; - u8 val3; - } bits; - } lut_ltr; - union { - u32 value; - struct { - u8 th0; - u8 th1; - u8 th2; - u8 th3; - } bits; - } lut_fill_time; -}; - -struct isys_iwake_watermark { - bool iwake_enabled; - bool force_iwake_disable; - u32 iwake_threshold; - u64 isys_pixelbuffer_datarate; - struct ltr_did ltrdid; - struct mutex mutex; /* protect whole struct */ - struct ipu6_isys *isys; - struct list_head video_list; -}; - -struct ipu6_isys_csi2_config { - u32 nlanes; - u32 port; -}; - -struct sensor_async_sd { - struct v4l2_async_connection asc; - struct ipu6_isys_csi2_config csi2; -}; - -/* - * struct ipu6_isys - * - * @media_dev: Media device - * @v4l2_dev: V4L2 device - * @adev: ISYS bus device - * @power: Is ISYS powered on or not? - * @isr_bits: Which bits does the ISR handle? 
- * @power_lock: Serialise access to power (power state in general) - * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers - * @streams_lock: serialise access to streams - * @streams: streams per firmware stream ID - * @fwcom: fw communication layer private pointer - * or optional external library private pointer - * @line_align: line alignment in memory - * @phy_termcal_val: the termination calibration value, only used for DWC PHY - * @need_reset: Isys requires d0i0->i3 transition - * @ref_count: total number of callers fw open - * @mutex: serialise access isys video open/release related operations - * @stream_mutex: serialise stream start and stop, queueing requests - * @pdata: platform data pointer - * @csi2: CSI-2 receivers - */ -struct ipu6_isys { - struct media_device media_dev; - struct v4l2_device v4l2_dev; - struct ipu6_bus_device *adev; - - int power; - spinlock_t power_lock; - u32 isr_csi2_bits; - u32 csi2_rx_ctrl_cached; - spinlock_t streams_lock; - struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS]; - int streams_ref_count[IPU6_ISYS_MAX_STREAMS]; - void *fwcom; - unsigned int line_align; - u32 phy_termcal_val; - bool need_reset; - bool icache_prefetch; - bool csi2_cse_ipc_not_supported; - unsigned int ref_count; - unsigned int stream_opened; - unsigned int sensor_type; - - struct mutex mutex; - struct mutex stream_mutex; - - struct ipu6_isys_pdata *pdata; - - int (*phy_set_power)(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); - - struct ipu6_isys_csi2 *csi2; - - struct pm_qos_request pm_qos; - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; - struct v4l2_async_notifier notifier; - struct isys_iwake_watermark iwake_watermark; -}; - -struct isys_fw_msgs { - union { - u64 dummy; - struct ipu6_fw_isys_frame_buff_set_abi frame; - struct ipu6_fw_isys_stream_cfg_data_abi stream; - } fw_msg; - struct 
list_head head; - dma_addr_t dma_addr; -}; - -struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); -void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data); -void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); - -extern const struct v4l2_ioctl_ops ipu6_isys_ioctl_ops; - -void isys_setup_hw(struct ipu6_isys *isys); -irqreturn_t isys_isr(struct ipu6_bus_device *adev); -void update_watermark_setting(struct ipu6_isys *isys); - -int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); - -int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); - -int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); -#endif /* IPU6_ISYS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c deleted file mode 100644 index 57298ac..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c +++ /dev/null @@ -1,852 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-dma.h" -#include "ipu6-mmu.h" -#include "ipu6-platform-regs.h" - -#define ISP_PAGE_SHIFT 12 -#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) -#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) - -#define ISP_L1PT_SHIFT 22 -#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) - -#define ISP_L2PT_SHIFT 12 -#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) - -#define ISP_L1PT_PTES 1024 -#define ISP_L2PT_PTES 1024 - -#define ISP_PADDR_SHIFT 12 - -#define REG_TLB_INVALIDATE 
0x0000 - -#define REG_L1_PHYS 0x0004 /* 27-bit pfn */ -#define REG_INFO 0x0008 - -#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) - -static void tlb_invalidate(struct ipu6_mmu *mmu) -{ - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&mmu->ready_lock, flags); - if (!mmu->ready) { - spin_unlock_irqrestore(&mmu->ready_lock, flags); - return; - } - - for (i = 0; i < mmu->nr_mmus; i++) { - /* - * To avoid the HW bug induced dead lock in some of the IPU6 - * MMUs on successive invalidate calls, we need to first do a - * read to the page table base before writing the invalidate - * register. MMUs which need to implement this WA, will have - * the insert_read_before_invalidate flags set as true. - * Disregard the return value of the read. - */ - if (mmu->mmu_hw[i].insert_read_before_invalidate) - readl(mmu->mmu_hw[i].base + REG_L1_PHYS); - - writel(0xffffffff, mmu->mmu_hw[i].base + - REG_TLB_INVALIDATE); - /* - * The TLB invalidation is a "single cycle" (IOMMU clock cycles) - * When the actual MMIO write reaches the IPU6 TLB Invalidate - * register, wmb() will force the TLB invalidate out if the CPU - * attempts to update the IOMMU page table (or sooner). 
- */ - wmb(); - } - spin_unlock_irqrestore(&mmu->ready_lock, flags); -} - -#ifdef DEBUG -static void page_table_dump(struct ipu6_mmu_info *mmu_info) -{ - u32 l1_idx; - - dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n"); - - for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { - u32 l2_idx; - u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT; - phys_addr_t l2_phys; - - if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) - continue; - - l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx];) - dev_dbg(mmu_info->dev, - "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n", - l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys); - - for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { - u32 *l2_pt = mmu_info->l2_pts[l1_idx]; - u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); - - if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval) - continue; - - dev_dbg(mmu_info->dev, - "\tl2 entry %u; iova 0x%8.8x, phys %pa\n", - l2_idx, iova2, - TBL_PHYS_ADDR(l2_pt[l2_idx])); - } - } - - dev_dbg(mmu_info->dev, "end IOMMU page table dump\n"); -} -#endif /* DEBUG */ - -static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr) -{ - dma_addr_t dma; - - dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mmu_info->dev, dma)) - return 0; - - return dma; -} - -static int get_dummy_page(struct ipu6_mmu_info *mmu_info) -{ - void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - dma_addr_t dma; - - if (!pt) - return -ENOMEM; - - dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt); - - dma = map_single(mmu_info, pt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map dummy page\n"); - goto err_free_page; - } - - mmu_info->dummy_page = pt; - mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; - - return 0; - -err_free_page: - free_page((unsigned long)pt); - return -ENOMEM; -} - -static void free_dummy_page(struct ipu6_mmu_info *mmu_info) -{ - dma_unmap_single(mmu_info->dev, - TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), - 
PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->dummy_page); -} - -static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) -{ - u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - dma_addr_t dma; - unsigned int i; - - if (!pt) - return -ENOMEM; - - dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt); - - dma = map_single(mmu_info, pt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map l2pt page\n"); - goto err_free_page; - } - - for (i = 0; i < ISP_L2PT_PTES; i++) - pt[i] = mmu_info->dummy_page_pteval; - - mmu_info->dummy_l2_pt = pt; - mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; - - return 0; - -err_free_page: - free_page((unsigned long)pt); - return -ENOMEM; -} - -static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) -{ - dma_unmap_single(mmu_info->dev, - TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->dummy_l2_pt); -} - -static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info) -{ - u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - dma_addr_t dma; - unsigned int i; - - if (!pt) - return NULL; - - dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt); - - for (i = 0; i < ISP_L1PT_PTES; i++) - pt[i] = mmu_info->dummy_l2_pteval; - - dma = map_single(mmu_info, pt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map l1pt page\n"); - goto err_free_page; - } - - mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; - dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma); - - return pt; - -err_free_page: - free_page((unsigned long)pt); - return NULL; -} - -static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info) -{ - u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - unsigned int i; - - if (!pt) - return NULL; - - dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt); - - for (i = 0; i < ISP_L1PT_PTES; i++) - pt[i] = mmu_info->dummy_page_pteval; - - return pt; -} - -static int l2_map(struct ipu6_mmu_info 
*mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size) -{ - u32 l1_idx = iova >> ISP_L1PT_SHIFT; - u32 iova_start = iova; - u32 *l2_pt, *l2_virt; - unsigned int l2_idx; - unsigned long flags; - dma_addr_t dma; - u32 l1_entry; - - dev_dbg(mmu_info->dev, - "mapping l2 page table for l1 index %u (iova %8.8x)\n", - l1_idx, (u32)iova); - - spin_lock_irqsave(&mmu_info->lock, flags); - l1_entry = mmu_info->l1_pt[l1_idx]; - if (l1_entry == mmu_info->dummy_l2_pteval) { - l2_virt = mmu_info->l2_pts[l1_idx]; - if (likely(!l2_virt)) { - l2_virt = alloc_l2_pt(mmu_info); - if (!l2_virt) { - spin_unlock_irqrestore(&mmu_info->lock, flags); - return -ENOMEM; - } - } - - dma = map_single(mmu_info, l2_virt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map l2pt page\n"); - free_page((unsigned long)l2_virt); - spin_unlock_irqrestore(&mmu_info->lock, flags); - return -EINVAL; - } - - l1_entry = dma >> ISP_PADDR_SHIFT; - - dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n", - l1_idx, l2_virt); - mmu_info->l1_pt[l1_idx] = l1_entry; - mmu_info->l2_pts[l1_idx] = l2_virt; - clflush_cache_range((void *)&mmu_info->l1_pt[l1_idx], - sizeof(mmu_info->l1_pt[l1_idx])); - } - - l2_pt = mmu_info->l2_pts[l1_idx]; - - dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry); - - paddr = ALIGN(paddr, ISP_PAGE_SIZE); - - l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; - - dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx, - l2_pt[l2_idx]); - if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) { - spin_unlock_irqrestore(&mmu_info->lock, flags); - return -EINVAL; - } - - l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; - - clflush_cache_range((void *)&l2_pt[l2_idx], sizeof(l2_pt[l2_idx])); - spin_unlock_irqrestore(&mmu_info->lock, flags); - - dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx, - l2_pt[l2_idx]); - - return 0; -} - -static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size) -{ - 
u32 iova_start = round_down(iova, ISP_PAGE_SIZE); - u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); - - dev_dbg(mmu_info->dev, - "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n", - iova_start, iova_end, size, &paddr); - - return l2_map(mmu_info, iova_start, paddr, size); -} - -static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t dummy, size_t size) -{ - u32 l1_idx = iova >> ISP_L1PT_SHIFT; - u32 iova_start = iova; - unsigned int l2_idx; - size_t unmapped = 0; - unsigned long flags; - u32 *l2_pt; - - dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n", - l1_idx, iova); - - spin_lock_irqsave(&mmu_info->lock, flags); - if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { - spin_unlock_irqrestore(&mmu_info->lock, flags); - dev_err(mmu_info->dev, - "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n", - iova, l1_idx); - return 0; - } - - for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; - (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT) - < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) { - phys_addr_t pteval; - - l2_pt = mmu_info->l2_pts[l1_idx]; - pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]); - dev_dbg(mmu_info->dev, - "unmap l2 index %u with pteval 0x%p\n", - l2_idx, &pteval); - l2_pt[l2_idx] = mmu_info->dummy_page_pteval; - - clflush_cache_range((void *)&l2_pt[l2_idx], - sizeof(l2_pt[l2_idx])); - unmapped++; - } - spin_unlock_irqrestore(&mmu_info->lock, flags); - - return unmapped << ISP_PAGE_SHIFT; -} - -static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, - unsigned long iova, size_t size) -{ - return l2_unmap(mmu_info, iova, 0, size); -} - -static int allocate_trash_buffer(struct ipu6_mmu *mmu) -{ - unsigned int n_pages = PHYS_PFN(PAGE_ALIGN(IPU6_MMUV2_TRASH_RANGE)); - struct iova *iova; - unsigned int i; - dma_addr_t dma; - unsigned long iova_addr; - int ret; - - /* Allocate 8MB in iova range */ - iova = alloc_iova(&mmu->dmap->iovad, n_pages, - 
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); - if (!iova) { - dev_err(mmu->dev, "cannot allocate iova range for trash\n"); - return -ENOMEM; - } - - dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) { - dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n"); - ret = -ENOMEM; - goto out_free_iova; - } - - mmu->pci_trash_page = dma; - - /* - * Map the 8MB iova address range to the same physical trash page - * mmu->trash_page which is already reserved at the probe - */ - iova_addr = iova->pfn_lo; - for (i = 0; i < n_pages; i++) { - ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), - mmu->pci_trash_page, PAGE_SIZE); - if (ret) { - dev_err(mmu->dev, - "mapping trash buffer range failed\n"); - goto out_unmap; - } - - iova_addr++; - } - - mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo); - dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n", - mmu->mmid, (unsigned int)mmu->iova_trash_page); - return 0; - -out_unmap: - ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page, - PAGE_SIZE, DMA_BIDIRECTIONAL); -out_free_iova: - __free_iova(&mmu->dmap->iovad, iova); - return ret; -} - -int ipu6_mmu_hw_init(struct ipu6_mmu *mmu) -{ - struct ipu6_mmu_info *mmu_info; - unsigned long flags; - unsigned int i; - - mmu_info = mmu->dmap->mmu_info; - - /* Initialise the each MMU HW block */ - for (i = 0; i < mmu->nr_mmus; i++) { - struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; - unsigned int j; - u16 block_addr; - - /* Write page table address per MMU */ - writel((phys_addr_t)mmu_info->l1_pt_dma, - mmu->mmu_hw[i].base + REG_L1_PHYS); - - /* Set info bits per MMU */ - writel(mmu->mmu_hw[i].info_bits, - mmu->mmu_hw[i].base + REG_INFO); - - /* Configure MMU TLB stream configuration for L1 */ - for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams; - block_addr += 
mmu->mmu_hw[i].l1_block_sz[j], j++) { - if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) { - dev_err(mmu->dev, "invalid L1 configuration\n"); - return -EINVAL; - } - - /* Write block start address for each streams */ - writel(block_addr, mmu_hw->base + - mmu_hw->l1_stream_id_reg_offset + 4 * j); - } - - /* Configure MMU TLB stream configuration for L2 */ - for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams; - block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) { - if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) { - dev_err(mmu->dev, "invalid L2 configuration\n"); - return -EINVAL; - } - - writel(block_addr, mmu_hw->base + - mmu_hw->l2_stream_id_reg_offset + 4 * j); - } - } - - if (!mmu->trash_page) { - int ret; - - mmu->trash_page = alloc_page(GFP_KERNEL); - if (!mmu->trash_page) { - dev_err(mmu->dev, "insufficient memory for trash buffer\n"); - return -ENOMEM; - } - - ret = allocate_trash_buffer(mmu); - if (ret) { - __free_page(mmu->trash_page); - mmu->trash_page = NULL; - dev_err(mmu->dev, "trash buffer allocation failed\n"); - return ret; - } - } - - spin_lock_irqsave(&mmu->ready_lock, flags); - mmu->ready = true; - spin_unlock_irqrestore(&mmu->ready_lock, flags); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, INTEL_IPU6); - -static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp) -{ - struct ipu6_mmu_info *mmu_info; - int ret; - - mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL); - if (!mmu_info) - return NULL; - - mmu_info->aperture_start = 0; - mmu_info->aperture_end = - (dma_addr_t)DMA_BIT_MASK(isp->secure_mode ? 
- IPU6_MMU_ADDR_BITS : - IPU6_MMU_ADDR_BITS_NON_SECURE); - mmu_info->pgsize_bitmap = SZ_4K; - mmu_info->dev = &isp->pdev->dev; - - ret = get_dummy_page(mmu_info); - if (ret) - goto err_free_info; - - ret = alloc_dummy_l2_pt(mmu_info); - if (ret) - goto err_free_dummy_page; - - mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts)); - if (!mmu_info->l2_pts) - goto err_free_dummy_l2_pt; - - /* - * We always map the L1 page table (a single page as well as - * the L2 page tables). - */ - mmu_info->l1_pt = alloc_l1_pt(mmu_info); - if (!mmu_info->l1_pt) - goto err_free_l2_pts; - - spin_lock_init(&mmu_info->lock); - - dev_dbg(mmu_info->dev, "domain initialised\n"); - - return mmu_info; - -err_free_l2_pts: - vfree(mmu_info->l2_pts); -err_free_dummy_l2_pt: - free_dummy_l2_pt(mmu_info); -err_free_dummy_page: - free_dummy_page(mmu_info); -err_free_info: - kfree(mmu_info); - - return NULL; -} - -void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu) -{ - unsigned long flags; - - spin_lock_irqsave(&mmu->ready_lock, flags); - mmu->ready = false; - spin_unlock_irqrestore(&mmu->ready_lock, flags); -} -EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, INTEL_IPU6); - -static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp) -{ - struct ipu6_dma_mapping *dmap; - - dmap = kzalloc(sizeof(*dmap), GFP_KERNEL); - if (!dmap) - return NULL; - - dmap->mmu_info = ipu6_mmu_alloc(isp); - if (!dmap->mmu_info) { - kfree(dmap); - return NULL; - } - - init_iova_domain(&dmap->iovad, SZ_4K, 1); - dmap->mmu_info->dmap = dmap; - - dev_dbg(&isp->pdev->dev, "alloc mapping\n"); - - iova_cache_get(); - - return dmap; -} - -phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, - dma_addr_t iova) -{ - phys_addr_t phy_addr; - unsigned long flags; - u32 *l2_pt; - - spin_lock_irqsave(&mmu_info->lock, flags); - l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT]; - phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]; - phy_addr <<= ISP_PAGE_SHIFT; - 
spin_unlock_irqrestore(&mmu_info->lock, flags); - - return phy_addr; -} - -static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap, - unsigned long addr_merge, size_t size) -{ - unsigned int pgsize_idx; - size_t pgsize; - - /* Max page size that still fits into 'size' */ - pgsize_idx = __fls(size); - - if (likely(addr_merge)) { - /* Max page size allowed by address */ - unsigned int align_pgsize_idx = __ffs(addr_merge); - - pgsize_idx = min(pgsize_idx, align_pgsize_idx); - } - - pgsize = (1UL << (pgsize_idx + 1)) - 1; - pgsize &= pgsize_bitmap; - - WARN_ON(!pgsize); - - /* pick the biggest page */ - pgsize_idx = __fls(pgsize); - pgsize = 1UL << pgsize_idx; - - return pgsize; -} - -size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, - size_t size) -{ - size_t unmapped_page, unmapped = 0; - unsigned int min_pagesz; - - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); - - /* - * The virtual address and the size of the mapping must be - * aligned (at least) to the size of the smallest page supported - * by the hardware - */ - if (!IS_ALIGNED(iova | size, min_pagesz)) { - dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", - iova, size, min_pagesz); - return -EINVAL; - } - - /* - * Keep iterating until we either unmap 'size' bytes (or more) - * or we hit an area that isn't mapped. 
- */ - while (unmapped < size) { - size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap, - iova, size - unmapped); - - unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize); - if (!unmapped_page) - break; - - dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n", - iova, unmapped_page); - - iova += unmapped_page; - unmapped += unmapped_page; - } - - return unmapped; -} - -int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size) -{ - unsigned long orig_iova = iova; - unsigned int min_pagesz; - size_t orig_size = size; - int ret = 0; - - if (mmu_info->pgsize_bitmap == 0UL) - return -ENODEV; - - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); - - /* - * both the virtual address and the physical one, as well as - * the size of the mapping, must be aligned (at least) to the - * size of the smallest page supported by the hardware - */ - if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { - dev_err(mmu_info->dev, - "unaligned: iova %lx pa %pa size %zx min_pagesz %x\n", - iova, &paddr, size, min_pagesz); - return -EINVAL; - } - - dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n", - iova, &paddr, size); - - while (size) { - size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap, - iova | paddr, size); - - dev_dbg(mmu_info->dev, - "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", - iova, &paddr, pgsize); - - ret = __ipu6_mmu_map(mmu_info, iova, paddr, pgsize); - if (ret) - break; - - iova += pgsize; - paddr += pgsize; - size -= pgsize; - } - - /* unroll mapping in case something went wrong */ - if (ret) - ipu6_mmu_unmap(mmu_info, orig_iova, orig_size - size); - - return ret; -} - -static void ipu6_mmu_destroy(struct ipu6_mmu *mmu) -{ - struct ipu6_dma_mapping *dmap = mmu->dmap; - struct ipu6_mmu_info *mmu_info = dmap->mmu_info; - struct iova *iova; - u32 l1_idx; - - if (mmu->iova_trash_page) { - iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page)); - 
if (iova) { - /* unmap and free the trash buffer iova */ - ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - __free_iova(&dmap->iovad, iova); - } else { - dev_err(mmu->dev, "trash buffer iova not found.\n"); - } - - mmu->iova_trash_page = 0; - dma_unmap_page(mmu_info->dev, mmu->pci_trash_page, - PAGE_SIZE, DMA_BIDIRECTIONAL); - mmu->pci_trash_page = 0; - __free_page(mmu->trash_page); - } - - for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { - if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) { - dma_unmap_single(mmu_info->dev, - TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]), - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->l2_pts[l1_idx]); - } - } - - vfree(mmu_info->l2_pts); - free_dummy_page(mmu_info); - dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma), - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->dummy_l2_pt); - free_page((unsigned long)mmu_info->l1_pt); - kfree(mmu_info); -} - -struct ipu6_mmu *ipu6_mmu_init(struct device *dev, - void __iomem *base, int mmid, - const struct ipu6_hw_variants *hw) -{ - struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev)); - struct ipu6_mmu_pdata *pdata; - struct ipu6_mmu *mmu; - unsigned int i; - - if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES) - return ERR_PTR(-EINVAL); - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < hw->nr_mmus; i++) { - struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; - const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i]; - - if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS || - src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS) - return ERR_PTR(-EINVAL); - - *pdata_mmu = *src_mmu; - pdata_mmu->base = base + src_mmu->offset; - } - - mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL); - if (!mmu) - return ERR_PTR(-ENOMEM); - - mmu->mmid = mmid; - mmu->mmu_hw = pdata->mmu_hw; - mmu->nr_mmus = hw->nr_mmus; - mmu->tlb_invalidate = 
tlb_invalidate; - mmu->ready = false; - INIT_LIST_HEAD(&mmu->vma_list); - spin_lock_init(&mmu->ready_lock); - - mmu->dmap = alloc_dma_mapping(isp); - if (!mmu->dmap) { - dev_err(dev, "can't alloc dma mapping\n"); - return ERR_PTR(-ENOMEM); - } - - return mmu; -} - -void ipu6_mmu_cleanup(struct ipu6_mmu *mmu) -{ - struct ipu6_dma_mapping *dmap = mmu->dmap; - - ipu6_mmu_destroy(mmu); - mmu->dmap = NULL; - iova_cache_put(); - put_iova_domain(&dmap->iovad); - kfree(dmap); -} diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h deleted file mode 100644 index 21cdb0f..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_MMU_H -#define IPU6_MMU_H - -#define ISYS_MMID 1 -#define PSYS_MMID 0 - -#include -#include -#include - -struct device; -struct page; -struct ipu6_hw_variants; - -struct ipu6_mmu_info { - struct device *dev; - - u32 *l1_pt; - u32 l1_pt_dma; - u32 **l2_pts; - - u32 *dummy_l2_pt; - u32 dummy_l2_pteval; - void *dummy_page; - u32 dummy_page_pteval; - - dma_addr_t aperture_start; - dma_addr_t aperture_end; - unsigned long pgsize_bitmap; - - spinlock_t lock; /* Serialize access to users */ - struct ipu6_dma_mapping *dmap; -}; - -struct ipu6_mmu { - struct list_head node; - - struct ipu6_mmu_hw *mmu_hw; - unsigned int nr_mmus; - unsigned int mmid; - - phys_addr_t pgtbl; - struct device *dev; - - struct ipu6_dma_mapping *dmap; - struct list_head vma_list; - - struct page *trash_page; - dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ - dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */ - - bool ready; - spinlock_t ready_lock; /* Serialize access to bool ready */ - - void (*tlb_invalidate)(struct ipu6_mmu *mmu); -}; - -struct ipu6_mmu *ipu6_mmu_init(struct device *dev, - void __iomem *base, int mmid, - const struct 
ipu6_hw_variants *hw); -void ipu6_mmu_cleanup(struct ipu6_mmu *mmu); -int ipu6_mmu_hw_init(struct ipu6_mmu *mmu); -void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu); -int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size); -size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, - size_t size); -phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, - dma_addr_t iova); -#endif diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h deleted file mode 100644 index 20f2701..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h +++ /dev/null @@ -1,226 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2023--2024 Intel Corporation */ - -#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H -#define IPU6_PLATFORM_BUTTRESS_REGS_H - -#include - -/* IS_WORKPOINT_REQ */ -#define IPU6_BUTTRESS_REG_IS_FREQ_CTL 0x34 -/* PS_WORKPOINT_REQ */ -#define IPU6_BUTTRESS_REG_PS_FREQ_CTL 0x38 - -/* should be tuned for real silicon */ -#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO 0x08 -#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a -#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO 0x0d - -#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10 -#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708 - -#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 -#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK GENMASK(4, 3) - -#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 -#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK GENMASK(7, 6) - -#define IPU6_BUTTRESS_PWR_STATE_DN_DONE 0x0 -#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 -#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 -#define IPU6_BUTTRESS_PWR_STATE_UP_DONE 0x3 - -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0 0x270 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1 0x274 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2 0x278 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c -#define 
IPU6_BUTTRESS_REG_FPGA_SUPPORT_4 0x280 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5 0x284 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6 0x288 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c - -#define BUTTRESS_REG_WDT 0x8 -#define BUTTRESS_REG_BTRS_CTRL 0xc -#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) -#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) -#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8) - -#define BUTTRESS_REG_FW_RESET_CTL 0x30 -#define BUTTRESS_FW_RESET_CTL_START BIT(0) -#define BUTTRESS_FW_RESET_CTL_DONE BIT(1) - -#define BUTTRESS_REG_IS_FREQ_CTL 0x34 -#define BUTTRESS_REG_PS_FREQ_CTL 0x38 - -#define BUTTRESS_FREQ_CTL_START BIT(31) -#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL GENMASK(19, 16) -#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK GENMASK(15, 8) -#define BUTTRESS_FREQ_CTL_RATIO_MASK GENMASK(7, 0) - -#define BUTTRESS_REG_PWR_STATE 0x5c - -#define BUTTRESS_PWR_STATE_RESET 0x0 -#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 -#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 -#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 - -#define BUTTRESS_PWR_STATE_HH_STATUS_MASK GENMASK(12, 11) - -enum { - BUTTRESS_PWR_STATE_HH_STATE_IDLE, - BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, - BUTTRESS_PWR_STATE_HH_STATE_DONE, - BUTTRESS_PWR_STATE_HH_STATE_ERR, -}; - -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK GENMASK(23, 19) - -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa -#define 
BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf - -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK GENMASK(28, 24) - -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 - -#define BUTTRESS_REG_SECURITY_CTL 0x300 -#define BUTTRESS_REG_SKU 0x314 -#define BUTTRESS_REG_SECURITY_TOUCH 0x318 -#define BUTTRESS_REG_CAMERA_MASK 0x84 - -#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) -#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) - -#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) -#define 
BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) -#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) - -#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 -#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C -#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80 - -#define BUTTRESS_REG_ISR_STATUS 0x90 -#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 -#define BUTTRESS_REG_ISR_ENABLE 0x98 -#define BUTTRESS_REG_ISR_CLEAR 0x9C - -#define BUTTRESS_ISR_IS_IRQ BIT(0) -#define BUTTRESS_ISR_PS_IRQ BIT(1) -#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) -#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) -#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) -#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) -#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) -#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) -#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) -#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) -#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) -#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) -#define BUTTRESS_ISR_HW_ASSERTION BIT(12) -#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13) -#define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14) -#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15) -#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16) -#define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17) -#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18) -#define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19) -#define BUTTRESS_ISR_UFI_ERROR BIT(20) - -#define BUTTRESS_REG_IU2CSEDB0 0x100 - -#define BUTTRESS_IU2CSEDB0_BUSY BIT(31) -#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 - -#define BUTTRESS_REG_IU2CSEDATA0 0x104 - -#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 -#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 -#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 -#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 - -#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) -#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) -#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) -#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) - -#define 
BUTTRESS_REG_IU2CSECSR 0x108 - -#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) -#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) -#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) -#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) -#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) -#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) - -#define BUTTRESS_REG_CSE2IUDB0 0x304 -#define BUTTRESS_REG_CSE2IUCSR 0x30C -#define BUTTRESS_REG_CSE2IUDATA0 0x308 - -/* 0x20 == NACK, 0xf == unknown command */ -#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 -#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0) - -#define BUTTRESS_REG_ISH2IUCSR 0x50 -#define BUTTRESS_REG_ISH2IUDB0 0x54 -#define BUTTRESS_REG_ISH2IUDATA0 0x58 - -#define BUTTRESS_REG_IU2ISHDB0 0x10C -#define BUTTRESS_REG_IU2ISHDATA0 0x110 -#define BUTTRESS_REG_IU2ISHDATA1 0x114 -#define BUTTRESS_REG_IU2ISHCSR 0x118 - -#define BUTTRESS_REG_FABRIC_CMD 0x88 - -#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) -#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) - -#define BUTTRESS_REG_TSW_CTL 0x120 -#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) - -#define BUTTRESS_REG_TSC_LO 0x164 -#define BUTTRESS_REG_TSC_HI 0x168 - -#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ - BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ - BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ) - -#define BUTTRESS_EVENT (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ - BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \ - BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ - BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | \ - BUTTRESS_ISR_SAI_VIOLATION) -#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h deleted file mode 100644 index cc58377..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h +++ /dev/null @@ -1,172 +0,0 @@ -/* 
SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2023--2024 Intel Corporation */ - -#ifndef IPU6_PLATFORM_ISYS_CSI2_REG_H -#define IPU6_PLATFORM_ISYS_CSI2_REG_H - -#include - -#define CSI_REG_BASE 0x220000 -#define CSI_REG_PORT_BASE(id) (CSI_REG_BASE + (id) * 0x1000) - -/* CSI Port Genral Purpose Registers */ -#define CSI_REG_PORT_GPREG_SRST 0x0 -#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4 -#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8 - -/* - * Port IRQs mapping events: - * IRQ0 - CSI_FE event - * IRQ1 - CSI_SYNC - * IRQ2 - S2M_SIDS0TO7 - * IRQ3 - S2M_SIDS8TO15 - */ -#define CSI_PORT_REG_BASE_IRQ_CSI 0x80 -#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0 -#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0 -#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0 - -#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0 -#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4 -#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8 -#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc -#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10 -#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14 - -#define IPU6SE_CSI_RX_ERROR_IRQ_MASK GENMASK(18, 0) -#define IPU6_CSI_RX_ERROR_IRQ_MASK GENMASK(19, 0) - -#define CSI_RX_NUM_ERRORS_IN_IRQ 20 -#define CSI_RX_NUM_IRQ 32 - -#define IPU_CSI_RX_IRQ_FS_VC(chn) (1 << ((chn) * 2)) -#define IPU_CSI_RX_IRQ_FE_VC(chn) (2 << ((chn) * 2)) - -/* PPI2CSI */ -#define CSI_REG_PPI2CSI_ENABLE 0x200 -#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204 -#define PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK GENMASK(4, 3) -#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208 - -enum CSI_PPI2CSI_CTRL { - CSI_PPI2CSI_DISABLE = 0, - CSI_PPI2CSI_ENABLE = 1, -}; - -/* CSI_FE */ -#define CSI_REG_CSI_FE_ENABLE 0x280 -#define CSI_REG_CSI_FE_MODE 0x284 -#define CSI_REG_CSI_FE_MUX_CTRL 0x288 -#define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290 - -enum CSI_FE_ENABLE_TYPE { - CSI_FE_DISABLE = 0, - CSI_FE_ENABLE = 1, -}; - -enum CSI_FE_MODE_TYPE { - CSI_FE_DPHY_MODE = 0, - CSI_FE_CPHY_MODE = 1, -}; - -enum 
CSI_FE_INPUT_SELECTOR { - CSI_SENSOR_INPUT = 0, - CSI_MIPIGEN_INPUT = 1, -}; - -enum CSI_FE_SYNC_CNTR_SEL_TYPE { - CSI_CNTR_SENSOR_LINE_ID = BIT(0), - CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID, - CSI_CNTR_SENSOR_FRAME_ID = BIT(1), - CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID, -}; - -/* CSI HUB General Purpose Registers */ -#define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000) -#define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004) - -#define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4) -#define CSI_REG_HUB_FW_ACCESS_PORT_OFS 0x17000 -#define CSI_REG_HUB_FW_ACCESS_PORT_V6OFS 0x16000 -#define CSI_REG_HUB_FW_ACCESS_PORT(ofs, id) \ - (CSI_REG_BASE + (ofs) + (id) * 4) - -enum CSI_PORT_CLK_GATING_SWITCH { - CSI_PORT_CLK_GATING_OFF = 0, - CSI_PORT_CLK_GATING_ON = 1, -}; - -#define CSI_REG_BASE_HUB_IRQ 0x18200 - -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214 - -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234 - -/* MTL IPU6V6 irq ctrl0 & ctrl1 */ -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238700 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238704 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238708 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23870c -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238710 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238714 - -#define 
IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238720 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238724 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238728 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23872c -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238730 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238734 - -/* - * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits) - * [0] CSI_PORT.IRQ_CTRL0_csi - * [1] CSI_PORT.IRQ_CTRL1_csi_sync - * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7 - * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15 - */ -#define IPU6_ISYS_UNISPART_IRQ_CSI2(port) \ - (0x3 << ((port) * IPU6_CSI_IRQ_NUM_PER_PIPE)) - -/* - * ipu6se support 2 front ends, 2 port per front end, 4 ports 0..3 - * sip0 - 0, 1 - * sip1 - 2, 3 - * 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes - * all offset are base from isys base address - */ - -#define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4) -#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4) - -#define CSI2_HUB_GPREG_DPHY_TIMER_INCR 0x238040 -#define CSI2_HUB_GPREG_HPLL_FREQ 0x238044 -#define CSI2_HUB_GPREG_IS_CLK_RATIO 0x238048 -#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE 0x23804c -#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE 0x238058 -#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL 0x23805c -#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL 0x238088 -#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL 0x2380a4 -#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL 0x2380d0 - -#define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48) -#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48) -#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48) - -/* offset from port base */ -#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL 0x0 -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE 0x4 -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE 0x8 -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8) -#define 
CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8) - -#endif /* IPU6_ISYS_CSI2_REG_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h deleted file mode 100644 index b883385..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h +++ /dev/null @@ -1,179 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2018 - 2024 Intel Corporation */ - -#ifndef IPU6_PLATFORM_REGS_H -#define IPU6_PLATFORM_REGS_H - -#include - -/* - * IPU6 uses uniform address within IPU6, therefore all subsystem registers - * locates in one single space starts from 0 but in different sctions with - * different addresses, the subsystem offsets are defined to 0 as the - * register definition will have the address offset to 0. - */ -#define IPU6_UNIFIED_OFFSET 0 - -#define IPU6_ISYS_IOMMU0_OFFSET 0x2e0000 -#define IPU6_ISYS_IOMMU1_OFFSET 0x2e0500 -#define IPU6_ISYS_IOMMUI_OFFSET 0x2e0a00 - -#define IPU6_PSYS_IOMMU0_OFFSET 0x1b0000 -#define IPU6_PSYS_IOMMU1_OFFSET 0x1b0700 -#define IPU6_PSYS_IOMMU1R_OFFSET 0x1b0e00 -#define IPU6_PSYS_IOMMUI_OFFSET 0x1b1500 - -/* the offset from IOMMU base register */ -#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET 0x0c -#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET 0x4c -#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c - -#define IPU6_MMU_INFO_OFFSET 0x8 - -#define IPU6_ISYS_SPC_OFFSET 0x210000 - -#define IPU6SE_PSYS_SPC_OFFSET 0x110000 -#define IPU6_PSYS_SPC_OFFSET 0x118000 - -#define IPU6_ISYS_DMEM_OFFSET 0x200000 -#define IPU6_PSYS_DMEM_OFFSET 0x100000 - -#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000 -#define IPU6_REG_ISYS_UNISPART_IRQ_MASK 0x27c004 -#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008 -#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c -#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010 -#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014 -#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414 -#define 
IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418 -#define IPU6_ISYS_UNISPART_IRQ_CSI0 BIT(2) -#define IPU6_ISYS_UNISPART_IRQ_CSI1 BIT(3) -#define IPU6_ISYS_UNISPART_IRQ_SW BIT(22) - -#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c -#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214 - -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114 - -/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ -#define IPU6_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4)) - -#define IPU6_CSI_IRQ_NUM_PER_PIPE 4 -#define IPU6SE_ISYS_CSI_PORT_NUM 4 -#define IPU6_ISYS_CSI_PORT_NUM 8 - -#define IPU6_ISYS_CSI_PORT_IRQ(irq_num) BIT(irq_num) - -/* PKG DIR OFFSET in IMR in secure mode */ -#define IPU6_PKG_DIR_IMR_OFFSET 0x40 - -#define IPU6_ISYS_REG_SPC_STATUS_CTRL 0x0 - -#define IPU6_ISYS_SPC_STATUS_START BIT(1) -#define IPU6_ISYS_SPC_STATUS_RUN BIT(3) -#define IPU6_ISYS_SPC_STATUS_READY BIT(5) -#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) -#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) - -#define IPU6_PSYS_REG_SPC_STATUS_CTRL 0x0 -#define IPU6_PSYS_REG_SPC_START_PC 0x4 -#define IPU6_PSYS_REG_SPC_ICACHE_BASE 0x10 -#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 - -#define IPU6_PSYS_SPC_STATUS_START BIT(1) -#define IPU6_PSYS_SPC_STATUS_RUN BIT(3) -#define IPU6_PSYS_SPC_STATUS_READY BIT(5) -#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) -#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) - -#define IPU6_PSYS_REG_SPP0_STATUS_CTRL 0x20000 - -#define IPU6_INFO_ENABLE_SNOOP 
BIT(0) -#define IPU6_INFO_DEC_FORCE_FLUSH BIT(1) -#define IPU6_INFO_DEC_PASS_THROUGH BIT(2) -#define IPU6_INFO_ZLW BIT(3) -#define IPU6_INFO_REQUEST_DESTINATION_IOSF BIT(9) -#define IPU6_INFO_IMR_BASE BIT(10) -#define IPU6_INFO_IMR_DESTINED BIT(11) - -#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF - -/* - * s2m_pixel_soc_pixel_remapping is dedicated for the enabling of the - * pixel s2m remp ability.Remap here means that s2m rearange the order - * of the pixels in each 4 pixels group. - * For examle, mirroring remping means that if input's 4 first pixels - * are 1 2 3 4 then in output we should see 4 3 2 1 in this 4 first pixels. - * 0xE4 is from s2m MAS document. It means no remapping. - */ -#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 -/* - * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping. - * This remapping is exactly like the stream2mmio remapping. - */ -#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 - -#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 -#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 -#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) -#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) -#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) - -enum ipu6_device_ab_group1_target_id { - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, - 
IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, -}; - -enum nci_ab_access_mode { - NCI_AB_ACCESS_MODE_RW, /* read & write */ - NCI_AB_ACCESS_MODE_RO, /* read only */ - NCI_AB_ACCESS_MODE_WO, /* write only */ - NCI_AB_ACCESS_MODE_NA, /* No access at all */ -}; - -/* IRQ-related registers in PSYS */ -#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 -#define IPU6_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 -#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 -#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c -#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 -#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 -/* There are 8 FW interrupts, n = 0..7 */ -#define IPU6_PSYS_GPDEV_FWIRQ0 5 -#define IPU6_PSYS_GPDEV_FWIRQ1 6 -#define IPU6_PSYS_GPDEV_FWIRQ2 7 -#define IPU6_PSYS_GPDEV_FWIRQ3 8 -#define IPU6_PSYS_GPDEV_FWIRQ4 9 -#define IPU6_PSYS_GPDEV_FWIRQ5 10 -#define IPU6_PSYS_GPDEV_FWIRQ6 11 -#define IPU6_PSYS_GPDEV_FWIRQ7 12 -#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) -#define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) - -#endif /* IPU6_PLATFORM_REGS_H */ diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c deleted file mode 100644 index 91718ea..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c +++ /dev/null @@ -1,850 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-buttress.h" -#include "ipu6-cpd.h" -#include "ipu6-isys.h" -#include "ipu6-mmu.h" -#include "ipu6-platform-buttress-regs.h" -#include 
"ipu6-platform-isys-csi2-reg.h" -#include "ipu6-platform-regs.h" - -#define IPU6_PCI_BAR 0 - -struct ipu6_cell_program { - u32 magic_number; - - u32 blob_offset; - u32 blob_size; - - u32 start[3]; - - u32 icache_source; - u32 icache_target; - u32 icache_size; - - u32 pmem_source; - u32 pmem_target; - u32 pmem_size; - - u32 data_source; - u32 data_target; - u32 data_size; - - u32 bss_target; - u32 bss_size; - - u32 cell_id; - u32 regs_addr; - - u32 cell_pmem_data_bus_address; - u32 cell_dmem_data_bus_address; - u32 cell_pmem_control_bus_address; - u32 cell_dmem_control_bus_address; - - u32 next; - u32 dummy[2]; -}; - -static struct ipu6_isys_internal_pdata isys_ipdata = { - .hw_variant = { - .offset = IPU6_UNIFIED_OFFSET, - .nr_mmus = 3, - .mmu_hw = { - { - .offset = IPU6_ISYS_IOMMU0_OFFSET, - .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF, - .nr_l1streams = 16, - .l1_block_sz = { - 3, 8, 2, 2, 2, 2, 2, 2, 1, 1, - 1, 1, 1, 1, 1, 1 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_ISYS_IOMMU1_OFFSET, - .info_bits = 0, - .nr_l1streams = 16, - .l1_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 1, 1, 4 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_ISYS_IOMMUI_OFFSET, - .info_bits = 0, - .nr_l1streams = 0, - .nr_l2streams = 0, - .insert_read_before_invalidate = false, - }, - }, - .cdc_fifos = 3, - .cdc_fifo_threshold = {6, 8, 2}, - .dmem_offset = IPU6_ISYS_DMEM_OFFSET, - .spc_offset = IPU6_ISYS_SPC_OFFSET, - }, - .isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN, -}; - 
-static struct ipu6_psys_internal_pdata psys_ipdata = { - .hw_variant = { - .offset = IPU6_UNIFIED_OFFSET, - .nr_mmus = 4, - .mmu_hw = { - { - .offset = IPU6_PSYS_IOMMU0_OFFSET, - .info_bits = - IPU6_INFO_REQUEST_DESTINATION_IOSF, - .nr_l1streams = 16, - .l1_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_PSYS_IOMMU1_OFFSET, - .info_bits = 0, - .nr_l1streams = 32, - .l1_block_sz = { - 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 10, - 5, 4, 14, 6, 4, 14, 6, 4, 8, - 4, 2, 1, 1, 1, 1, 14 - }, - .nr_l2streams = 32, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_PSYS_IOMMU1R_OFFSET, - .info_bits = 0, - .nr_l1streams = 16, - .l1_block_sz = { - 1, 4, 4, 4, 4, 16, 8, 4, 32, - 16, 16, 2, 2, 2, 1, 12 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_PSYS_IOMMUI_OFFSET, - .info_bits = 0, - .nr_l1streams = 0, - .nr_l2streams = 0, - .insert_read_before_invalidate = false, - }, - }, - .dmem_offset = IPU6_PSYS_DMEM_OFFSET, - }, -}; - -static const struct ipu6_buttress_ctrl isys_buttress_ctrl = { - .ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO, - .qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, - .freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL, - 
.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, - .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK, - .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, - .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, -}; - -static const struct ipu6_buttress_ctrl psys_buttress_ctrl = { - .ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO, - .qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, - .freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL, - .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, - .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK, - .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, - .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, -}; - -static void -ipu6_pkg_dir_configure_spc(struct ipu6_device *isp, - const struct ipu6_hw_variants *hw_variant, - int pkg_dir_idx, void __iomem *base, - u64 *pkg_dir, dma_addr_t pkg_dir_vied_address) -{ - struct ipu6_cell_program *prog; - void __iomem *spc_base; - u32 server_fw_addr; - dma_addr_t dma_addr; - u32 pg_offset; - - server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2)); - if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX) - dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl); - else - dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl); - - pg_offset = server_fw_addr - dma_addr; - prog = (struct ipu6_cell_program *)((u64)isp->cpd_fw->data + pg_offset); - spc_base = base + prog->regs_addr; - if (spc_base != (base + hw_variant->spc_offset)) - dev_warn(&isp->pdev->dev, - "SPC reg addr %p not matching value from CPD %p\n", - base + hw_variant->spc_offset, spc_base); - writel(server_fw_addr + prog->blob_offset + - prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE); - writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, - spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); - writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC); - writel(pkg_dir_vied_address, base + hw_variant->dmem_offset); -} - -void ipu6_configure_spc(struct ipu6_device *isp, - const struct ipu6_hw_variants *hw_variant, - int pkg_dir_idx, 
void __iomem *base, u64 *pkg_dir, - dma_addr_t pkg_dir_dma_addr) -{ - void __iomem *dmem_base = base + hw_variant->dmem_offset; - void __iomem *spc_regs_base = base + hw_variant->spc_offset; - u32 val; - - val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); - val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; - writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); - - if (isp->secure_mode) - writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base); - else - ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base, - pkg_dir, pkg_dir_dma_addr); -} -EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6); - -#define IPU6_ISYS_CSI2_NPORTS 4 -#define IPU6SE_ISYS_CSI2_NPORTS 4 -#define IPU6_TGL_ISYS_CSI2_NPORTS 8 -#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6 - -static void ipu6_internal_pdata_init(struct ipu6_device *isp) -{ - u8 hw_ver = isp->hw_ver; - - isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS; - isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT; - isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE; - isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE; - isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START; - isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END; - isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS; - isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES; - isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX; - isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE; - isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS; - isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK; - isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; - isys_ipdata.csi2.ctrl0_irq_clear = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; - isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; - isys_ipdata.csi2.ctrl0_irq_enable = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; - isys_ipdata.csi2.ctrl0_irq_status = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; - isys_ipdata.csi2.ctrl0_irq_lnp = - 
IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; - isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver); - psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET; - isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS; - - if (is_ipu6ep(hw_ver)) { - isys_ipdata.ltr = IPU6EP_LTR_VALUE; - isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH; - } - - if (is_ipu6_tgl(hw_ver)) - isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS; - - if (is_ipu6ep_mtl(hw_ver)) { - isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS; - - isys_ipdata.csi2.ctrl0_irq_edge = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; - isys_ipdata.csi2.ctrl0_irq_clear = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; - isys_ipdata.csi2.ctrl0_irq_mask = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; - isys_ipdata.csi2.ctrl0_irq_enable = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; - isys_ipdata.csi2.ctrl0_irq_lnp = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; - isys_ipdata.csi2.ctrl0_irq_status = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; - isys_ipdata.csi2.fw_access_port_ofs = - CSI_REG_HUB_FW_ACCESS_PORT_V6OFS; - isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE; - isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH; - } - - if (is_ipu6se(hw_ver)) { - isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS; - isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK; - isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS; - isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT; - isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE; - isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE; - isys_ipdata.sensor_type_start = - IPU6SE_FW_ISYS_SENSOR_TYPE_START; - isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END; - isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS; - isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES; - isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX; - isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE; - 
psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET; - } -} - -static struct ipu6_bus_device * -ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_isys_internal_pdata *ipdata) -{ - struct device *dev = &pdev->dev; - struct ipu6_bus_device *isys_adev; - struct ipu6_isys_pdata *pdata; - int ret; - - ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); - if (ret) { - dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); - return ERR_PTR(ret); - } - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->base = base; - pdata->ipdata = ipdata; - - isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_ISYS_NAME); - if (IS_ERR(isys_adev)) { - kfree(pdata); - return dev_err_cast_probe(dev, isys_adev, - "ipu6_bus_initialize_device isys failed\n"); - } - - isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(isys_adev->mmu)) { - put_device(&isys_adev->auxdev.dev); - kfree(pdata); - return dev_err_cast_probe(dev, isys_adev->mmu, - "ipu6_mmu_init(isys_adev->mmu) failed\n"); - } - - isys_adev->mmu->dev = &isys_adev->auxdev.dev; - - ret = ipu6_bus_add_device(isys_adev); - if (ret) { - kfree(pdata); - return ERR_PTR(ret); - } - - return isys_adev; -} - -static struct ipu6_bus_device * -ipu6_psys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_psys_internal_pdata *ipdata) -{ - struct ipu6_bus_device *psys_adev; - struct ipu6_psys_pdata *pdata; - int ret; - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->base = base; - pdata->ipdata = ipdata; - - psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_PSYS_NAME); - if (IS_ERR(psys_adev)) { - kfree(pdata); - return dev_err_cast_probe(&pdev->dev, psys_adev, - "ipu6_bus_initialize_device psys failed\n"); - } 
- - psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(psys_adev->mmu)) { - put_device(&psys_adev->auxdev.dev); - kfree(pdata); - return dev_err_cast_probe(&pdev->dev, psys_adev->mmu, - "ipu6_mmu_init(psys_adev->mmu) failed\n"); - } - - psys_adev->mmu->dev = &psys_adev->auxdev.dev; - - ret = ipu6_bus_add_device(psys_adev); - if (ret) { - kfree(pdata); - return ERR_PTR(ret); - } - - return psys_adev; -} - -static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) -{ - int ret; - - /* disable IPU6 PCI ATS on mtl ES2 */ - if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && - pci_ats_supported(dev)) - pci_disable_ats(dev); - - /* No PCI msi capability for IPU6EP */ - if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { - /* likely do nothing as msi not enabled by default */ - pci_disable_msi(dev); - return 0; - } - - ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI); - if (ret < 0) - return dev_err_probe(&dev->dev, ret, "Request msi failed"); - - return 0; -} - -static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) -{ - u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); - - if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL) - val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; - else - val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; - - if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL) - val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; - else - val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; - - writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); -} - -static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; - struct device *dev = &pdev->dev; - void __iomem *isys_base = NULL; - void __iomem *psys_base = NULL; - struct ipu6_device *isp; - phys_addr_t phys; - u32 val, version, sku_id; - int ret; - - isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL); - if (!isp) - return -ENOMEM; - - 
isp->pdev = pdev; - INIT_LIST_HEAD(&isp->devices); - - ret = pcim_enable_device(pdev); - if (ret) - return dev_err_probe(dev, ret, "Enable PCI device failed\n"); - - phys = pci_resource_start(pdev, IPU6_PCI_BAR); - dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys); - - ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev)); - if (ret) - return dev_err_probe(dev, ret, "Failed to I/O mem remapping\n"); - - isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR]; - pci_set_drvdata(pdev, isp); - pci_set_master(pdev); - - isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt); - switch (id->device) { - case PCI_DEVICE_ID_INTEL_IPU6: - isp->hw_ver = IPU6_VER_6; - isp->cpd_fw_name = IPU6_FIRMWARE_NAME; - break; - case PCI_DEVICE_ID_INTEL_IPU6SE: - isp->hw_ver = IPU6_VER_6SE; - isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME; - isp->cpd_metadata_cmpnt_size = - sizeof(struct ipu6se_cpd_metadata_cmpnt); - break; - case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP: - case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP: - isp->hw_ver = IPU6_VER_6EP; - isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME; - break; - case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN: - isp->hw_ver = IPU6_VER_6EP; - isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME; - break; - case PCI_DEVICE_ID_INTEL_IPU6EP_MTL: - isp->hw_ver = IPU6_VER_6EP_MTL; - isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME; - break; - default: - return dev_err_probe(dev, -ENODEV, - "Unsupported IPU6 device %x\n", - id->device); - } - - ipu6_internal_pdata_init(isp); - - isys_base = isp->base + isys_ipdata.hw_variant.offset; - psys_base = isp->base + psys_ipdata.hw_variant.offset; - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39)); - if (ret) - return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); - - dma_set_max_seg_size(dev, UINT_MAX); - - ret = ipu6_pci_config_setup(pdev, isp->hw_ver); - if (ret) - return ret; - - ret = ipu6_buttress_init(isp); - if (ret) - return ret; - - ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev); - if (ret) { - 
dev_err_probe(&isp->pdev->dev, ret, - "Requesting signed firmware %s failed\n", - isp->cpd_fw_name); - goto buttress_exit; - } - - ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, - isp->cpd_fw->size); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "Failed to validate cpd\n"); - goto out_ipu6_bus_del_devices; - } - - isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, - sizeof(isys_buttress_ctrl), GFP_KERNEL); - if (!isys_ctrl) { - ret = -ENOMEM; - goto out_ipu6_bus_del_devices; - } - - isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, - &isys_ipdata); - if (IS_ERR(isp->isys)) { - ret = PTR_ERR(isp->isys); - goto out_ipu6_bus_del_devices; - } - - psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl, - sizeof(psys_buttress_ctrl), GFP_KERNEL); - if (!psys_ctrl) { - ret = -ENOMEM; - goto out_ipu6_bus_del_devices; - } - - isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl, - psys_base, &psys_ipdata); - if (IS_ERR(isp->psys)) { - ret = PTR_ERR(isp->psys); - goto out_ipu6_bus_del_devices; - } - - ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); - if (ret < 0) - goto out_ipu6_bus_del_devices; - - ret = ipu6_mmu_hw_init(isp->psys->mmu); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "Failed to set MMU hardware\n"); - goto out_ipu6_bus_del_devices; - } - - ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw, - &isp->psys->fw_sgt); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n"); - goto out_ipu6_bus_del_devices; - } - - ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "failed to create pkg dir\n"); - goto out_ipu6_bus_del_devices; - } - - ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr, - ipu6_buttress_isr_threaded, - IRQF_SHARED, IPU6_NAME, isp); - if (ret) { - dev_err_probe(dev, ret, "Requesting irq failed\n"); - goto out_ipu6_bus_del_devices; - } - - ret = ipu6_buttress_authenticate(isp); - if (ret) { - 
dev_err_probe(&isp->pdev->dev, ret, - "FW authentication failed\n"); - goto out_free_irq; - } - - ipu6_mmu_hw_cleanup(isp->psys->mmu); - pm_runtime_put(&isp->psys->auxdev.dev); - - /* Configure the arbitration mechanisms for VC requests */ - ipu6_configure_vc_mechanism(isp); - - val = readl(isp->base + BUTTRESS_REG_SKU); - sku_id = FIELD_GET(GENMASK(6, 4), val); - version = FIELD_GET(GENMASK(3, 0), val); - dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id, - pdev->device, isp->hw_ver); - - pm_runtime_put_noidle(dev); - pm_runtime_allow(dev); - - isp->bus_ready_to_probe = true; - - return 0; - -out_free_irq: - devm_free_irq(dev, pdev->irq, isp); -out_ipu6_bus_del_devices: - if (isp->psys) { - ipu6_cpd_free_pkg_dir(isp->psys); - ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); - } - if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) - ipu6_mmu_cleanup(isp->psys->mmu); - if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) - ipu6_mmu_cleanup(isp->isys->mmu); - ipu6_bus_del_devices(pdev); - release_firmware(isp->cpd_fw); -buttress_exit: - ipu6_buttress_exit(isp); - - return ret; -} - -static void ipu6_pci_remove(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - struct ipu6_mmu *isys_mmu = isp->isys->mmu; - struct ipu6_mmu *psys_mmu = isp->psys->mmu; - - devm_free_irq(&pdev->dev, pdev->irq, isp); - ipu6_cpd_free_pkg_dir(isp->psys); - - ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); - ipu6_buttress_exit(isp); - - ipu6_bus_del_devices(pdev); - - pm_runtime_forbid(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); - - release_firmware(isp->cpd_fw); - - ipu6_mmu_cleanup(psys_mmu); - ipu6_mmu_cleanup(isys_mmu); -} - -static void ipu6_pci_reset_prepare(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - - pm_runtime_forbid(&isp->pdev->dev); -} - -static void ipu6_pci_reset_done(struct pci_dev *pdev) -{ - struct ipu6_device *isp = 
pci_get_drvdata(pdev); - - ipu6_buttress_restore(isp); - if (isp->secure_mode) - ipu6_buttress_reset_authentication(isp); - - isp->need_ipc_reset = true; - pm_runtime_allow(&isp->pdev->dev); -} - -/* - * PCI base driver code requires driver to provide these to enable - * PCI device level PM state transitions (D0<->D3) - */ -static int ipu6_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - synchronize_irq(pdev->irq); - return 0; -} - -static int ipu6_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct ipu6_device *isp = pci_get_drvdata(pdev); - struct ipu6_buttress *b = &isp->buttress; - int ret; - - /* Configure the arbitration mechanisms for VC requests */ - ipu6_configure_vc_mechanism(isp); - - isp->secure_mode = ipu6_buttress_get_secure_mode(isp); - dev_info(dev, "IPU6 in %s mode\n", - isp->secure_mode ? "secure" : "non-secure"); - - ipu6_buttress_restore(isp); - - ret = ipu6_buttress_ipc_reset(isp, &b->cse); - if (ret) - dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); - - ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); - if (ret < 0) { - dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); - return 0; - } - - ret = ipu6_buttress_authenticate(isp); - if (ret) - dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret); - - pm_runtime_put(&isp->psys->auxdev.dev); - - return 0; -} - -static int ipu6_runtime_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct ipu6_device *isp = pci_get_drvdata(pdev); - int ret; - - ipu6_configure_vc_mechanism(isp); - ipu6_buttress_restore(isp); - - if (isp->need_ipc_reset) { - struct ipu6_buttress *b = &isp->buttress; - - isp->need_ipc_reset = false; - ret = ipu6_buttress_ipc_reset(isp, &b->cse); - if (ret) - dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); - } - - return 0; -} - -static const struct dev_pm_ops ipu6_pm_ops = { - SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume) - 
RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL) -}; - -MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl); - -static const struct pci_error_handlers pci_err_handlers = { - .reset_prepare = ipu6_pci_reset_prepare, - .reset_done = ipu6_pci_reset_done, -}; - -static struct pci_driver ipu6_pci_driver = { - .name = IPU6_NAME, - .id_table = ipu6_pci_tbl, - .probe = ipu6_pci_probe, - .remove = ipu6_pci_remove, - .driver = { - .pm = pm_ptr(&ipu6_pm_ops), - }, - .err_handler = &pci_err_handlers, -}; - -module_pci_driver(ipu6_pci_driver); - -MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); -MODULE_AUTHOR("Sakari Ailus "); -MODULE_AUTHOR("Tianshu Qiu "); -MODULE_AUTHOR("Bingbu Cao "); -MODULE_AUTHOR("Qingwu Zhang "); -MODULE_AUTHOR("Yunliang Ding "); -MODULE_AUTHOR("Hongju Wang "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel IPU6 PCI driver"); diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h deleted file mode 100644 index 92e3c34..0000000 --- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h +++ /dev/null @@ -1,342 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013 - 2024 Intel Corporation */ - -#ifndef IPU6_H -#define IPU6_H - -#include -#include -#include - -#include "ipu6-buttress.h" - -struct firmware; -struct pci_dev; -struct ipu6_bus_device; - -#define IPU6_NAME "intel-ipu6" -#define IPU6_MEDIA_DEV_MODEL_NAME "ipu6" - -#define IPU6SE_FIRMWARE_NAME "intel/ipu/ipu6se_fw.bin" -#define IPU6EP_FIRMWARE_NAME "intel/ipu/ipu6ep_fw.bin" -#define IPU6_FIRMWARE_NAME "intel/ipu/ipu6_fw.bin" -#define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" -#define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" - -enum ipu6_version { - IPU6_VER_INVALID = 0, - IPU6_VER_6 = 1, - IPU6_VER_6SE = 3, - IPU6_VER_6EP = 5, - IPU6_VER_6EP_MTL = 6, -}; - -/* - * IPU6 - TGL - * IPU6SE - JSL - * IPU6EP - ADL/RPL - * IPU6EP_MTL - MTL - */ -static inline bool is_ipu6se(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6SE; -} - -static 
inline bool is_ipu6ep(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6EP; -} - -static inline bool is_ipu6ep_mtl(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6EP_MTL; -} - -static inline bool is_ipu6_tgl(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6; -} - -/* - * ISYS DMA can overshoot. For higher resolutions over allocation is one line - * but it must be at minimum 1024 bytes. Value could be different in - * different versions / generations thus provide it via platform data. - */ -#define IPU6_ISYS_OVERALLOC_MIN 1024 - -/* Physical pages in GDA is 128, page size is 2K for IPU6, 1K for others */ -#define IPU6_DEVICE_GDA_NR_PAGES 128 - -/* Virtualization factor to calculate the available virtual pages */ -#define IPU6_DEVICE_GDA_VIRT_FACTOR 32 - -struct ipu6_device { - struct pci_dev *pdev; - struct list_head devices; - struct ipu6_bus_device *isys; - struct ipu6_bus_device *psys; - struct ipu6_buttress buttress; - - const struct firmware *cpd_fw; - const char *cpd_fw_name; - u32 cpd_metadata_cmpnt_size; - - void __iomem *base; - bool need_ipc_reset; - bool secure_mode; - u8 hw_ver; - bool bus_ready_to_probe; -}; - -#define IPU6_ISYS_NAME "isys" -#define IPU6_PSYS_NAME "psys" - -#define IPU6_MMU_MAX_DEVICES 4 -#define IPU6_MMU_ADDR_BITS 32 -/* The firmware is accessible within the first 2 GiB only in non-secure mode. */ -#define IPU6_MMU_ADDR_BITS_NON_SECURE 31 - -#define IPU6_MMU_MAX_TLB_L1_STREAMS 32 -#define IPU6_MMU_MAX_TLB_L2_STREAMS 32 -#define IPU6_MAX_LI_BLOCK_ADDR 128 -#define IPU6_MAX_L2_BLOCK_ADDR 64 - -#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX -#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX - -/* - * To maximize the IOSF utlization, IPU6 need to send requests in bursts. - * At the DMA interface with the buttress, there are CDC FIFOs with burst - * collection capability. CDC FIFO burst collectors have a configurable - * threshold and is configured based on the outcome of performance measurements. 
- * - * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 - * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 - * - * Threshold values are pre-defined and are arrived at after performance - * evaluations on a type of IPU6 - */ -#define IPU6_MAX_VC_IOSF_PORTS 4 - -/* - * IPU6 must configure correct arbitration mechanism related to the IOSF VC - * requests. There are two options per VC0 and VC1 - > 0 means rearbitrate on - * stall and 1 means stall until the request is completed. - */ -#define IPU6_BTRS_ARB_MODE_TYPE_REARB 0 -#define IPU6_BTRS_ARB_MODE_TYPE_STALL 1 - -/* Currently chosen arbitration mechanism for VC0 */ -#define IPU6_BTRS_ARB_STALL_MODE_VC0 \ - IPU6_BTRS_ARB_MODE_TYPE_REARB - -/* Currently chosen arbitration mechanism for VC1 */ -#define IPU6_BTRS_ARB_STALL_MODE_VC1 \ - IPU6_BTRS_ARB_MODE_TYPE_REARB - -/* - * MMU Invalidation HW bug workaround by ZLW mechanism - * - * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in - * wrong translation or replication of the translation. This will cause data - * corruption. So we cannot directly use the MMU V2 invalidation registers - * to invalidate the MMU. Instead, whenever an invalidate is called, we need to - * clear the TLB by evicting all the valid translations by filling it with trash - * buffer (which is guaranteed not to be used by any other processes). ZLW is - * used to fill the L1 and L2 caches with the trash buffer translations. ZLW - * or Zero length write, is pre-fetch mechanism to pre-fetch the pages in - * advance to the L1 and L2 caches without triggering any memory operations. - * - * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream - * One L1 block has 16 entries, hence points to 16 * 4K pages - * L2 -> 16 streams and 32 blocks. 
2 blocks per streams - * One L2 block maps to 1024 L1 entries, hence points to 4MB address range - * 2 blocks per L2 stream means, 1 stream points to 8MB range - * - * As we need to clear the caches and 8MB being the biggest cache size, we need - * to have trash buffer which points to 8MB address range. As these trash - * buffers are not used for any memory transactions, we need only the least - * amount of physical memory. So we reserve 8MB IOVA address range but only - * one page is reserved from physical memory. Each of this 8MB IOVA address - * range is then mapped to the same physical memory page. - */ -/* One L2 entry maps 1024 L1 entries and one L1 entry per page */ -#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE) -/* Max L2 blocks per stream */ -#define IPU6_MMUV2_MAX_L2_BLOCKS 2 -/* Max L1 blocks per stream */ -#define IPU6_MMUV2_MAX_L1_BLOCKS 16 -#define IPU6_MMUV2_TRASH_RANGE (IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS) -/* Entries per L1 block */ -#define MMUV2_ENTRIES_PER_L1_BLOCK 16 -#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE) -#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU6_MMUV2_L2_RANGE - -/* - * In some of the IPU6 MMUs, there is provision to configure L1 and L2 page - * table caches. Both these L1 and L2 caches are divided into multiple sections - * called streams. There is maximum 16 streams for both caches. Each of these - * sections are subdivided into multiple blocks. When nr_l1streams = 0 and - * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support - * L1/L2 page table caches. - * - * L1 stream per block sizes are configurable and varies per usecase. - * L2 has constant block sizes - 2 blocks per stream. - * - * MMU1 support pre-fetching of the pages to have less cache lookup misses. To - * enable the pre-fetching, MMU1 AT (Address Translator) device registers - * need to be configured. - * - * There are four types of memory accesses which requires ZLW configuration. 
- * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU. - * - * 1. Sequential Access or 1D mode - * Set ZLW_EN -> 1 - * set ZLW_PAGE_CROSS_1D -> 1 - * Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where - * N is pre-defined and hardcoded in the platform data - * Set ZLW_2D -> 0 - * - * 2. ZLW 2D mode - * Set ZLW_EN -> 1 - * set ZLW_PAGE_CROSS_1D -> 1, - * Set ZLW_N -> 0 - * Set ZLW_2D -> 1 - * - * 3. ZLW Enable (no 1D or 2D mode) - * Set ZLW_EN -> 1 - * set ZLW_PAGE_CROSS_1D -> 0, - * Set ZLW_N -> 0 - * Set ZLW_2D -> 0 - * - * 4. ZLW disable - * Set ZLW_EN -> 0 - * set ZLW_PAGE_CROSS_1D -> 0, - * Set ZLW_N -> 0 - * Set ZLW_2D -> 0 - * - * To configure the ZLW for the above memory access, four registers are - * available. Hence to track these four settings, we have the following entries - * in the struct ipu6_mmu_hw. Each of these entries are per stream and - * available only for the L1 streams. - * - * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN) - * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary - * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted - * Insert ZLW request N pages ahead address. - * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D) - * - * - * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined - * as per the usecase specific calculations. Any change to this pre-defined - * table has to happen in sync with IPU6 FW. - */ -struct ipu6_mmu_hw { - union { - unsigned long offset; - void __iomem *base; - }; - u32 info_bits; - u8 nr_l1streams; - /* - * L1 has variable blocks per stream - total of 64 blocks and maximum of - * 16 blocks per stream. Configurable by using the block start address - * per stream. 
Block start address is calculated from the block size - */ - u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS]; - /* Is ZLW is enabled in each stream */ - bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS]; - bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; - u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS]; - bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; - - u32 l1_stream_id_reg_offset; - u32 l2_stream_id_reg_offset; - - u8 nr_l2streams; - /* - * L2 has fixed 2 blocks per stream. Block address is calculated - * from the block size - */ - u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS]; - /* flag to track if WA is needed for successive invalidate HW bug */ - bool insert_read_before_invalidate; -}; - -struct ipu6_mmu_pdata { - u32 nr_mmus; - struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; - int mmid; -}; - -struct ipu6_isys_csi2_pdata { - void __iomem *base; -}; - -struct ipu6_isys_internal_csi2_pdata { - u32 nports; - u32 irq_mask; - u32 ctrl0_irq_edge; - u32 ctrl0_irq_clear; - u32 ctrl0_irq_mask; - u32 ctrl0_irq_enable; - u32 ctrl0_irq_lnp; - u32 ctrl0_irq_status; - u32 fw_access_port_ofs; -}; - -struct ipu6_isys_internal_tpg_pdata { - u32 ntpgs; - u32 *offsets; - u32 *sels; -}; - -struct ipu6_hw_variants { - unsigned long offset; - u32 nr_mmus; - struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; - u8 cdc_fifos; - u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS]; - u32 dmem_offset; - u32 spc_offset; -}; - -struct ipu6_isys_internal_pdata { - struct ipu6_isys_internal_csi2_pdata csi2; - struct ipu6_hw_variants hw_variant; - u32 num_parallel_streams; - u32 isys_dma_overshoot; - u32 sram_gran_shift; - u32 sram_gran_size; - u32 max_sram_size; - u32 max_streams; - u32 max_send_queues; - u32 max_sram_blocks; - u32 max_devq_size; - u32 sensor_type_start; - u32 sensor_type_end; - u32 ltr; - u32 memopen_threshold; - bool enhanced_iwake; -}; - -struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; -}; - -struct ipu6_psys_internal_pdata { - 
struct ipu6_hw_variants hw_variant; -}; - -struct ipu6_psys_pdata { - void __iomem *base; - const struct ipu6_psys_internal_pdata *ipdata; -}; - -int ipu6_fw_authenticate(void *data, u64 val); -void ipu6_configure_spc(struct ipu6_device *isp, - const struct ipu6_hw_variants *hw_variant, - int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, - dma_addr_t pkg_dir_dma_addr); -#endif /* IPU6_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Kconfig b/6.18.0/drivers/media/pci/intel/ipu6/Kconfig deleted file mode 100644 index 1129e2b..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/Kconfig +++ /dev/null @@ -1,18 +0,0 @@ -config VIDEO_INTEL_IPU6 - tristate "Intel IPU6 driver" - depends on ACPI || COMPILE_TEST - depends on VIDEO_DEV - depends on X86 && HAS_DMA - depends on IPU_BRIDGE || !IPU_BRIDGE - select AUXILIARY_BUS - select IOMMU_IOVA - select VIDEO_V4L2_SUBDEV_API - select MEDIA_CONTROLLER - select VIDEOBUF2_DMA_SG - select V4L2_FWNODE - help - This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs - and used for capturing images and video from camera sensors. - - To compile this driver, say Y here! It contains 2 modules - - intel_ipu6 and intel_ipu6_isys. 
diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile deleted file mode 100644 index a821b0a..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -intel-ipu6-y := ipu6.o \ - ipu6-bus.o \ - ipu6-dma.o \ - ipu6-mmu.o \ - ipu6-buttress.o \ - ipu6-cpd.o \ - ipu6-fw-com.o - -obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o - -intel-ipu6-isys-y := ipu6-isys.o \ - ipu6-isys-csi2.o \ - ipu6-fw-isys.o \ - ipu6-isys-video.o \ - ipu6-isys-queue.o \ - ipu6-isys-subdev.o \ - ipu6-isys-mcd-phy.o \ - ipu6-isys-jsl-phy.o \ - ipu6-isys-dwc-phy.o - -obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c deleted file mode 100644 index 5cee274..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c +++ /dev/null @@ -1,159 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 - 2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-buttress.h" -#include "ipu6-dma.h" - -static int bus_pm_runtime_suspend(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - int ret; - - ret = pm_generic_runtime_suspend(dev); - if (ret) - return ret; - - ret = ipu6_buttress_power(dev, adev->ctrl, false); - if (!ret) - return 0; - - dev_err(dev, "power down failed!\n"); - - /* Powering down failed, attempt to resume device now */ - ret = pm_generic_runtime_resume(dev); - if (!ret) - return -EBUSY; - - return -EIO; -} - -static int bus_pm_runtime_resume(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - int ret; - - ret = ipu6_buttress_power(dev, adev->ctrl, true); - if (ret) - return ret; - - ret = pm_generic_runtime_resume(dev); - if (ret) - goto 
out_err; - - return 0; - -out_err: - ipu6_buttress_power(dev, adev->ctrl, false); - - return -EBUSY; -} - -static struct dev_pm_domain ipu6_bus_pm_domain = { - .ops = { - .runtime_suspend = bus_pm_runtime_suspend, - .runtime_resume = bus_pm_runtime_resume, - }, -}; - -static DEFINE_MUTEX(ipu6_bus_mutex); - -static void ipu6_bus_release(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - - kfree(adev->pdata); - kfree(adev); -} - -struct ipu6_bus_device * -ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, - void *pdata, const struct ipu6_buttress_ctrl *ctrl, - char *name) -{ - struct auxiliary_device *auxdev; - struct ipu6_bus_device *adev; - struct ipu6_device *isp = pci_get_drvdata(pdev); - int ret; - - adev = kzalloc(sizeof(*adev), GFP_KERNEL); - if (!adev) - return ERR_PTR(-ENOMEM); - - adev->isp = isp; - adev->ctrl = ctrl; - adev->pdata = pdata; - auxdev = &adev->auxdev; - auxdev->name = name; - auxdev->id = (pci_domain_nr(pdev->bus) << 16) | - PCI_DEVID(pdev->bus->number, pdev->devfn); - - auxdev->dev.parent = parent; - auxdev->dev.release = ipu6_bus_release; - - ret = auxiliary_device_init(auxdev); - if (ret < 0) { - dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n", - ret); - kfree(adev); - return ERR_PTR(ret); - } - - dev_pm_domain_set(&auxdev->dev, &ipu6_bus_pm_domain); - - pm_runtime_forbid(&adev->auxdev.dev); - pm_runtime_enable(&adev->auxdev.dev); - - return adev; -} - -int ipu6_bus_add_device(struct ipu6_bus_device *adev) -{ - struct auxiliary_device *auxdev = &adev->auxdev; - int ret; - - ret = auxiliary_device_add(auxdev); - if (ret) { - auxiliary_device_uninit(auxdev); - return ret; - } - - mutex_lock(&ipu6_bus_mutex); - list_add(&adev->list, &adev->isp->devices); - mutex_unlock(&ipu6_bus_mutex); - - pm_runtime_allow(&auxdev->dev); - - return 0; -} - -void ipu6_bus_del_devices(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - struct ipu6_bus_device *adev, 
*save; - - mutex_lock(&ipu6_bus_mutex); - - list_for_each_entry_safe(adev, save, &isp->devices, list) { - pm_runtime_disable(&adev->auxdev.dev); - list_del(&adev->list); - auxiliary_device_delete(&adev->auxdev); - auxiliary_device_uninit(&adev->auxdev); - } - - mutex_unlock(&ipu6_bus_mutex); -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h deleted file mode 100644 index a08c546..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013 - 2024 Intel Corporation */ - -#ifndef IPU6_BUS_H -#define IPU6_BUS_H - -#include -#include -#include -#include -#include -#include -#include - -struct firmware; -struct pci_dev; - -struct ipu6_buttress_ctrl; - -struct ipu6_bus_device { - struct auxiliary_device auxdev; - const struct auxiliary_driver *auxdrv; - const struct ipu6_auxdrv_data *auxdrv_data; - struct list_head list; - void *pdata; - struct ipu6_mmu *mmu; - struct ipu6_device *isp; - const struct ipu6_buttress_ctrl *ctrl; - const struct firmware *fw; - struct sg_table fw_sgt; - u64 *pkg_dir; - dma_addr_t pkg_dir_dma_addr; - unsigned int pkg_dir_size; -}; - -struct ipu6_auxdrv_data { - irqreturn_t (*isr)(struct ipu6_bus_device *adev); - irqreturn_t (*isr_threaded)(struct ipu6_bus_device *adev); - bool wake_isr_thread; -}; - -#define to_ipu6_bus_device(_dev) \ - container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev) -#define auxdev_to_adev(_auxdev) \ - container_of(_auxdev, struct ipu6_bus_device, auxdev) -#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) - -struct ipu6_bus_device * -ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, - void *pdata, const struct ipu6_buttress_ctrl *ctrl, - char *name); -int ipu6_bus_add_device(struct ipu6_bus_device *adev); -void ipu6_bus_del_devices(struct pci_dev *pdev); - -#endif diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c deleted file mode 100644 index 103386c..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c +++ /dev/null @@ -1,910 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-buttress.h" -#include "ipu6-platform-buttress-regs.h" - -#define BOOTLOADER_STATUS_OFFSET 0x15c - -#define BOOTLOADER_MAGIC_KEY 0xb00710ad - -#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 -#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 -#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE - -#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 - -#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC) - -#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC) -#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC) -#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC) - -#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC -#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC -#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC) -#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC) - -#define BUTTRESS_IPC_RESET_RETRY 2000 -#define BUTTRESS_CSE_IPC_RESET_RETRY 4 -#define BUTTRESS_IPC_CMD_SEND_RETRY 1 - -#define BUTTRESS_MAX_CONSECUTIVE_IRQS 100 - -static const u32 ipu6_adev_irq_mask[2] = { - BUTTRESS_ISR_IS_IRQ, - BUTTRESS_ISR_PS_IRQ -}; - -int ipu6_buttress_ipc_reset(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc) -{ - unsigned int retries = BUTTRESS_IPC_RESET_RETRY; - struct ipu6_buttress *b = &isp->buttress; - u32 val = 0, csr_in_clr; - - if (!isp->secure_mode) { - 
dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode"); - return 0; - } - - mutex_lock(&b->ipc_mutex); - - /* Clear-by-1 CSR (all bits), corresponding internal states. */ - val = readl(isp->base + ipc->csr_in); - writel(val, isp->base + ipc->csr_in); - - /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ - writel(ENTRY, isp->base + ipc->csr_out); - /* - * Clear-by-1 all CSR bits EXCEPT following - * bits: - * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. - * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. - * C. Possibly custom bits, depending on - * their role. - */ - csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | - BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | - BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; - - do { - usleep_range(400, 500); - val = readl(isp->base + ipc->csr_in); - switch (val) { - case ENTRY | EXIT: - case ENTRY | EXIT | QUERY: - /* - * 1) Clear-by-1 CSR bits - * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, - * IPC_PEER_COMP_ACTIONS_RST_PHASE2). - * 2) Set peer CSR bit - * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. - */ - writel(ENTRY | EXIT, isp->base + ipc->csr_in); - writel(QUERY, isp->base + ipc->csr_out); - break; - case ENTRY: - case ENTRY | QUERY: - /* - * 1) Clear-by-1 CSR bits - * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, - * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). - * 2) Set peer CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE1. - */ - writel(ENTRY | QUERY, isp->base + ipc->csr_in); - writel(ENTRY, isp->base + ipc->csr_out); - break; - case EXIT: - case EXIT | QUERY: - /* - * Clear-by-1 CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE2. - * 1) Clear incoming doorbell. - * 2) Clear-by-1 all CSR bits EXCEPT following - * bits: - * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. - * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. - * C. Possibly custom bits, depending on - * their role. - * 3) Set peer CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE2. 
- */ - writel(EXIT, isp->base + ipc->csr_in); - writel(0, isp->base + ipc->db0_in); - writel(csr_in_clr, isp->base + ipc->csr_in); - writel(EXIT, isp->base + ipc->csr_out); - - /* - * Read csr_in again to make sure if RST_PHASE2 is done. - * If csr_in is QUERY, it should be handled again. - */ - usleep_range(200, 300); - val = readl(isp->base + ipc->csr_in); - if (val & QUERY) { - dev_dbg(&isp->pdev->dev, - "RST_PHASE2 retry csr_in = %x\n", val); - break; - } - mutex_unlock(&b->ipc_mutex); - return 0; - case QUERY: - /* - * 1) Clear-by-1 CSR bit - * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. - * 2) Set peer CSR bit - * IPC_PEER_COMP_ACTIONS_RST_PHASE1 - */ - writel(QUERY, isp->base + ipc->csr_in); - writel(ENTRY, isp->base + ipc->csr_out); - break; - default: - dev_dbg_ratelimited(&isp->pdev->dev, - "Unexpected CSR 0x%x\n", val); - break; - } - } while (retries--); - - mutex_unlock(&b->ipc_mutex); - dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n"); - - return -ETIMEDOUT; -} - -static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc) -{ - writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, - isp->base + ipc->csr_out); -} - -static int -ipu6_buttress_ipc_validity_open(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc) -{ - unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; - void __iomem *addr; - int ret; - u32 val; - - writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, - isp->base + ipc->csr_out); - - addr = isp->base + ipc->csr_in; - ret = readl_poll_timeout(addr, val, val & mask, 200, - BUTTRESS_IPC_VALIDITY_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val); - ipu6_buttress_ipc_validity_close(isp, ipc); - } - - return ret; -} - -static void ipu6_buttress_ipc_recv(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc, u32 *ipc_msg) -{ - if (ipc_msg) - *ipc_msg = readl(isp->base + ipc->data0_in); - writel(0, isp->base + 
ipc->db0_in); -} - -static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp, - struct ipu6_ipc_buttress_bulk_msg *msgs, - u32 size) -{ - unsigned long tx_timeout_jiffies, rx_timeout_jiffies; - unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; - struct ipu6_buttress *b = &isp->buttress; - struct ipu6_buttress_ipc *ipc = &b->cse; - u32 val; - int ret; - int tout; - - mutex_lock(&b->ipc_mutex); - - ret = ipu6_buttress_ipc_validity_open(isp, ipc); - if (ret) { - dev_err(&isp->pdev->dev, "IPC validity open failed\n"); - goto out; - } - - tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS); - rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS); - - for (i = 0; i < size; i++) { - reinit_completion(&ipc->send_complete); - if (msgs[i].require_resp) - reinit_completion(&ipc->recv_complete); - - dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", - msgs[i].cmd); - writel(msgs[i].cmd, isp->base + ipc->data0_out); - val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size; - writel(val, isp->base + ipc->db0_out); - - tout = wait_for_completion_timeout(&ipc->send_complete, - tx_timeout_jiffies); - if (!tout) { - dev_err(&isp->pdev->dev, "send IPC response timeout\n"); - if (!retry--) { - ret = -ETIMEDOUT; - goto out; - } - - /* Try again if CSE is not responding on first try */ - writel(0, isp->base + ipc->db0_out); - i--; - continue; - } - - retry = BUTTRESS_IPC_CMD_SEND_RETRY; - - if (!msgs[i].require_resp) - continue; - - tout = wait_for_completion_timeout(&ipc->recv_complete, - rx_timeout_jiffies); - if (!tout) { - dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); - ret = -ETIMEDOUT; - goto out; - } - - if (ipc->nack_mask && - (ipc->recv_data & ipc->nack_mask) == ipc->nack) { - dev_err(&isp->pdev->dev, - "IPC NACK for cmd 0x%x\n", msgs[i].cmd); - ret = -EIO; - goto out; - } - - if (ipc->recv_data != msgs[i].expected_resp) { - dev_err(&isp->pdev->dev, - "expected resp: 0x%x, IPC response: 0x%x ", - msgs[i].expected_resp, ipc->recv_data); 
- ret = -EIO; - goto out; - } - } - - dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n"); - -out: - ipu6_buttress_ipc_validity_close(isp, ipc); - mutex_unlock(&b->ipc_mutex); - return ret; -} - -static int -ipu6_buttress_ipc_send(struct ipu6_device *isp, - u32 ipc_msg, u32 size, bool require_resp, - u32 expected_resp) -{ - struct ipu6_ipc_buttress_bulk_msg msg = { - .cmd = ipc_msg, - .cmd_size = size, - .require_resp = require_resp, - .expected_resp = expected_resp, - }; - - return ipu6_buttress_ipc_send_bulk(isp, &msg, 1); -} - -static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev) -{ - irqreturn_t ret = IRQ_WAKE_THREAD; - - if (!adev || !adev->auxdrv || !adev->auxdrv_data) - return IRQ_NONE; - - if (adev->auxdrv_data->isr) - ret = adev->auxdrv_data->isr(adev); - - if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded) - ret = IRQ_NONE; - - return ret; -} - -irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr) -{ - struct ipu6_device *isp = isp_ptr; - struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; - struct ipu6_buttress *b = &isp->buttress; - u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; - irqreturn_t ret = IRQ_NONE; - u32 disable_irqs = 0; - u32 irq_status; - u32 i, count = 0; - int active; - - active = pm_runtime_get_if_active(&isp->pdev->dev); - if (!active) - return IRQ_NONE; - - irq_status = readl(isp->base + reg_irq_sts); - if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) { - if (active > 0) - pm_runtime_put_noidle(&isp->pdev->dev); - return IRQ_NONE; - } - - do { - writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); - - for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) { - irqreturn_t r = ipu6_buttress_call_isr(adev[i]); - - if (!(irq_status & ipu6_adev_irq_mask[i])) - continue; - - if (r == IRQ_WAKE_THREAD) { - ret = IRQ_WAKE_THREAD; - disable_irqs |= ipu6_adev_irq_mask[i]; - } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { - ret = IRQ_HANDLED; - } - } - - if ((irq_status & BUTTRESS_EVENT) && 
ret == IRQ_NONE) - ret = IRQ_HANDLED; - - if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n"); - ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); - complete(&b->cse.recv_complete); - } - - if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); - complete(&b->cse.send_complete); - } - - if (irq_status & BUTTRESS_ISR_SAI_VIOLATION && - ipu6_buttress_get_secure_mode(isp)) - dev_err(&isp->pdev->dev, - "BUTTRESS_ISR_SAI_VIOLATION\n"); - - if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR | - BUTTRESS_ISR_PS_FATAL_MEM_ERR)) - dev_err(&isp->pdev->dev, - "BUTTRESS_ISR_FATAL_MEM_ERR\n"); - - if (irq_status & BUTTRESS_ISR_UFI_ERROR) - dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n"); - - if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) { - dev_err(&isp->pdev->dev, "too many consecutive IRQs\n"); - ret = IRQ_NONE; - break; - } - - irq_status = readl(isp->base + reg_irq_sts); - } while (irq_status); - - if (disable_irqs) - writel(BUTTRESS_IRQS & ~disable_irqs, - isp->base + BUTTRESS_REG_ISR_ENABLE); - - if (active > 0) - pm_runtime_put(&isp->pdev->dev); - - return ret; -} - -irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) -{ - struct ipu6_device *isp = isp_ptr; - struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; - const struct ipu6_auxdrv_data *drv_data = NULL; - irqreturn_t ret = IRQ_NONE; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) { - drv_data = adev[i]->auxdrv_data; - if (!drv_data) - continue; - - if (drv_data->wake_isr_thread && - drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) - ret = IRQ_HANDLED; - } - - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); - - return ret; -} - -int ipu6_buttress_power(struct device *dev, - const struct ipu6_buttress_ctrl *ctrl, bool on) -{ - struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; - u32 pwr_sts, val; 
- int ret; - - if (!ctrl) - return 0; - - mutex_lock(&isp->buttress.power_mutex); - - if (!on) { - val = 0; - pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; - } else { - val = BUTTRESS_FREQ_CTL_START | - FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK, - ctrl->ratio) | - FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK, - ctrl->qos_floor) | - BUTTRESS_FREQ_CTL_ICCMAX_LEVEL; - - pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; - } - - writel(val, isp->base + ctrl->freq_ctl); - - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, - val, (val & ctrl->pwr_sts_mask) == pwr_sts, - 100, BUTTRESS_POWER_TIMEOUT_US); - if (ret) - dev_err(&isp->pdev->dev, - "Change power status timeout with 0x%x\n", val); - - mutex_unlock(&isp->buttress.power_mutex); - - return ret; -} - -bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp) -{ - u32 val; - - val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); - - return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; -} - -bool ipu6_buttress_auth_done(struct ipu6_device *isp) -{ - u32 val; - - if (!isp->secure_mode) - return true; - - val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); - val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); - - return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, "INTEL_IPU6"); - -int ipu6_buttress_reset_authentication(struct ipu6_device *isp) -{ - int ret; - u32 val; - - if (!isp->secure_mode) { - dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); - return 0; - } - - writel(BUTTRESS_FW_RESET_CTL_START, isp->base + - BUTTRESS_REG_FW_RESET_CTL); - - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, - val & BUTTRESS_FW_RESET_CTL_DONE, 500, - BUTTRESS_CSE_FWRESET_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, - "Time out while resetting authentication state\n"); - return ret; - } - - dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n"); - writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); - /* leave some time for 
HW restore */ - usleep_range(800, 1000); - - return 0; -} - -int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, - const struct firmware *fw, struct sg_table *sgt) -{ - bool is_vmalloc = is_vmalloc_addr(fw->data); - struct pci_dev *pdev = sys->isp->pdev; - struct page **pages; - const void *addr; - unsigned long n_pages; - unsigned int i; - int ret; - - if (!is_vmalloc && !virt_addr_valid(fw->data)) - return -EDOM; - - n_pages = PFN_UP(fw->size); - - pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; - - addr = fw->data; - for (i = 0; i < n_pages; i++) { - struct page *p = is_vmalloc ? - vmalloc_to_page(addr) : virt_to_page(addr); - - if (!p) { - ret = -ENOMEM; - goto out; - } - pages[i] = p; - addr += PAGE_SIZE; - } - - ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, - GFP_KERNEL); - if (ret) { - ret = -ENOMEM; - goto out; - } - - ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); - if (ret) { - sg_free_table(sgt); - goto out; - } - - ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0); - if (ret) { - dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); - sg_free_table(sgt); - goto out; - } - - ipu6_dma_sync_sgtable(sys, sgt); - -out: - kfree(pages); - - return ret; -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, "INTEL_IPU6"); - -void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, - struct sg_table *sgt) -{ - struct pci_dev *pdev = sys->isp->pdev; - - ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0); - dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); - sg_free_table(sgt); -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, "INTEL_IPU6"); - -int ipu6_buttress_authenticate(struct ipu6_device *isp) -{ - struct ipu6_buttress *b = &isp->buttress; - struct ipu6_psys_pdata *psys_pdata; - u32 data, mask, done, fail; - int ret; - - if (!isp->secure_mode) { - dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); - return 0; - } - - psys_pdata = isp->psys->pdata; - 
- mutex_lock(&b->auth_mutex); - - if (ipu6_buttress_auth_done(isp)) { - ret = 0; - goto out_unlock; - } - - /* - * Write address of FIT table to FW_SOURCE register - * Let's use fw address. I.e. not using FIT table yet - */ - data = lower_32_bits(isp->psys->pkg_dir_dma_addr); - writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); - - data = upper_32_bits(isp->psys->pkg_dir_dma_addr); - writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); - - /* - * Write boot_load into IU2CSEDATA0 - * Write sizeof(boot_load) | 0x2 << CLIENT_ID to - * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as - */ - dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); - - ret = ipu6_buttress_ipc_send(isp, - BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, - 1, true, - BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); - if (ret) { - dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); - goto out_unlock; - } - - mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; - done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; - fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, - ((data & mask) == done || - (data & mask) == fail), 500, - BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); - goto out_unlock; - } - - if ((data & mask) == fail) { - dev_err(&isp->pdev->dev, "CSE auth failed\n"); - ret = -EINVAL; - goto out_unlock; - } - - ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET, - data, data == BOOTLOADER_MAGIC_KEY, 500, - BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n", - data); - goto out_unlock; - } - - /* - * Write authenticate_run into IU2CSEDATA0 - * Write sizeof(boot_load) | 0x2 << CLIENT_ID to - * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as - */ - dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); - ret = ipu6_buttress_ipc_send(isp, - BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, - 1, true, - 
BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); - if (ret) { - dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); - goto out_unlock; - } - - done = BUTTRESS_SECURITY_CTL_AUTH_DONE; - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, - ((data & mask) == done || - (data & mask) == fail), 500, - BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); - if (ret) { - dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); - goto out_unlock; - } - - if ((data & mask) == fail) { - dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); - ret = -EINVAL; - goto out_unlock; - } - - dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); - -out_unlock: - mutex_unlock(&b->auth_mutex); - - return ret; -} - -static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp) -{ - u32 val, mask, done; - int ret; - - mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK; - - writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, - isp->base + BUTTRESS_REG_FABRIC_CMD); - - val = readl(isp->base + BUTTRESS_REG_PWR_STATE); - val = FIELD_GET(mask, val); - if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) { - dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); - return -EINVAL; - } - - done = BUTTRESS_PWR_STATE_HH_STATE_DONE; - ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, - FIELD_GET(mask, val) == done, 500, - BUTTRESS_TSC_SYNC_TIMEOUT_US); - if (ret) - dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); - - return ret; -} - -int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) -{ - unsigned int i; - - for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { - u32 val; - int ret; - - ret = ipu6_buttress_send_tsc_request(isp); - if (ret != -ETIMEDOUT) - return ret; - - val = readl(isp->base + BUTTRESS_REG_TSW_CTL); - val = val | BUTTRESS_TSW_CTL_SOFT_RESET; - writel(val, isp->base + BUTTRESS_REG_TSW_CTL); - val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET; - writel(val, isp->base + BUTTRESS_REG_TSW_CTL); - } - - dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); - - return -ETIMEDOUT; -} 
-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); - -void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) -{ - u32 tsc_hi_1, tsc_hi_2, tsc_lo; - unsigned long flags; - - local_irq_save(flags); - tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI); - tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); - tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI); - if (tsc_hi_1 == tsc_hi_2) { - *val = (u64)tsc_hi_1 << 32 | tsc_lo; - } else { - /* Check if TSC has rolled over */ - if (tsc_lo & BIT(31)) - *val = (u64)tsc_hi_1 << 32 | tsc_lo; - else - *val = (u64)tsc_hi_2 << 32 | tsc_lo; - } - local_irq_restore(flags); -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, "INTEL_IPU6"); - -u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) -{ - u64 ns = ticks * 10000; - - /* - * converting TSC tick count to ns is calculated by: - * Example (TSC clock frequency is 19.2MHz): - * ns = ticks * 1000 000 000 / 19.2Mhz - * = ticks * 1000 000 000 / 19200000Hz - * = ticks * 10000 / 192 ns - */ - return div_u64(ns, isp->buttress.ref_clk); -} -EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, "INTEL_IPU6"); - -void ipu6_buttress_restore(struct ipu6_device *isp) -{ - struct ipu6_buttress *b = &isp->buttress; - - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); - writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); -} - -int ipu6_buttress_init(struct ipu6_device *isp) -{ - int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; - struct ipu6_buttress *b = &isp->buttress; - u32 val; - - mutex_init(&b->power_mutex); - mutex_init(&b->auth_mutex); - mutex_init(&b->cons_mutex); - mutex_init(&b->ipc_mutex); - init_completion(&b->cse.send_complete); - init_completion(&b->cse.recv_complete); - - b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; - b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; - b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; - b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; 
- b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; - b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; - b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; - b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; - - INIT_LIST_HEAD(&b->constraints); - - isp->secure_mode = ipu6_buttress_get_secure_mode(isp); - dev_dbg(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n", - isp->secure_mode ? "secure" : "non-secure", - readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH), - readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); - - b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); - writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); - - /* get ref_clk frequency by reading the indication in btrs control */ - val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); - val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val); - - switch (val) { - case 0x0: - b->ref_clk = 240; - break; - case 0x1: - b->ref_clk = 192; - break; - case 0x2: - b->ref_clk = 384; - break; - default: - dev_warn(&isp->pdev->dev, - "Unsupported ref clock, use 19.2Mhz by default.\n"); - b->ref_clk = 192; - break; - } - - /* Retry couple of times in case of CSE initialization is delayed */ - do { - ret = ipu6_buttress_ipc_reset(isp, &b->cse); - if (ret) { - dev_warn(&isp->pdev->dev, - "IPC reset protocol failed, retrying\n"); - } else { - dev_dbg(&isp->pdev->dev, "IPC reset done\n"); - return 0; - } - } while (ipc_reset_retry--); - - dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); - - mutex_destroy(&b->power_mutex); - mutex_destroy(&b->auth_mutex); - mutex_destroy(&b->cons_mutex); - mutex_destroy(&b->ipc_mutex); - - return ret; -} - -void ipu6_buttress_exit(struct ipu6_device *isp) -{ - struct ipu6_buttress *b = &isp->buttress; - - writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); - - mutex_destroy(&b->power_mutex); - mutex_destroy(&b->auth_mutex); - mutex_destroy(&b->cons_mutex); - mutex_destroy(&b->ipc_mutex); -} diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h deleted file mode 100644 index 51e5ad4..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h +++ /dev/null @@ -1,85 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_BUTTRESS_H -#define IPU6_BUTTRESS_H - -#include -#include -#include -#include - -struct device; -struct firmware; -struct ipu6_device; -struct ipu6_bus_device; - -#define BUTTRESS_PS_FREQ_STEP 25U -#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) -#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32) - -#define BUTTRESS_IS_FREQ_STEP 25U -#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8) -#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22) - -struct ipu6_buttress_ctrl { - u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; - unsigned int ratio; - unsigned int qos_floor; -}; - -struct ipu6_buttress_ipc { - struct completion send_complete; - struct completion recv_complete; - u32 nack; - u32 nack_mask; - u32 recv_data; - u32 csr_out; - u32 csr_in; - u32 db0_in; - u32 db0_out; - u32 data0_out; - u32 data0_in; -}; - -struct ipu6_buttress { - struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; - struct ipu6_buttress_ipc cse; - struct list_head constraints; - u32 wdt_cached_value; - bool force_suspend; - u32 ref_clk; -}; - -struct ipu6_ipc_buttress_bulk_msg { - u32 cmd; - u32 expected_resp; - bool require_resp; - u8 cmd_size; -}; - -int ipu6_buttress_ipc_reset(struct ipu6_device *isp, - struct ipu6_buttress_ipc *ipc); -int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, - const struct firmware *fw, - struct sg_table *sgt); -void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, - struct sg_table *sgt); -int ipu6_buttress_power(struct device *dev, - const struct ipu6_buttress_ctrl *ctrl, bool on); -bool 
ipu6_buttress_get_secure_mode(struct ipu6_device *isp); -int ipu6_buttress_authenticate(struct ipu6_device *isp); -int ipu6_buttress_reset_authentication(struct ipu6_device *isp); -bool ipu6_buttress_auth_done(struct ipu6_device *isp); -int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp); -void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val); -u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp); - -irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr); -irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr); -int ipu6_buttress_init(struct ipu6_device *isp); -void ipu6_buttress_exit(struct ipu6_device *isp); -void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); -void ipu6_buttress_restore(struct ipu6_device *isp); -#endif /* IPU6_BUTTRESS_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c deleted file mode 100644 index b7013f6..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.c +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-cpd.h" -#include "ipu6-dma.h" - -/* 15 entries + header*/ -#define MAX_PKG_DIR_ENT_CNT 16 -/* 2 qword per entry/header */ -#define PKG_DIR_ENT_LEN 2 -/* PKG_DIR size in bytes */ -#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ - (PKG_DIR_ENT_LEN) * sizeof(u64)) -/* _IUPKDR_ */ -#define PKG_DIR_HDR_MARK 0x5f4955504b44525fULL - -/* $CPD */ -#define CPD_HDR_MARK 0x44504324 - -#define MAX_MANIFEST_SIZE (SZ_2K * sizeof(u32)) -#define MAX_METADATA_SIZE SZ_64K - -#define MAX_COMPONENT_ID 127 -#define MAX_COMPONENT_VERSION 0xffff - -#define MANIFEST_IDX 0 -#define METADATA_IDX 1 -#define MODULEDATA_IDX 2 -/* - * PKG_DIR Entry (type == id) - * 63:56 55 54:48 47:32 31:24 23:0 - * Rsvd 
Rsvd Type Version Rsvd Size - */ -#define PKG_DIR_SIZE_MASK GENMASK_ULL(23, 0) -#define PKG_DIR_VERSION_MASK GENMASK_ULL(47, 32) -#define PKG_DIR_TYPE_MASK GENMASK_ULL(54, 48) - -static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd, - u8 idx) -{ - const struct ipu6_cpd_hdr *cpd_hdr = cpd; - const struct ipu6_cpd_ent *ent; - - ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len); - return ent + idx; -} - -#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX) -#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX) -#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX) - -static const struct ipu6_cpd_metadata_cmpnt_hdr * -ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata, - unsigned int metadata_size, u8 idx) -{ - size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn); - size_t cmpnt_count = metadata_size - extn_size; - - cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size); - - if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { - dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", - idx); - return ERR_PTR(-EINVAL); - } - - return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size; -} - -static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp, - const void *metadata, - unsigned int metadata_size, u8 idx) -{ - const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; - - cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); - if (IS_ERR(cmpnt)) - return PTR_ERR(cmpnt); - - return cmpnt->ver; -} - -static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp, - const void *metadata, - unsigned int metadata_size, u8 idx) -{ - const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt; - - cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); - if (IS_ERR(cmpnt)) - return PTR_ERR(cmpnt); - - return cmpnt->id; -} - -static int ipu6_cpd_parse_module_data(struct ipu6_device *isp, - 
const void *module_data, - unsigned int module_data_size, - dma_addr_t dma_addr_module_data, - u64 *pkg_dir, const void *metadata, - unsigned int metadata_size) -{ - const struct ipu6_cpd_module_data_hdr *module_data_hdr; - const struct ipu6_cpd_hdr *dir_hdr; - const struct ipu6_cpd_ent *dir_ent; - unsigned int i; - u8 len; - - if (!module_data) - return -EINVAL; - - module_data_hdr = module_data; - dir_hdr = module_data + module_data_hdr->hdr_len; - len = dir_hdr->hdr_len; - dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len); - - pkg_dir[0] = PKG_DIR_HDR_MARK; - /* pkg_dir entry count = component count + pkg_dir header */ - pkg_dir[1] = dir_hdr->ent_cnt + 1; - - for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { - u64 *p = &pkg_dir[PKG_DIR_ENT_LEN * (1 + i)]; - int ver, id; - - *p++ = dma_addr_module_data + dir_ent->offset; - id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata, - metadata_size, i); - if (id < 0 || id > MAX_COMPONENT_ID) { - dev_err(&isp->pdev->dev, "Invalid CPD component id\n"); - return -EINVAL; - } - - ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata, - metadata_size, i); - if (ver < 0 || ver > MAX_COMPONENT_VERSION) { - dev_err(&isp->pdev->dev, - "Invalid CPD component version\n"); - return -EINVAL; - } - - *p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) | - FIELD_PREP(PKG_DIR_TYPE_MASK, id) | - FIELD_PREP(PKG_DIR_VERSION_MASK, ver); - } - - return 0; -} - -int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src) -{ - dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl); - const struct ipu6_cpd_ent *ent, *man_ent, *met_ent; - struct ipu6_device *isp = adev->isp; - unsigned int man_sz, met_sz; - void *pkg_dir_pos; - int ret; - - man_ent = ipu6_cpd_get_manifest(src); - man_sz = man_ent->len; - - met_ent = ipu6_cpd_get_metadata(src); - met_sz = met_ent->len; - - adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; - adev->pkg_dir = ipu6_dma_alloc(adev, adev->pkg_dir_size, - &adev->pkg_dir_dma_addr, 
GFP_KERNEL, 0); - if (!adev->pkg_dir) - return -ENOMEM; - - /* - * pkg_dir entry/header: - * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 - * N Address/Offset/"_IUPKDR_" - * N + 1 | rsvd | rsvd | type | ver | rsvd | size - * - * We can ignore other fields that size in N + 1 qword as they - * are 0 anyway. Just setting size for now. - */ - - ent = ipu6_cpd_get_moduledata(src); - - ret = ipu6_cpd_parse_module_data(isp, src + ent->offset, - ent->len, dma_addr_src + ent->offset, - adev->pkg_dir, src + met_ent->offset, - met_ent->len); - if (ret) { - dev_err(&isp->pdev->dev, "Failed to parse module data\n"); - ipu6_dma_free(adev, adev->pkg_dir_size, - adev->pkg_dir, adev->pkg_dir_dma_addr, 0); - return ret; - } - - /* Copy manifest after pkg_dir */ - pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; - memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); - - /* Copy metadata after manifest */ - pkg_dir_pos += man_sz; - memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); - - ipu6_dma_sync_single(adev, adev->pkg_dir_dma_addr, - adev->pkg_dir_size); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, "INTEL_IPU6"); - -void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev) -{ - ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir, - adev->pkg_dir_dma_addr, 0); -} -EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, "INTEL_IPU6"); - -static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd, - unsigned long cpd_size, - unsigned long data_size) -{ - const struct ipu6_cpd_hdr *cpd_hdr = cpd; - const struct ipu6_cpd_ent *ent; - unsigned int i; - u8 len; - - len = cpd_hdr->hdr_len; - - /* Ensure cpd hdr is within moduledata */ - if (cpd_size < len) { - dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); - return -EINVAL; - } - - /* Sanity check for CPD header */ - if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { - dev_err(&isp->pdev->dev, "Invalid CPD header\n"); - return -EINVAL; - } - - /* Ensure that all entries are 
within moduledata */ - ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len); - for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { - if (data_size < ent->offset || - data_size - ent->offset < ent->len) { - dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); - return -EINVAL; - } - } - - return 0; -} - -static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp, - const void *moduledata, - u32 moduledata_size) -{ - const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata; - int ret; - - /* Ensure moduledata hdr is within moduledata */ - if (moduledata_size < sizeof(*mod_hdr) || - moduledata_size < mod_hdr->hdr_len) { - dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); - return -EINVAL; - } - - dev_dbg(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date); - ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len, - moduledata_size - mod_hdr->hdr_len, - moduledata_size); - if (ret) { - dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n"); - return ret; - } - - return 0; -} - -static int ipu6_cpd_validate_metadata(struct ipu6_device *isp, - const void *metadata, u32 meta_size) -{ - const struct ipu6_cpd_metadata_extn *extn = metadata; - - /* Sanity check for metadata size */ - if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { - dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); - return -EINVAL; - } - - /* Validate extension and image types */ - if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT || - extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { - dev_err(&isp->pdev->dev, - "Invalid CPD metadata descriptor img_type (%d)\n", - extn->img_type); - return -EINVAL; - } - - /* Validate metadata size multiple of metadata components */ - if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) { - dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n"); - return -EINVAL; - } - - return 0; -} - -int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, - unsigned 
long cpd_file_size) -{ - const struct ipu6_cpd_hdr *hdr = cpd_file; - const struct ipu6_cpd_ent *ent; - int ret; - - ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size, - cpd_file_size); - if (ret) { - dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); - return ret; - } - - /* Check for CPD file marker */ - if (hdr->hdr_mark != CPD_HDR_MARK) { - dev_err(&isp->pdev->dev, "Invalid CPD header\n"); - return -EINVAL; - } - - /* Sanity check for manifest size */ - ent = ipu6_cpd_get_manifest(cpd_file); - if (ent->len > MAX_MANIFEST_SIZE) { - dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n"); - return -EINVAL; - } - - /* Validate metadata */ - ent = ipu6_cpd_get_metadata(cpd_file); - ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); - if (ret) { - dev_err(&isp->pdev->dev, "Invalid CPD metadata\n"); - return ret; - } - - /* Validate moduledata */ - ent = ipu6_cpd_get_moduledata(cpd_file); - ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset, - ent->len); - if (ret) - dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n"); - - return ret; -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h deleted file mode 100644 index e0e4fde..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-cpd.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2015--2024 Intel Corporation */ - -#ifndef IPU6_CPD_H -#define IPU6_CPD_H - -struct ipu6_device; -struct ipu6_bus_device; - -#define IPU6_CPD_SIZE_OF_FW_ARCH_VERSION 7 -#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION 11 -#define IPU6_CPD_SIZE_OF_COMPONENT_NAME 12 - -#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 - -#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED 0 -#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 -#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 - -#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX 0 -#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX 1 - -#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE 3 - 
-#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48 -#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE 32 - -struct ipu6_cpd_module_data_hdr { - u32 hdr_len; - u32 endian; - u32 fw_pkg_date; - u32 hive_sdk_date; - u32 compiler_date; - u32 target_platform_type; - u8 sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION]; - u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION]; - u8 rsvd[2]; -} __packed; - -/* - * ipu6_cpd_hdr structure updated as the chksum and - * sub_partition_name is unused on host side - * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10) - * CSE layout version 1.7 for IPU6 (hdr_len = 0x14) - */ -struct ipu6_cpd_hdr { - u32 hdr_mark; - u32 ent_cnt; - u8 hdr_ver; - u8 ent_ver; - u8 hdr_len; -} __packed; - -struct ipu6_cpd_ent { - u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME]; - u32 offset; - u32 len; - u8 rsvd[4]; -} __packed; - -struct ipu6_cpd_metadata_cmpnt_hdr { - u32 id; - u32 size; - u32 ver; -} __packed; - -struct ipu6_cpd_metadata_cmpnt { - struct ipu6_cpd_metadata_cmpnt_hdr hdr; - u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE]; - u32 entry_point; - u32 icache_base_offs; - u8 attrs[16]; -} __packed; - -struct ipu6se_cpd_metadata_cmpnt { - struct ipu6_cpd_metadata_cmpnt_hdr hdr; - u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE]; - u32 entry_point; - u32 icache_base_offs; - u8 attrs[16]; -} __packed; - -struct ipu6_cpd_metadata_extn { - u32 extn_type; - u32 len; - u32 img_type; - u8 rsvd[16]; -} __packed; - -struct ipu6_cpd_client_pkg_hdr { - u32 prog_list_offs; - u32 prog_list_size; - u32 prog_desc_offs; - u32 prog_desc_size; - u32 pg_manifest_offs; - u32 pg_manifest_size; - u32 prog_bin_offs; - u32 prog_bin_size; -} __packed; - -int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src); -void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev); -int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, - unsigned long cpd_file_size); -#endif /* IPU6_CPD_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.c 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.c deleted file mode 100644 index 7296373..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.c +++ /dev/null @@ -1,459 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-mmu.h" - -struct vm_info { - struct list_head list; - struct page **pages; - dma_addr_t ipu6_iova; - void *vaddr; - unsigned long size; -}; - -static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova) -{ - struct vm_info *info, *save; - - list_for_each_entry_safe(info, save, &mmu->vma_list, list) { - if (iova >= info->ipu6_iova && - iova < (info->ipu6_iova + info->size)) - return info; - } - - return NULL; -} - -static void __clear_buffer(struct page *page, size_t size, unsigned long attrs) -{ - void *ptr; - - if (!page) - return; - /* - * Ensure that the allocated pages are zeroed, and that any data - * lurking in the kernel direct-mapped region is invalidated. 
- */ - ptr = page_address(page); - memset(ptr, 0, size); - if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - clflush_cache_range(ptr, size); -} - -static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs) -{ - int count = PHYS_PFN(size); - int array_size = count * sizeof(struct page *); - struct page **pages; - int i = 0; - - pages = kvzalloc(array_size, GFP_KERNEL); - if (!pages) - return NULL; - - gfp |= __GFP_NOWARN; - - while (count) { - int j, order = __fls(count); - - pages[i] = alloc_pages(gfp, order); - while (!pages[i] && order) - pages[i] = alloc_pages(gfp, --order); - if (!pages[i]) - goto error; - - if (order) { - split_page(pages[i], order); - j = 1 << order; - while (j--) - pages[i + j] = pages[i] + j; - } - - __clear_buffer(pages[i], PAGE_SIZE << order, attrs); - i += 1 << order; - count -= 1 << order; - } - - return pages; -error: - while (i--) - if (pages[i]) - __free_pages(pages[i], 0); - kvfree(pages); - return NULL; -} - -static void __free_buffer(struct page **pages, size_t size, unsigned long attrs) -{ - int count = PHYS_PFN(size); - unsigned int i; - - for (i = 0; i < count && pages[i]; i++) { - __clear_buffer(pages[i], PAGE_SIZE, attrs); - __free_pages(pages[i], 0); - } - - kvfree(pages); -} - -void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, - size_t size) -{ - void *vaddr; - u32 offset; - struct vm_info *info; - struct ipu6_mmu *mmu = sys->mmu; - - info = get_vm_info(mmu, dma_handle); - if (WARN_ON(!info)) - return; - - offset = dma_handle - info->ipu6_iova; - if (WARN_ON(size > (info->size - offset))) - return; - - vaddr = info->vaddr + offset; - clflush_cache_range(vaddr, size); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, "INTEL_IPU6"); - -void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nents, i) - clflush_cache_range(sg_virt(sg), sg->length); -} 
-EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, "INTEL_IPU6"); - -void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt) -{ - ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, "INTEL_IPU6"); - -void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct pci_dev *pdev = sys->isp->pdev; - dma_addr_t pci_dma_addr, ipu6_iova; - struct ipu6_mmu *mmu = sys->mmu; - struct vm_info *info; - unsigned long count; - struct page **pages; - struct iova *iova; - unsigned int i; - int ret; - - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return NULL; - - size = PAGE_ALIGN(size); - count = PHYS_PFN(size); - - iova = alloc_iova(&mmu->dmap->iovad, count, - PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); - if (!iova) - goto out_kfree; - - pages = __alloc_buffer(size, gfp, attrs); - if (!pages) - goto out_free_iova; - - dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n", - size, iova->pfn_lo, iova->pfn_hi); - for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { - pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, - PAGE_SIZE, DMA_BIDIRECTIONAL, - attrs); - dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n", - &pci_dma_addr); - if (dma_mapping_error(&pdev->dev, pci_dma_addr)) { - dev_err(dev, "pci_dma_mapping for page[%d] failed", i); - goto out_unmap; - } - - ret = ipu6_mmu_map(mmu->dmap->mmu_info, - PFN_PHYS(iova->pfn_lo + i), pci_dma_addr, - PAGE_SIZE); - if (ret) { - dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed", - i, &pci_dma_addr); - dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, - PAGE_SIZE, DMA_BIDIRECTIONAL, - attrs); - goto out_unmap; - } - } - - info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL); - if (!info->vaddr) - goto out_unmap; - - *dma_handle = PFN_PHYS(iova->pfn_lo); - - info->pages = pages; - info->ipu6_iova = *dma_handle; - 
info->size = size; - list_add(&info->list, &mmu->vma_list); - - return info->vaddr; - -out_unmap: - while (i--) { - ipu6_iova = PFN_PHYS(iova->pfn_lo + i); - pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, - ipu6_iova); - dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL, attrs); - - ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE); - } - - __free_buffer(pages, size, attrs); - -out_free_iova: - __free_iova(&mmu->dmap->iovad, iova); -out_kfree: - kfree(info); - - return NULL; -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, "INTEL_IPU6"); - -void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - struct ipu6_mmu *mmu = sys->mmu; - struct pci_dev *pdev = sys->isp->pdev; - struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle)); - dma_addr_t pci_dma_addr, ipu6_iova; - struct vm_info *info; - struct page **pages; - unsigned int i; - - if (WARN_ON(!iova)) - return; - - info = get_vm_info(mmu, dma_handle); - if (WARN_ON(!info)) - return; - - if (WARN_ON(!info->vaddr)) - return; - - if (WARN_ON(!info->pages)) - return; - - list_del(&info->list); - - size = PAGE_ALIGN(size); - - pages = info->pages; - - vunmap(vaddr); - - for (i = 0; i < PHYS_PFN(size); i++) { - ipu6_iova = PFN_PHYS(iova->pfn_lo + i); - pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, - ipu6_iova); - dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL, attrs); - } - - ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - - __free_buffer(pages, size, attrs); - - mmu->tlb_invalidate(mmu); - - __free_iova(&mmu->dmap->iovad, iova); - - kfree(info); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, "INTEL_IPU6"); - -int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, - void *addr, dma_addr_t iova, size_t size, - unsigned long attrs) -{ - struct ipu6_mmu *mmu = sys->mmu; - size_t count = PFN_UP(size); - 
struct vm_info *info; - size_t i; - int ret; - - info = get_vm_info(mmu, iova); - if (!info) - return -EFAULT; - - if (!info->vaddr) - return -EFAULT; - - if (vma->vm_start & ~PAGE_MASK) - return -EINVAL; - - if (size > info->size) - return -EFAULT; - - for (i = 0; i < count; i++) { - ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i), - info->pages[i]); - if (ret < 0) - return ret; - } - - return 0; -} - -void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct ipu6_mmu *mmu = sys->mmu; - struct iova *iova = find_iova(&mmu->dmap->iovad, - PHYS_PFN(sg_dma_address(sglist))); - struct scatterlist *sg; - dma_addr_t pci_dma_addr; - unsigned int i; - - if (!nents) - return; - - if (WARN_ON(!iova)) - return; - - /* - * Before IPU6 mmu unmap, return the pci dma address back to sg - * assume the nents is less than orig_nents as the least granule - * is 1 SZ_4K page - */ - dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents); - for_each_sg(sglist, sg, nents, i) { - dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i, - &sg_dma_address(sg), sg_dma_len(sg)); - pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info, - sg_dma_address(sg)); - dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n", - &pci_dma_addr, i); - sg_dma_address(sg) = pci_dma_addr; - } - - dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n", - iova->pfn_lo, iova->pfn_hi); - ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - - mmu->tlb_invalidate(mmu); - __free_iova(&mmu->dmap->iovad, iova); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, "INTEL_IPU6"); - -int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct device *dev = &sys->auxdev.dev; - struct ipu6_mmu *mmu = sys->mmu; - struct scatterlist *sg; - struct iova 
*iova; - size_t npages = 0; - unsigned long iova_addr; - int i; - - for_each_sg(sglist, sg, nents, i) { - if (sg->offset) { - dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n", - i, sg->offset); - return -EFAULT; - } - } - - for_each_sg(sglist, sg, nents, i) - npages += PFN_UP(sg_dma_len(sg)); - - dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n", - nents, npages); - - iova = alloc_iova(&mmu->dmap->iovad, npages, - PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); - if (!iova) - return 0; - - dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, - iova->pfn_hi); - - iova_addr = iova->pfn_lo; - for_each_sg(sglist, sg, nents, i) { - phys_addr_t iova_pa; - int ret; - - iova_pa = PFN_PHYS(iova_addr); - dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n", - i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg)); - - ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), - sg_dma_address(sg), - PAGE_ALIGN(sg_dma_len(sg))); - if (ret) - goto out_fail; - - sg_dma_address(sg) = PFN_PHYS(iova_addr); - - iova_addr += PFN_UP(sg_dma_len(sg)); - } - - dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages); - - return nents; - -out_fail: - ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, "INTEL_IPU6"); - -int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - int nents; - - nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs); - if (nents < 0) - return nents; - - sgt->nents = nents; - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, "INTEL_IPU6"); - -void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs); -} -EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6"); diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.h 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.h deleted file mode 100644 index ae9b9a5..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-dma.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_DMA_H -#define IPU6_DMA_H - -#include -#include -#include - -#include "ipu6-bus.h" - -struct ipu6_mmu_info; - -struct ipu6_dma_mapping { - struct ipu6_mmu_info *mmu_info; - struct iova_domain iovad; -}; - -void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, - size_t size); -void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents); -void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt); -void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs); -void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs); -int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, - void *addr, dma_addr_t iova, size_t size, - unsigned long attrs); -int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs); -void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - unsigned long attrs); -int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs); -void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs); -#endif /* IPU6_DMA_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c deleted file mode 100644 index 40d8ce1..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +++ /dev/null @@ -1,413 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-fw-com.h" - -/* - * FWCOM layer is a shared resource between FW and driver. It consist - * of token queues to both send and receive directions. Queue is simply - * an array of structures with read and write indexes to the queue. - * There are 1...n queues to both directions. Queues locates in - * system RAM and are mapped to ISP MMU so that both CPU and ISP can - * see the same buffer. Indexes are located in ISP DMEM so that FW code - * can poll those with very low latency and cost. CPU access to indexes is - * more costly but that happens only at message sending time and - * interrupt triggered message handling. CPU doesn't need to poll indexes. - * wr_reg / rd_reg are offsets to those dmem location. They are not - * the indexes itself. - */ - -/* Shared structure between driver and FW - do not modify */ -struct ipu6_fw_sys_queue { - u64 host_address; - u32 vied_address; - u32 size; - u32 token_size; - u32 wr_reg; /* reg number in subsystem's regmem */ - u32 rd_reg; - u32 _align; -} __packed; - -struct ipu6_fw_sys_queue_res { - u64 host_address; - u32 vied_address; - u32 reg; -} __packed; - -enum syscom_state { - /* Program load or explicit host setting should init to this */ - SYSCOM_STATE_UNINIT = 0x57a7e000, - /* SP Syscom sets this when it is ready for use */ - SYSCOM_STATE_READY = 0x57a7e001, - /* SP Syscom sets this when no more syscom accesses will happen */ - SYSCOM_STATE_INACTIVE = 0x57a7e002, -}; - -enum syscom_cmd { - /* Program load or explicit host setting should init to this */ - SYSCOM_COMMAND_UNINIT = 0x57a7f000, - /* Host Syscom requests syscom to become inactive */ - SYSCOM_COMMAND_INACTIVE = 0x57a7f001, -}; - -/* firmware config: data that sent from the host to SP via DDR */ -/* Cell copies data into a context */ - 
-struct ipu6_fw_syscom_config { - u32 firmware_address; - - u32 num_input_queues; - u32 num_output_queues; - - /* ISP pointers to an array of ipu6_fw_sys_queue structures */ - u32 input_queue; - u32 output_queue; - - /* ISYS / PSYS private data */ - u32 specific_addr; - u32 specific_size; -}; - -struct ipu6_fw_com_context { - struct ipu6_bus_device *adev; - void __iomem *dmem_addr; - int (*cell_ready)(struct ipu6_bus_device *adev); - void (*cell_start)(struct ipu6_bus_device *adev); - - void *dma_buffer; - dma_addr_t dma_addr; - unsigned int dma_size; - - struct ipu6_fw_sys_queue *input_queue; /* array of host to SP queues */ - struct ipu6_fw_sys_queue *output_queue; /* array of SP to host */ - - u32 config_vied_addr; - - unsigned int buttress_boot_offset; - void __iomem *base_addr; -}; - -#define FW_COM_WR_REG 0 -#define FW_COM_RD_REG 4 - -#define REGMEM_OFFSET 0 -#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a - -enum regmem_id { - /* pass pkg_dir address to SPC in non-secure mode */ - PKG_DIR_ADDR_REG = 0, - /* Tunit CFG blob for secure - provided by host.*/ - TUNIT_CFG_DWR_REG = 1, - /* syscom commands - modified by the host */ - SYSCOM_COMMAND_REG = 2, - /* Store interrupt status - updated by SP */ - SYSCOM_IRQ_REG = 3, - /* first syscom queue pointer register */ - SYSCOM_QPR_BASE_REG = 4 -}; - -#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000 -#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) \ - ((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4) - -enum buttress_syscom_id { - /* pass syscom configuration to SPC */ - SYSCOM_CONFIG_ID = 0, - /* syscom state - modified by SP */ - SYSCOM_STATE_ID = 1, - /* syscom vtl0 addr mask */ - SYSCOM_VTL0_ADDR_MASK_ID = 2, - SYSCOM_ID_MAX -}; - -static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size, - unsigned int token_size, - struct ipu6_fw_sys_queue_res *res) -{ - unsigned int buf_size = (size + 1) * token_size; - - q->size = size + 1; - q->token_size = token_size; - - /* acquire the shared buffer 
space */ - q->host_address = res->host_address; - res->host_address += buf_size; - q->vied_address = res->vied_address; - res->vied_address += buf_size; - - /* acquire the shared read and writer pointers */ - q->wr_reg = res->reg; - res->reg++; - q->rd_reg = res->reg; - res->reg++; -} - -void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, - struct ipu6_bus_device *adev, void __iomem *base) -{ - size_t conf_size, inq_size, outq_size, specific_size; - struct ipu6_fw_syscom_config *config_host_addr; - unsigned int sizeinput = 0, sizeoutput = 0; - struct ipu6_fw_sys_queue_res res; - struct ipu6_fw_com_context *ctx; - struct device *dev = &adev->auxdev.dev; - size_t sizeall, offset; - void *specific_host_addr; - unsigned int i; - - if (!cfg || !cfg->cell_start || !cfg->cell_ready) - return NULL; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return NULL; - ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; - ctx->adev = adev; - ctx->cell_start = cfg->cell_start; - ctx->cell_ready = cfg->cell_ready; - ctx->buttress_boot_offset = cfg->buttress_boot_offset; - ctx->base_addr = base; - - /* - * Allocate DMA mapped memory. Allocate one big chunk. 
- */ - /* Base cfg for FW */ - conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8); - /* Descriptions of the queues */ - inq_size = size_mul(cfg->num_input_queues, - sizeof(struct ipu6_fw_sys_queue)); - outq_size = size_mul(cfg->num_output_queues, - sizeof(struct ipu6_fw_sys_queue)); - /* FW specific information structure */ - specific_size = roundup(cfg->specific_size, 8); - - sizeall = conf_size + inq_size + outq_size + specific_size; - - for (i = 0; i < cfg->num_input_queues; i++) - sizeinput += size_mul(cfg->input[i].queue_size + 1, - cfg->input[i].token_size); - - for (i = 0; i < cfg->num_output_queues; i++) - sizeoutput += size_mul(cfg->output[i].queue_size + 1, - cfg->output[i].token_size); - - sizeall += sizeinput + sizeoutput; - - ctx->dma_buffer = ipu6_dma_alloc(adev, sizeall, &ctx->dma_addr, - GFP_KERNEL, 0); - if (!ctx->dma_buffer) { - dev_err(dev, "failed to allocate dma memory\n"); - kfree(ctx); - return NULL; - } - - ctx->dma_size = sizeall; - - config_host_addr = ctx->dma_buffer; - ctx->config_vied_addr = ctx->dma_addr; - - offset = conf_size; - ctx->input_queue = ctx->dma_buffer + offset; - config_host_addr->input_queue = ctx->dma_addr + offset; - config_host_addr->num_input_queues = cfg->num_input_queues; - - offset += inq_size; - ctx->output_queue = ctx->dma_buffer + offset; - config_host_addr->output_queue = ctx->dma_addr + offset; - config_host_addr->num_output_queues = cfg->num_output_queues; - - /* copy firmware specific data */ - offset += outq_size; - specific_host_addr = ctx->dma_buffer + offset; - config_host_addr->specific_addr = ctx->dma_addr + offset; - config_host_addr->specific_size = cfg->specific_size; - if (cfg->specific_addr && cfg->specific_size) - memcpy(specific_host_addr, cfg->specific_addr, - cfg->specific_size); - - ipu6_dma_sync_single(adev, ctx->config_vied_addr, sizeall); - - /* initialize input queues */ - offset += specific_size; - res.reg = SYSCOM_QPR_BASE_REG; - res.host_address = (uintptr_t)(ctx->dma_buffer 
+ offset); - res.vied_address = ctx->dma_addr + offset; - for (i = 0; i < cfg->num_input_queues; i++) - ipu6_sys_queue_init(ctx->input_queue + i, - cfg->input[i].queue_size, - cfg->input[i].token_size, &res); - - /* initialize output queues */ - offset += sizeinput; - res.host_address = (uintptr_t)(ctx->dma_buffer + offset); - res.vied_address = ctx->dma_addr + offset; - for (i = 0; i < cfg->num_output_queues; i++) { - ipu6_sys_queue_init(ctx->output_queue + i, - cfg->output[i].queue_size, - cfg->output[i].token_size, &res); - } - - return ctx; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, "INTEL_IPU6"); - -int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) -{ - /* write magic pattern to disable the tunit trace */ - writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); - /* Check if SP is in valid state */ - if (!ctx->cell_ready(ctx->adev)) - return -EIO; - - /* store syscom uninitialized command */ - writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); - - /* store syscom uninitialized state */ - writel(SYSCOM_STATE_UNINIT, - BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_STATE_ID)); - - /* store firmware configuration address */ - writel(ctx->config_vied_addr, - BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_CONFIG_ID)); - ctx->cell_start(ctx->adev); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, "INTEL_IPU6"); - -int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx) -{ - int state; - - state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_STATE_ID)); - if (state != SYSCOM_STATE_READY) - return -EBUSY; - - /* set close request flag */ - writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + - SYSCOM_COMMAND_REG * 4); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, "INTEL_IPU6"); - -int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force) -{ - /* check if release is forced, an verify 
cell state if it is not */ - if (!force && !ctx->cell_ready(ctx->adev)) - return -EBUSY; - - ipu6_dma_free(ctx->adev, ctx->dma_size, - ctx->dma_buffer, ctx->dma_addr, 0); - kfree(ctx); - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, "INTEL_IPU6"); - -bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx) -{ - int state; - - state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, - ctx->buttress_boot_offset, - SYSCOM_STATE_ID)); - - return state == SYSCOM_STATE_READY; -} -EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, "INTEL_IPU6"); - -void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int wr, rd; - unsigned int packets; - unsigned int index; - - wr = readl(q_dmem + FW_COM_WR_REG); - rd = readl(q_dmem + FW_COM_RD_REG); - - if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) - return NULL; - - if (wr < rd) - packets = rd - wr - 1; - else - packets = q->size - (wr - rd + 1); - - if (!packets) - return NULL; - - index = readl(q_dmem + FW_COM_WR_REG); - - return (void *)((uintptr_t)q->host_address + index * q->token_size); -} -EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, "INTEL_IPU6"); - -void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1; - - if (wr >= q->size) - wr = 0; - - writel(wr, q_dmem + FW_COM_WR_REG); -} -EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, "INTEL_IPU6"); - -void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int wr, rd; - unsigned int packets; - - wr = readl(q_dmem + FW_COM_WR_REG); - rd = readl(q_dmem + FW_COM_RD_REG); - - if (WARN_ON_ONCE(wr >= q->size || rd >= q->size)) - 
return NULL; - - if (wr < rd) - wr += q->size; - - packets = wr - rd; - if (!packets) - return NULL; - - return (void *)((uintptr_t)q->host_address + rd * q->token_size); -} -EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, "INTEL_IPU6"); - -void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) -{ - struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr]; - void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; - unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1; - - if (rd >= q->size) - rd = 0; - - writel(rd, q_dmem + FW_COM_RD_REG); -} -EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, "INTEL_IPU6"); diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h deleted file mode 100644 index b02285a..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_FW_COM_H -#define IPU6_FW_COM_H - -struct ipu6_fw_com_context; -struct ipu6_bus_device; - -struct ipu6_fw_syscom_queue_config { - unsigned int queue_size; /* tokens per queue */ - unsigned int token_size; /* bytes per token */ -}; - -#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0 - -struct ipu6_fw_com_cfg { - unsigned int num_input_queues; - unsigned int num_output_queues; - struct ipu6_fw_syscom_queue_config *input; - struct ipu6_fw_syscom_queue_config *output; - - unsigned int dmem_addr; - - /* firmware-specific configuration data */ - void *specific_addr; - unsigned int specific_size; - int (*cell_ready)(struct ipu6_bus_device *adev); - void (*cell_start)(struct ipu6_bus_device *adev); - - unsigned int buttress_boot_offset; -}; - -void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, - struct ipu6_bus_device *adev, void __iomem *base); - -int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx); -bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx); -int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx); 
-int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force); - -void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); -void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); -void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); -void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); - -#endif diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c deleted file mode 100644 index 62ed92f..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c +++ /dev/null @@ -1,487 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-fw-com.h" -#include "ipu6-isys.h" -#include "ipu6-platform-isys-csi2-reg.h" -#include "ipu6-platform-regs.h" - -static const char send_msg_types[N_IPU6_FW_ISYS_SEND_TYPE][32] = { - "STREAM_OPEN", - "STREAM_START", - "STREAM_START_AND_CAPTURE", - "STREAM_CAPTURE", - "STREAM_STOP", - "STREAM_FLUSH", - "STREAM_CLOSE" -}; - -static int handle_proxy_response(struct ipu6_isys *isys, unsigned int req_id) -{ - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_fw_isys_proxy_resp_info_abi *resp; - int ret; - - resp = ipu6_recv_get_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); - if (!resp) - return 1; - - dev_dbg(dev, "Proxy response: id %u, error %u, details %u\n", - resp->request_id, resp->error_info.error, - resp->error_info.error_details); - - ret = req_id == resp->request_id ? 
0 : -EIO; - - ipu6_recv_put_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES); - - return ret; -} - -int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, - unsigned int req_id, - unsigned int index, - unsigned int offset, u32 value) -{ - struct ipu6_fw_com_context *ctx = isys->fwcom; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_fw_proxy_send_queue_token *token; - unsigned int timeout = 1000; - int ret; - - dev_dbg(dev, - "proxy send: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n", - req_id, index, offset, value); - - token = ipu6_send_get_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); - if (!token) - return -EBUSY; - - token->request_id = req_id; - token->region_index = index; - token->offset = offset; - token->value = value; - ipu6_send_put_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES); - - do { - usleep_range(100, 110); - ret = handle_proxy_response(isys, req_id); - if (!ret) - break; - if (ret == -EIO) { - dev_err(dev, "Proxy respond with unexpected id\n"); - break; - } - timeout--; - } while (ret && timeout); - - if (!timeout) - dev_err(dev, "Proxy response timed out\n"); - - return ret; -} - -int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, - void *cpu_mapped_buf, - dma_addr_t dma_mapped_buf, - size_t size, u16 send_type) -{ - struct ipu6_fw_com_context *ctx = isys->fwcom; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_fw_send_queue_token *token; - - if (send_type >= N_IPU6_FW_ISYS_SEND_TYPE) - return -EINVAL; - - dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]); - - /* - * Time to flush cache in case we have some payload. 
Not all messages - * have that - */ - if (cpu_mapped_buf) - clflush_cache_range(cpu_mapped_buf, size); - - token = ipu6_send_get_token(ctx, - stream_handle + IPU6_BASE_MSG_SEND_QUEUES); - if (!token) - return -EBUSY; - - token->payload = dma_mapped_buf; - token->buf_handle = (unsigned long)cpu_mapped_buf; - token->send_type = send_type; - - ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES); - - return 0; -} - -int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, u16 send_type) -{ - return ipu6_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, - send_type); -} - -int ipu6_fw_isys_close(struct ipu6_isys *isys) -{ - struct device *dev = &isys->adev->auxdev.dev; - int retry = IPU6_ISYS_CLOSE_RETRY; - unsigned long flags; - void *fwcom; - int ret; - - /* - * Stop the isys fw. Actual close takes - * some time as the FW must stop its actions including code fetch - * to SP icache. - * spinlock to wait the interrupt handler to be finished - */ - spin_lock_irqsave(&isys->power_lock, flags); - ret = ipu6_fw_com_close(isys->fwcom); - fwcom = isys->fwcom; - isys->fwcom = NULL; - spin_unlock_irqrestore(&isys->power_lock, flags); - if (ret) - dev_err(dev, "Device close failure: %d\n", ret); - - /* release probably fails if the close failed. 
Let's try still */ - do { - usleep_range(400, 500); - ret = ipu6_fw_com_release(fwcom, 0); - retry--; - } while (ret && retry); - - if (ret) { - dev_err(dev, "Device release time out %d\n", ret); - spin_lock_irqsave(&isys->power_lock, flags); - isys->fwcom = fwcom; - spin_unlock_irqrestore(&isys->power_lock, flags); - } - - return ret; -} - -void ipu6_fw_isys_cleanup(struct ipu6_isys *isys) -{ - int ret; - - ret = ipu6_fw_com_release(isys->fwcom, 1); - if (ret < 0) - dev_warn(&isys->adev->auxdev.dev, - "Device busy, fw_com release failed."); - isys->fwcom = NULL; -} - -static void start_sp(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - void __iomem *spc_regs_base = isys->pdata->base + - isys->pdata->ipdata->hw_variant.spc_offset; - u32 val = IPU6_ISYS_SPC_STATUS_START | - IPU6_ISYS_SPC_STATUS_RUN | - IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; - - val |= isys->icache_prefetch ? IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; - - writel(val, spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); -} - -static int query_sp(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - void __iomem *spc_regs_base = isys->pdata->base + - isys->pdata->ipdata->hw_variant.spc_offset; - u32 val; - - val = readl(spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL); - /* return true when READY == 1, START == 0 */ - val &= IPU6_ISYS_SPC_STATUS_READY | IPU6_ISYS_SPC_STATUS_START; - - return val == IPU6_ISYS_SPC_STATUS_READY; -} - -static int ipu6_isys_fwcom_cfg_init(struct ipu6_isys *isys, - struct ipu6_fw_com_cfg *fwcom, - unsigned int num_streams) -{ - unsigned int max_send_queues, max_sram_blocks, max_devq_size; - struct ipu6_fw_syscom_queue_config *input_queue_cfg; - struct ipu6_fw_syscom_queue_config *output_queue_cfg; - struct device *dev = &isys->adev->auxdev.dev; - int type_proxy = IPU6_FW_ISYS_QUEUE_TYPE_PROXY; - int type_dev = IPU6_FW_ISYS_QUEUE_TYPE_DEV; - int type_msg = IPU6_FW_ISYS_QUEUE_TYPE_MSG; - int 
base_dev_send = IPU6_BASE_DEV_SEND_QUEUES; - int base_msg_send = IPU6_BASE_MSG_SEND_QUEUES; - int base_msg_recv = IPU6_BASE_MSG_RECV_QUEUES; - struct ipu6_fw_isys_fw_config *isys_fw_cfg; - u32 num_in_message_queues; - unsigned int max_streams; - unsigned int size; - unsigned int i; - - max_streams = isys->pdata->ipdata->max_streams; - max_send_queues = isys->pdata->ipdata->max_send_queues; - max_sram_blocks = isys->pdata->ipdata->max_sram_blocks; - max_devq_size = isys->pdata->ipdata->max_devq_size; - num_in_message_queues = clamp(num_streams, 1U, max_streams); - isys_fw_cfg = devm_kzalloc(dev, sizeof(*isys_fw_cfg), GFP_KERNEL); - if (!isys_fw_cfg) - return -ENOMEM; - - isys_fw_cfg->num_send_queues[type_proxy] = IPU6_N_MAX_PROXY_SEND_QUEUES; - isys_fw_cfg->num_send_queues[type_dev] = IPU6_N_MAX_DEV_SEND_QUEUES; - isys_fw_cfg->num_send_queues[type_msg] = num_in_message_queues; - isys_fw_cfg->num_recv_queues[type_proxy] = IPU6_N_MAX_PROXY_RECV_QUEUES; - /* Common msg/dev return queue */ - isys_fw_cfg->num_recv_queues[type_dev] = 0; - isys_fw_cfg->num_recv_queues[type_msg] = 1; - - size = sizeof(*input_queue_cfg) * max_send_queues; - input_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); - if (!input_queue_cfg) - return -ENOMEM; - - size = sizeof(*output_queue_cfg) * IPU6_N_MAX_RECV_QUEUES; - output_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL); - if (!output_queue_cfg) - return -ENOMEM; - - fwcom->input = input_queue_cfg; - fwcom->output = output_queue_cfg; - - fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] + - isys_fw_cfg->num_send_queues[type_dev] + - isys_fw_cfg->num_send_queues[type_msg]; - - fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] + - isys_fw_cfg->num_recv_queues[type_dev] + - isys_fw_cfg->num_recv_queues[type_msg]; - - /* SRAM partitioning. Equal partitioning is set. 
*/ - for (i = 0; i < max_sram_blocks; i++) { - if (i < num_in_message_queues) - isys_fw_cfg->buffer_partition.num_gda_pages[i] = - (IPU6_DEVICE_GDA_NR_PAGES * - IPU6_DEVICE_GDA_VIRT_FACTOR) / - num_in_message_queues; - else - isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0; - } - - /* FW assumes proxy interface at fwcom queue 0 */ - for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) { - input_queue_cfg[i].token_size = - sizeof(struct ipu6_fw_proxy_send_queue_token); - input_queue_cfg[i].queue_size = IPU6_ISYS_SIZE_PROXY_SEND_QUEUE; - } - - for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) { - input_queue_cfg[base_dev_send + i].token_size = - sizeof(struct ipu6_fw_send_queue_token); - input_queue_cfg[base_dev_send + i].queue_size = max_devq_size; - } - - for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) { - input_queue_cfg[base_msg_send + i].token_size = - sizeof(struct ipu6_fw_send_queue_token); - input_queue_cfg[base_msg_send + i].queue_size = - IPU6_ISYS_SIZE_SEND_QUEUE; - } - - for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) { - output_queue_cfg[i].token_size = - sizeof(struct ipu6_fw_proxy_resp_queue_token); - output_queue_cfg[i].queue_size = - IPU6_ISYS_SIZE_PROXY_RECV_QUEUE; - } - /* There is no recv DEV queue */ - for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) { - output_queue_cfg[base_msg_recv + i].token_size = - sizeof(struct ipu6_fw_resp_queue_token); - output_queue_cfg[base_msg_recv + i].queue_size = - IPU6_ISYS_SIZE_RECV_QUEUE; - } - - fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset; - fwcom->specific_addr = isys_fw_cfg; - fwcom->specific_size = sizeof(*isys_fw_cfg); - - return 0; -} - -int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams) -{ - struct device *dev = &isys->adev->auxdev.dev; - int retry = IPU6_ISYS_OPEN_RETRY; - struct ipu6_fw_com_cfg fwcom = { - .cell_start = start_sp, - .cell_ready = query_sp, - .buttress_boot_offset = 
SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET, - }; - int ret; - - ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams); - - isys->fwcom = ipu6_fw_com_prepare(&fwcom, isys->adev, - isys->pdata->base); - if (!isys->fwcom) { - dev_err(dev, "isys fw com prepare failed\n"); - return -EIO; - } - - ret = ipu6_fw_com_open(isys->fwcom); - if (ret) { - dev_err(dev, "isys fw com open failed %d\n", ret); - return ret; - } - - do { - usleep_range(400, 500); - if (ipu6_fw_com_ready(isys->fwcom)) - break; - retry--; - } while (retry > 0); - - if (!retry) { - dev_err(dev, "isys port open ready failed %d\n", ret); - ipu6_fw_isys_close(isys); - ret = -EIO; - } - - return ret; -} - -struct ipu6_fw_isys_resp_info_abi * -ipu6_fw_isys_get_resp(void *context, unsigned int queue) -{ - return ipu6_recv_get_token(context, queue); -} - -void ipu6_fw_isys_put_resp(void *context, unsigned int queue) -{ - ipu6_recv_put_token(context, queue); -} - -void ipu6_fw_isys_dump_stream_cfg(struct device *dev, - struct ipu6_fw_isys_stream_cfg_data_abi *cfg) -{ - unsigned int i; - - dev_dbg(dev, "-----------------------------------------------------\n"); - dev_dbg(dev, "IPU6_FW_ISYS_STREAM_CFG_DATA\n"); - - dev_dbg(dev, "compfmt = %d\n", cfg->vc); - dev_dbg(dev, "src = %d\n", cfg->src); - dev_dbg(dev, "vc = %d\n", cfg->vc); - dev_dbg(dev, "isl_use = %d\n", cfg->isl_use); - dev_dbg(dev, "sensor_type = %d\n", cfg->sensor_type); - - dev_dbg(dev, "send_irq_sof_discarded = %d\n", - cfg->send_irq_sof_discarded); - dev_dbg(dev, "send_irq_eof_discarded = %d\n", - cfg->send_irq_eof_discarded); - dev_dbg(dev, "send_resp_sof_discarded = %d\n", - cfg->send_resp_sof_discarded); - dev_dbg(dev, "send_resp_eof_discarded = %d\n", - cfg->send_resp_eof_discarded); - - dev_dbg(dev, "crop:\n"); - dev_dbg(dev, "\t.left_top = [%d, %d]\n", cfg->crop.left_offset, - cfg->crop.top_offset); - dev_dbg(dev, "\t.right_bottom = [%d, %d]\n", cfg->crop.right_offset, - cfg->crop.bottom_offset); - - dev_dbg(dev, "nof_input_pins = %d\n", 
cfg->nof_input_pins); - for (i = 0; i < cfg->nof_input_pins; i++) { - dev_dbg(dev, "input pin[%d]:\n", i); - dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt); - dev_dbg(dev, "\t.mipi_store_mode = %d\n", - cfg->input_pins[i].mipi_store_mode); - dev_dbg(dev, "\t.bits_per_pix = %d\n", - cfg->input_pins[i].bits_per_pix); - dev_dbg(dev, "\t.mapped_dt = 0x%0x\n", - cfg->input_pins[i].mapped_dt); - dev_dbg(dev, "\t.input_res = %dx%d\n", - cfg->input_pins[i].input_res.width, - cfg->input_pins[i].input_res.height); - dev_dbg(dev, "\t.mipi_decompression = %d\n", - cfg->input_pins[i].mipi_decompression); - dev_dbg(dev, "\t.capture_mode = %d\n", - cfg->input_pins[i].capture_mode); - } - - dev_dbg(dev, "nof_output_pins = %d\n", cfg->nof_output_pins); - for (i = 0; i < cfg->nof_output_pins; i++) { - dev_dbg(dev, "output_pin[%d]:\n", i); - dev_dbg(dev, "\t.input_pin_id = %d\n", - cfg->output_pins[i].input_pin_id); - dev_dbg(dev, "\t.output_res = %dx%d\n", - cfg->output_pins[i].output_res.width, - cfg->output_pins[i].output_res.height); - dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride); - dev_dbg(dev, "\t.pt = %d\n", cfg->output_pins[i].pt); - dev_dbg(dev, "\t.payload_buf_size = %d\n", - cfg->output_pins[i].payload_buf_size); - dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft); - dev_dbg(dev, "\t.watermark_in_lines = %d\n", - cfg->output_pins[i].watermark_in_lines); - dev_dbg(dev, "\t.send_irq = %d\n", - cfg->output_pins[i].send_irq); - dev_dbg(dev, "\t.reserve_compression = %d\n", - cfg->output_pins[i].reserve_compression); - dev_dbg(dev, "\t.snoopable = %d\n", - cfg->output_pins[i].snoopable); - dev_dbg(dev, "\t.error_handling_enable = %d\n", - cfg->output_pins[i].error_handling_enable); - dev_dbg(dev, "\t.sensor_type = %d\n", - cfg->output_pins[i].sensor_type); - } - dev_dbg(dev, "-----------------------------------------------------\n"); -} - -void -ipu6_fw_isys_dump_frame_buff_set(struct device *dev, - struct ipu6_fw_isys_frame_buff_set_abi *buf, - 
unsigned int outputs) -{ - unsigned int i; - - dev_dbg(dev, "-----------------------------------------------------\n"); - dev_dbg(dev, "IPU6_FW_ISYS_FRAME_BUFF_SET\n"); - - for (i = 0; i < outputs; i++) { - dev_dbg(dev, "output_pin[%d]:\n", i); - dev_dbg(dev, "\t.out_buf_id = %llu\n", - buf->output_pins[i].out_buf_id); - dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr); - dev_dbg(dev, "\t.compress = %d\n", - buf->output_pins[i].compress); - } - - dev_dbg(dev, "send_irq_sof = 0x%x\n", buf->send_irq_sof); - dev_dbg(dev, "send_irq_eof = 0x%x\n", buf->send_irq_eof); - dev_dbg(dev, "send_resp_sof = 0x%x\n", buf->send_resp_sof); - dev_dbg(dev, "send_resp_eof = 0x%x\n", buf->send_resp_eof); - dev_dbg(dev, "send_irq_capture_ack = 0x%x\n", - buf->send_irq_capture_ack); - dev_dbg(dev, "send_irq_capture_done = 0x%x\n", - buf->send_irq_capture_done); - dev_dbg(dev, "send_resp_capture_ack = 0x%x\n", - buf->send_resp_capture_ack); - dev_dbg(dev, "send_resp_capture_done = 0x%x\n", - buf->send_resp_capture_done); - - dev_dbg(dev, "-----------------------------------------------------\n"); -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h deleted file mode 100644 index b60f020..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h +++ /dev/null @@ -1,596 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_FW_ISYS_H -#define IPU6_FW_ISYS_H - -#include - -struct device; -struct ipu6_isys; - -/* Max number of Input/Output Pins */ -#define IPU6_MAX_IPINS 4 - -#define IPU6_MAX_OPINS ((IPU6_MAX_IPINS) + 1) - -#define IPU6_STREAM_ID_MAX 16 -#define IPU6_NONSECURE_STREAM_ID_MAX 12 -#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX) -#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX) -#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX) -#define IPU6SE_STREAM_ID_MAX 8 -#define IPU6SE_NONSECURE_STREAM_ID_MAX 4 -#define 
IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX) -#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX) -#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX) - -/* Single return queue for all streams/commands type */ -#define IPU6_N_MAX_MSG_RECV_QUEUES 1 -/* Single device queue for high priority commands (bypass in-order queue) */ -#define IPU6_N_MAX_DEV_SEND_QUEUES 1 -/* Single dedicated send queue for proxy interface */ -#define IPU6_N_MAX_PROXY_SEND_QUEUES 1 -/* Single dedicated recv queue for proxy interface */ -#define IPU6_N_MAX_PROXY_RECV_QUEUES 1 -/* Send queues layout */ -#define IPU6_BASE_PROXY_SEND_QUEUES 0 -#define IPU6_BASE_DEV_SEND_QUEUES \ - (IPU6_BASE_PROXY_SEND_QUEUES + IPU6_N_MAX_PROXY_SEND_QUEUES) -#define IPU6_BASE_MSG_SEND_QUEUES \ - (IPU6_BASE_DEV_SEND_QUEUES + IPU6_N_MAX_DEV_SEND_QUEUES) -/* Recv queues layout */ -#define IPU6_BASE_PROXY_RECV_QUEUES 0 -#define IPU6_BASE_MSG_RECV_QUEUES \ - (IPU6_BASE_PROXY_RECV_QUEUES + IPU6_N_MAX_PROXY_RECV_QUEUES) -#define IPU6_N_MAX_RECV_QUEUES \ - (IPU6_BASE_MSG_RECV_QUEUES + IPU6_N_MAX_MSG_RECV_QUEUES) - -#define IPU6_N_MAX_SEND_QUEUES \ - (IPU6_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES) -#define IPU6SE_N_MAX_SEND_QUEUES \ - (IPU6_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES) - -/* Max number of planes for frame formats supported by the FW */ -#define IPU6_PIN_PLANES_MAX 4 - -#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 -#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 -#define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 -#define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 -/* - * Device close takes some time from last ack message to actual stopping - * of the SP processor. As long as the SP processor runs we can't proceed with - * clean up of resources. 
- */ -#define IPU6_ISYS_OPEN_RETRY 2000 -#define IPU6_ISYS_CLOSE_RETRY 2000 -#define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) - -enum ipu6_fw_isys_resp_type { - IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, - IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, - IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, - IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, - IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, - IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, - IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, - IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, - IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, - IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, - IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, - IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, - N_IPU6_FW_ISYS_RESP_TYPE -}; - -enum ipu6_fw_isys_send_type { - IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, - IPU6_FW_ISYS_SEND_TYPE_STREAM_START, - IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, - IPU6_FW_ISYS_SEND_TYPE_STREAM_STOP, - IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE, - N_IPU6_FW_ISYS_SEND_TYPE -}; - -enum ipu6_fw_isys_queue_type { - IPU6_FW_ISYS_QUEUE_TYPE_PROXY = 0, - IPU6_FW_ISYS_QUEUE_TYPE_DEV, - IPU6_FW_ISYS_QUEUE_TYPE_MSG, - N_IPU6_FW_ISYS_QUEUE_TYPE -}; - -enum ipu6_fw_isys_stream_source { - IPU6_FW_ISYS_STREAM_SRC_PORT_0 = 0, - IPU6_FW_ISYS_STREAM_SRC_PORT_1, - IPU6_FW_ISYS_STREAM_SRC_PORT_2, - IPU6_FW_ISYS_STREAM_SRC_PORT_3, - IPU6_FW_ISYS_STREAM_SRC_PORT_4, - IPU6_FW_ISYS_STREAM_SRC_PORT_5, - IPU6_FW_ISYS_STREAM_SRC_PORT_6, - IPU6_FW_ISYS_STREAM_SRC_PORT_7, - IPU6_FW_ISYS_STREAM_SRC_PORT_8, - IPU6_FW_ISYS_STREAM_SRC_PORT_9, - IPU6_FW_ISYS_STREAM_SRC_PORT_10, - IPU6_FW_ISYS_STREAM_SRC_PORT_11, - 
IPU6_FW_ISYS_STREAM_SRC_PORT_12, - IPU6_FW_ISYS_STREAM_SRC_PORT_13, - IPU6_FW_ISYS_STREAM_SRC_PORT_14, - IPU6_FW_ISYS_STREAM_SRC_PORT_15, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_2, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_3, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_4, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_5, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_6, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_7, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_8, - IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_9, - N_IPU6_FW_ISYS_STREAM_SRC -}; - -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU6_FW_ISYS_STREAM_SRC_PORT_0 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU6_FW_ISYS_STREAM_SRC_PORT_1 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU6_FW_ISYS_STREAM_SRC_PORT_2 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU6_FW_ISYS_STREAM_SRC_PORT_3 - -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU6_FW_ISYS_STREAM_SRC_PORT_4 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU6_FW_ISYS_STREAM_SRC_PORT_5 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_6 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_7 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_8 -#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 \ - IPU6_FW_ISYS_STREAM_SRC_PORT_9 - -#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0 -#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1 - -/* - * enum ipu6_fw_isys_mipi_vc: MIPI csi2 spec - * supports up to 4 virtual per physical channel - */ -enum ipu6_fw_isys_mipi_vc { - IPU6_FW_ISYS_MIPI_VC_0 = 0, - IPU6_FW_ISYS_MIPI_VC_1, - IPU6_FW_ISYS_MIPI_VC_2, - IPU6_FW_ISYS_MIPI_VC_3, - N_IPU6_FW_ISYS_MIPI_VC -}; - -enum ipu6_fw_isys_frame_format_type { - IPU6_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ - IPU6_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ - 
IPU6_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ - /* 12 bit YUV 420, Intel proprietary tiled format */ - IPU6_FW_ISYS_FRAME_FORMAT_NV12_TILEY, - - IPU6_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ - IPU6_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ - IPU6_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ - IPU6_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ - IPU6_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ - /* Internal format, 2 y lines followed by a uvinterleaved line */ - IPU6_FW_ISYS_FRAME_FORMAT_YUV_LINE, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ - IPU6_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ - /** - * 16 bit RGB, 1 plane. Each 3 sub pixels are packed into one 16 bit - * value, 5 bits for R, 6 bits for G and 5 bits for B. 
- */ - IPU6_FW_ISYS_FRAME_FORMAT_RGB565, - IPU6_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ - IPU6_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha */ - IPU6_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ - IPU6_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ - N_IPU6_FW_ISYS_FRAME_FORMAT -}; - -enum ipu6_fw_isys_pin_type { - /* captured as MIPI packets */ - IPU6_FW_ISYS_PIN_TYPE_MIPI = 0, - /* captured through the SoC path */ - IPU6_FW_ISYS_PIN_TYPE_RAW_SOC = 3, -}; - -/* - * enum ipu6_fw_isys_mipi_store_mode. Describes if long MIPI packets reach - * MIPI SRAM with the long packet header or - * if not, then only option is to capture it with pin type MIPI. - */ -enum ipu6_fw_isys_mipi_store_mode { - IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, - IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, - N_IPU6_FW_ISYS_MIPI_STORE_MODE -}; - -enum ipu6_fw_isys_capture_mode { - IPU6_FW_ISYS_CAPTURE_MODE_REGULAR = 0, - IPU6_FW_ISYS_CAPTURE_MODE_BURST, - N_IPU6_FW_ISYS_CAPTURE_MODE, -}; - -enum ipu6_fw_isys_sensor_mode { - IPU6_FW_ISYS_SENSOR_MODE_NORMAL = 0, - IPU6_FW_ISYS_SENSOR_MODE_TOBII, - N_IPU6_FW_ISYS_SENSOR_MODE, -}; - -enum ipu6_fw_isys_error { - IPU6_FW_ISYS_ERROR_NONE = 0, - IPU6_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, - IPU6_FW_ISYS_ERROR_HW_CONSISTENCY, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, - IPU6_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, - IPU6_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, - IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, - IPU6_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, - IPU6_FW_ISYS_ERROR_SENSOR_FW_SYNC, - IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, - IPU6_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, - N_IPU6_FW_ISYS_ERROR -}; - -enum ipu6_fw_proxy_error { - IPU6_FW_PROXY_ERROR_NONE = 0, - IPU6_FW_PROXY_ERROR_INVALID_WRITE_REGION, - IPU6_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, - 
N_IPU6_FW_PROXY_ERROR -}; - -/* firmware ABI structure below are aligned in firmware, no need pack */ -struct ipu6_fw_isys_buffer_partition_abi { - u32 num_gda_pages[IPU6_STREAM_ID_MAX]; -}; - -struct ipu6_fw_isys_fw_config { - struct ipu6_fw_isys_buffer_partition_abi buffer_partition; - u32 num_send_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; - u32 num_recv_queues[N_IPU6_FW_ISYS_QUEUE_TYPE]; -}; - -/* - * struct ipu6_fw_isys_resolution_abi: Generic resolution structure. - */ -struct ipu6_fw_isys_resolution_abi { - u32 width; - u32 height; -}; - -/** - * struct ipu6_fw_isys_output_pin_payload_abi - ISYS output pin buffer - * @out_buf_id: Points to output pin buffer - buffer identifier - * @addr: Points to output pin buffer - CSS Virtual Address - * @compress: Request frame compression (1), or not (0) - */ -struct ipu6_fw_isys_output_pin_payload_abi { - u64 out_buf_id; - u32 addr; - u32 compress; -}; - -/** - * struct ipu6_fw_isys_output_pin_info_abi - ISYS output pin info - * @output_res: output pin resolution - * @stride: output stride in Bytes (not valid for statistics) - * @watermark_in_lines: pin watermark level in lines - * @payload_buf_size: minimum size in Bytes of all buffers that will be - * supplied for capture on this pin - * @ts_offsets: ts_offsets - * @s2m_pixel_soc_pixel_remapping: pixel soc remapping (see the definition of - * S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING) - * @csi_be_soc_pixel_remapping: see s2m_pixel_soc_pixel_remapping - * @send_irq: assert if pin event should trigger irq - * @input_pin_id: related input pin id - * @pt: pin type -real format "enum ipu6_fw_isys_pin_type" - * @ft: frame format type -real format "enum ipu6_fw_isys_frame_format_type" - * @reserved: a reserved field - * @reserve_compression: reserve compression resources for pin - * @snoopable: snoopable - * @error_handling_enable: enable error handling - * @sensor_type: sensor_type - */ -struct ipu6_fw_isys_output_pin_info_abi { - struct ipu6_fw_isys_resolution_abi 
output_res; - u32 stride; - u32 watermark_in_lines; - u32 payload_buf_size; - u32 ts_offsets[IPU6_PIN_PLANES_MAX]; - u32 s2m_pixel_soc_pixel_remapping; - u32 csi_be_soc_pixel_remapping; - u8 send_irq; - u8 input_pin_id; - u8 pt; - u8 ft; - u8 reserved; - u8 reserve_compression; - u8 snoopable; - u8 error_handling_enable; - u32 sensor_type; -}; - -/** - * struct ipu6_fw_isys_input_pin_info_abi - ISYS input pin info - * @input_res: input resolution - * @dt: mipi data type ((enum ipu6_fw_isys_mipi_data_type) - * @mipi_store_mode: defines if legacy long packet header will be stored or - * discarded if discarded, output pin type for this - * input pin can only be MIPI - * (enum ipu6_fw_isys_mipi_store_mode) - * @bits_per_pix: native bits per pixel - * @mapped_dt: actual data type from sensor - * @mipi_decompression: defines which compression will be in mipi backend - * @crop_first_and_last_lines: Control whether to crop the first and last line - * of the input image. Crop done by HW device. - * @capture_mode: mode of capture, regular or burst, default value is regular - * @reserved: a reserved field - */ -struct ipu6_fw_isys_input_pin_info_abi { - struct ipu6_fw_isys_resolution_abi input_res; - u8 dt; - u8 mipi_store_mode; - u8 bits_per_pix; - u8 mapped_dt; - u8 mipi_decompression; - u8 crop_first_and_last_lines; - u8 capture_mode; - u8 reserved; -}; - -/** - * struct ipu6_fw_isys_cropping_abi - ISYS cropping coordinates - * @top_offset: Top offset - * @left_offset: Left offset - * @bottom_offset: Bottom offset - * @right_offset: Right offset - */ -struct ipu6_fw_isys_cropping_abi { - s32 top_offset; - s32 left_offset; - s32 bottom_offset; - s32 right_offset; -}; - -/** - * struct ipu6_fw_isys_stream_cfg_data_abi - ISYS stream configuration data - * ISYS stream configuration data structure - * @crop: for extended use and is not used in FW currently - * @input_pins: input pin descriptors - * @output_pins: output pin descriptors - * @compfmt: de-compression setting for 
User Defined Data - * @nof_input_pins: number of input pins - * @nof_output_pins: number of output pins - * @send_irq_sof_discarded: send irq on discarded frame sof response - * - if '1' it will override the send_resp_sof_discarded - * and send the response - * - if '0' the send_resp_sof_discarded will determine - * whether to send the response - * @send_irq_eof_discarded: send irq on discarded frame eof response - * - if '1' it will override the send_resp_eof_discarded - * and send the response - * - if '0' the send_resp_eof_discarded will determine - * whether to send the response - * @send_resp_sof_discarded: send response for discarded frame sof detected, - * used only when send_irq_sof_discarded is '0' - * @send_resp_eof_discarded: send response for discarded frame eof detected, - * used only when send_irq_eof_discarded is '0' - * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1 - * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) - * @isl_use: indicates whether stream requires ISL and how - * @sensor_type: type of connected sensor, tobii or others, default is 0 - * @reserved: a reserved field - * @reserved2: a reserved field - */ -struct ipu6_fw_isys_stream_cfg_data_abi { - struct ipu6_fw_isys_cropping_abi crop; - struct ipu6_fw_isys_input_pin_info_abi input_pins[IPU6_MAX_IPINS]; - struct ipu6_fw_isys_output_pin_info_abi output_pins[IPU6_MAX_OPINS]; - u32 compfmt; - u8 nof_input_pins; - u8 nof_output_pins; - u8 send_irq_sof_discarded; - u8 send_irq_eof_discarded; - u8 send_resp_sof_discarded; - u8 send_resp_eof_discarded; - u8 src; - u8 vc; - u8 isl_use; - u8 sensor_type; - u8 reserved; - u8 reserved2; -}; - -/** - * struct ipu6_fw_isys_frame_buff_set_abi - ISYS frame buffer set (request) - * @output_pins: output pin addresses - * @send_irq_sof: send irq on frame sof response - * - if '1' it will override the send_resp_sof and - * send the response - * - if '0' the send_resp_sof will determine whether to - * send the response - * 
@send_irq_eof: send irq on frame eof response - * - if '1' it will override the send_resp_eof and - * send the response - * - if '0' the send_resp_eof will determine whether to - * send the response - * @send_irq_capture_ack: send irq on capture ack - * @send_irq_capture_done: send irq on capture done - * @send_resp_sof: send response for frame sof detected, - * used only when send_irq_sof is '0' - * @send_resp_eof: send response for frame eof detected, - * used only when send_irq_eof is '0' - * @send_resp_capture_ack: send response for capture ack event - * @send_resp_capture_done: send response for capture done event - * @reserved: a reserved field - */ -struct ipu6_fw_isys_frame_buff_set_abi { - struct ipu6_fw_isys_output_pin_payload_abi output_pins[IPU6_MAX_OPINS]; - u8 send_irq_sof; - u8 send_irq_eof; - u8 send_irq_capture_ack; - u8 send_irq_capture_done; - u8 send_resp_sof; - u8 send_resp_eof; - u8 send_resp_capture_ack; - u8 send_resp_capture_done; - u8 reserved[8]; -}; - -/** - * struct ipu6_fw_isys_error_info_abi - ISYS error information - * @error: error code if something went wrong - * @error_details: depending on error code, it may contain additional error info - */ -struct ipu6_fw_isys_error_info_abi { - u32 error; - u32 error_details; -}; - -/** - * struct ipu6_fw_isys_resp_info_abi - ISYS firmware response - * @buf_id: buffer ID - * @pin: this var is only valid for pin event related responses, - * contains pin addresses - * @error_info: error information from the FW - * @timestamp: Time information for event if available - * @stream_handle: stream id the response corresponds to - * @type: response type (enum ipu6_fw_isys_resp_type) - * @pin_id: pin id that the pin payload corresponds to - * @reserved: a reserved field - * @reserved2: a reserved field - */ -struct ipu6_fw_isys_resp_info_abi { - u64 buf_id; - struct ipu6_fw_isys_output_pin_payload_abi pin; - struct ipu6_fw_isys_error_info_abi error_info; - u32 timestamp[2]; - u8 stream_handle; - u8 
type; - u8 pin_id; - u8 reserved; - u32 reserved2; -}; - -/** - * struct ipu6_fw_isys_proxy_error_info_abi - ISYS proxy error - * @error: error code if something went wrong - * @error_details: depending on error code, it may contain additional error info - */ -struct ipu6_fw_isys_proxy_error_info_abi { - u32 error; - u32 error_details; -}; - -struct ipu6_fw_isys_proxy_resp_info_abi { - u32 request_id; - struct ipu6_fw_isys_proxy_error_info_abi error_info; -}; - -/** - * struct ipu6_fw_proxy_write_queue_token - ISYS proxy write queue token - * @request_id: update id for the specific proxy write request - * @region_index: Region id for the proxy write request - * @offset: Offset of the write request according to the base address - * of the region - * @value: Value that is requested to be written with the proxy write request - */ -struct ipu6_fw_proxy_write_queue_token { - u32 request_id; - u32 region_index; - u32 offset; - u32 value; -}; - -/** - * struct ipu6_fw_resp_queue_token - ISYS response queue token - * @resp_info: response info - */ -struct ipu6_fw_resp_queue_token { - struct ipu6_fw_isys_resp_info_abi resp_info; -}; - -/** - * struct ipu6_fw_send_queue_token - ISYS send queue token - * @buf_handle: buffer handle - * @payload: payload - * @send_type: send_type - * @stream_id: stream_id - */ -struct ipu6_fw_send_queue_token { - u64 buf_handle; - u32 payload; - u16 send_type; - u16 stream_id; -}; - -/** - * struct ipu6_fw_proxy_resp_queue_token - ISYS proxy response queue token - * @proxy_resp_info: proxy response info - */ -struct ipu6_fw_proxy_resp_queue_token { - struct ipu6_fw_isys_proxy_resp_info_abi proxy_resp_info; -}; - -/** - * struct ipu6_fw_proxy_send_queue_token - SYS proxy send queue token - * @request_id: request_id - * @region_index: region_index - * @offset: offset - * @value: value - */ -struct ipu6_fw_proxy_send_queue_token { - u32 request_id; - u32 region_index; - u32 offset; - u32 value; -}; - -void -ipu6_fw_isys_dump_stream_cfg(struct 
device *dev, - struct ipu6_fw_isys_stream_cfg_data_abi *cfg); -void -ipu6_fw_isys_dump_frame_buff_set(struct device *dev, - struct ipu6_fw_isys_frame_buff_set_abi *buf, - unsigned int outputs); -int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams); -int ipu6_fw_isys_close(struct ipu6_isys *isys); -int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, u16 send_type); -int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys, - const unsigned int stream_handle, - void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, - size_t size, u16 send_type); -int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys, - unsigned int req_id, - unsigned int index, - unsigned int offset, u32 value); -void ipu6_fw_isys_cleanup(struct ipu6_isys *isys); -struct ipu6_fw_isys_resp_info_abi * -ipu6_fw_isys_get_resp(void *context, unsigned int queue); -void ipu6_fw_isys_put_resp(void *context, unsigned int queue); -#endif diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c deleted file mode 100644 index 6030bd2..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c +++ /dev/null @@ -1,643 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-isys-subdev.h" -#include "ipu6-platform-isys-csi2-reg.h" - -static const u32 csi2_supported_codes[] = { - MEDIA_BUS_FMT_RGB565_1X16, - MEDIA_BUS_FMT_RGB888_1X24, - MEDIA_BUS_FMT_UYVY8_1X16, - MEDIA_BUS_FMT_YUYV8_1X16, - MEDIA_BUS_FMT_SBGGR10_1X10, - MEDIA_BUS_FMT_SGBRG10_1X10, - MEDIA_BUS_FMT_SGRBG10_1X10, - MEDIA_BUS_FMT_SRGGB10_1X10, - MEDIA_BUS_FMT_SBGGR12_1X12, - MEDIA_BUS_FMT_SGBRG12_1X12, - MEDIA_BUS_FMT_SGRBG12_1X12, - 
MEDIA_BUS_FMT_SRGGB12_1X12, - MEDIA_BUS_FMT_SBGGR8_1X8, - MEDIA_BUS_FMT_SGBRG8_1X8, - MEDIA_BUS_FMT_SGRBG8_1X8, - MEDIA_BUS_FMT_SRGGB8_1X8, - MEDIA_BUS_FMT_META_8, - MEDIA_BUS_FMT_META_10, - MEDIA_BUS_FMT_META_12, - MEDIA_BUS_FMT_META_16, - MEDIA_BUS_FMT_META_24, - 0 -}; - -/* - * Strings corresponding to CSI-2 receiver errors are here. - * Corresponding macros are defined in the header file. - */ -static const struct ipu6_csi2_error dphy_rx_errors[] = { - { "Single packet header error corrected", true }, - { "Multiple packet header errors detected", true }, - { "Payload checksum (CRC) error", true }, - { "Transfer FIFO overflow", false }, - { "Reserved short packet data type detected", true }, - { "Reserved long packet data type detected", true }, - { "Incomplete long packet detected", false }, - { "Frame sync error", false }, - { "Line sync error", false }, - { "DPHY recoverable synchronization error", true }, - { "DPHY fatal error", false }, - { "DPHY elastic FIFO overflow", false }, - { "Inter-frame short packet discarded", true }, - { "Inter-frame long packet discarded", true }, - { "MIPI pktgen overflow", false }, - { "MIPI pktgen data loss", false }, - { "FIFO overflow", false }, - { "Lane deskew", false }, - { "SOT sync error", false }, - { "HSIDLE detected", false } -}; - -s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2) -{ - struct media_pad *src_pad; - - if (!csi2) - return -EINVAL; - - src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); - if (IS_ERR(src_pad)) { - dev_err(&csi2->isys->adev->auxdev.dev, - "can't get source pad of %s (%ld)\n", - csi2->asd.sd.name, PTR_ERR(src_pad)); - return PTR_ERR(src_pad); - } - - return v4l2_get_link_freq(src_pad, 0, 0); -} - -static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, - struct v4l2_event_subscription *sub) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct device *dev = 
&csi2->isys->adev->auxdev.dev; - - dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n", - sub->type, sub->id); - - switch (sub->type) { - case V4L2_EVENT_FRAME_SYNC: - return v4l2_event_subscribe(fh, sub, 10, NULL); - case V4L2_EVENT_CTRL: - return v4l2_ctrl_subscribe_event(fh, sub); - default: - return -EINVAL; - } -} - -static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { - .subscribe_event = csi2_subscribe_event, - .unsubscribe_event = v4l2_event_subdev_unsubscribe, -}; - -/* - * The input system CSI2+ receiver has several - * parameters affecting the receiver timings. These depend - * on the MIPI bus frequency F in Hz (sensor transmitter rate) - * as follows: - * register value = (A/1e9 + B * UI) / COUNT_ACC - * where - * UI = 1 / (2 * F) in seconds - * COUNT_ACC = counter accuracy in seconds - * COUNT_ACC = 0.125 ns = 1 / 8 ns, ACCINV = 8. - * - * A and B are coefficients from the table below, - * depending whether the register minimum or maximum value is - * calculated. - * Minimum Maximum - * Clock lane A B A B - * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 - * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 - * Data lanes - * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 - * - * We use the minimum values of both A and B. 
- */ - -#define DIV_SHIFT 8 -#define CSI2_ACCINV 8 - -static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv) -{ - return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) - / (s32)(link_freq >> DIV_SHIFT)); -} - -static int -ipu6_isys_csi2_calc_timing(struct ipu6_isys_csi2 *csi2, - struct ipu6_isys_csi2_timing *timing, s32 accinv) -{ - struct device *dev = &csi2->isys->adev->auxdev.dev; - s64 link_freq; - - link_freq = ipu6_isys_csi2_get_link_freq(csi2); - if (link_freq < 0) - return link_freq; - - timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, - CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, - link_freq, accinv); - timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, - CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, - link_freq, accinv); - timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, - CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, - link_freq, accinv); - timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, - CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, - link_freq, accinv); - - dev_dbg(dev, "ctermen %u csettle %u dtermen %u dsettle %u\n", - timing->ctermen, timing->csettle, - timing->dtermen, timing->dsettle); - - return 0; -} - -void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2) -{ - u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - struct ipu6_isys *isys = csi2->isys; - u32 mask; - - mask = isys->pdata->ipdata->csi2.irq_mask; - writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - csi2->receiver_errors |= irq & mask; -} - -void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2) -{ - struct device *dev = &csi2->isys->adev->auxdev.dev; - const struct ipu6_csi2_error *errors; - u32 status; - u32 i; - - /* register errors once more in case of interrupts are disabled */ - ipu6_isys_register_errors(csi2); - status = csi2->receiver_errors; - csi2->receiver_errors = 0; - errors = dphy_rx_errors; - - for (i = 0; i < 
CSI_RX_NUM_ERRORS_IN_IRQ; i++) { - if (status & BIT(i)) - dev_err_ratelimited(dev, "csi2-%i error: %s\n", - csi2->port, errors[i].error_string); - } -} - -static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, - const struct ipu6_isys_csi2_timing *timing, - unsigned int nlanes, int enable) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct ipu6_isys *isys = csi2->isys; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_isys_csi2_config cfg; - unsigned int nports; - int ret = 0; - u32 mask = 0; - u32 i; - - dev_dbg(dev, "stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off", - csi2->port, nlanes); - - cfg.port = csi2->port; - cfg.nlanes = nlanes; - - mask = isys->pdata->ipdata->csi2.irq_mask; - nports = isys->pdata->ipdata->csi2.nports; - - if (!enable) { - writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE); - writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE); - - writel(0, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - writel(0, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - writel(0xffffffff, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - - isys->phy_set_power(isys, &cfg, timing, false); - - writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT - (isys->pdata->ipdata->csi2.fw_access_port_ofs, - csi2->port)); - writel(0, isys->pdata->base + - CSI_REG_HUB_DRV_ACCESS_PORT(csi2->port)); - - return ret; - } - - /* reset port reset */ - writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST); - usleep_range(100, 200); - writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST); - - /* enable port clock */ - for (i = 0; i < nports; i++) { - writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i)); - writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT - 
(isys->pdata->ipdata->csi2.fw_access_port_ofs, i)); - } - - /* enable all error related irq */ - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); - writel(mask, - csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - - /* - * Using event from firmware instead of irq to handle CSI2 sync event - * which can reduce system wakeups. If CSI2 sync irq enabled, we need - * disable the firmware CSI2 sync event to avoid duplicate handling. - */ - writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); - writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); - writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); - - /* configure to enable FE and PPI2CSI */ - writel(0, csi2->base + CSI_REG_CSI_FE_MODE); - writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL); - writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID, - csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL); - writel(FIELD_PREP(PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK, nlanes - 1), - csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF); - - writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE); - writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE); - - ret = isys->phy_set_power(isys, &cfg, timing, true); - if (ret) - dev_err(dev, "csi-%d phy power up failed %d\n", csi2->port, - ret); - - return ret; -} - -static int 
ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct ipu6_isys_csi2_timing timing = { }; - struct v4l2_subdev *remote_sd; - struct media_pad *remote_pad; - u64 sink_streams; - int ret; - - remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); - remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); - - sink_streams = - v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, - &streams_mask); - - ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); - if (ret) - return ret; - - ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); - if (ret) - return ret; - - ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, - sink_streams); - if (ret) { - ipu6_isys_csi2_set_stream(sd, NULL, 0, false); - return ret; - } - - return 0; -} - -static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) -{ - struct v4l2_subdev *remote_sd; - struct media_pad *remote_pad; - u64 sink_streams; - - sink_streams = - v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, - &streams_mask); - - remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); - remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); - - ipu6_isys_csi2_set_stream(sd, NULL, 0, false); - - v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); - - return 0; -} - -static int ipu6_isys_csi2_set_sel(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_selection *sel) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct device *dev = &asd->isys->adev->auxdev.dev; - struct v4l2_mbus_framefmt *sink_ffmt; - struct v4l2_mbus_framefmt *src_ffmt; - struct v4l2_rect *crop; - - if (sel->pad == CSI2_PAD_SINK || sel->target != 
V4L2_SEL_TGT_CROP) - return -EINVAL; - - sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, - sel->pad, - sel->stream); - if (!sink_ffmt) - return -EINVAL; - - src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream); - if (!src_ffmt) - return -EINVAL; - - crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); - if (!crop) - return -EINVAL; - - /* Only vertical cropping is supported */ - sel->r.left = 0; - sel->r.width = sink_ffmt->width; - /* Non-bayer formats can't be single line cropped */ - if (!ipu6_isys_is_bayer_format(sink_ffmt->code)) - sel->r.top &= ~1; - sel->r.height = clamp(sel->r.height & ~1, IPU6_ISYS_MIN_HEIGHT, - sink_ffmt->height - sel->r.top); - *crop = sel->r; - - /* update source pad format */ - src_ffmt->width = sel->r.width; - src_ffmt->height = sel->r.height; - if (ipu6_isys_is_bayer_format(sink_ffmt->code)) - src_ffmt->code = ipu6_isys_convert_bayer_order(sink_ffmt->code, - sel->r.left, - sel->r.top); - dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n", - sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height, - src_ffmt->code); - - return 0; -} - -static int ipu6_isys_csi2_get_sel(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_selection *sel) -{ - struct v4l2_mbus_framefmt *sink_ffmt; - struct v4l2_rect *crop; - int ret = 0; - - if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK) - return -EINVAL; - - sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, - sel->pad, - sel->stream); - if (!sink_ffmt) - return -EINVAL; - - crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); - if (!crop) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_DEFAULT: - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = sink_ffmt->width; - sel->r.height = sink_ffmt->height; - break; - case V4L2_SEL_TGT_CROP: - sel->r = *crop; - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static 
const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { - .get_fmt = v4l2_subdev_get_fmt, - .set_fmt = ipu6_isys_subdev_set_fmt, - .get_selection = ipu6_isys_csi2_get_sel, - .set_selection = ipu6_isys_csi2_set_sel, - .enum_mbus_code = ipu6_isys_subdev_enum_mbus_code, - .set_routing = ipu6_isys_subdev_set_routing, - .enable_streams = ipu6_isys_csi2_enable_streams, - .disable_streams = ipu6_isys_csi2_disable_streams, -}; - -static const struct v4l2_subdev_ops csi2_sd_ops = { - .core = &csi2_sd_core_ops, - .pad = &csi2_sd_pad_ops, -}; - -static const struct media_entity_operations csi2_entity_ops = { - .link_validate = v4l2_subdev_link_validate, - .has_pad_interdep = v4l2_subdev_has_pad_interdep, -}; - -void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2) -{ - if (!csi2->isys) - return; - - v4l2_device_unregister_subdev(&csi2->asd.sd); - v4l2_subdev_cleanup(&csi2->asd.sd); - ipu6_isys_subdev_cleanup(&csi2->asd); - csi2->isys = NULL; -} - -int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, - struct ipu6_isys *isys, - void __iomem *base, unsigned int index) -{ - struct device *dev = &isys->adev->auxdev.dev; - int ret; - - csi2->isys = isys; - csi2->base = base; - csi2->port = index; - - csi2->asd.sd.entity.ops = &csi2_entity_ops; - csi2->asd.isys = isys; - ret = ipu6_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, - NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS); - if (ret) - goto fail; - - csi2->asd.source = IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index; - csi2->asd.supported_codes = csi2_supported_codes; - snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), - IPU6_ISYS_ENTITY_PREFIX " CSI2 %u", index); - v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); - ret = v4l2_subdev_init_finalize(&csi2->asd.sd); - if (ret) { - dev_err(dev, "failed to init v4l2 subdev\n"); - goto fail; - } - - ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); - if (ret) { - dev_err(dev, "failed to register v4l2 subdev\n"); - goto fail; - } - - return 0; - -fail: - 
ipu6_isys_csi2_cleanup(csi2); - - return ret; -} - -void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) -{ - struct video_device *vdev = stream->asd->sd.devnode; - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - struct v4l2_event ev = { - .type = V4L2_EVENT_FRAME_SYNC, - }; - - ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); - v4l2_event_queue(vdev, &ev); - - dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n", - csi2->port, ev.u.frame_sync.frame_sequence, stream->vc); -} - -void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) -{ - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - u32 frame_sequence = atomic_read(&stream->sequence); - - dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n", - csi2->port, frame_sequence); -} - -int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, - struct v4l2_mbus_frame_desc_entry *entry) -{ - struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; - struct device *dev = &csi2->isys->adev->auxdev.dev; - struct v4l2_mbus_frame_desc desc; - struct v4l2_subdev *source; - struct media_pad *pad; - unsigned int i; - int ret; - - source = media_entity_to_v4l2_subdev(source_entity); - if (!source) - return -EPIPE; - - pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]); - if (!pad) - return -EPIPE; - - ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc); - if (ret) - return ret; - - if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { - dev_err(dev, "Unsupported frame descriptor type\n"); - return -EINVAL; - } - - for (i = 0; i < desc.num_entries; i++) { - if (source_stream == desc.entry[i].stream) { - desc_entry = &desc.entry[i]; - break; - } - } - - if (!desc_entry) { - dev_err(dev, "Failed to find stream %u from remote 
subdev\n", - source_stream); - return -EINVAL; - } - - if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) { - dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc); - return -EINVAL; - } - - *entry = *desc_entry; - - return 0; -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h deleted file mode 100644 index ce8eed9..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h +++ /dev/null @@ -1,78 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_CSI2_H -#define IPU6_ISYS_CSI2_H - -#include - -#include "ipu6-isys-subdev.h" -#include "ipu6-isys-video.h" - -struct media_entity; -struct v4l2_mbus_frame_desc_entry; - -struct ipu6_isys_video; -struct ipu6_isys; -struct ipu6_isys_stream; - -#define NR_OF_CSI2_VC 16 -#define INVALID_VC_ID -1 -#define NR_OF_CSI2_SINK_PADS 1 -#define CSI2_PAD_SINK 0 -#define NR_OF_CSI2_SRC_PADS 8 -#define CSI2_PAD_SRC 1 -#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS) - -#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 -#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 - -#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 -#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 -#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 - -struct ipu6_isys_csi2 { - struct ipu6_isys_subdev asd; - struct ipu6_isys *isys; - struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS]; - - void __iomem *base; - u32 receiver_errors; - unsigned int nlanes; - unsigned int port; -}; - -struct ipu6_isys_csi2_timing { - u32 ctermen; - u32 csettle; - u32 dtermen; - u32 dsettle; -}; - -struct ipu6_csi2_error { - const char *error_string; - bool is_info_only; -}; - -#define ipu6_isys_subdev_to_csi2(__sd) \ - container_of(__sd, struct ipu6_isys_csi2, asd) - -#define to_ipu6_isys_csi2(__asd) 
container_of(__asd, struct ipu6_isys_csi2, asd) - -s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2); -int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, struct ipu6_isys *isys, - void __iomem *base, unsigned int index); -void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2); -void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream); -void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream); -void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2); -void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); -int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, - struct v4l2_mbus_frame_desc_entry *entry); - -#endif /* IPU6_ISYS_CSI2_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c deleted file mode 100644 index db28748..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c +++ /dev/null @@ -1,536 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-platform-isys-csi2-reg.h" - -#define IPU6_DWC_DPHY_BASE(i) (0x238038 + 0x34 * (i)) -#define IPU6_DWC_DPHY_RSTZ 0x00 -#define IPU6_DWC_DPHY_SHUTDOWNZ 0x04 -#define IPU6_DWC_DPHY_HSFREQRANGE 0x08 -#define IPU6_DWC_DPHY_CFGCLKFREQRANGE 0x0c -#define IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE 0x10 -#define IPU6_DWC_DPHY_TEST_IFC_REQ 0x14 -#define IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION 0x18 -#define IPU6_DWC_DPHY_DFT_CTRL0 0x28 -#define IPU6_DWC_DPHY_DFT_CTRL1 0x2c -#define IPU6_DWC_DPHY_DFT_CTRL2 0x30 - -/* - * test IFC request definition: - * - req: 0 for read, 1 for write - * - 12 bits address - * - 8bits data (will ignore for read) - * --24----16------4-----0 - * --|-data-|-addr-|-req-| - */ -#define IFC_REQ(req, addr, data) 
(FIELD_PREP(GENMASK(23, 16), data) | \ - FIELD_PREP(GENMASK(15, 4), addr) | \ - FIELD_PREP(GENMASK(1, 0), req)) - -#define TEST_IFC_REQ_READ 0 -#define TEST_IFC_REQ_WRITE 1 -#define TEST_IFC_REQ_RESET 2 - -#define TEST_IFC_ACCESS_MODE_FSM 0 -#define TEST_IFC_ACCESS_MODE_IFC_CTL 1 - -enum phy_fsm_state { - PHY_FSM_STATE_POWERON = 0, - PHY_FSM_STATE_BGPON = 1, - PHY_FSM_STATE_CAL_TYPE = 2, - PHY_FSM_STATE_BURNIN_CAL = 3, - PHY_FSM_STATE_TERMCAL = 4, - PHY_FSM_STATE_OFFSETCAL = 5, - PHY_FSM_STATE_OFFSET_LANE = 6, - PHY_FSM_STATE_IDLE = 7, - PHY_FSM_STATE_ULP = 8, - PHY_FSM_STATE_DDLTUNNING = 9, - PHY_FSM_STATE_SKEW_BACKWARD = 10, - PHY_FSM_STATE_INVALID, -}; - -static void dwc_dphy_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 data) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - - dev_dbg(dev, "write: reg 0x%zx = data 0x%x", base + addr - isys_base, - data); - writel(data, base + addr); -} - -static u32 dwc_dphy_read(struct ipu6_isys *isys, u32 phy_id, u32 addr) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - u32 data; - - data = readl(base + addr); - dev_dbg(dev, "read: reg 0x%zx = data 0x%x", base + addr - isys_base, - data); - - return data; -} - -static void dwc_dphy_write_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 data, u8 shift, u8 width) -{ - u32 temp; - u32 mask; - - mask = (1 << width) - 1; - temp = dwc_dphy_read(isys, phy_id, addr); - temp &= ~(mask << shift); - temp |= (data & mask) << shift; - dwc_dphy_write(isys, phy_id, addr, temp); -} - -static u32 __maybe_unused dwc_dphy_read_mask(struct ipu6_isys *isys, u32 phy_id, - u32 addr, u8 shift, u8 width) -{ - u32 val; - - val = dwc_dphy_read(isys, phy_id, addr) >> shift; - return val & ((1 << width) - 1); -} - -#define DWC_DPHY_TIMEOUT (5 * USEC_PER_SEC) 
-static int dwc_dphy_ifc_read(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 *val) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - void __iomem *reg; - u32 completion; - int ret; - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, - IFC_REQ(TEST_IFC_REQ_READ, addr, 0)); - reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; - ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), - 10, DWC_DPHY_TIMEOUT); - if (ret) { - dev_err(dev, "DWC ifc request read timeout\n"); - return ret; - } - - *val = completion >> 8 & 0xff; - *val = FIELD_GET(GENMASK(15, 8), completion); - dev_dbg(dev, "DWC ifc read 0x%x = 0x%x", addr, *val); - - return 0; -} - -static int dwc_dphy_ifc_write(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u32 data) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id); - void __iomem *reg; - u32 completion; - int ret; - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, - IFC_REQ(TEST_IFC_REQ_WRITE, addr, data)); - completion = readl(base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION); - reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION; - ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)), - 10, DWC_DPHY_TIMEOUT); - if (ret) - dev_err(dev, "DWC ifc request write timeout\n"); - - return ret; -} - -static void dwc_dphy_ifc_write_mask(struct ipu6_isys *isys, u32 phy_id, - u32 addr, u32 data, u8 shift, u8 width) -{ - u32 temp, mask; - int ret; - - ret = dwc_dphy_ifc_read(isys, phy_id, addr, &temp); - if (ret) - return; - - mask = (1 << width) - 1; - temp &= ~(mask << shift); - temp |= (data & mask) << shift; - dwc_dphy_ifc_write(isys, phy_id, addr, temp); -} - -static u32 dwc_dphy_ifc_read_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr, - u8 shift, u8 width) -{ - int ret; - u32 val; - - ret = 
dwc_dphy_ifc_read(isys, phy_id, addr, &val); - if (ret) - return 0; - - return ((val >> shift) & ((1 << width) - 1)); -} - -static int dwc_dphy_pwr_up(struct ipu6_isys *isys, u32 phy_id) -{ - struct device *dev = &isys->adev->auxdev.dev; - u32 fsm_state; - int ret; - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 1); - usleep_range(10, 20); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 1); - - ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state, - (fsm_state == PHY_FSM_STATE_IDLE || - fsm_state == PHY_FSM_STATE_ULP), - 100, DWC_DPHY_TIMEOUT, false, isys, - phy_id, 0x1e, 0, 4); - - if (ret) - dev_err(dev, "Dphy %d power up failed, state 0x%x", phy_id, - fsm_state); - - return ret; -} - -struct dwc_dphy_freq_range { - u8 hsfreq; - u16 min; - u16 max; - u16 default_mbps; - u16 osc_freq_target; -}; - -#define DPHY_FREQ_RANGE_NUM (63) -#define DPHY_FREQ_RANGE_INVALID_INDEX (0xff) -static const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = { - {0x00, 80, 97, 80, 335}, - {0x10, 80, 107, 90, 335}, - {0x20, 84, 118, 100, 335}, - {0x30, 93, 128, 110, 335}, - {0x01, 103, 139, 120, 335}, - {0x11, 112, 149, 130, 335}, - {0x21, 122, 160, 140, 335}, - {0x31, 131, 170, 150, 335}, - {0x02, 141, 181, 160, 335}, - {0x12, 150, 191, 170, 335}, - {0x22, 160, 202, 180, 335}, - {0x32, 169, 212, 190, 335}, - {0x03, 183, 228, 205, 335}, - {0x13, 198, 244, 220, 335}, - {0x23, 212, 259, 235, 335}, - {0x33, 226, 275, 250, 335}, - {0x04, 250, 301, 275, 335}, - {0x14, 274, 328, 300, 335}, - {0x25, 297, 354, 325, 335}, - {0x35, 321, 380, 350, 335}, - {0x05, 369, 433, 400, 335}, - {0x16, 416, 485, 450, 335}, - {0x26, 464, 538, 500, 335}, - {0x37, 511, 590, 550, 335}, - {0x07, 559, 643, 600, 335}, - {0x18, 606, 695, 650, 335}, - {0x28, 654, 748, 700, 335}, - {0x39, 701, 800, 750, 335}, - {0x09, 749, 853, 800, 335}, - {0x19, 796, 905, 850, 335}, - {0x29, 844, 958, 900, 335}, - {0x3a, 891, 1010, 950, 335}, - {0x0a, 939, 1063, 1000, 335}, - {0x1a, 986, 1115, 1050, 
335}, - {0x2a, 1034, 1168, 1100, 335}, - {0x3b, 1081, 1220, 1150, 335}, - {0x0b, 1129, 1273, 1200, 335}, - {0x1b, 1176, 1325, 1250, 335}, - {0x2b, 1224, 1378, 1300, 335}, - {0x3c, 1271, 1430, 1350, 335}, - {0x0c, 1319, 1483, 1400, 335}, - {0x1c, 1366, 1535, 1450, 335}, - {0x2c, 1414, 1588, 1500, 335}, - {0x3d, 1461, 1640, 1550, 208}, - {0x0d, 1509, 1693, 1600, 214}, - {0x1d, 1556, 1745, 1650, 221}, - {0x2e, 1604, 1798, 1700, 228}, - {0x3e, 1651, 1850, 1750, 234}, - {0x0e, 1699, 1903, 1800, 241}, - {0x1e, 1746, 1955, 1850, 248}, - {0x2f, 1794, 2008, 1900, 255}, - {0x3f, 1841, 2060, 1950, 261}, - {0x0f, 1889, 2113, 2000, 268}, - {0x40, 1936, 2165, 2050, 275}, - {0x41, 1984, 2218, 2100, 281}, - {0x42, 2031, 2270, 2150, 288}, - {0x43, 2079, 2323, 2200, 294}, - {0x44, 2126, 2375, 2250, 302}, - {0x45, 2174, 2428, 2300, 308}, - {0x46, 2221, 2480, 2350, 315}, - {0x47, 2269, 2500, 2400, 321}, - {0x48, 2316, 2500, 2450, 328}, - {0x49, 2364, 2500, 2500, 335} -}; - -static u16 get_hsfreq_by_mbps(u32 mbps) -{ - unsigned int i = DPHY_FREQ_RANGE_NUM; - - while (i--) { - if (freqranges[i].default_mbps == mbps || - (mbps >= freqranges[i].min && mbps <= freqranges[i].max)) - return i; - } - - return DPHY_FREQ_RANGE_INVALID_INDEX; -} - -static int ipu6_isys_dwc_phy_config(struct ipu6_isys *isys, - u32 phy_id, u32 mbps) -{ - struct ipu6_bus_device *adev = isys->adev; - struct device *dev = &adev->auxdev.dev; - struct ipu6_device *isp = adev->isp; - u32 cfg_clk_freqrange; - u32 osc_freq_target; - u32 index; - - dev_dbg(dev, "config Dphy %u with %u mbps", phy_id, mbps); - - index = get_hsfreq_by_mbps(mbps); - if (index == DPHY_FREQ_RANGE_INVALID_INDEX) { - dev_err(dev, "link freq not found for mbps %u", mbps); - return -EINVAL; - } - - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_HSFREQRANGE, - freqranges[index].hsfreq, 0, 7); - - /* Force termination Calibration */ - if (isys->phy_termcal_val) { - dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 1); - 
dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2); - dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, - isys->phy_termcal_val, 4, 4); - } - - /* - * Enable override to configure the DDL target oscillation - * frequency on bit 0 of register 0xe4 - */ - dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1); - /* - * configure registers 0xe2, 0xe3 with the - * appropriate DDL target oscillation frequency - * 0x1cc(460) - */ - osc_freq_target = freqranges[index].osc_freq_target; - dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2, - osc_freq_target & 0xff, 0, 8); - dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3, - (osc_freq_target >> 8) & 0xf, 0, 4); - - if (mbps < 1500) { - /* deskew_polarity_rw, for < 1.5Gbps */ - dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1); - } - - /* - * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4] - * (38.4 - 17) * 4 = ~85 (0x55) - */ - cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10; - dev_dbg(dev, "ref_clk = %u clk_freqrange = %u", - isp->buttress.ref_clk, cfg_clk_freqrange); - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_CFGCLKFREQRANGE, - cfg_clk_freqrange, 0, 8); - - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1); - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1); - - return 0; -} - -static void ipu6_isys_dwc_phy_aggr_setup(struct ipu6_isys *isys, u32 master, - u32 slave, u32 mbps) -{ - /* Config mastermacro */ - dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1); - - /* Config master PHY clk lane to drive long channel clk */ - dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1); - - /* Config both PHYs data lanes to get clk from long channel */ - dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1); - dwc_dphy_ifc_write_mask(isys, master, 0x708, 0x1, 5, 1); - dwc_dphy_ifc_write_mask(isys, 
slave, 0x708, 0x1, 5, 1); - - /* Config slave PHY clk lane to bypass long channel clk to DDR clk */ - dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1); - - /* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */ - dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2); - - /* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */ - dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1); - - /* Turn off slave PHY LP-RX clk lane */ - dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1); - dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5); -} - -#define PHY_E 4 -static int ipu6_isys_dwc_phy_powerup_ack(struct ipu6_isys *isys, u32 phy_id) -{ - struct device *dev = &isys->adev->auxdev.dev; - u32 rescal_done; - int ret; - - ret = dwc_dphy_pwr_up(isys, phy_id); - if (ret != 0) { - dev_err(dev, "Dphy %u power up failed(%d)", phy_id, ret); - return ret; - } - - /* reset forcerxmode */ - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 4, 1); - dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 8, 1); - - dev_dbg(dev, "Dphy %u is ready!", phy_id); - - if (phy_id != PHY_E || isys->phy_termcal_val) - return 0; - - usleep_range(100, 200); - rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1); - if (rescal_done) { - isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id, - 0x220, 2, 4); - dev_dbg(dev, "termcal done with value = %u", - isys->phy_termcal_val); - } - - return 0; -} - -static void ipu6_isys_dwc_phy_reset(struct ipu6_isys *isys, u32 phy_id) -{ - dev_dbg(&isys->adev->auxdev.dev, "Reset phy %u", phy_id); - - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 0); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 0); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE, - TEST_IFC_ACCESS_MODE_FSM); - dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ, - 
TEST_IFC_REQ_RESET); -} - -int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 phy_id, primary, secondary; - u32 nlanes, port, mbps; - s64 link_freq; - int ret; - - port = cfg->port; - - if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { - dev_warn(dev, "invalid port ID %d\n", port); - return -EINVAL; - } - - nlanes = cfg->nlanes; - /* only port 0, 2 and 4 support 4 lanes */ - if (nlanes == 4 && port % 2) { - dev_err(dev, "invalid csi-port %u with %u lanes\n", port, - nlanes); - return -EINVAL; - } - - link_freq = ipu6_isys_csi2_get_link_freq(&isys->csi2[port]); - if (link_freq < 0) { - dev_err(dev, "get link freq failed(%lld).\n", link_freq); - return link_freq; - } - - mbps = div_u64(link_freq, 500000); - - phy_id = port; - primary = port & ~1; - secondary = primary + 1; - if (on) { - if (nlanes == 4) { - dev_dbg(dev, "config phy %u and %u in aggr mode\n", - primary, secondary); - - ipu6_isys_dwc_phy_reset(isys, primary); - ipu6_isys_dwc_phy_reset(isys, secondary); - ipu6_isys_dwc_phy_aggr_setup(isys, primary, - secondary, mbps); - - ret = ipu6_isys_dwc_phy_config(isys, primary, mbps); - if (ret) - return ret; - ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps); - if (ret) - return ret; - - ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary); - if (ret) - return ret; - - ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary); - return ret; - } - - dev_dbg(dev, "config phy %u with %u lanes in non-aggr mode\n", - phy_id, nlanes); - - ipu6_isys_dwc_phy_reset(isys, phy_id); - ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps); - if (ret) - return ret; - - ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id); - return ret; - } - - if (nlanes == 4) { - dev_dbg(dev, "Power down phy %u and phy %u for port %u\n", - primary, secondary, port); - 
ipu6_isys_dwc_phy_reset(isys, secondary); - ipu6_isys_dwc_phy_reset(isys, primary); - - return 0; - } - - dev_dbg(dev, "Powerdown phy %u with %u lanes\n", phy_id, nlanes); - - ipu6_isys_dwc_phy_reset(isys, phy_id); - - return 0; -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c deleted file mode 100644 index c804291..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c +++ /dev/null @@ -1,242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-platform-isys-csi2-reg.h" - -/* only use BB0, BB2, BB4, and BB6 on PHY0 */ -#define IPU6SE_ISYS_PHY_BB_NUM 4 -#define IPU6SE_ISYS_PHY_0_BASE 0x10000 - -#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) -#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) -#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) -#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) - -/* - * use port_cfg to configure that which data lanes used - * +---------+ +------+ +-----+ - * | port0 x4<-----| | | | - * | | | port | | | - * | port1 x2<-----| | | | - * | | | <-| PHY | - * | port2 x4<-----| | | | - * | | |config| | | - * | port3 x2<-----| | | | - * +---------+ +------+ +-----+ - */ -static const unsigned int csi2_port_cfg[][3] = { - {0, 0, 0x1f}, /* no link */ - {4, 0, 0x10}, /* x4 + x4 config */ - {2, 0, 0x12}, /* x2 + x2 config */ - {1, 0, 0x13}, /* x1 + x1 config */ - {2, 1, 0x15}, /* x2x1 + x2x1 config */ - {1, 1, 0x16}, /* x1x1 + x1x1 config */ - {2, 2, 0x18}, /* x2x2 + x2x2 config */ - {1, 2, 0x19} /* x1x2 + x1x2 config */ -}; - -/* port, nlanes, bbindex, portcfg */ -static const unsigned int phy_port_cfg[][4] = { - /* sip0 */ - {0, 1, 0, 0x15}, - {0, 2, 0, 0x15}, - {0, 4, 0, 0x15}, - {0, 4, 2, 0x22}, - /* sip1 */ - {2, 1, 4, 0x15}, - {2, 2, 4, 0x15}, - {2, 4, 4, 
0x15}, - {2, 4, 6, 0x22} -}; - -static void ipu6_isys_csi2_phy_config_by_port(struct ipu6_isys *isys, - unsigned int port, - unsigned int nlanes) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *base = isys->adev->isp->base; - unsigned int bbnum; - u32 val, reg, i; - - dev_dbg(dev, "port %u with %u lanes", port, nlanes); - - /* only support <1.5Gbps */ - for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) { - /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); - val = readl(base + reg); - val |= FIELD_PREP(GENMASK(6, 1), 13); - writel(val, base + reg); - - /* cphy_rx_control1.en_crc1 = 1 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i); - val = readl(base + reg); - val |= BIT(31); - writel(val, base + reg); - - /* dphy_cfg.reserved = 1, .lden_from_dll_ovrd_0 = 1 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i); - val = readl(base + reg); - val |= BIT(25) | BIT(26); - writel(val, base + reg); - - /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */ - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); - val = readl(base + reg); - val |= BIT(0); - writel(val, base + reg); - } - - /* Front end config, use minimal channel loss */ - for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) { - if (phy_port_cfg[i][0] == port && - phy_port_cfg[i][1] == nlanes) { - bbnum = phy_port_cfg[i][2] / 2; - reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum); - val = readl(base + reg); - val |= phy_port_cfg[i][3]; - writel(val, base + reg); - } - } -} - -static void ipu6_isys_csi2_rx_control(struct ipu6_isys *isys) -{ - void __iomem *base = isys->adev->isp->base; - u32 val, reg; - - reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL); - - reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL); - - reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL; - 
val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL); - - reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL; - val = readl(base + reg); - val |= BIT(0); - writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL); -} - -static int ipu6_isys_csi2_set_port_cfg(struct ipu6_isys *isys, - unsigned int port, unsigned int nlanes) -{ - struct device *dev = &isys->adev->auxdev.dev; - unsigned int sip = port / 2; - unsigned int index; - - switch (nlanes) { - case 1: - index = 5; - break; - case 2: - index = 6; - break; - case 4: - index = 1; - break; - default: - dev_err(dev, "lanes nr %u is unsupported\n", nlanes); - return -EINVAL; - } - - dev_dbg(dev, "port config for port %u with %u lanes\n", port, nlanes); - - writel(csi2_port_cfg[index][2], - isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)); - - return 0; -} - -static void -ipu6_isys_csi2_set_timing(struct ipu6_isys *isys, - const struct ipu6_isys_csi2_timing *timing, - unsigned int port, unsigned int nlanes) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *reg; - u32 port_base; - u32 i; - - port_base = (port % 2) ? 
CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) : - CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port); - - dev_dbg(dev, "set timing for port %u with %u lanes\n", port, nlanes); - - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE; - - writel(timing->ctermen, reg); - - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE; - writel(timing->csettle, reg); - - for (i = 0; i < nlanes; i++) { - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i); - writel(timing->dtermen, reg); - - reg = isys->pdata->base + port_base; - reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i); - writel(timing->dsettle, reg); - } -} - -#define DPHY_TIMER_INCR 0x28 -int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - int ret = 0; - u32 nlanes; - u32 port; - - if (!on) - return 0; - - port = cfg->port; - nlanes = cfg->nlanes; - - if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { - dev_warn(dev, "invalid port ID %d\n", port); - return -EINVAL; - } - - ipu6_isys_csi2_phy_config_by_port(isys, port, nlanes); - - writel(DPHY_TIMER_INCR, - isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR); - - /* set port cfg and rx timing */ - ipu6_isys_csi2_set_timing(isys, timing, port, nlanes); - - ret = ipu6_isys_csi2_set_port_cfg(isys, port, nlanes); - if (ret) - return ret; - - ipu6_isys_csi2_rx_control(isys); - - return 0; -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c deleted file mode 100644 index 71aa500..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c +++ /dev/null @@ -1,720 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include 
-#include -#include -#include -#include - -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-platform-isys-csi2-reg.h" - -#define CSI_REG_HUB_GPREG_PHY_CTL(id) (CSI_REG_BASE + 0x18008 + (id) * 0x8) -#define CSI_REG_HUB_GPREG_PHY_CTL_RESET BIT(4) -#define CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN BIT(0) -#define CSI_REG_HUB_GPREG_PHY_STATUS(id) (CSI_REG_BASE + 0x1800c + (id) * 0x8) -#define CSI_REG_HUB_GPREG_PHY_POWER_ACK BIT(0) -#define CSI_REG_HUB_GPREG_PHY_READY BIT(4) - -#define MCD_PHY_POWER_STATUS_TIMEOUT (200 * USEC_PER_MSEC) - -/* - * bridge to phy in buttress reg map, each phy has 16 kbytes - * only 2 phys for TGL U and Y - */ -#define IPU6_ISYS_MCD_PHY_BASE(i) (0x10000 + (i) * 0x4000) - -/* - * There are 2 MCD DPHY instances on TGL and 1 MCD DPHY instance on ADL. - * Each MCD PHY has 12-lanes which has 8 data lanes and 4 clock lanes. - * CSI port 1, 3 (5, 7) can support max 2 data lanes. - * CSI port 0, 2 (4, 6) can support max 4 data lanes. - * PHY configurations are PPI based instead of port. 
- * Left: - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | PPI | PPI5 | PPI4 | PPI3 | PPI2 | PPI1 | PPI0 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x4 | unused | D3 | D2 | C0 | D0 | D1 | - * |---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x2 | C1 | D0 | D1 | C0 | D0 | D1 | - * ----------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x1 | C1 | D0 | unused | C0 | D0 | D1 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x1 | C1 | D0 | unused | C0 | D0 | unused | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x2 | C1 | D0 | D1 | C0 | D0 | unused | - * +---------+---------+---------+---------+--------+---------+----------+ - * - * Right: - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | PPI | PPI6 | PPI7 | PPI8 | PPI9 | PPI10 | PPI11 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x4 | D1 | D0 | C2 | D2 | D3 | unused | - * |---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x2 | D1 | D0 | C2 | D1 | D0 | C3 | - * ----------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x2x1 | D1 | D0 | C2 | unused | D0 | C3 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x1 | unused | D0 | C2 | unused | D0 | C3 | - * +---------+---------+---------+---------+--------+---------+----------+ - * | | | | | | | | - * | x1x2 | unused | D0 | C2 | D1 | D0 | C3 | - * +---------+---------+---------+---------+--------+---------+----------+ - * - * ppi mapping per phy : - * - * x4 + x4: - * Left : port0 - PPI range {0, 1, 2, 3, 4} - 
* Right: port2 - PPI range {6, 7, 8, 9, 10} - * - * x4 + x2x2: - * Left: port0 - PPI range {0, 1, 2, 3, 4} - * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} - * - * x2x2 + x4: - * Left: port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} - * Right: port2 - PPI range {6, 7, 8, 9, 10} - * - * x2x2 + x2x2: - * Left : port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5} - * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11} - */ - -struct phy_reg { - u32 reg; - u32 val; -}; - -static const struct phy_reg common_init_regs[] = { - /* for TGL-U, use 0x80000000 */ - {0x00000040, 0x80000000}, - {0x00000044, 0x00a80880}, - {0x00000044, 0x00b80880}, - {0x00000010, 0x0000078c}, - {0x00000344, 0x2f4401e2}, - {0x00000544, 0x924401e2}, - {0x00000744, 0x594401e2}, - {0x00000944, 0x624401e2}, - {0x00000b44, 0xfc4401e2}, - {0x00000d44, 0xc54401e2}, - {0x00000f44, 0x034401e2}, - {0x00001144, 0x8f4401e2}, - {0x00001344, 0x754401e2}, - {0x00001544, 0xe94401e2}, - {0x00001744, 0xcb4401e2}, - {0x00001944, 0xfa4401e2} -}; - -static const struct phy_reg x1_port0_config_regs[] = { - {0x00000694, 0xc80060fa}, - {0x00000680, 0x3d4f78ea}, - {0x00000690, 0x10a0140b}, - {0x000006a8, 0xdf04010a}, - {0x00000700, 0x57050060}, - {0x00000710, 0x0030001c}, - {0x00000738, 0x5f004444}, - {0x0000073c, 0x78464204}, - {0x00000748, 0x7821f940}, - {0x0000074c, 0xb2000433}, - {0x00000494, 0xfe6030fa}, - {0x00000480, 0x29ef5ed0}, - {0x00000490, 0x10a0540b}, - {0x000004a8, 0x7a01010a}, - {0x00000500, 0xef053460}, - {0x00000510, 0xe030101c}, - {0x00000538, 0xdf808444}, - {0x0000053c, 0xc8422204}, - {0x00000540, 0x0180088c}, - {0x00000574, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x1_port1_config_regs[] = { - {0x00000c94, 0xc80060fa}, - {0x00000c80, 0xcf47abea}, - {0x00000c90, 0x10a0840b}, - {0x00000ca8, 0xdf04010a}, - {0x00000d00, 0x57050060}, - {0x00000d10, 0x0030001c}, - {0x00000d38, 0x5f004444}, - {0x00000d3c, 0x78464204}, - 
{0x00000d48, 0x7821f940}, - {0x00000d4c, 0xb2000433}, - {0x00000a94, 0xc91030fa}, - {0x00000a80, 0x5a166ed0}, - {0x00000a90, 0x10a0540b}, - {0x00000aa8, 0x5d060100}, - {0x00000b00, 0xef053460}, - {0x00000b10, 0xa030101c}, - {0x00000b38, 0xdf808444}, - {0x00000b3c, 0xc8422204}, - {0x00000b40, 0x0180088c}, - {0x00000b74, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x1_port2_config_regs[] = { - {0x00001294, 0x28f000fa}, - {0x00001280, 0x08130cea}, - {0x00001290, 0x10a0140b}, - {0x000012a8, 0xd704010a}, - {0x00001300, 0x8d050060}, - {0x00001310, 0x0030001c}, - {0x00001338, 0xdf008444}, - {0x0000133c, 0x78422204}, - {0x00001348, 0x7821f940}, - {0x0000134c, 0x5a000433}, - {0x00001094, 0x2d20b0fa}, - {0x00001080, 0xade75dd0}, - {0x00001090, 0x10a0540b}, - {0x000010a8, 0xb101010a}, - {0x00001100, 0x33053460}, - {0x00001110, 0x0030101c}, - {0x00001138, 0xdf808444}, - {0x0000113c, 0xc8422204}, - {0x00001140, 0x8180088c}, - {0x00001174, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x1_port3_config_regs[] = { - {0x00001894, 0xc80060fa}, - {0x00001880, 0x0f90fd6a}, - {0x00001890, 0x10a0840b}, - {0x000018a8, 0xdf04010a}, - {0x00001900, 0x57050060}, - {0x00001910, 0x0030001c}, - {0x00001938, 0x5f004444}, - {0x0000193c, 0x78464204}, - {0x00001948, 0x7821f940}, - {0x0000194c, 0xb2000433}, - {0x00001694, 0x3050d0fa}, - {0x00001680, 0x0ef6d050}, - {0x00001690, 0x10a0540b}, - {0x000016a8, 0xe301010a}, - {0x00001700, 0x69053460}, - {0x00001710, 0xa030101c}, - {0x00001738, 0xdf808444}, - {0x0000173c, 0xc8422204}, - {0x00001740, 0x0180088c}, - {0x00001774, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port0_config_regs[] = { - {0x00000694, 0xc80060fa}, - {0x00000680, 0x3d4f78ea}, - {0x00000690, 0x10a0140b}, - {0x000006a8, 0xdf04010a}, - {0x00000700, 0x57050060}, - {0x00000710, 0x0030001c}, - {0x00000738, 0x5f004444}, - {0x0000073c, 0x78464204}, - {0x00000748, 0x7821f940}, - {0x0000074c, 
0xb2000433}, - {0x00000494, 0xc80060fa}, - {0x00000480, 0x29ef5ed8}, - {0x00000490, 0x10a0540b}, - {0x000004a8, 0x7a01010a}, - {0x00000500, 0xef053460}, - {0x00000510, 0xe030101c}, - {0x00000538, 0xdf808444}, - {0x0000053c, 0xc8422204}, - {0x00000540, 0x0180088c}, - {0x00000574, 0x00000000}, - {0x00000294, 0xc80060fa}, - {0x00000280, 0xcb45b950}, - {0x00000290, 0x10a0540b}, - {0x000002a8, 0x8c01010a}, - {0x00000300, 0xef053460}, - {0x00000310, 0x8030101c}, - {0x00000338, 0x41808444}, - {0x0000033c, 0x32422204}, - {0x00000340, 0x0180088c}, - {0x00000374, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port1_config_regs[] = { - {0x00000c94, 0xc80060fa}, - {0x00000c80, 0xcf47abea}, - {0x00000c90, 0x10a0840b}, - {0x00000ca8, 0xdf04010a}, - {0x00000d00, 0x57050060}, - {0x00000d10, 0x0030001c}, - {0x00000d38, 0x5f004444}, - {0x00000d3c, 0x78464204}, - {0x00000d48, 0x7821f940}, - {0x00000d4c, 0xb2000433}, - {0x00000a94, 0xc80060fa}, - {0x00000a80, 0x5a166ed8}, - {0x00000a90, 0x10a0540b}, - {0x00000aa8, 0x7a01010a}, - {0x00000b00, 0xef053460}, - {0x00000b10, 0xa030101c}, - {0x00000b38, 0xdf808444}, - {0x00000b3c, 0xc8422204}, - {0x00000b40, 0x0180088c}, - {0x00000b74, 0x00000000}, - {0x00000894, 0xc80060fa}, - {0x00000880, 0x4d4f21d0}, - {0x00000890, 0x10a0540b}, - {0x000008a8, 0x5601010a}, - {0x00000900, 0xef053460}, - {0x00000910, 0x8030101c}, - {0x00000938, 0xdf808444}, - {0x0000093c, 0xc8422204}, - {0x00000940, 0x0180088c}, - {0x00000974, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port2_config_regs[] = { - {0x00001294, 0xc80060fa}, - {0x00001280, 0x08130cea}, - {0x00001290, 0x10a0140b}, - {0x000012a8, 0xd704010a}, - {0x00001300, 0x8d050060}, - {0x00001310, 0x0030001c}, - {0x00001338, 0xdf008444}, - {0x0000133c, 0x78422204}, - {0x00001348, 0x7821f940}, - {0x0000134c, 0x5a000433}, - {0x00001094, 0xc80060fa}, - {0x00001080, 0xade75dd8}, - {0x00001090, 0x10a0540b}, - {0x000010a8, 0xb101010a}, - {0x00001100, 
0x33053460}, - {0x00001110, 0x0030101c}, - {0x00001138, 0xdf808444}, - {0x0000113c, 0xc8422204}, - {0x00001140, 0x8180088c}, - {0x00001174, 0x00000000}, - {0x00000e94, 0xc80060fa}, - {0x00000e80, 0x0fbf16d0}, - {0x00000e90, 0x10a0540b}, - {0x00000ea8, 0x7a01010a}, - {0x00000f00, 0xf5053460}, - {0x00000f10, 0xc030101c}, - {0x00000f38, 0xdf808444}, - {0x00000f3c, 0xc8422204}, - {0x00000f40, 0x8180088c}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x2_port3_config_regs[] = { - {0x00001894, 0xc80060fa}, - {0x00001880, 0x0f90fd6a}, - {0x00001890, 0x10a0840b}, - {0x000018a8, 0xdf04010a}, - {0x00001900, 0x57050060}, - {0x00001910, 0x0030001c}, - {0x00001938, 0x5f004444}, - {0x0000193c, 0x78464204}, - {0x00001948, 0x7821f940}, - {0x0000194c, 0xb2000433}, - {0x00001694, 0xc80060fa}, - {0x00001680, 0x0ef6d058}, - {0x00001690, 0x10a0540b}, - {0x000016a8, 0x7a01010a}, - {0x00001700, 0x69053460}, - {0x00001710, 0xa030101c}, - {0x00001738, 0xdf808444}, - {0x0000173c, 0xc8422204}, - {0x00001740, 0x0180088c}, - {0x00001774, 0x00000000}, - {0x00001494, 0xc80060fa}, - {0x00001480, 0xf9d34bd0}, - {0x00001490, 0x10a0540b}, - {0x000014a8, 0x7a01010a}, - {0x00001500, 0x1b053460}, - {0x00001510, 0x0030101c}, - {0x00001538, 0xdf808444}, - {0x0000153c, 0xc8422204}, - {0x00001540, 0x8180088c}, - {0x00001574, 0x00000000}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port0_config_regs[] = { - {0x00000694, 0xc80060fa}, - {0x00000680, 0x3d4f78fa}, - {0x00000690, 0x10a0140b}, - {0x000006a8, 0xdf04010a}, - {0x00000700, 0x57050060}, - {0x00000710, 0x0030001c}, - {0x00000738, 0x5f004444}, - {0x0000073c, 0x78464204}, - {0x00000748, 0x7821f940}, - {0x0000074c, 0xb2000433}, - {0x00000494, 0xfe6030fa}, - {0x00000480, 0x29ef5ed8}, - {0x00000490, 0x10a0540b}, - {0x000004a8, 0x7a01010a}, - {0x00000500, 0xef053460}, - {0x00000510, 0xe030101c}, - {0x00000538, 0xdf808444}, - {0x0000053c, 0xc8422204}, - {0x00000540, 0x0180088c}, - {0x00000574, 0x00000004}, - {0x00000294, 
0x23e030fa}, - {0x00000280, 0xcb45b950}, - {0x00000290, 0x10a0540b}, - {0x000002a8, 0x8c01010a}, - {0x00000300, 0xef053460}, - {0x00000310, 0x8030101c}, - {0x00000338, 0x41808444}, - {0x0000033c, 0x32422204}, - {0x00000340, 0x0180088c}, - {0x00000374, 0x00000004}, - {0x00000894, 0x5620b0fa}, - {0x00000880, 0x4d4f21dc}, - {0x00000890, 0x10a0540b}, - {0x000008a8, 0x5601010a}, - {0x00000900, 0xef053460}, - {0x00000910, 0x8030101c}, - {0x00000938, 0xdf808444}, - {0x0000093c, 0xc8422204}, - {0x00000940, 0x0180088c}, - {0x00000974, 0x00000004}, - {0x00000a94, 0xc91030fa}, - {0x00000a80, 0x5a166ecc}, - {0x00000a90, 0x10a0540b}, - {0x00000aa8, 0x5d01010a}, - {0x00000b00, 0xef053460}, - {0x00000b10, 0xa030101c}, - {0x00000b38, 0xdf808444}, - {0x00000b3c, 0xc8422204}, - {0x00000b40, 0x0180088c}, - {0x00000b74, 0x00000004}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port1_config_regs[] = { - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port2_config_regs[] = { - {0x00001294, 0x28f000fa}, - {0x00001280, 0x08130cfa}, - {0x00001290, 0x10c0140b}, - {0x000012a8, 0xd704010a}, - {0x00001300, 0x8d050060}, - {0x00001310, 0x0030001c}, - {0x00001338, 0xdf008444}, - {0x0000133c, 0x78422204}, - {0x00001348, 0x7821f940}, - {0x0000134c, 0x5a000433}, - {0x00001094, 0x2d20b0fa}, - {0x00001080, 0xade75dd8}, - {0x00001090, 0x10a0540b}, - {0x000010a8, 0xb101010a}, - {0x00001100, 0x33053460}, - {0x00001110, 0x0030101c}, - {0x00001138, 0xdf808444}, - {0x0000113c, 0xc8422204}, - {0x00001140, 0x8180088c}, - {0x00001174, 0x00000004}, - {0x00000e94, 0xd308d0fa}, - {0x00000e80, 0x0fbf16d0}, - {0x00000e90, 0x10a0540b}, - {0x00000ea8, 0x2c01010a}, - {0x00000f00, 0xf5053460}, - {0x00000f10, 0xc030101c}, - {0x00000f38, 0xdf808444}, - {0x00000f3c, 0xc8422204}, - {0x00000f40, 0x8180088c}, - {0x00000f74, 0x00000004}, - {0x00001494, 0x136850fa}, - {0x00001480, 0xf9d34bdc}, - {0x00001490, 0x10a0540b}, - {0x000014a8, 0x5a01010a}, - {0x00001500, 0x1b053460}, - {0x00001510, 
0x0030101c}, - {0x00001538, 0xdf808444}, - {0x0000153c, 0xc8422204}, - {0x00001540, 0x8180088c}, - {0x00001574, 0x00000004}, - {0x00001694, 0x3050d0fa}, - {0x00001680, 0x0ef6d04c}, - {0x00001690, 0x10a0540b}, - {0x000016a8, 0xe301010a}, - {0x00001700, 0x69053460}, - {0x00001710, 0xa030101c}, - {0x00001738, 0xdf808444}, - {0x0000173c, 0xc8422204}, - {0x00001740, 0x0180088c}, - {0x00001774, 0x00000004}, - {0x00000000, 0x00000000} -}; - -static const struct phy_reg x4_port3_config_regs[] = { - {0x00000000, 0x00000000} -}; - -static const struct phy_reg *x1_config_regs[4] = { - x1_port0_config_regs, - x1_port1_config_regs, - x1_port2_config_regs, - x1_port3_config_regs -}; - -static const struct phy_reg *x2_config_regs[4] = { - x2_port0_config_regs, - x2_port1_config_regs, - x2_port2_config_regs, - x2_port3_config_regs -}; - -static const struct phy_reg *x4_config_regs[4] = { - x4_port0_config_regs, - x4_port1_config_regs, - x4_port2_config_regs, - x4_port3_config_regs -}; - -static const struct phy_reg **config_regs[3] = { - x1_config_regs, - x2_config_regs, - x4_config_regs -}; - -static int ipu6_isys_mcd_phy_powerup_ack(struct ipu6_isys *isys, u8 id) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 val; - int ret; - - val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - val |= CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN; - writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - - ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), - val, val & CSI_REG_HUB_GPREG_PHY_POWER_ACK, - 200, MCD_PHY_POWER_STATUS_TIMEOUT); - if (ret) - dev_err(dev, "PHY%d powerup ack timeout", id); - - return ret; -} - -static int ipu6_isys_mcd_phy_powerdown_ack(struct ipu6_isys *isys, u8 id) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 val; - int ret; - - writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - ret = readl_poll_timeout(isys_base + 
CSI_REG_HUB_GPREG_PHY_STATUS(id), - val, !(val & CSI_REG_HUB_GPREG_PHY_POWER_ACK), - 200, MCD_PHY_POWER_STATUS_TIMEOUT); - if (ret) - dev_err(dev, "PHY%d powerdown ack timeout", id); - - return ret; -} - -static void ipu6_isys_mcd_phy_reset(struct ipu6_isys *isys, u8 id, bool assert) -{ - void __iomem *isys_base = isys->pdata->base; - u32 val; - - val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); - if (assert) - val |= CSI_REG_HUB_GPREG_PHY_CTL_RESET; - else - val &= ~(CSI_REG_HUB_GPREG_PHY_CTL_RESET); - - writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id)); -} - -static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u32 val; - int ret; - - ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id), - val, val & CSI_REG_HUB_GPREG_PHY_READY, - 200, MCD_PHY_POWER_STATUS_TIMEOUT); - if (ret) - dev_err(dev, "PHY%d ready ack timeout", id); - - return ret; -} - -static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) -{ - struct ipu6_bus_device *adev = isys->adev; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct v4l2_async_connection *asc; - void __iomem *phy_base; - unsigned int i; - u8 phy_id; - - list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { - s_asd = container_of(asc, struct sensor_async_sd, asc); - phy_id = s_asd->csi2.port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); - - for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) - writel(common_init_regs[i].val, - phy_base + common_init_regs[i].reg); - } -} - -static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) -{ - int phy_port; - int ret; - - if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1)) - return -EINVAL; - - /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C4 */ - /* normalize driver port number */ - phy_port = cfg->port % 4; - - /* swap 
port number only for A and B */ - if (phy_port == 0) - phy_port = 1; - else if (phy_port == 1) - phy_port = 0; - - ret = phy_port; - - /* check validity per lane configuration */ - if (cfg->nlanes == 4 && !(phy_port == 0 || phy_port == 2)) - ret = -EINVAL; - else if ((cfg->nlanes == 2 || cfg->nlanes == 1) && - !(phy_port >= 0 && phy_port <= 3)) - ret = -EINVAL; - - return ret; -} - -static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) -{ - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_bus_device *adev = isys->adev; - const struct phy_reg **phy_config_regs; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct ipu6_isys_csi2_config cfg; - struct v4l2_async_connection *asc; - int phy_port, phy_id; - unsigned int i; - void __iomem *phy_base; - - list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { - s_asd = container_of(asc, struct sensor_async_sd, asc); - cfg.port = s_asd->csi2.port; - cfg.nlanes = s_asd->csi2.nlanes; - phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); - if (phy_port < 0) { - dev_err(dev, "invalid port %d for lane %d", cfg.port, - cfg.nlanes); - return -ENXIO; - } - - phy_id = cfg.port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); - dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, - cfg.nlanes); - - phy_config_regs = config_regs[cfg.nlanes / 2]; - cfg.port = phy_port; - for (i = 0; phy_config_regs[cfg.port][i].reg; i++) - writel(phy_config_regs[cfg.port][i].val, - phy_base + phy_config_regs[cfg.port][i].reg); - } - - return 0; -} - -#define CSI_MCD_PHY_NUM 2 -static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; - -int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on) -{ - struct device *dev = &isys->adev->auxdev.dev; - void __iomem *isys_base = isys->pdata->base; - u8 port, phy_id; - refcount_t *ref; - int ret; - - port = 
cfg->port; - phy_id = port / 4; - - ref = &phy_power_ref_count[phy_id]; - - dev_dbg(dev, "for phy %d port %d, lanes: %d\n", phy_id, port, - cfg->nlanes); - - if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) { - dev_warn(dev, "invalid port ID %d\n", port); - return -EINVAL; - } - - if (on) { - if (refcount_read(ref)) { - dev_dbg(dev, "for phy %d is already UP", phy_id); - refcount_inc(ref); - return 0; - } - - ret = ipu6_isys_mcd_phy_powerup_ack(isys, phy_id); - if (ret) - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 0); - ipu6_isys_mcd_phy_common_init(isys); - - ret = ipu6_isys_mcd_phy_config(isys); - if (ret) - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 1); - ret = ipu6_isys_mcd_phy_ready(isys, phy_id); - if (ret) - return ret; - - refcount_set(ref, 1); - return 0; - } - - if (!refcount_dec_and_test(ref)) - return 0; - - return ipu6_isys_mcd_phy_powerdown_ack(isys, phy_id); -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c deleted file mode 100644 index aa2cf72..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +++ /dev/null @@ -1,850 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-dma.h" -#include "ipu6-fw-isys.h" -#include "ipu6-isys.h" -#include "ipu6-isys-video.h" - -static int ipu6_isys_buf_init(struct vb2_buffer *vb) -{ - struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); - struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - int ret; - - ret = ipu6_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); - if (ret) - return ret; - - ivb->dma_addr = sg_dma_address(sg->sgl); - - 
return 0; -} - -static void ipu6_isys_buf_cleanup(struct vb2_buffer *vb) -{ - struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); - struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - - ivb->dma_addr = 0; - ipu6_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); -} - -static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, - unsigned int *num_planes, unsigned int sizes[], - struct device *alloc_devs[]) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - u32 size = ipu6_isys_get_data_size(av); - - /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ - if (!*num_planes) { - sizes[0] = size; - } else if (sizes[0] < size) { - dev_dbg(dev, "%s: queue setup: size %u < %u\n", - av->vdev.name, sizes[0], size); - return -EINVAL; - } - - *num_planes = 1; - - return 0; -} - -static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - u32 bytesperline = ipu6_isys_get_bytes_per_line(av); - u32 height = ipu6_isys_get_frame_height(av); - u32 size = ipu6_isys_get_data_size(av); - - dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n", - av->vdev.name, size, vb2_plane_size(vb, 0)); - - if (size > vb2_plane_size(vb, 0)) - return -EINVAL; - - vb2_set_plane_payload(vb, 0, bytesperline * height); - - return 0; -} - -/* - * Queue a buffer list back to incoming or active queues. The buffers - * are removed from the buffer list. 
- */ -void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, - unsigned long op_flags, - enum vb2_buffer_state state) -{ - struct ipu6_isys_buffer *ib, *ib_safe; - unsigned long flags; - bool first = true; - - if (!bl) - return; - - WARN_ON_ONCE(!bl->nbufs); - WARN_ON_ONCE(op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE && - op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING); - - list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { - struct ipu6_isys_video *av; - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - struct ipu6_isys_queue *aq = - vb2_queue_to_isys_queue(vb->vb2_queue); - struct device *dev; - - av = ipu6_isys_queue_to_video(aq); - dev = &av->isys->adev->auxdev.dev; - spin_lock_irqsave(&aq->lock, flags); - list_del(&ib->head); - if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE) - list_add(&ib->head, &aq->active); - else if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING) - list_add_tail(&ib->head, &aq->incoming); - spin_unlock_irqrestore(&aq->lock, flags); - - if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_SET_STATE) - vb2_buffer_done(vb, state); - - if (first) { - dev_dbg(dev, - "queue buf list %p flags %lx, s %d, %d bufs\n", - bl, op_flags, state, bl->nbufs); - first = false; - } - - bl->nbufs--; - } - - WARN_ON(bl->nbufs); -} - -/* - * flush_firmware_streamon_fail() - Flush in cases where requests may - * have been queued to firmware and the *firmware streamon fails for a - * reason or another. 
- */ -static void flush_firmware_streamon_fail(struct ipu6_isys_stream *stream) -{ - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_queue *aq; - unsigned long flags; - - lockdep_assert_held(&stream->mutex); - - list_for_each_entry(aq, &stream->queues, node) { - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_buffer *ib, *ib_safe; - - spin_lock_irqsave(&aq->lock, flags); - list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { - struct vb2_buffer *vb = - ipu6_isys_buffer_to_vb2_buffer(ib); - - list_del(&ib->head); - if (av->streaming) { - dev_dbg(dev, - "%s: queue buffer %u back to incoming\n", - av->vdev.name, vb->index); - /* Queue already streaming, return to driver. */ - list_add(&ib->head, &aq->incoming); - continue; - } - /* Queue not yet streaming, return to user. */ - dev_dbg(dev, "%s: return %u back to videobuf2\n", - av->vdev.name, vb->index); - vb2_buffer_done(ipu6_isys_buffer_to_vb2_buffer(ib), - VB2_BUF_STATE_QUEUED); - } - spin_unlock_irqrestore(&aq->lock, flags); - } -} - -/* - * Attempt obtaining a buffer list from the incoming queues, a list of buffers - * that contains one entry from each video buffer queue. If a buffer can't be - * obtained from every queue, the buffers are returned back to the queue. 
- */ -static int buffer_list_get(struct ipu6_isys_stream *stream, - struct ipu6_isys_buffer_list *bl) -{ - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_queue *aq; - unsigned long flags; - unsigned long buf_flag = IPU6_ISYS_BUFFER_LIST_FL_INCOMING; - - bl->nbufs = 0; - INIT_LIST_HEAD(&bl->head); - - list_for_each_entry(aq, &stream->queues, node) { - struct ipu6_isys_buffer *ib; - - spin_lock_irqsave(&aq->lock, flags); - if (list_empty(&aq->incoming)) { - spin_unlock_irqrestore(&aq->lock, flags); - if (!list_empty(&bl->head)) - ipu6_isys_buffer_list_queue(bl, buf_flag, 0); - return -ENODATA; - } - - ib = list_last_entry(&aq->incoming, - struct ipu6_isys_buffer, head); - - dev_dbg(dev, "buffer: %s: buffer %u\n", - ipu6_isys_queue_to_video(aq)->vdev.name, - ipu6_isys_buffer_to_vb2_buffer(ib)->index); - list_del(&ib->head); - list_add(&ib->head, &bl->head); - spin_unlock_irqrestore(&aq->lock, flags); - - bl->nbufs++; - } - - dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); - - return 0; -} - -static void -ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb, - struct ipu6_fw_isys_frame_buff_set_abi *set) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - - set->output_pins[aq->fw_output].addr = ivb->dma_addr; - set->output_pins[aq->fw_output].out_buf_id = vb->index + 1; -} - -/* - * Convert a buffer list to a isys fw ABI framebuffer set. The - * buffer list is not modified. 
- */ -#define IPU6_ISYS_FRAME_NUM_THRESHOLD (30) -void -ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, - struct ipu6_isys_stream *stream, - struct ipu6_isys_buffer_list *bl) -{ - struct ipu6_isys_buffer *ib; - - WARN_ON(!bl->nbufs); - - set->send_irq_sof = 1; - set->send_resp_sof = 1; - set->send_irq_eof = 0; - set->send_resp_eof = 0; - - if (stream->streaming) - set->send_irq_capture_ack = 0; - else - set->send_irq_capture_ack = 1; - set->send_irq_capture_done = 0; - - set->send_resp_capture_ack = 1; - set->send_resp_capture_done = 1; - if (atomic_read(&stream->sequence) >= IPU6_ISYS_FRAME_NUM_THRESHOLD) { - set->send_resp_capture_ack = 0; - set->send_resp_capture_done = 0; - } - - list_for_each_entry(ib, &bl->head, head) { - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - - ipu6_isys_buf_to_fw_frame_buf_pin(vb, set); - } -} - -/* Start streaming for real. The buffer list must be available. */ -static int ipu6_isys_stream_start(struct ipu6_isys_video *av, - struct ipu6_isys_buffer_list *bl, bool error) -{ - struct ipu6_isys_stream *stream = av->stream; - struct device *dev = &stream->isys->adev->auxdev.dev; - struct ipu6_isys_buffer_list __bl; - int ret; - - mutex_lock(&stream->isys->stream_mutex); - ret = ipu6_isys_video_set_streaming(av, 1, bl); - mutex_unlock(&stream->isys->stream_mutex); - if (ret) - goto out_requeue; - - stream->streaming = 1; - - bl = &__bl; - - do { - struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; - struct isys_fw_msgs *msg; - u16 send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; - - ret = buffer_list_get(stream, bl); - if (ret < 0) - break; - - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) - return -ENOMEM; - - buf = &msg->fw_msg.frame; - ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); - ipu6_fw_isys_dump_frame_buff_set(dev, buf, - stream->nr_output_pins); - ipu6_isys_buffer_list_queue(bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, - 0); - ret = ipu6_fw_isys_complex_cmd(stream->isys, - 
stream->stream_handle, buf, - msg->dma_addr, sizeof(*buf), - send_type); - } while (!WARN_ON(ret)); - - return 0; - -out_requeue: - if (bl && bl->nbufs) - ipu6_isys_buffer_list_queue(bl, - IPU6_ISYS_BUFFER_LIST_FL_INCOMING | - (error ? - IPU6_ISYS_BUFFER_LIST_FL_SET_STATE : - 0), error ? VB2_BUF_STATE_ERROR : - VB2_BUF_STATE_QUEUED); - flush_firmware_streamon_fail(stream); - - return ret; -} - -static void buf_queue(struct vb2_buffer *vb) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_video_buffer *ivb = - vb2_buffer_to_ipu6_isys_video_buffer(vvb); - struct ipu6_isys_buffer *ib = &ivb->ib; - struct device *dev = &av->isys->adev->auxdev.dev; - struct media_pipeline *media_pipe = - media_entity_pipeline(&av->vdev.entity); - struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; - struct ipu6_isys_stream *stream = av->stream; - struct ipu6_isys_buffer_list bl; - struct isys_fw_msgs *msg; - unsigned long flags; - dma_addr_t dma; - int ret; - - dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name); - - dma = ivb->dma_addr; - dev_dbg(dev, "iova: iova %pad\n", &dma); - - spin_lock_irqsave(&aq->lock, flags); - list_add(&ib->head, &aq->incoming); - spin_unlock_irqrestore(&aq->lock, flags); - - if (!media_pipe || !vb->vb2_queue->start_streaming_called) { - dev_dbg(dev, "media pipeline is not ready for %s\n", - av->vdev.name); - return; - } - - mutex_lock(&stream->mutex); - - if (stream->nr_streaming != stream->nr_queues) { - dev_dbg(dev, "not streaming yet, adding to incoming\n"); - goto out; - } - - /* - * We just put one buffer to the incoming list of this queue - * (above). Let's see whether all queues in the pipeline would - * have a buffer. 
- */ - ret = buffer_list_get(stream, &bl); - if (ret < 0) { - dev_dbg(dev, "No buffers available\n"); - goto out; - } - - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) { - ret = -ENOMEM; - goto out; - } - - buf = &msg->fw_msg.frame; - ipu6_isys_buf_to_fw_frame_buf(buf, stream, &bl); - ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); - - if (!stream->streaming) { - ret = ipu6_isys_stream_start(av, &bl, true); - if (ret) - dev_err(dev, "stream start failed.\n"); - goto out; - } - - /* - * We must queue the buffers in the buffer list to the - * appropriate video buffer queues BEFORE passing them to the - * firmware since we could get a buffer event back before we - * have queued them ourselves to the active queue. - */ - ipu6_isys_buffer_list_queue(&bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); - - ret = ipu6_fw_isys_complex_cmd(stream->isys, stream->stream_handle, - buf, msg->dma_addr, sizeof(*buf), - IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); - if (ret < 0) - dev_err(dev, "send stream capture failed\n"); - -out: - mutex_unlock(&stream->mutex); -} - -static int ipu6_isys_link_fmt_validate(struct ipu6_isys_queue *aq) -{ - struct v4l2_mbus_framefmt format; - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - struct media_pad *remote_pad = - media_pad_remote_pad_first(av->vdev.entity.pads); - struct v4l2_subdev *sd; - u32 r_stream, code; - int ret; - - if (!remote_pad) - return -ENOTCONN; - - sd = media_entity_to_v4l2_subdev(remote_pad->entity); - r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, remote_pad->index); - - ret = ipu6_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream, - &format); - - if (ret) { - dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n", - sd->entity.name, remote_pad->index, r_stream); - return ret; - } - - if (format.width != ipu6_isys_get_frame_width(av) || - format.height != ipu6_isys_get_frame_height(av)) { - dev_dbg(dev, "wrong width or height %ux%u 
(%ux%u expected)\n", - ipu6_isys_get_frame_width(av), - ipu6_isys_get_frame_height(av), format.width, - format.height); - return -EINVAL; - } - - code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; - if (format.code != code) { - dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n", - code, format.code); - return -EINVAL; - } - - return 0; -} - -static void return_buffers(struct ipu6_isys_queue *aq, - enum vb2_buffer_state state) -{ - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_buffer *ib; - bool need_reset = false; - unsigned long flags; - - spin_lock_irqsave(&aq->lock, flags); - while (!list_empty(&aq->incoming)) { - struct vb2_buffer *vb; - - ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, - head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); - } - - /* - * Something went wrong (FW crash / HW hang / not all buffers - * returned from isys) if there are still buffers queued in active - * queue. We have to clean up places a bit. 
- */ - while (!list_empty(&aq->active)) { - struct vb2_buffer *vb; - - ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, - head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); - - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); - need_reset = true; - } - - spin_unlock_irqrestore(&aq->lock, flags); - - if (need_reset) { - mutex_lock(&av->isys->mutex); - av->isys->need_reset = true; - mutex_unlock(&av->isys->mutex); - } -} - -static void ipu6_isys_stream_cleanup(struct ipu6_isys_video *av) -{ - video_device_pipeline_stop(&av->vdev); - ipu6_isys_put_stream(av->stream); - av->stream = NULL; -} - -static int start_streaming(struct vb2_queue *q, unsigned int count) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - struct ipu6_isys_buffer_list __bl, *bl = NULL; - struct ipu6_isys_stream *stream; - struct media_entity *source_entity = NULL; - int nr_queues, ret; - - dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n", - av->vdev.name, ipu6_isys_get_frame_width(av), - ipu6_isys_get_frame_height(av), pfmt->css_pixelformat); - - ret = ipu6_isys_setup_video(av, &source_entity, &nr_queues); - if (ret < 0) { - dev_dbg(dev, "failed to setup video\n"); - goto out_return_buffers; - } - - ret = ipu6_isys_link_fmt_validate(aq); - if (ret) { - dev_dbg(dev, - "%s: link format validation failed (%d)\n", - av->vdev.name, ret); - goto out_pipeline_stop; - } - - ret = ipu6_isys_fw_open(av->isys); - if (ret) - goto out_pipeline_stop; - - stream = av->stream; - mutex_lock(&stream->mutex); - if (!stream->nr_streaming) { - ret = ipu6_isys_video_prepare_stream(av, source_entity, - nr_queues); - if (ret) - goto out_fw_close; - } - - stream->nr_streaming++; - 
dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming, - stream->nr_queues); - - list_add(&aq->node, &stream->queues); - ipu6_isys_configure_stream_watermark(av, true); - ipu6_isys_update_stream_watermark(av, true); - - if (stream->nr_streaming != stream->nr_queues) - goto out; - - bl = &__bl; - ret = buffer_list_get(stream, bl); - if (ret < 0) { - dev_warn(dev, "no buffer available, DRIVER BUG?\n"); - goto out; - } - - ret = ipu6_isys_stream_start(av, bl, false); - if (ret) - goto out_stream_start; - -out: - mutex_unlock(&stream->mutex); - - return 0; - -out_stream_start: - ipu6_isys_update_stream_watermark(av, false); - list_del(&aq->node); - stream->nr_streaming--; - -out_fw_close: - mutex_unlock(&stream->mutex); - ipu6_isys_fw_close(av->isys); - -out_pipeline_stop: - ipu6_isys_stream_cleanup(av); - -out_return_buffers: - return_buffers(aq, VB2_BUF_STATE_QUEUED); - - return ret; -} - -static void stop_streaming(struct vb2_queue *q) -{ - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_stream *stream = av->stream; - - mutex_lock(&stream->mutex); - - ipu6_isys_update_stream_watermark(av, false); - - mutex_lock(&av->isys->stream_mutex); - if (stream->nr_streaming == stream->nr_queues && stream->streaming) - ipu6_isys_video_set_streaming(av, 0, NULL); - mutex_unlock(&av->isys->stream_mutex); - - stream->nr_streaming--; - list_del(&aq->node); - stream->streaming = 0; - mutex_unlock(&stream->mutex); - - ipu6_isys_stream_cleanup(av); - - return_buffers(aq, VB2_BUF_STATE_ERROR); - - ipu6_isys_fw_close(av->isys); -} - -static unsigned int -get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream, u64 time) -{ - struct ipu6_isys *isys = stream->isys; - struct device *dev = &isys->adev->auxdev.dev; - unsigned int i; - - /* - * The timestamp is invalid as no TSC in some FPGA platform, - * so get the sequence from pipeline directly in this case. 
- */ - if (time == 0) - return atomic_read(&stream->sequence) - 1; - - for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) - if (time == stream->seq[i].timestamp) { - dev_dbg(dev, "sof: using seq nr %u for ts %llu\n", - stream->seq[i].sequence, time); - return stream->seq[i].sequence; - } - - for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++) - dev_dbg(dev, "sof: sequence %u, timestamp value %llu\n", - stream->seq[i].sequence, stream->seq[i].timestamp); - - return 0; -} - -static u64 get_sof_ns_delta(struct ipu6_isys_video *av, u64 timestamp) -{ - struct ipu6_bus_device *adev = av->isys->adev; - struct ipu6_device *isp = adev->isp; - u64 delta, tsc_now; - - ipu6_buttress_tsc_read(isp, &tsc_now); - if (!tsc_now) - return 0; - - delta = tsc_now - timestamp; - - return ipu6_buttress_tsc_ticks_to_ns(delta, isp); -} - -static void -ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, u64 time) -{ - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct device *dev = &av->isys->adev->auxdev.dev; - struct ipu6_isys_stream *stream = av->stream; - u64 ns; - u32 sequence; - - ns = ktime_get_ns() - get_sof_ns_delta(av, time); - sequence = get_sof_sequence_by_timestamp(stream, time); - - vbuf->vb2_buf.timestamp = ns; - vbuf->sequence = sequence; - - dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", - av->vdev.name, ktime_get_ns(), sequence); - dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index, - vbuf->vb2_buf.timestamp); -} - -static void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) -{ - struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib); - - if (atomic_read(&ib->str2mmio_flag)) { - vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); - /* - * Operation on buffer is ended with error and will be reported - * to the userspace when it is 
de-queued - */ - atomic_set(&ib->str2mmio_flag, 0); - } else { - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - } -} - -static void -ipu6_stream_buf_ready(struct ipu6_isys_stream *stream, u8 pin_id, u32 pin_addr, - u64 time, bool error_check) -{ - struct ipu6_isys_queue *aq = stream->output_pins_queue[pin_id]; - struct ipu6_isys *isys = stream->isys; - struct device *dev = &isys->adev->auxdev.dev; - struct ipu6_isys_buffer *ib; - struct vb2_buffer *vb; - unsigned long flags; - bool first = true; - struct vb2_v4l2_buffer *buf; - - spin_lock_irqsave(&aq->lock, flags); - if (list_empty(&aq->active)) { - spin_unlock_irqrestore(&aq->lock, flags); - dev_err(dev, "active queue empty\n"); - return; - } - - list_for_each_entry_reverse(ib, &aq->active, head) { - struct ipu6_isys_video_buffer *ivb; - struct vb2_v4l2_buffer *vvb; - dma_addr_t addr; - - vb = ipu6_isys_buffer_to_vb2_buffer(ib); - vvb = to_vb2_v4l2_buffer(vb); - ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb); - addr = ivb->dma_addr; - - if (pin_addr != addr) { - if (first) - dev_err(dev, "Unexpected buffer address %pad\n", - &addr); - first = false; - continue; - } - - if (error_check) { - /* - * Check for error message: - * 'IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' - */ - atomic_set(&ib->str2mmio_flag, 1); - } - dev_dbg(dev, "buffer: found buffer %pad\n", &addr); - - buf = to_vb2_v4l2_buffer(vb); - buf->field = V4L2_FIELD_NONE; - - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - ipu6_isys_buf_calc_sequence_time(ib, time); - - ipu6_isys_queue_buf_done(ib); - - return; - } - - dev_err(dev, "Failed to find a matching video buffer\n"); - - spin_unlock_irqrestore(&aq->lock, flags); -} - -void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, - struct ipu6_fw_isys_resp_info_abi *info) -{ - u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0]; - bool err = info->error_info.error == IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO; - - ipu6_stream_buf_ready(stream, info->pin_id, 
info->pin.addr, time, err); -} - -static const struct vb2_ops ipu6_isys_queue_ops = { - .queue_setup = ipu6_isys_queue_setup, - .buf_init = ipu6_isys_buf_init, - .buf_prepare = ipu6_isys_buf_prepare, - .buf_cleanup = ipu6_isys_buf_cleanup, - .start_streaming = start_streaming, - .stop_streaming = stop_streaming, - .buf_queue = buf_queue, -}; - -int ipu6_isys_queue_init(struct ipu6_isys_queue *aq) -{ - struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys; - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_bus_device *adev = isys->adev; - int ret; - - /* no support for userptr */ - if (!aq->vbq.io_modes) - aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF; - - aq->vbq.drv_priv = isys; - aq->vbq.ops = &ipu6_isys_queue_ops; - aq->vbq.lock = &av->mutex; - aq->vbq.mem_ops = &vb2_dma_sg_memops; - aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - aq->vbq.min_queued_buffers = 1; - aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - - ret = vb2_queue_init(&aq->vbq); - if (ret) - return ret; - - aq->vbq.dev = &adev->isp->pdev->dev; - spin_lock_init(&aq->lock); - INIT_LIST_HEAD(&aq->active); - INIT_LIST_HEAD(&aq->incoming); - - return 0; -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h deleted file mode 100644 index 844dfda..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_QUEUE_H -#define IPU6_ISYS_QUEUE_H - -#include -#include -#include -#include -#include - -#include - -#include "ipu6-fw-isys.h" -#include "ipu6-isys-video.h" - -struct ipu6_isys_stream; - -struct ipu6_isys_queue { - struct vb2_queue vbq; - struct list_head node; - spinlock_t lock; /* Protects active and incoming lists */ - struct list_head active; - struct list_head incoming; - unsigned int fw_output; -}; - -struct ipu6_isys_buffer { - 
struct list_head head; - atomic_t str2mmio_flag; -}; - -struct ipu6_isys_video_buffer { - struct vb2_v4l2_buffer vb_v4l2; - struct ipu6_isys_buffer ib; - dma_addr_t dma_addr; -}; - -#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) -#define IPU6_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) -#define IPU6_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) - -struct ipu6_isys_buffer_list { - struct list_head head; - unsigned int nbufs; -}; - -#define vb2_queue_to_isys_queue(__vb2) \ - container_of(__vb2, struct ipu6_isys_queue, vbq) - -#define ipu6_isys_to_isys_video_buffer(__ib) \ - container_of(__ib, struct ipu6_isys_video_buffer, ib) - -#define vb2_buffer_to_ipu6_isys_video_buffer(__vvb) \ - container_of(__vvb, struct ipu6_isys_video_buffer, vb_v4l2) - -#define ipu6_isys_buffer_to_vb2_buffer(__ib) \ - (&ipu6_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) - -void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl, - unsigned long op_flags, - enum vb2_buffer_state state); -void -ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set, - struct ipu6_isys_stream *stream, - struct ipu6_isys_buffer_list *bl); -void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream, - struct ipu6_fw_isys_resp_info_abi *info); -int ipu6_isys_queue_init(struct ipu6_isys_queue *aq); -#endif /* IPU6_ISYS_QUEUE_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c deleted file mode 100644 index 0a06de5..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c +++ /dev/null @@ -1,403 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include - -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-isys.h" -#include "ipu6-isys-subdev.h" - -unsigned int ipu6_isys_mbus_code_to_bpp(u32 code) -{ - switch (code) { - case MEDIA_BUS_FMT_RGB888_1X24: - case MEDIA_BUS_FMT_META_24: - return 24; - case 
MEDIA_BUS_FMT_RGB565_1X16: - case MEDIA_BUS_FMT_UYVY8_1X16: - case MEDIA_BUS_FMT_YUYV8_1X16: - case MEDIA_BUS_FMT_META_16: - return 16; - case MEDIA_BUS_FMT_SBGGR12_1X12: - case MEDIA_BUS_FMT_SGBRG12_1X12: - case MEDIA_BUS_FMT_SGRBG12_1X12: - case MEDIA_BUS_FMT_SRGGB12_1X12: - case MEDIA_BUS_FMT_META_12: - return 12; - case MEDIA_BUS_FMT_SBGGR10_1X10: - case MEDIA_BUS_FMT_SGBRG10_1X10: - case MEDIA_BUS_FMT_SGRBG10_1X10: - case MEDIA_BUS_FMT_SRGGB10_1X10: - case MEDIA_BUS_FMT_META_10: - return 10; - case MEDIA_BUS_FMT_SBGGR8_1X8: - case MEDIA_BUS_FMT_SGBRG8_1X8: - case MEDIA_BUS_FMT_SGRBG8_1X8: - case MEDIA_BUS_FMT_SRGGB8_1X8: - case MEDIA_BUS_FMT_META_8: - return 8; - default: - WARN_ON(1); - return 8; - } -} - -unsigned int ipu6_isys_mbus_code_to_mipi(u32 code) -{ - switch (code) { - case MEDIA_BUS_FMT_RGB565_1X16: - return MIPI_CSI2_DT_RGB565; - case MEDIA_BUS_FMT_RGB888_1X24: - return MIPI_CSI2_DT_RGB888; - case MEDIA_BUS_FMT_UYVY8_1X16: - case MEDIA_BUS_FMT_YUYV8_1X16: - return MIPI_CSI2_DT_YUV422_8B; - case MEDIA_BUS_FMT_SBGGR16_1X16: - case MEDIA_BUS_FMT_SGBRG16_1X16: - case MEDIA_BUS_FMT_SGRBG16_1X16: - case MEDIA_BUS_FMT_SRGGB16_1X16: - return MIPI_CSI2_DT_RAW16; - case MEDIA_BUS_FMT_SBGGR12_1X12: - case MEDIA_BUS_FMT_SGBRG12_1X12: - case MEDIA_BUS_FMT_SGRBG12_1X12: - case MEDIA_BUS_FMT_SRGGB12_1X12: - return MIPI_CSI2_DT_RAW12; - case MEDIA_BUS_FMT_SBGGR10_1X10: - case MEDIA_BUS_FMT_SGBRG10_1X10: - case MEDIA_BUS_FMT_SGRBG10_1X10: - case MEDIA_BUS_FMT_SRGGB10_1X10: - return MIPI_CSI2_DT_RAW10; - case MEDIA_BUS_FMT_SBGGR8_1X8: - case MEDIA_BUS_FMT_SGBRG8_1X8: - case MEDIA_BUS_FMT_SGRBG8_1X8: - case MEDIA_BUS_FMT_SRGGB8_1X8: - return MIPI_CSI2_DT_RAW8; - default: - /* return unavailable MIPI data type - 0x3f */ - WARN_ON(1); - return 0x3f; - } -} - -bool ipu6_isys_is_bayer_format(u32 code) -{ - switch (ipu6_isys_mbus_code_to_mipi(code)) { - case MIPI_CSI2_DT_RAW8: - case MIPI_CSI2_DT_RAW10: - case MIPI_CSI2_DT_RAW12: - case MIPI_CSI2_DT_RAW14: - case 
MIPI_CSI2_DT_RAW16: - case MIPI_CSI2_DT_RAW20: - case MIPI_CSI2_DT_RAW24: - case MIPI_CSI2_DT_RAW28: - return true; - default: - return false; - } -} - -u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y) -{ - static const u32 code_map[] = { - MEDIA_BUS_FMT_SRGGB8_1X8, - MEDIA_BUS_FMT_SGRBG8_1X8, - MEDIA_BUS_FMT_SGBRG8_1X8, - MEDIA_BUS_FMT_SBGGR8_1X8, - MEDIA_BUS_FMT_SRGGB10_1X10, - MEDIA_BUS_FMT_SGRBG10_1X10, - MEDIA_BUS_FMT_SGBRG10_1X10, - MEDIA_BUS_FMT_SBGGR10_1X10, - MEDIA_BUS_FMT_SRGGB12_1X12, - MEDIA_BUS_FMT_SGRBG12_1X12, - MEDIA_BUS_FMT_SGBRG12_1X12, - MEDIA_BUS_FMT_SBGGR12_1X12, - MEDIA_BUS_FMT_SRGGB16_1X16, - MEDIA_BUS_FMT_SGRBG16_1X16, - MEDIA_BUS_FMT_SGBRG16_1X16, - MEDIA_BUS_FMT_SBGGR16_1X16, - }; - u32 i; - - for (i = 0; i < ARRAY_SIZE(code_map); i++) - if (code_map[i] == code) - break; - - if (WARN_ON(i == ARRAY_SIZE(code_map))) - return code; - - return code_map[i ^ (((y & 1) << 1) | (x & 1))]; -} - -int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_format *format) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct v4l2_mbus_framefmt *fmt; - struct v4l2_rect *crop; - u32 code = asd->supported_codes[0]; - u32 other_pad, other_stream; - unsigned int i; - int ret; - - /* No transcoding, source and sink formats must match. */ - if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) && - sd->entity.num_pads > 1) - return v4l2_subdev_get_fmt(sd, state, format); - - format->format.width = clamp(format->format.width, IPU6_ISYS_MIN_WIDTH, - IPU6_ISYS_MAX_WIDTH); - format->format.height = clamp(format->format.height, - IPU6_ISYS_MIN_HEIGHT, - IPU6_ISYS_MAX_HEIGHT); - - for (i = 0; asd->supported_codes[i]; i++) { - if (asd->supported_codes[i] == format->format.code) { - code = asd->supported_codes[i]; - break; - } - } - format->format.code = code; - format->format.field = V4L2_FIELD_NONE; - - /* Store the format and propagate it to the source pad. 
*/ - fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); - if (!fmt) - return -EINVAL; - - *fmt = format->format; - - if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK)) - return 0; - - /* propagate format to following source pad */ - fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad, - format->stream); - if (!fmt) - return -EINVAL; - - *fmt = format->format; - - ret = v4l2_subdev_routing_find_opposite_end(&state->routing, - format->pad, - format->stream, - &other_pad, - &other_stream); - if (ret) - return -EINVAL; - - crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream); - /* reset crop */ - crop->left = 0; - crop->top = 0; - crop->width = fmt->width; - crop->height = fmt->height; - - return 0; -} - -int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_mbus_code_enum *code) -{ - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - const u32 *supported_codes = asd->supported_codes; - u32 index; - - for (index = 0; supported_codes[index]; index++) { - if (index == code->index) { - code->code = supported_codes[index]; - return 0; - } - } - - return -EINVAL; -} - -static int subdev_set_routing(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_krouting *routing) -{ - static const struct v4l2_mbus_framefmt format = { - .width = 4096, - .height = 3072, - .code = MEDIA_BUS_FMT_SGRBG10_1X10, - .field = V4L2_FIELD_NONE, - }; - int ret; - - ret = v4l2_subdev_routing_validate(sd, routing, - V4L2_SUBDEV_ROUTING_ONLY_1_TO_1); - if (ret) - return ret; - - return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format); -} - -int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_mbus_framefmt *format) -{ - struct v4l2_mbus_framefmt *fmt; - struct v4l2_subdev_state *state; - - if (!sd || !format) - return -EINVAL; - - state = v4l2_subdev_lock_and_get_active_state(sd); - fmt = 
v4l2_subdev_state_get_format(state, pad, stream); - if (fmt) - *format = *fmt; - v4l2_subdev_unlock_state(state); - - return fmt ? 0 : -EINVAL; -} - -int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_rect *crop) -{ - struct v4l2_subdev_state *state; - struct v4l2_rect *rect; - - if (!sd || !crop) - return -EINVAL; - - state = v4l2_subdev_lock_and_get_active_state(sd); - rect = v4l2_subdev_state_get_crop(state, pad, stream); - if (rect) - *crop = *rect; - v4l2_subdev_unlock_state(state); - - return rect ? 0 : -EINVAL; -} - -u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad) -{ - struct v4l2_subdev_state *state; - struct v4l2_subdev_route *routes; - unsigned int i; - u32 source_stream = 0; - - state = v4l2_subdev_lock_and_get_active_state(sd); - if (!state) - return 0; - - routes = state->routing.routes; - for (i = 0; i < state->routing.num_routes; i++) { - if (routes[i].source_pad == pad) { - source_stream = routes[i].source_stream; - break; - } - } - - v4l2_subdev_unlock_state(state); - - return source_stream; -} - -static int ipu6_isys_subdev_init_state(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state) -{ - struct v4l2_subdev_route route = { - .sink_pad = 0, - .sink_stream = 0, - .source_pad = 1, - .source_stream = 0, - .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, - }; - struct v4l2_subdev_krouting routing = { - .num_routes = 1, - .routes = &route, - }; - - return subdev_set_routing(sd, state, &routing); -} - -int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - enum v4l2_subdev_format_whence which, - struct v4l2_subdev_krouting *routing) -{ - return subdev_set_routing(sd, state, routing); -} - -static const struct v4l2_subdev_internal_ops ipu6_isys_subdev_internal_ops = { - .init_state = ipu6_isys_subdev_init_state, -}; - -int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, - const struct v4l2_subdev_ops *ops, - unsigned int nr_ctrls, - unsigned int 
num_sink_pads, - unsigned int num_source_pads) -{ - unsigned int num_pads = num_sink_pads + num_source_pads; - unsigned int i; - int ret; - - v4l2_subdev_init(&asd->sd, ops); - - asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | - V4L2_SUBDEV_FL_HAS_EVENTS | - V4L2_SUBDEV_FL_STREAMS; - asd->sd.owner = THIS_MODULE; - asd->sd.dev = &asd->isys->adev->auxdev.dev; - asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; - asd->sd.internal_ops = &ipu6_isys_subdev_internal_ops; - - asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, - sizeof(*asd->pad), GFP_KERNEL); - if (!asd->pad) - return -ENOMEM; - - for (i = 0; i < num_sink_pads; i++) - asd->pad[i].flags = MEDIA_PAD_FL_SINK | - MEDIA_PAD_FL_MUST_CONNECT; - - for (i = num_sink_pads; i < num_pads; i++) - asd->pad[i].flags = MEDIA_PAD_FL_SOURCE; - - ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); - if (ret) - return ret; - - if (asd->ctrl_init) { - ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); - if (ret) - goto out_media_entity_cleanup; - - asd->ctrl_init(&asd->sd); - if (asd->ctrl_handler.error) { - ret = asd->ctrl_handler.error; - goto out_v4l2_ctrl_handler_free; - } - - asd->sd.ctrl_handler = &asd->ctrl_handler; - } - - asd->source = -1; - - return 0; - -out_v4l2_ctrl_handler_free: - v4l2_ctrl_handler_free(&asd->ctrl_handler); - -out_media_entity_cleanup: - media_entity_cleanup(&asd->sd.entity); - - return ret; -} - -void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd) -{ - media_entity_cleanup(&asd->sd.entity); - v4l2_ctrl_handler_free(&asd->ctrl_handler); -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h deleted file mode 100644 index 268dfa0..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_SUBDEV_H -#define IPU6_ISYS_SUBDEV_H - 
-#include - -#include -#include -#include - -struct ipu6_isys; - -struct ipu6_isys_subdev { - struct v4l2_subdev sd; - struct ipu6_isys *isys; - u32 const *supported_codes; - struct media_pad *pad; - struct v4l2_ctrl_handler ctrl_handler; - void (*ctrl_init)(struct v4l2_subdev *sd); - int source; /* SSI stream source; -1 if unset */ -}; - -#define to_ipu6_isys_subdev(__sd) \ - container_of(__sd, struct ipu6_isys_subdev, sd) - -unsigned int ipu6_isys_mbus_code_to_bpp(u32 code); -unsigned int ipu6_isys_mbus_code_to_mipi(u32 code); -bool ipu6_isys_is_bayer_format(u32 code); -u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y); - -int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_format *fmt); -int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - struct v4l2_subdev_mbus_code_enum - *code); -u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad); -int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_mbus_framefmt *format); -int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream, - struct v4l2_rect *crop); -int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - enum v4l2_subdev_format_whence which, - struct v4l2_subdev_krouting *routing); -int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd, - const struct v4l2_subdev_ops *ops, - unsigned int nr_ctrls, - unsigned int num_sink_pads, - unsigned int num_source_pads); -void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd); -#endif /* IPU6_ISYS_SUBDEV_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c deleted file mode 100644 index 24a2ef9..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +++ /dev/null @@ -1,1391 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 
Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-cpd.h" -#include "ipu6-fw-isys.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-isys-queue.h" -#include "ipu6-isys-video.h" -#include "ipu6-platform-regs.h" - -const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = { - { V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16 }, - { V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8 }, - { V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 
}, - { V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12 }, - { V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10 }, - { V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, - IPU6_FW_ISYS_FRAME_FORMAT_UYVY}, - { V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, - IPU6_FW_ISYS_FRAME_FORMAT_YUYV}, - { V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, - IPU6_FW_ISYS_FRAME_FORMAT_RGB565 }, - { V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, - IPU6_FW_ISYS_FRAME_FORMAT_RGBA888 }, - { V4L2_META_FMT_GENERIC_8, 8, 8, MEDIA_BUS_FMT_META_8, - IPU6_FW_ISYS_FRAME_FORMAT_RAW8, true }, - { V4L2_META_FMT_GENERIC_CSI2_10, 10, 10, MEDIA_BUS_FMT_META_10, - IPU6_FW_ISYS_FRAME_FORMAT_RAW10, true }, - { V4L2_META_FMT_GENERIC_CSI2_12, 12, 12, MEDIA_BUS_FMT_META_12, - IPU6_FW_ISYS_FRAME_FORMAT_RAW12, true }, - { V4L2_META_FMT_GENERIC_CSI2_16, 16, 16, MEDIA_BUS_FMT_META_16, - IPU6_FW_ISYS_FRAME_FORMAT_RAW16, true }, -}; - -static int video_open(struct file *file) -{ - struct ipu6_isys_video *av = video_drvdata(file); - struct ipu6_isys *isys = av->isys; - struct ipu6_bus_device *adev = isys->adev; - - mutex_lock(&isys->mutex); - if (isys->need_reset) { - mutex_unlock(&isys->mutex); - dev_warn(&adev->auxdev.dev, "isys power cycle required\n"); - return -EIO; - } - mutex_unlock(&isys->mutex); - - return v4l2_fh_open(file); -} - -const struct ipu6_isys_pixelformat * -ipu6_isys_get_isys_format(u32 pixelformat, u32 type) -{ - const struct ipu6_isys_pixelformat *default_pfmt = NULL; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { - const struct ipu6_isys_pixelformat *pfmt = 
&ipu6_isys_pfmts[i]; - - if (type && ((!pfmt->is_meta && - type != V4L2_BUF_TYPE_VIDEO_CAPTURE) || - (pfmt->is_meta && - type != V4L2_BUF_TYPE_META_CAPTURE))) - continue; - - if (!default_pfmt) - default_pfmt = pfmt; - - if (pfmt->pixelformat != pixelformat) - continue; - - return pfmt; - } - - return default_pfmt; -} - -static int ipu6_isys_vidioc_querycap(struct file *file, void *fh, - struct v4l2_capability *cap) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - strscpy(cap->driver, IPU6_ISYS_NAME, sizeof(cap->driver)); - strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); - - return 0; -} - -static int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh, - struct v4l2_fmtdesc *f) -{ - unsigned int i, num_found; - - for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { - if ((ipu6_isys_pfmts[i].is_meta && - f->type != V4L2_BUF_TYPE_META_CAPTURE) || - (!ipu6_isys_pfmts[i].is_meta && - f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) - continue; - - if (f->mbus_code && f->mbus_code != ipu6_isys_pfmts[i].code) - continue; - - if (num_found < f->index) { - num_found++; - continue; - } - - f->flags = 0; - f->pixelformat = ipu6_isys_pfmts[i].pixelformat; - - return 0; - } - - return -EINVAL; -} - -static int ipu6_isys_vidioc_enum_framesizes(struct file *file, void *fh, - struct v4l2_frmsizeenum *fsize) -{ - unsigned int i; - - if (fsize->index > 0) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) { - if (fsize->pixel_format != ipu6_isys_pfmts[i].pixelformat) - continue; - - fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; - fsize->stepwise.min_width = IPU6_ISYS_MIN_WIDTH; - fsize->stepwise.max_width = IPU6_ISYS_MAX_WIDTH; - fsize->stepwise.min_height = IPU6_ISYS_MIN_HEIGHT; - fsize->stepwise.max_height = IPU6_ISYS_MAX_HEIGHT; - fsize->stepwise.step_width = 2; - fsize->stepwise.step_height = 2; - - return 0; - } - - return -EINVAL; -} - -static int ipu6_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, - struct 
v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - f->fmt.pix = av->pix_fmt; - - return 0; -} - -static int ipu6_isys_vidioc_g_fmt_meta_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - f->fmt.meta = av->meta_fmt; - - return 0; -} - -static void ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type, - u32 *format, u32 *width, u32 *height, - u32 *bytesperline, u32 *sizeimage) -{ - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(*format, type); - - *format = pfmt->pixelformat; - *width = clamp(*width, IPU6_ISYS_MIN_WIDTH, IPU6_ISYS_MAX_WIDTH); - *height = clamp(*height, IPU6_ISYS_MIN_HEIGHT, IPU6_ISYS_MAX_HEIGHT); - - if (pfmt->bpp != pfmt->bpp_packed) - *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE); - else - *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE); - - *bytesperline = ALIGN(*bytesperline, 64); - - /* - * (height + 1) * bytesperline due to a hardware issue: the DMA unit - * is a power of two, and a line should be transferred as few units - * as possible. The result is that up to line length more data than - * the image size may be transferred to memory after the image. - * Another limitation is the GDA allocation unit size. For low - * resolution it gives a bigger number. Use larger one to avoid - * memory corruption. 
- */ - *sizeimage = *bytesperline * *height + - max(*bytesperline, - av->isys->pdata->ipdata->isys_dma_overshoot); -} - -static void __ipu6_isys_vidioc_try_fmt_vid_cap(struct ipu6_isys_video *av, - struct v4l2_format *f) -{ - ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat, - &f->fmt.pix.width, &f->fmt.pix.height, - &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage); - - f->fmt.pix.field = V4L2_FIELD_NONE; - f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; - f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; - f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT; - f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT; -} - -static int ipu6_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - if (vb2_is_busy(&av->aq.vbq)) - return -EBUSY; - - __ipu6_isys_vidioc_try_fmt_vid_cap(av, f); - - return 0; -} - -static int __ipu6_isys_vidioc_try_fmt_meta_cap(struct ipu6_isys_video *av, - struct v4l2_format *f) -{ - ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.meta.dataformat, - &f->fmt.meta.width, &f->fmt.meta.height, - &f->fmt.meta.bytesperline, - &f->fmt.meta.buffersize); - - return 0; -} - -static int ipu6_isys_vidioc_try_fmt_meta_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - __ipu6_isys_vidioc_try_fmt_meta_cap(av, f); - - return 0; -} - -static int ipu6_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - ipu6_isys_vidioc_try_fmt_vid_cap(file, fh, f); - av->pix_fmt = f->fmt.pix; - - return 0; -} - -static int ipu6_isys_vidioc_s_fmt_meta_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct ipu6_isys_video *av = video_drvdata(file); - - if (vb2_is_busy(&av->aq.vbq)) - return -EBUSY; - - ipu6_isys_vidioc_try_fmt_meta_cap(file, fh, f); - av->meta_fmt = f->fmt.meta; - - return 0; -} - -static int 
ipu6_isys_vidioc_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *p) -{ - struct ipu6_isys_video *av = video_drvdata(file); - int ret; - - av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type); - av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type); - - ret = vb2_queue_change_type(&av->aq.vbq, p->type); - if (ret) - return ret; - - return vb2_ioctl_reqbufs(file, priv, p); -} - -static int ipu6_isys_vidioc_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *p) -{ - struct ipu6_isys_video *av = video_drvdata(file); - int ret; - - av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type); - av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type); - - ret = vb2_queue_change_type(&av->aq.vbq, p->format.type); - if (ret) - return ret; - - return vb2_ioctl_create_bufs(file, priv, p); -} - -static int link_validate(struct media_link *link) -{ - struct ipu6_isys_video *av = - container_of(link->sink, struct ipu6_isys_video, pad); - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_subdev_state *s_state; - struct v4l2_subdev *s_sd; - struct v4l2_mbus_framefmt *s_fmt; - struct media_pad *s_pad; - u32 s_stream, code; - int ret = -EPIPE; - - if (!link->source->entity) - return ret; - - s_sd = media_entity_to_v4l2_subdev(link->source->entity); - s_state = v4l2_subdev_get_unlocked_active_state(s_sd); - if (!s_state) - return ret; - - dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n", - link->source->entity->name, link->source->index, - link->sink->entity->name); - - s_pad = media_pad_remote_pad_first(&av->pad); - s_stream = ipu6_isys_get_src_stream_by_src_pad(s_sd, s_pad->index); - - v4l2_subdev_lock_state(s_state); - - s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream); - if (!s_fmt) { - dev_err(dev, "failed to get source pad format\n"); - goto unlock; - } - - code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code; - - if (s_fmt->width != ipu6_isys_get_frame_width(av) 
|| - s_fmt->height != ipu6_isys_get_frame_height(av) || - s_fmt->code != code) { - dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n", - s_fmt->width, s_fmt->height, s_fmt->code, - ipu6_isys_get_frame_width(av), - ipu6_isys_get_frame_height(av), code); - goto unlock; - } - - v4l2_subdev_unlock_state(s_state); - - return 0; -unlock: - v4l2_subdev_unlock_state(s_state); - - return ret; -} - -static void get_stream_opened(struct ipu6_isys_video *av) -{ - unsigned long flags; - - spin_lock_irqsave(&av->isys->streams_lock, flags); - av->isys->stream_opened++; - spin_unlock_irqrestore(&av->isys->streams_lock, flags); -} - -static void put_stream_opened(struct ipu6_isys_video *av) -{ - unsigned long flags; - - spin_lock_irqsave(&av->isys->streams_lock, flags); - av->isys->stream_opened--; - spin_unlock_irqrestore(&av->isys->streams_lock, flags); -} - -static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, - struct ipu6_fw_isys_stream_cfg_data_abi *cfg) -{ - struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad); - struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity); - struct ipu6_fw_isys_input_pin_info_abi *input_pin; - struct ipu6_fw_isys_output_pin_info_abi *output_pin; - struct ipu6_isys_stream *stream = av->stream; - struct ipu6_isys_queue *aq = &av->aq; - struct v4l2_mbus_framefmt fmt; - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - struct v4l2_rect v4l2_crop; - struct ipu6_isys *isys = av->isys; - struct device *dev = &isys->adev->auxdev.dev; - int input_pins = cfg->nof_input_pins++; - int output_pins; - u32 src_stream; - int ret; - - src_stream = ipu6_isys_get_src_stream_by_src_pad(sd, src_pad->index); - ret = ipu6_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream, - &fmt); - if (ret < 0) { - dev_err(dev, "can't get stream format (%d)\n", ret); - return ret; - } - - ret = ipu6_isys_get_stream_pad_crop(sd, src_pad->index, src_stream, - &v4l2_crop); - if (ret < 0) { - 
dev_err(dev, "can't get stream crop (%d)\n", ret); - return ret; - } - - input_pin = &cfg->input_pins[input_pins]; - input_pin->input_res.width = fmt.width; - input_pin->input_res.height = fmt.height; - input_pin->dt = av->dt; - input_pin->bits_per_pix = pfmt->bpp_packed; - input_pin->mapped_dt = 0x40; /* invalid mipi data type */ - input_pin->mipi_decompression = 0; - input_pin->capture_mode = IPU6_FW_ISYS_CAPTURE_MODE_REGULAR; - input_pin->mipi_store_mode = pfmt->bpp == pfmt->bpp_packed ? - IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER : - IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL; - input_pin->crop_first_and_last_lines = v4l2_crop.top & 1; - - output_pins = cfg->nof_output_pins++; - aq->fw_output = output_pins; - stream->output_pins_queue[output_pins] = aq; - - output_pin = &cfg->output_pins[output_pins]; - output_pin->input_pin_id = input_pins; - output_pin->output_res.width = ipu6_isys_get_frame_width(av); - output_pin->output_res.height = ipu6_isys_get_frame_height(av); - - output_pin->stride = ipu6_isys_get_bytes_per_line(av); - if (pfmt->bpp != pfmt->bpp_packed) - output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_RAW_SOC; - else - output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_MIPI; - output_pin->ft = pfmt->css_pixelformat; - output_pin->send_irq = 1; - memset(output_pin->ts_offsets, 0, sizeof(output_pin->ts_offsets)); - output_pin->s2m_pixel_soc_pixel_remapping = - S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; - output_pin->csi_be_soc_pixel_remapping = - CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; - - output_pin->snoopable = true; - output_pin->error_handling_enable = false; - output_pin->sensor_type = isys->sensor_type++; - if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) - isys->sensor_type = isys->pdata->ipdata->sensor_type_start; - - return 0; -} - -static int start_stream_firmware(struct ipu6_isys_video *av, - struct ipu6_isys_buffer_list *bl) -{ - struct ipu6_fw_isys_stream_cfg_data_abi *stream_cfg; - struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL; - 
struct ipu6_isys_stream *stream = av->stream; - struct device *dev = &av->isys->adev->auxdev.dev; - struct isys_fw_msgs *msg = NULL; - struct ipu6_isys_queue *aq; - int ret, retout, tout; - u16 send_type; - - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) - return -ENOMEM; - - stream_cfg = &msg->fw_msg.stream; - stream_cfg->src = stream->stream_source; - stream_cfg->vc = stream->vc; - stream_cfg->isl_use = 0; - stream_cfg->sensor_type = IPU6_FW_ISYS_SENSOR_MODE_NORMAL; - - list_for_each_entry(aq, &stream->queues, node) { - struct ipu6_isys_video *__av = ipu6_isys_queue_to_video(aq); - - ret = ipu6_isys_fw_pin_cfg(__av, stream_cfg); - if (ret < 0) { - ipu6_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); - return ret; - } - } - - ipu6_fw_isys_dump_stream_cfg(dev, stream_cfg); - - stream->nr_output_pins = stream_cfg->nof_output_pins; - - reinit_completion(&stream->stream_open_completion); - - ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, - stream_cfg, msg->dma_addr, - sizeof(*stream_cfg), - IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN); - if (ret < 0) { - dev_err(dev, "can't open stream (%d)\n", ret); - ipu6_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); - return ret; - } - - get_stream_opened(av); - - tout = wait_for_completion_timeout(&stream->stream_open_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - - ipu6_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); - - if (!tout) { - dev_err(dev, "stream open time out\n"); - ret = -ETIMEDOUT; - goto out_put_stream_opened; - } - if (stream->error) { - dev_err(dev, "stream open error: %d\n", stream->error); - ret = -EIO; - goto out_put_stream_opened; - } - dev_dbg(dev, "start stream: open complete\n"); - - if (bl) { - msg = ipu6_get_fw_msg_buf(stream); - if (!msg) { - ret = -ENOMEM; - goto out_put_stream_opened; - } - buf = &msg->fw_msg.frame; - ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl); - ipu6_isys_buffer_list_queue(bl, - IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0); - } - - 
reinit_completion(&stream->stream_start_completion); - - if (bl) { - send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; - ipu6_fw_isys_dump_frame_buff_set(dev, buf, - stream_cfg->nof_output_pins); - ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, - buf, msg->dma_addr, - sizeof(*buf), send_type); - } else { - send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - send_type); - } - - if (ret < 0) { - dev_err(dev, "can't start streaming (%d)\n", ret); - goto out_stream_close; - } - - tout = wait_for_completion_timeout(&stream->stream_start_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) { - dev_err(dev, "stream start time out\n"); - ret = -ETIMEDOUT; - goto out_stream_close; - } - if (stream->error) { - dev_err(dev, "stream start error: %d\n", stream->error); - ret = -EIO; - goto out_stream_close; - } - dev_dbg(dev, "start stream: complete\n"); - - return 0; - -out_stream_close: - reinit_completion(&stream->stream_close_completion); - - retout = ipu6_fw_isys_simple_cmd(av->isys, - stream->stream_handle, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); - if (retout < 0) { - dev_dbg(dev, "can't close stream (%d)\n", retout); - goto out_put_stream_opened; - } - - tout = wait_for_completion_timeout(&stream->stream_close_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) - dev_err(dev, "stream close time out\n"); - else if (stream->error) - dev_err(dev, "stream close error: %d\n", stream->error); - else - dev_dbg(dev, "stream close complete\n"); - -out_put_stream_opened: - put_stream_opened(av); - - return ret; -} - -static void stop_streaming_firmware(struct ipu6_isys_video *av) -{ - struct device *dev = &av->isys->adev->auxdev.dev; - struct ipu6_isys_stream *stream = av->stream; - int ret, tout; - - reinit_completion(&stream->stream_stop_completion); - - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH); - - if (ret < 0) { - 
dev_err(dev, "can't stop stream (%d)\n", ret); - return; - } - - tout = wait_for_completion_timeout(&stream->stream_stop_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) - dev_warn(dev, "stream stop time out\n"); - else if (stream->error) - dev_warn(dev, "stream stop error: %d\n", stream->error); - else - dev_dbg(dev, "stop stream: complete\n"); -} - -static void close_streaming_firmware(struct ipu6_isys_video *av) -{ - struct ipu6_isys_stream *stream = av->stream; - struct device *dev = &av->isys->adev->auxdev.dev; - int ret, tout; - - reinit_completion(&stream->stream_close_completion); - - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE); - if (ret < 0) { - dev_err(dev, "can't close stream (%d)\n", ret); - return; - } - - tout = wait_for_completion_timeout(&stream->stream_close_completion, - IPU6_FW_CALL_TIMEOUT_JIFFIES); - if (!tout) - dev_warn(dev, "stream close time out\n"); - else if (stream->error) - dev_warn(dev, "stream close error: %d\n", stream->error); - else - dev_dbg(dev, "close stream: complete\n"); - - put_stream_opened(av); -} - -int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, - struct media_entity *source_entity, - int nr_queues) -{ - struct ipu6_isys_stream *stream = av->stream; - struct ipu6_isys_csi2 *csi2; - - if (WARN_ON(stream->nr_streaming)) - return -EINVAL; - - stream->nr_queues = nr_queues; - atomic_set(&stream->sequence, 0); - - stream->seq_index = 0; - memset(stream->seq, 0, sizeof(stream->seq)); - - if (WARN_ON(!list_empty(&stream->queues))) - return -EINVAL; - - stream->stream_source = stream->asd->source; - csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - csi2->receiver_errors = 0; - stream->source_entity = source_entity; - - dev_dbg(&av->isys->adev->auxdev.dev, - "prepare stream: external entity %s\n", - stream->source_entity->name); - - return 0; -} - -void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, - bool state) -{ - struct 
ipu6_isys *isys = av->isys; - struct ipu6_isys_csi2 *csi2 = NULL; - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - struct device *dev = &isys->adev->auxdev.dev; - struct v4l2_mbus_framefmt format; - struct v4l2_subdev *esd; - struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 }; - unsigned int bpp, lanes; - s64 link_freq = 0; - u64 pixel_rate = 0; - int ret; - - if (!state) - return; - - esd = media_entity_to_v4l2_subdev(av->stream->source_entity); - - av->watermark.width = ipu6_isys_get_frame_width(av); - av->watermark.height = ipu6_isys_get_frame_height(av); - av->watermark.sram_gran_shift = isys->pdata->ipdata->sram_gran_shift; - av->watermark.sram_gran_size = isys->pdata->ipdata->sram_gran_size; - - ret = v4l2_g_ctrl(esd->ctrl_handler, &hb); - if (!ret && hb.value >= 0) - av->watermark.hblank = hb.value; - else - av->watermark.hblank = 0; - - csi2 = ipu6_isys_subdev_to_csi2(av->stream->asd); - link_freq = ipu6_isys_csi2_get_link_freq(csi2); - if (link_freq > 0) { - lanes = csi2->nlanes; - ret = ipu6_isys_get_stream_pad_fmt(&csi2->asd.sd, 0, - av->source_stream, &format); - if (!ret) { - bpp = ipu6_isys_mbus_code_to_bpp(format.code); - pixel_rate = mul_u64_u32_div(link_freq, lanes * 2, bpp); - } - } - - av->watermark.pixel_rate = pixel_rate; - - if (!pixel_rate) { - mutex_lock(&iwake_watermark->mutex); - iwake_watermark->force_iwake_disable = true; - mutex_unlock(&iwake_watermark->mutex); - dev_warn(dev, "unexpected pixel_rate from %s, disable iwake.\n", - av->stream->source_entity->name); - } -} - -static void calculate_stream_datarate(struct ipu6_isys_video *av) -{ - struct video_stream_watermark *watermark = &av->watermark; - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - u32 pages_per_line, pb_bytes_per_line, pixels_per_line, bytes_per_line; - u64 line_time_ns, stream_data_rate; - u16 shift, size; - - shift = watermark->sram_gran_shift; - size = 
watermark->sram_gran_size; - - pixels_per_line = watermark->width + watermark->hblank; - line_time_ns = div_u64(pixels_per_line * NSEC_PER_SEC, - watermark->pixel_rate); - bytes_per_line = watermark->width * pfmt->bpp / 8; - pages_per_line = DIV_ROUND_UP(bytes_per_line, size); - pb_bytes_per_line = pages_per_line << shift; - stream_data_rate = div64_u64(pb_bytes_per_line * 1000, line_time_ns); - - watermark->stream_data_rate = stream_data_rate; -} - -void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state) -{ - struct isys_iwake_watermark *iwake_watermark = - &av->isys->iwake_watermark; - - if (!av->watermark.pixel_rate) - return; - - if (state) { - calculate_stream_datarate(av); - mutex_lock(&iwake_watermark->mutex); - list_add(&av->watermark.stream_node, - &iwake_watermark->video_list); - mutex_unlock(&iwake_watermark->mutex); - } else { - av->watermark.stream_data_rate = 0; - mutex_lock(&iwake_watermark->mutex); - list_del(&av->watermark.stream_node); - mutex_unlock(&iwake_watermark->mutex); - } - - update_watermark_setting(av->isys); -} - -void ipu6_isys_put_stream(struct ipu6_isys_stream *stream) -{ - struct device *dev; - unsigned int i; - unsigned long flags; - - if (!stream) { - pr_err("ipu6-isys: no available stream\n"); - return; - } - - dev = &stream->isys->adev->auxdev.dev; - - spin_lock_irqsave(&stream->isys->streams_lock, flags); - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (&stream->isys->streams[i] == stream) { - if (stream->isys->streams_ref_count[i] > 0) - stream->isys->streams_ref_count[i]--; - else - dev_warn(dev, "invalid stream %d\n", i); - - break; - } - } - spin_unlock_irqrestore(&stream->isys->streams_lock, flags); -} - -static struct ipu6_isys_stream * -ipu6_isys_get_stream(struct ipu6_isys_video *av, struct ipu6_isys_subdev *asd) -{ - struct ipu6_isys_stream *stream = NULL; - struct ipu6_isys *isys = av->isys; - unsigned long flags; - unsigned int i; - u8 vc = av->vc; - - if (!isys) - return NULL; - - 
spin_lock_irqsave(&isys->streams_lock, flags); - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (isys->streams_ref_count[i] && isys->streams[i].vc == vc && - isys->streams[i].asd == asd) { - isys->streams_ref_count[i]++; - stream = &isys->streams[i]; - break; - } - } - - if (!stream) { - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (!isys->streams_ref_count[i]) { - isys->streams_ref_count[i]++; - stream = &isys->streams[i]; - stream->vc = vc; - stream->asd = asd; - break; - } - } - } - spin_unlock_irqrestore(&isys->streams_lock, flags); - - return stream; -} - -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle) -{ - unsigned long flags; - struct ipu6_isys_stream *stream = NULL; - - if (!isys) - return NULL; - - if (stream_handle >= IPU6_ISYS_MAX_STREAMS) { - dev_err(&isys->adev->auxdev.dev, - "stream_handle %d is invalid\n", stream_handle); - return NULL; - } - - spin_lock_irqsave(&isys->streams_lock, flags); - if (isys->streams_ref_count[stream_handle] > 0) { - isys->streams_ref_count[stream_handle]++; - stream = &isys->streams[stream_handle]; - } - spin_unlock_irqrestore(&isys->streams_lock, flags); - - return stream; -} - -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) -{ - struct ipu6_isys_stream *stream = NULL; - unsigned long flags; - unsigned int i; - - if (!isys) - return NULL; - - if (source < 0) { - dev_err(&isys->adev->auxdev.dev, - "query stream with invalid port number\n"); - return NULL; - } - - spin_lock_irqsave(&isys->streams_lock, flags); - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - if (!isys->streams_ref_count[i]) - continue; - - if (isys->streams[i].stream_source == source && - isys->streams[i].vc == vc) { - stream = &isys->streams[i]; - isys->streams_ref_count[i]++; - break; - } - } - spin_unlock_irqrestore(&isys->streams_lock, flags); - - return stream; -} - -static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video 
*__av) -{ - struct media_pipeline *pipeline = - media_entity_pipeline(&__av->vdev.entity); - unsigned int i; - u64 stream_mask = 0; - - for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { - struct ipu6_isys_video *av = &__av->csi2->av[i]; - - if (pipeline == media_entity_pipeline(&av->vdev.entity)) - stream_mask |= BIT_ULL(av->source_stream); - } - - return stream_mask; -} - -int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - struct ipu6_isys_buffer_list *bl) -{ - struct v4l2_subdev_krouting *routing; - struct ipu6_isys_stream *stream = av->stream; - struct v4l2_subdev_state *subdev_state; - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_subdev *sd; - struct media_pad *r_pad; - u32 sink_pad, sink_stream; - u64 r_stream; - u64 stream_mask = 0; - int ret = 0; - - dev_dbg(dev, "set stream: %d\n", state); - - if (WARN(!stream->source_entity, "No source entity for stream\n")) - return -ENODEV; - - sd = &stream->asd->sd; - r_pad = media_pad_remote_pad_first(&av->pad); - r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); - - subdev_state = v4l2_subdev_lock_and_get_active_state(sd); - routing = &subdev_state->routing; - ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, - r_stream, &sink_pad, - &sink_stream); - v4l2_subdev_unlock_state(subdev_state); - if (ret) - return ret; - - stream_mask = get_stream_mask_by_pipeline(av); - if (!state) { - stop_streaming_firmware(av); - - /* stop sub-device which connects with video */ - dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", - sd->name, r_pad->index, stream_mask); - ret = v4l2_subdev_disable_streams(sd, r_pad->index, - stream_mask); - if (ret) { - dev_err(dev, "stream off %s failed with %d\n", sd->name, - ret); - return ret; - } - close_streaming_firmware(av); - } else { - ret = start_stream_firmware(av, bl); - if (ret) { - dev_err(dev, "start stream of firmware failed\n"); - return ret; - } - - /* start sub-device which connects with video */ - 
dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, - r_pad->index, stream_mask); - ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); - if (ret) { - dev_err(dev, "stream on %s failed with %d\n", sd->name, - ret); - goto out_media_entity_stop_streaming_firmware; - } - } - - av->streaming = state; - - return 0; - -out_media_entity_stop_streaming_firmware: - stop_streaming_firmware(av); - - return ret; -} - -static const struct v4l2_ioctl_ops ipu6_v4l2_ioctl_ops = { - .vidioc_querycap = ipu6_isys_vidioc_querycap, - .vidioc_enum_fmt_vid_cap = ipu6_isys_vidioc_enum_fmt, - .vidioc_enum_fmt_meta_cap = ipu6_isys_vidioc_enum_fmt, - .vidioc_enum_framesizes = ipu6_isys_vidioc_enum_framesizes, - .vidioc_g_fmt_vid_cap = ipu6_isys_vidioc_g_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = ipu6_isys_vidioc_s_fmt_vid_cap, - .vidioc_try_fmt_vid_cap = ipu6_isys_vidioc_try_fmt_vid_cap, - .vidioc_g_fmt_meta_cap = ipu6_isys_vidioc_g_fmt_meta_cap, - .vidioc_s_fmt_meta_cap = ipu6_isys_vidioc_s_fmt_meta_cap, - .vidioc_try_fmt_meta_cap = ipu6_isys_vidioc_try_fmt_meta_cap, - .vidioc_reqbufs = ipu6_isys_vidioc_reqbufs, - .vidioc_create_bufs = ipu6_isys_vidioc_create_bufs, - .vidioc_prepare_buf = vb2_ioctl_prepare_buf, - .vidioc_querybuf = vb2_ioctl_querybuf, - .vidioc_qbuf = vb2_ioctl_qbuf, - .vidioc_dqbuf = vb2_ioctl_dqbuf, - .vidioc_streamon = vb2_ioctl_streamon, - .vidioc_streamoff = vb2_ioctl_streamoff, - .vidioc_expbuf = vb2_ioctl_expbuf, -}; - -static const struct media_entity_operations entity_ops = { - .link_validate = link_validate, -}; - -static const struct v4l2_file_operations isys_fops = { - .owner = THIS_MODULE, - .poll = vb2_fop_poll, - .unlocked_ioctl = video_ioctl2, - .mmap = vb2_fop_mmap, - .open = video_open, - .release = vb2_fop_release, -}; - -int ipu6_isys_fw_open(struct ipu6_isys *isys) -{ - struct ipu6_bus_device *adev = isys->adev; - const struct ipu6_isys_internal_pdata *ipdata = isys->pdata->ipdata; - int ret; - - ret = 
pm_runtime_resume_and_get(&adev->auxdev.dev); - if (ret < 0) - return ret; - - mutex_lock(&isys->mutex); - - if (isys->ref_count++) - goto unlock; - - ipu6_configure_spc(adev->isp, &ipdata->hw_variant, - IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base, - adev->pkg_dir, adev->pkg_dir_dma_addr); - - /* - * Buffers could have been left to wrong queue at last closure. - * Move them now back to empty buffer queue. - */ - ipu6_cleanup_fw_msg_bufs(isys); - - if (isys->fwcom) { - /* - * Something went wrong in previous shutdown. As we are now - * restarting isys we can safely delete old context. - */ - dev_warn(&adev->auxdev.dev, "clearing old context\n"); - ipu6_fw_isys_cleanup(isys); - } - - ret = ipu6_fw_isys_init(isys, ipdata->num_parallel_streams); - if (ret < 0) - goto out; - -unlock: - mutex_unlock(&isys->mutex); - - return 0; - -out: - isys->ref_count--; - mutex_unlock(&isys->mutex); - pm_runtime_put(&adev->auxdev.dev); - - return ret; -} - -void ipu6_isys_fw_close(struct ipu6_isys *isys) -{ - mutex_lock(&isys->mutex); - - isys->ref_count--; - if (!isys->ref_count) { - ipu6_fw_isys_close(isys); - if (isys->fwcom) { - isys->need_reset = true; - dev_warn(&isys->adev->auxdev.dev, - "failed to close fw isys\n"); - } - } - - mutex_unlock(&isys->mutex); - - if (isys->need_reset) - pm_runtime_put_sync(&isys->adev->auxdev.dev); - else - pm_runtime_put(&isys->adev->auxdev.dev); -} - -int ipu6_isys_setup_video(struct ipu6_isys_video *av, - struct media_entity **source_entity, int *nr_queues) -{ - const struct ipu6_isys_pixelformat *pfmt = - ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0); - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_mbus_frame_desc_entry entry; - struct v4l2_subdev_route *route = NULL; - struct v4l2_subdev_route *r; - struct v4l2_subdev_state *state; - struct ipu6_isys_subdev *asd; - struct v4l2_subdev *remote_sd; - struct media_pipeline *pipeline; - struct media_pad *source_pad, *remote_pad; - int ret = -EINVAL; - - 
*nr_queues = 0; - - remote_pad = media_pad_remote_pad_unique(&av->pad); - if (IS_ERR(remote_pad)) { - dev_dbg(dev, "failed to get remote pad\n"); - return PTR_ERR(remote_pad); - } - - remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); - asd = to_ipu6_isys_subdev(remote_sd); - source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]); - if (!source_pad) { - dev_dbg(dev, "No external source entity\n"); - return -ENODEV; - } - - *source_entity = source_pad->entity; - - /* Find the root */ - state = v4l2_subdev_lock_and_get_active_state(remote_sd); - for_each_active_route(&state->routing, r) { - (*nr_queues)++; - - if (r->source_pad == remote_pad->index) - route = r; - } - - if (!route) { - v4l2_subdev_unlock_state(state); - dev_dbg(dev, "Failed to find route\n"); - return -ENODEV; - } - av->source_stream = route->sink_stream; - v4l2_subdev_unlock_state(state); - - ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, - to_ipu6_isys_csi2(asd), - *source_entity, &entry); - if (ret == -ENOIOCTLCMD) { - av->vc = 0; - av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); - } else if (!ret) { - dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", - entry.stream, entry.length, entry.bus.csi2.vc, - entry.bus.csi2.dt); - - av->vc = entry.bus.csi2.vc; - av->dt = entry.bus.csi2.dt; - } else { - dev_err(dev, "failed to get remote frame desc\n"); - return ret; - } - - pipeline = media_entity_pipeline(&av->vdev.entity); - if (!pipeline) - ret = video_device_pipeline_alloc_start(&av->vdev); - else - ret = video_device_pipeline_start(&av->vdev, pipeline); - if (ret < 0) { - dev_dbg(dev, "media pipeline start failed\n"); - return ret; - } - - av->stream = ipu6_isys_get_stream(av, asd); - if (!av->stream) { - video_device_pipeline_stop(&av->vdev); - dev_err(dev, "no available stream for firmware\n"); - return -EINVAL; - } - - return 0; -} - -/* - * Do everything that's needed to initialise things related to video - * buffer queue, video node, and the 
related media entity. The caller - * is expected to assign isys field and set the name of the video - * device. - */ -int ipu6_isys_video_init(struct ipu6_isys_video *av) -{ - struct v4l2_format format = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .fmt.pix = { - .width = 1920, - .height = 1080, - }, - }; - struct v4l2_format format_meta = { - .type = V4L2_BUF_TYPE_META_CAPTURE, - .fmt.meta = { - .width = 1920, - .height = 4, - }, - }; - int ret; - - mutex_init(&av->mutex); - av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | - V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE; - av->vdev.vfl_dir = VFL_DIR_RX; - - ret = ipu6_isys_queue_init(&av->aq); - if (ret) - goto out_free_watermark; - - av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; - ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); - if (ret) - goto out_vb2_queue_release; - - av->vdev.entity.ops = &entity_ops; - av->vdev.release = video_device_release_empty; - av->vdev.fops = &isys_fops; - av->vdev.v4l2_dev = &av->isys->v4l2_dev; - av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev; - if (!av->vdev.ioctl_ops) - av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops; - av->vdev.queue = &av->aq.vbq; - av->vdev.lock = &av->mutex; - - __ipu6_isys_vidioc_try_fmt_vid_cap(av, &format); - av->pix_fmt = format.fmt.pix; - __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); - av->meta_fmt = format_meta.fmt.meta; - - set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); - video_set_drvdata(&av->vdev, av); - - ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); - if (ret) - goto out_media_entity_cleanup; - - return ret; - -out_media_entity_cleanup: - vb2_video_unregister_device(&av->vdev); - media_entity_cleanup(&av->vdev.entity); - -out_vb2_queue_release: - vb2_queue_release(&av->aq.vbq); - -out_free_watermark: - mutex_destroy(&av->mutex); - - return ret; -} - -void ipu6_isys_video_cleanup(struct ipu6_isys_video *av) -{ - vb2_video_unregister_device(&av->vdev); - 
media_entity_cleanup(&av->vdev.entity); - mutex_destroy(&av->mutex); -} - -u32 ipu6_isys_get_format(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.pixelformat; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.dataformat; - - return 0; -} - -u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.sizeimage; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.buffersize; - - return 0; -} - -u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.bytesperline; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.bytesperline; - - return 0; -} - -u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.width; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.width; - - return 0; -} - -u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av) -{ - if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - return av->pix_fmt.height; - - if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE) - return av->meta_fmt.height; - - return 0; -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h deleted file mode 100644 index 1dd36f2..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h +++ /dev/null @@ -1,135 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_VIDEO_H -#define IPU6_ISYS_VIDEO_H - -#include -#include -#include -#include -#include - -#include -#include - -#include "ipu6-isys-queue.h" - -#define IPU6_ISYS_OUTPUT_PINS 11 -#define IPU6_ISYS_MAX_PARALLEL_SOF 2 - -struct file; -struct ipu6_isys; -struct ipu6_isys_csi2; 
-struct ipu6_isys_subdev; - -struct ipu6_isys_pixelformat { - u32 pixelformat; - u32 bpp; - u32 bpp_packed; - u32 code; - u32 css_pixelformat; - bool is_meta; -}; - -struct sequence_info { - unsigned int sequence; - u64 timestamp; -}; - -/* - * Align with firmware stream. Each stream represents a CSI virtual channel. - * May map to multiple video devices - */ -struct ipu6_isys_stream { - struct mutex mutex; - struct media_entity *source_entity; - atomic_t sequence; - unsigned int seq_index; - struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; - int stream_source; - int stream_handle; - unsigned int nr_output_pins; - struct ipu6_isys_subdev *asd; - - int nr_queues; /* Number of capture queues */ - int nr_streaming; - int streaming; /* Has streaming been really started? */ - struct list_head queues; - struct completion stream_open_completion; - struct completion stream_close_completion; - struct completion stream_start_completion; - struct completion stream_stop_completion; - struct ipu6_isys *isys; - - struct ipu6_isys_queue *output_pins_queue[IPU6_ISYS_OUTPUT_PINS]; - int error; - u8 vc; -}; - -struct video_stream_watermark { - u32 width; - u32 height; - u32 hblank; - u32 frame_rate; - u64 pixel_rate; - u64 stream_data_rate; - u16 sram_gran_shift; - u16 sram_gran_size; - struct list_head stream_node; -}; - -struct ipu6_isys_video { - struct ipu6_isys_queue aq; - /* Serialise access to other fields in the struct. 
*/ - struct mutex mutex; - struct media_pad pad; - struct video_device vdev; - struct v4l2_pix_format pix_fmt; - struct v4l2_meta_format meta_fmt; - struct ipu6_isys *isys; - struct ipu6_isys_csi2 *csi2; - struct ipu6_isys_stream *stream; - unsigned int streaming; - struct video_stream_watermark watermark; - u32 source_stream; - u8 vc; - u8 dt; -}; - -#define ipu6_isys_queue_to_video(__aq) \ - container_of(__aq, struct ipu6_isys_video, aq) - -extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts[]; -extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[]; - -const struct ipu6_isys_pixelformat * -ipu6_isys_get_isys_format(u32 pixelformat, u32 code); -int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, - struct media_entity *source_entity, - int nr_queues); -int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - struct ipu6_isys_buffer_list *bl); -int ipu6_isys_fw_open(struct ipu6_isys *isys); -void ipu6_isys_fw_close(struct ipu6_isys *isys); -int ipu6_isys_setup_video(struct ipu6_isys_video *av, - struct media_entity **source_entity, int *nr_queues); -int ipu6_isys_video_init(struct ipu6_isys_video *av); -void ipu6_isys_video_cleanup(struct ipu6_isys_video *av); -void ipu6_isys_put_stream(struct ipu6_isys_stream *stream); -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle); -struct ipu6_isys_stream * -ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc); - -void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av, - bool state); -void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state); - -u32 ipu6_isys_get_format(struct ipu6_isys_video *av); -u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av); -u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av); -u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av); -u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av); - -#endif /* IPU6_ISYS_VIDEO_H */ 
diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c deleted file mode 100644 index fc0ec0a..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ /dev/null @@ -1,1380 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "ipu6-bus.h" -#include "ipu6-cpd.h" -#include "ipu6-dma.h" -#include "ipu6-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-mmu.h" -#include "ipu6-platform-buttress-regs.h" -#include "ipu6-platform-isys-csi2-reg.h" -#include "ipu6-platform-regs.h" - -#define IPU6_BUTTRESS_FABIC_CONTROL 0x68 -#define GDA_ENABLE_IWAKE_INDEX 2 -#define GDA_IWAKE_THRESHOLD_INDEX 1 -#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0 -#define GDA_MEMOPEN_THRESHOLD_INDEX 3 -#define DEFAULT_DID_RATIO 90 -#define DEFAULT_IWAKE_THRESHOLD 0x42 -#define DEFAULT_MEM_OPEN_TIME 10 -#define ONE_THOUSAND_MICROSECOND 1000 -/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */ -#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE 0x800 - -/* LTR & DID value are 10 bit at most */ -#define LTR_DID_VAL_MAX 1023 -#define LTR_DEFAULT_VALUE 0x70503c19 -#define FILL_TIME_DEFAULT_VALUE 0xfff0783c -#define LTR_DID_PKGC_2R 20 -#define LTR_SCALE_DEFAULT 5 -#define LTR_SCALE_1024NS 2 -#define DID_SCALE_1US 2 -#define DID_SCALE_32US 3 -#define REG_PKGC_PMON_CFG 0xb00 - -#define VAL_PKGC_PMON_CFG_RESET 0x38 -#define VAL_PKGC_PMON_CFG_START 0x7 - -#define IS_PIXEL_BUFFER_PAGES 0x80 -/* - * when iwake mode is disabled, the critical threshold is statically set - * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4 - */ -#define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4) - -union 
fabric_ctrl { - struct { - u16 ltr_val : 10; - u16 ltr_scale : 3; - u16 reserved : 3; - u16 did_val : 10; - u16 did_scale : 3; - u16 reserved2 : 1; - u16 keep_power_in_D0 : 1; - u16 keep_power_override : 1; - } bits; - u32 value; -}; - -enum ltr_did_type { - LTR_IWAKE_ON, - LTR_IWAKE_OFF, - LTR_ISYS_ON, - LTR_ISYS_OFF, - LTR_ENHANNCE_IWAKE, - LTR_TYPE_MAX -}; - -#define ISYS_PM_QOS_VALUE 300 - -static int isys_isr_one(struct ipu6_bus_device *adev); - -static int -isys_complete_ext_device_registration(struct ipu6_isys *isys, - struct v4l2_subdev *sd, - struct ipu6_isys_csi2_config *csi2) -{ - struct device *dev = &isys->adev->auxdev.dev; - unsigned int i; - int ret; - - for (i = 0; i < sd->entity.num_pads; i++) { - if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) - break; - } - - if (i == sd->entity.num_pads) { - dev_warn(dev, "no src pad in external entity\n"); - ret = -ENOENT; - goto unregister_subdev; - } - - ret = media_create_pad_link(&sd->entity, i, - &isys->csi2[csi2->port].asd.sd.entity, - 0, MEDIA_LNK_FL_ENABLED | - MEDIA_LNK_FL_IMMUTABLE); - if (ret) { - dev_warn(dev, "can't create link\n"); - goto unregister_subdev; - } - - isys->csi2[csi2->port].nlanes = csi2->nlanes; - - return 0; - -unregister_subdev: - v4l2_device_unregister_subdev(sd); - - return ret; -} - -static void isys_stream_init(struct ipu6_isys *isys) -{ - u32 i; - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) { - mutex_init(&isys->streams[i].mutex); - init_completion(&isys->streams[i].stream_open_completion); - init_completion(&isys->streams[i].stream_close_completion); - init_completion(&isys->streams[i].stream_start_completion); - init_completion(&isys->streams[i].stream_stop_completion); - INIT_LIST_HEAD(&isys->streams[i].queues); - isys->streams[i].isys = isys; - isys->streams[i].stream_handle = i; - isys->streams[i].vc = INVALID_VC_ID; - } -} - -static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2 = - 
&isys->pdata->ipdata->csi2; - unsigned int i; - - for (i = 0; i < csi2->nports; i++) - ipu6_isys_csi2_cleanup(&isys->csi2[i]); -} - -static int isys_csi2_register_subdevices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - unsigned int i; - int ret; - - for (i = 0; i < csi2_pdata->nports; i++) { - ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, - isys->pdata->base + - CSI_REG_PORT_BASE(i), i); - if (ret) - goto fail; - - isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); - } - - return 0; - -fail: - while (i--) - ipu6_isys_csi2_cleanup(&isys->csi2[i]); - - return ret; -} - -static int isys_csi2_create_media_links(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; - unsigned int i, j; - int ret; - - for (i = 0; i < csi2_pdata->nports; i++) { - struct media_entity *sd = &isys->csi2[i].asd.sd.entity; - - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { - struct ipu6_isys_video *av = &isys->csi2[i].av[j]; - - ret = media_create_pad_link(sd, CSI2_PAD_SRC + j, - &av->vdev.entity, 0, 0); - if (ret) { - dev_err(dev, "CSI2 can't create link\n"); - return ret; - } - - av->csi2 = &isys->csi2[i]; - } - } - - return 0; -} - -static void isys_unregister_video_devices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - unsigned int i, j; - - for (i = 0; i < csi2_pdata->nports; i++) - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) - ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); -} - -static int isys_register_video_devices(struct ipu6_isys *isys) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - unsigned int i, j; - int ret; - - for (i = 0; i < csi2_pdata->nports; i++) { - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { - struct ipu6_isys_video *av = &isys->csi2[i].av[j]; - - snprintf(av->vdev.name, 
sizeof(av->vdev.name), - IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u", - i * NR_OF_CSI2_SRC_PADS + j); - av->isys = isys; - av->aq.vbq.buf_struct_size = - sizeof(struct ipu6_isys_video_buffer); - - ret = ipu6_isys_video_init(av); - if (ret) - goto fail; - } - } - - return 0; - -fail: - while (i--) { - while (j--) - ipu6_isys_video_cleanup(&isys->csi2[i].av[j]); - j = NR_OF_CSI2_SRC_PADS; - } - - return ret; -} - -void isys_setup_hw(struct ipu6_isys *isys) -{ - void __iomem *base = isys->pdata->base; - const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; - u32 irqs = 0; - unsigned int i, nports; - - nports = isys->pdata->ipdata->csi2.nports; - - /* Enable irqs for all MIPI ports */ - for (i = 0; i < nports; i++) - irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i); - - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge); - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp); - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask); - writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable); - writel(GENMASK(19, 0), - base + isys->pdata->ipdata->csi2.ctrl0_irq_clear); - - irqs = ISYS_UNISPART_IRQS; - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE); - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); - writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); - writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE); - - writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); - writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); - - /* Write CDC FIFO threshold values for isys */ - for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) - writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i)); -} - -static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2) -{ - struct ipu6_isys_stream *stream; - unsigned int i; - u32 status; - int source; - - ipu6_isys_register_errors(csi2); - - status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - 
CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); - - writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + - CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); - - source = csi2->asd.source; - for (i = 0; i < NR_OF_CSI2_VC; i++) { - if (status & IPU_CSI_RX_IRQ_FS_VC(i)) { - stream = ipu6_isys_query_stream_by_source(csi2->isys, - source, i); - if (stream) { - ipu6_isys_csi2_sof_event_by_stream(stream); - ipu6_isys_put_stream(stream); - } - } - - if (status & IPU_CSI_RX_IRQ_FE_VC(i)) { - stream = ipu6_isys_query_stream_by_source(csi2->isys, - source, i); - if (stream) { - ipu6_isys_csi2_eof_event_by_stream(stream); - ipu6_isys_put_stream(stream); - } - } - } -} - -irqreturn_t isys_isr(struct ipu6_bus_device *adev) -{ - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - void __iomem *base = isys->pdata->base; - u32 status_sw, status_csi; - u32 ctrl0_status, ctrl0_clear; - - spin_lock(&isys->power_lock); - if (!isys->power) { - spin_unlock(&isys->power_lock); - return IRQ_NONE; - } - - ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status; - ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear; - - status_csi = readl(isys->pdata->base + ctrl0_status); - status_sw = readl(isys->pdata->base + - IPU6_REG_ISYS_UNISPART_IRQ_STATUS); - - writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW, - base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); - - do { - writel(status_csi, isys->pdata->base + ctrl0_clear); - - writel(status_sw, isys->pdata->base + - IPU6_REG_ISYS_UNISPART_IRQ_CLEAR); - - if (isys->isr_csi2_bits & status_csi) { - unsigned int i; - - for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { - /* irq from not enabled port */ - if (!isys->csi2[i].base) - continue; - if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i)) - ipu6_isys_csi2_isr(&isys->csi2[i]); - } - } - - writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG); - - if (!isys_isr_one(adev)) - status_sw = IPU6_ISYS_UNISPART_IRQ_SW; - else - status_sw = 0; - - status_csi = readl(isys->pdata->base + ctrl0_status); - status_sw |= 
readl(isys->pdata->base + - IPU6_REG_ISYS_UNISPART_IRQ_STATUS); - } while ((status_csi & isys->isr_csi2_bits) || - (status_sw & IPU6_ISYS_UNISPART_IRQ_SW)); - - writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK); - - spin_unlock(&isys->power_lock); - - return IRQ_HANDLED; -} - -static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - struct ltr_did ltrdid_default; - - ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE; - ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE; - - if (iwake_watermark->ltrdid.lut_ltr.value) - *pltr_did = iwake_watermark->ltrdid; - else - *pltr_did = ltrdid_default; -} - -static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value) -{ - struct device *dev = &isys->adev->auxdev.dev; - u32 req_id = index; - u32 offset = 0; - int ret; - - ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value); - if (ret) - dev_err(dev, "write %d failed %d", index, ret); - - return ret; -} - -/* - * When input system is powered up and before enabling any new sensor capture, - * or after disabling any sensor capture the following values need to be set: - * LTR_value = LTR(usec) from calculation; - * LTR_scale = 2; - * DID_value = DID(usec) from calculation; - * DID_scale = 2; - * - * When input system is powered down, the LTR and DID values - * must be returned to the default values: - * LTR_value = 1023; - * LTR_scale = 5; - * DID_value = 1023; - * DID_scale = 2; - */ -static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did, - enum ltr_did_type use) -{ - struct device *dev = &isys->adev->auxdev.dev; - u16 ltr_val, ltr_scale = LTR_SCALE_1024NS; - u16 did_val, did_scale = DID_SCALE_1US; - struct ipu6_device *isp = isys->adev->isp; - union fabric_ctrl fc; - - switch (use) { - case LTR_IWAKE_ON: - ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX); - did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX); - 
ltr_scale = (ltr == LTR_DID_VAL_MAX && - did == LTR_DID_VAL_MAX) ? - LTR_SCALE_DEFAULT : LTR_SCALE_1024NS; - break; - case LTR_ISYS_ON: - case LTR_IWAKE_OFF: - ltr_val = LTR_DID_PKGC_2R; - did_val = LTR_DID_PKGC_2R; - break; - case LTR_ISYS_OFF: - ltr_val = LTR_DID_VAL_MAX; - did_val = LTR_DID_VAL_MAX; - ltr_scale = LTR_SCALE_DEFAULT; - break; - case LTR_ENHANNCE_IWAKE: - if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) { - ltr_val = LTR_DID_VAL_MAX; - did_val = LTR_DID_VAL_MAX; - ltr_scale = LTR_SCALE_DEFAULT; - } else if (did < ONE_THOUSAND_MICROSECOND) { - ltr_val = ltr; - did_val = did; - } else { - ltr_val = ltr; - /* div 90% value by 32 to account for scale change */ - did_val = did / 32; - did_scale = DID_SCALE_32US; - } - break; - default: - ltr_val = LTR_DID_VAL_MAX; - did_val = LTR_DID_VAL_MAX; - ltr_scale = LTR_SCALE_DEFAULT; - break; - } - - fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL); - fc.bits.ltr_val = ltr_val; - fc.bits.ltr_scale = ltr_scale; - fc.bits.did_val = did_val; - fc.bits.did_scale = did_scale; - - dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n", - ltr_val, ltr_scale, did_val, did_scale); - writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL); -} - -/* - * Driver may clear register GDA_ENABLE_IWAKE before FW configures the - * stream for debug purpose. Otherwise driver should not access this register. 
- */ -static void enable_iwake(struct ipu6_isys *isys, bool enable) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - int ret; - - mutex_lock(&iwake_watermark->mutex); - - if (iwake_watermark->iwake_enabled == enable) { - mutex_unlock(&iwake_watermark->mutex); - return; - } - - ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable); - if (!ret) - iwake_watermark->iwake_enabled = enable; - - mutex_unlock(&iwake_watermark->mutex); -} - -void update_watermark_setting(struct ipu6_isys *isys) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - u32 iwake_threshold, iwake_critical_threshold, page_num; - struct device *dev = &isys->adev->auxdev.dev; - u32 calc_fill_time_us = 0, ltr = 0, did = 0; - struct video_stream_watermark *p_watermark; - enum ltr_did_type ltr_did_type; - struct list_head *stream_node; - u64 isys_pb_datarate_mbs = 0; - u32 mem_open_threshold = 0; - struct ltr_did ltrdid; - u64 threshold_bytes; - u32 max_sram_size; - u32 shift; - - shift = isys->pdata->ipdata->sram_gran_shift; - max_sram_size = isys->pdata->ipdata->max_sram_size; - - mutex_lock(&iwake_watermark->mutex); - if (iwake_watermark->force_iwake_disable) { - set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); - set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, - CRITICAL_THRESHOLD_IWAKE_DISABLE); - goto unlock_exit; - } - - if (list_empty(&iwake_watermark->video_list)) { - isys_pb_datarate_mbs = 0; - } else { - list_for_each(stream_node, &iwake_watermark->video_list) { - p_watermark = list_entry(stream_node, - struct video_stream_watermark, - stream_node); - isys_pb_datarate_mbs += p_watermark->stream_data_rate; - } - } - mutex_unlock(&iwake_watermark->mutex); - - if (!isys_pb_datarate_mbs) { - enable_iwake(isys, false); - set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); - mutex_lock(&iwake_watermark->mutex); - set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, - CRITICAL_THRESHOLD_IWAKE_DISABLE); - goto unlock_exit; - } - - 
enable_iwake(isys, true); - calc_fill_time_us = div64_u64(max_sram_size, isys_pb_datarate_mbs); - - if (isys->pdata->ipdata->enhanced_iwake) { - ltr = isys->pdata->ipdata->ltr; - did = calc_fill_time_us * DEFAULT_DID_RATIO / 100; - ltr_did_type = LTR_ENHANNCE_IWAKE; - } else { - get_lut_ltrdid(isys, <rdid); - - if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0) - ltr = 0; - else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1) - ltr = ltrdid.lut_ltr.bits.val0; - else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2) - ltr = ltrdid.lut_ltr.bits.val1; - else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3) - ltr = ltrdid.lut_ltr.bits.val2; - else - ltr = ltrdid.lut_ltr.bits.val3; - - did = calc_fill_time_us - ltr; - ltr_did_type = LTR_IWAKE_ON; - } - - set_iwake_ltrdid(isys, ltr, did, ltr_did_type); - - /* calculate iwake threshold with 2KB granularity pages */ - threshold_bytes = did * isys_pb_datarate_mbs; - iwake_threshold = max_t(u32, 1, threshold_bytes >> shift); - iwake_threshold = min_t(u32, iwake_threshold, max_sram_size); - - mutex_lock(&iwake_watermark->mutex); - if (isys->pdata->ipdata->enhanced_iwake) { - set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, - DEFAULT_IWAKE_THRESHOLD); - /* calculate number of pages that will be filled in 10 usec */ - page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) / - ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE; - page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) % - ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 
1 : 0; - mem_open_threshold = isys->pdata->ipdata->memopen_threshold; - mem_open_threshold = max_t(u32, mem_open_threshold, page_num); - dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold); - set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX, - mem_open_threshold); - } else { - set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, - iwake_threshold); - } - - iwake_critical_threshold = iwake_threshold + - (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2; - - dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold, - iwake_critical_threshold); - - set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, - iwake_critical_threshold); - - writel(VAL_PKGC_PMON_CFG_RESET, - isys->adev->isp->base + REG_PKGC_PMON_CFG); - writel(VAL_PKGC_PMON_CFG_START, - isys->adev->isp->base + REG_PKGC_PMON_CFG); -unlock_exit: - mutex_unlock(&iwake_watermark->mutex); -} - -static void isys_iwake_watermark_init(struct ipu6_isys *isys) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - - INIT_LIST_HEAD(&iwake_watermark->video_list); - mutex_init(&iwake_watermark->mutex); - - iwake_watermark->ltrdid.lut_ltr.value = 0; - iwake_watermark->isys = isys; - iwake_watermark->iwake_enabled = false; - iwake_watermark->force_iwake_disable = false; -} - -static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) -{ - struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark; - - mutex_lock(&iwake_watermark->mutex); - list_del(&iwake_watermark->video_list); - mutex_unlock(&iwake_watermark->mutex); - - mutex_destroy(&iwake_watermark->mutex); -} - -/* The .bound() notifier callback when a match is found */ -static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_connection *asc) -{ - struct ipu6_isys *isys = - container_of(notifier, struct ipu6_isys, notifier); - struct sensor_async_sd *s_asd = - container_of(asc, struct sensor_async_sd, asc); - int ret; - - if (s_asd->csi2.port >= 
isys->pdata->ipdata->csi2.nports) { - dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", - s_asd->csi2.port); - return -EINVAL; - } - - ret = ipu_bridge_instantiate_vcm(sd->dev); - if (ret) { - dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n"); - return ret; - } - - dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n", - sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); - ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); - if (ret) - return ret; - - return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -} - -static int isys_notifier_complete(struct v4l2_async_notifier *notifier) -{ - struct ipu6_isys *isys = - container_of(notifier, struct ipu6_isys, notifier); - - return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -} - -static const struct v4l2_async_notifier_operations isys_async_ops = { - .bound = isys_notifier_bound, - .complete = isys_notifier_complete, -}; - -#define ISYS_MAX_PORTS 8 -static int isys_notifier_init(struct ipu6_isys *isys) -{ - struct ipu6_device *isp = isys->adev->isp; - struct device *dev = &isp->pdev->dev; - unsigned int i; - int ret; - - v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); - - for (i = 0; i < ISYS_MAX_PORTS; i++) { - struct v4l2_fwnode_endpoint vep = { - .bus_type = V4L2_MBUS_CSI2_DPHY - }; - struct sensor_async_sd *s_asd; - struct fwnode_handle *ep; - - ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, - FWNODE_GRAPH_ENDPOINT_NEXT); - if (!ep) - continue; - - ret = v4l2_fwnode_endpoint_parse(ep, &vep); - if (ret) { - dev_err(dev, "fwnode endpoint parse failed: %d\n", ret); - goto err_parse; - } - - s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, - struct sensor_async_sd); - if (IS_ERR(s_asd)) { - ret = PTR_ERR(s_asd); - dev_err(dev, "add remove fwnode failed: %d\n", ret); - goto err_parse; - } - - s_asd->csi2.port = vep.base.port; - s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; - - dev_dbg(dev, "remote endpoint port %d with 
%d lanes added\n", - s_asd->csi2.port, s_asd->csi2.nlanes); - - fwnode_handle_put(ep); - - continue; - -err_parse: - fwnode_handle_put(ep); - return ret; - } - - isys->notifier.ops = &isys_async_ops; - ret = v4l2_async_nf_register(&isys->notifier); - if (ret) { - dev_err(dev, "failed to register async notifier : %d\n", ret); - v4l2_async_nf_cleanup(&isys->notifier); - } - - return ret; -} - -static void isys_notifier_cleanup(struct ipu6_isys *isys) -{ - v4l2_async_nf_unregister(&isys->notifier); - v4l2_async_nf_cleanup(&isys->notifier); -} - -static int isys_register_devices(struct ipu6_isys *isys) -{ - struct device *dev = &isys->adev->auxdev.dev; - struct pci_dev *pdev = isys->adev->isp->pdev; - int ret; - - isys->media_dev.dev = dev; - media_device_pci_init(&isys->media_dev, - pdev, IPU6_MEDIA_DEV_MODEL_NAME); - - strscpy(isys->v4l2_dev.name, isys->media_dev.model, - sizeof(isys->v4l2_dev.name)); - - ret = media_device_register(&isys->media_dev); - if (ret < 0) - goto out_media_device_unregister; - - isys->v4l2_dev.mdev = &isys->media_dev; - isys->v4l2_dev.ctrl_handler = NULL; - - ret = v4l2_device_register(dev, &isys->v4l2_dev); - if (ret < 0) - goto out_media_device_unregister; - - ret = isys_register_video_devices(isys); - if (ret) - goto out_v4l2_device_unregister; - - ret = isys_csi2_register_subdevices(isys); - if (ret) - goto out_isys_unregister_video_device; - - ret = isys_csi2_create_media_links(isys); - if (ret) - goto out_isys_unregister_subdevices; - - ret = isys_notifier_init(isys); - if (ret) - goto out_isys_unregister_subdevices; - - return 0; - -out_isys_unregister_subdevices: - isys_csi2_unregister_subdevices(isys); - -out_isys_unregister_video_device: - isys_unregister_video_devices(isys); - -out_v4l2_device_unregister: - v4l2_device_unregister(&isys->v4l2_dev); - -out_media_device_unregister: - media_device_unregister(&isys->media_dev); - media_device_cleanup(&isys->media_dev); - - dev_err(dev, "failed to register isys devices\n"); - - return 
ret; -} - -static void isys_unregister_devices(struct ipu6_isys *isys) -{ - isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); - v4l2_device_unregister(&isys->v4l2_dev); - media_device_unregister(&isys->media_dev); - media_device_cleanup(&isys->media_dev); -} - -static int isys_runtime_pm_resume(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - struct ipu6_device *isp = adev->isp; - unsigned long flags; - int ret; - - if (!isys) - return 0; - - ret = ipu6_mmu_hw_init(adev->mmu); - if (ret) - return ret; - - cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); - - ret = ipu6_buttress_start_tsc_sync(isp); - if (ret) - return ret; - - spin_lock_irqsave(&isys->power_lock, flags); - isys->power = 1; - spin_unlock_irqrestore(&isys->power_lock, flags); - - isys_setup_hw(isys); - - set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON); - - return 0; -} - -static int isys_runtime_pm_suspend(struct device *dev) -{ - struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); - struct ipu6_isys *isys; - unsigned long flags; - - isys = dev_get_drvdata(dev); - if (!isys) - return 0; - - spin_lock_irqsave(&isys->power_lock, flags); - isys->power = 0; - spin_unlock_irqrestore(&isys->power_lock, flags); - - mutex_lock(&isys->mutex); - isys->need_reset = false; - mutex_unlock(&isys->mutex); - - isys->phy_termcal_val = 0; - cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); - - set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF); - - ipu6_mmu_hw_cleanup(adev->mmu); - - return 0; -} - -static int isys_suspend(struct device *dev) -{ - struct ipu6_isys *isys = dev_get_drvdata(dev); - - /* If stream is open, refuse to suspend */ - if (isys->stream_opened) - return -EBUSY; - - return 0; -} - -static int isys_resume(struct device *dev) -{ - return 0; -} - -static const struct dev_pm_ops isys_pm_ops = { - .runtime_suspend = isys_runtime_pm_suspend, - .runtime_resume = 
isys_runtime_pm_resume, - .suspend = isys_suspend, - .resume = isys_resume, -}; - -static void free_fw_msg_bufs(struct ipu6_isys *isys) -{ - struct isys_fw_msgs *fwmsg, *safe; - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) - ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, - fwmsg->dma_addr, 0); - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) - ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg, - fwmsg->dma_addr, 0); -} - -static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) -{ - struct isys_fw_msgs *addr; - dma_addr_t dma_addr; - unsigned long flags; - unsigned int i; - - for (i = 0; i < amount; i++) { - addr = ipu6_dma_alloc(isys->adev, sizeof(*addr), - &dma_addr, GFP_KERNEL, 0); - if (!addr) - break; - addr->dma_addr = dma_addr; - - spin_lock_irqsave(&isys->listlock, flags); - list_add(&addr->head, &isys->framebuflist); - spin_unlock_irqrestore(&isys->listlock, flags); - } - - if (i == amount) - return 0; - - spin_lock_irqsave(&isys->listlock, flags); - while (!list_empty(&isys->framebuflist)) { - addr = list_first_entry(&isys->framebuflist, - struct isys_fw_msgs, head); - list_del(&addr->head); - spin_unlock_irqrestore(&isys->listlock, flags); - ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), addr, - addr->dma_addr, 0); - spin_lock_irqsave(&isys->listlock, flags); - } - spin_unlock_irqrestore(&isys->listlock, flags); - - return -ENOMEM; -} - -struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream) -{ - struct ipu6_isys *isys = stream->isys; - struct device *dev = &isys->adev->auxdev.dev; - struct isys_fw_msgs *msg; - unsigned long flags; - int ret; - - spin_lock_irqsave(&isys->listlock, flags); - if (list_empty(&isys->framebuflist)) { - spin_unlock_irqrestore(&isys->listlock, flags); - dev_dbg(dev, "Frame list empty\n"); - - ret = alloc_fw_msg_bufs(isys, 5); - if (ret < 0) - return NULL; - - spin_lock_irqsave(&isys->listlock, flags); - if 
(list_empty(&isys->framebuflist)) { - spin_unlock_irqrestore(&isys->listlock, flags); - dev_err(dev, "Frame list empty\n"); - return NULL; - } - } - msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); - list_move(&msg->head, &isys->framebuflist_fw); - spin_unlock_irqrestore(&isys->listlock, flags); - memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); - - return msg; -} - -void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys) -{ - struct isys_fw_msgs *fwmsg, *fwmsg0; - unsigned long flags; - - spin_lock_irqsave(&isys->listlock, flags); - list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) - list_move(&fwmsg->head, &isys->framebuflist); - spin_unlock_irqrestore(&isys->listlock, flags); -} - -void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data) -{ - struct isys_fw_msgs *msg; - unsigned long flags; - void *ptr = (void *)data; - - if (!ptr) - return; - - spin_lock_irqsave(&isys->listlock, flags); - msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); - list_move(&msg->head, &isys->framebuflist); - spin_unlock_irqrestore(&isys->listlock, flags); -} - -static int isys_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *auxdev_id) -{ - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata; - struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); - struct ipu6_device *isp = adev->isp; - const struct firmware *fw; - struct ipu6_isys *isys; - unsigned int i; - int ret; - - if (!isp->bus_ready_to_probe) - return -EPROBE_DEFER; - - isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL); - if (!isys) - return -ENOMEM; - - adev->auxdrv_data = - (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; - adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver); - isys->adev = adev; - isys->pdata = adev->pdata; - csi2_pdata = &isys->pdata->ipdata->csi2; - - isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports, - sizeof(*isys->csi2), GFP_KERNEL); - if (!isys->csi2) - return -ENOMEM; - - ret = 
ipu6_mmu_hw_init(adev->mmu); - if (ret) - return ret; - - /* initial sensor type */ - isys->sensor_type = isys->pdata->ipdata->sensor_type_start; - - spin_lock_init(&isys->streams_lock); - spin_lock_init(&isys->power_lock); - isys->power = 0; - isys->phy_termcal_val = 0; - - mutex_init(&isys->mutex); - mutex_init(&isys->stream_mutex); - - spin_lock_init(&isys->listlock); - INIT_LIST_HEAD(&isys->framebuflist); - INIT_LIST_HEAD(&isys->framebuflist_fw); - - isys->icache_prefetch = 0; - - dev_set_drvdata(&auxdev->dev, isys); - - isys_stream_init(isys); - - if (!isp->secure_mode) { - fw = isp->cpd_fw; - ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt); - if (ret) - goto release_firmware; - - ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data); - if (ret) - goto remove_shared_buffer; - } - - cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); - - ret = alloc_fw_msg_bufs(isys, 20); - if (ret < 0) - goto out_remove_pkg_dir_shared_buffer; - - isys_iwake_watermark_init(isys); - - if (is_ipu6se(adev->isp->hw_ver)) - isys->phy_set_power = ipu6_isys_jsl_phy_set_power; - else if (is_ipu6ep_mtl(adev->isp->hw_ver)) - isys->phy_set_power = ipu6_isys_dwc_phy_set_power; - else - isys->phy_set_power = ipu6_isys_mcd_phy_set_power; - - ret = isys_register_devices(isys); - if (ret) - goto free_fw_msg_bufs; - - ipu6_mmu_hw_cleanup(adev->mmu); - - return 0; - -free_fw_msg_bufs: - free_fw_msg_bufs(isys); -out_remove_pkg_dir_shared_buffer: - cpu_latency_qos_remove_request(&isys->pm_qos); - if (!isp->secure_mode) - ipu6_cpd_free_pkg_dir(adev); -remove_shared_buffer: - if (!isp->secure_mode) - ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); -release_firmware: - if (!isp->secure_mode) - release_firmware(adev->fw); - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) - mutex_destroy(&isys->streams[i].mutex); - - mutex_destroy(&isys->mutex); - mutex_destroy(&isys->stream_mutex); - - ipu6_mmu_hw_cleanup(adev->mmu); - - return ret; -} - -static void isys_remove(struct 
auxiliary_device *auxdev) -{ - struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); - struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); - struct ipu6_device *isp = adev->isp; - unsigned int i; - - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); - isys_notifier_cleanup(isys); - - cpu_latency_qos_remove_request(&isys->pm_qos); - - if (!isp->secure_mode) { - ipu6_cpd_free_pkg_dir(adev); - ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); - release_firmware(adev->fw); - } - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) - mutex_destroy(&isys->streams[i].mutex); - - isys_iwake_watermark_cleanup(isys); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); -} - -struct fwmsg { - int type; - char *msg; - bool valid_ts; -}; - -static const struct fwmsg fw_msg[] = { - {IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, - "STREAM_START_AND_CAPTURE_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, - "STREAM_START_AND_CAPTURE_DONE", 1}, - {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, - {IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, - {IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, - {IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, - {-1, "UNKNOWN MESSAGE", 0} -}; - -static u32 resp_type_to_index(int type) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(fw_msg); i++) - if (fw_msg[i].type == type) - return i; - - return ARRAY_SIZE(fw_msg) - 1; -} - -static int isys_isr_one(struct ipu6_bus_device *adev) -{ - 
struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev); - struct ipu6_fw_isys_resp_info_abi *resp; - struct ipu6_isys_stream *stream; - struct ipu6_isys_csi2 *csi2 = NULL; - u32 index; - u64 ts; - - if (!isys->fwcom) - return 1; - - resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); - if (!resp) - return 1; - - ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; - - index = resp_type_to_index(resp->type); - dev_dbg(&adev->auxdev.dev, - "FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n", - resp->type, fw_msg[index].msg, resp->stream_handle, - fw_msg[index].valid_ts ? ts : 0, resp->pin_id); - - if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) - /* Suspension is kind of special case: not enough buffers */ - dev_dbg(&adev->auxdev.dev, - "FW error resp SUSPENSION, details %d\n", - resp->error_info.error_details); - else if (resp->error_info.error) - dev_dbg(&adev->auxdev.dev, - "FW error resp error %d, details %d\n", - resp->error_info.error, resp->error_info.error_details); - - if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) { - dev_err(&adev->auxdev.dev, "bad stream handle %u\n", - resp->stream_handle); - goto leave; - } - - stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle); - if (!stream) { - dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n", - resp->stream_handle); - goto leave; - } - stream->error = resp->error_info.error; - - csi2 = ipu6_isys_subdev_to_csi2(stream->asd); - - switch (resp->type) { - case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: - complete(&stream->stream_open_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: - complete(&stream->stream_close_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK: - complete(&stream->stream_start_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: - complete(&stream->stream_start_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: - 
complete(&stream->stream_stop_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: - complete(&stream->stream_stop_completion); - break; - case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY: - /* - * firmware only release the capture msg until software - * get pin_data_ready event - */ - ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id); - if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS && - stream->output_pins_queue[resp->pin_id]) - ipu6_isys_queue_buf_ready(stream, resp); - else - dev_warn(&adev->auxdev.dev, - "%d:No queue for pin id %d\n", - resp->stream_handle, resp->pin_id); - if (csi2) - ipu6_isys_csi2_error(csi2); - - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: - break; - case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: - case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: - break; - case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF: - - ipu6_isys_csi2_sof_event_by_stream(stream); - stream->seq[stream->seq_index].sequence = - atomic_read(&stream->sequence) - 1; - stream->seq[stream->seq_index].timestamp = ts; - dev_dbg(&adev->auxdev.dev, - "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", - resp->stream_handle, - stream->seq[stream->seq_index].sequence, ts); - stream->seq_index = (stream->seq_index + 1) - % IPU6_ISYS_MAX_PARALLEL_SOF; - break; - case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF: - ipu6_isys_csi2_eof_event_by_stream(stream); - dev_dbg(&adev->auxdev.dev, - "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", - resp->stream_handle, - stream->seq[stream->seq_index].sequence, ts); - break; - case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY: - break; - default: - dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n", - resp->stream_handle, resp->type); - break; - } - - ipu6_isys_put_stream(stream); -leave: - ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); - return 0; -} - -static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = { - .isr = isys_isr, - .isr_threaded = NULL, - .wake_isr_thread = false, -}; - 
-static const struct auxiliary_device_id ipu6_isys_id_table[] = { - { - .name = "intel_ipu6.isys", - .driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data, - }, - { } -}; -MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table); - -static struct auxiliary_driver isys_driver = { - .name = IPU6_ISYS_NAME, - .probe = isys_probe, - .remove = isys_remove, - .id_table = ipu6_isys_id_table, - .driver = { - .pm = &isys_pm_ops, - }, -}; - -module_auxiliary_driver(isys_driver); - -MODULE_AUTHOR("Sakari Ailus "); -MODULE_AUTHOR("Tianshu Qiu "); -MODULE_AUTHOR("Bingbu Cao "); -MODULE_AUTHOR("Yunliang Ding "); -MODULE_AUTHOR("Hongju Wang "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel IPU6 input system driver"); -MODULE_IMPORT_NS("INTEL_IPU6"); -MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h deleted file mode 100644 index 0e2c8b7..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h +++ /dev/null @@ -1,202 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_ISYS_H -#define IPU6_ISYS_H - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-fw-isys.h" -#include "ipu6-isys-csi2.h" -#include "ipu6-isys-video.h" - -struct ipu6_bus_device; - -#define IPU6_ISYS_ENTITY_PREFIX "Intel IPU6" -/* FW support max 16 streams */ -#define IPU6_ISYS_MAX_STREAMS 16 -#define ISYS_UNISPART_IRQS (IPU6_ISYS_UNISPART_IRQ_SW | \ - IPU6_ISYS_UNISPART_IRQ_CSI0 | \ - IPU6_ISYS_UNISPART_IRQ_CSI1) - -/* - * Current message queue configuration. These must be big enough - * so that they never gets full. 
Queues are located in system memory - */ -#define IPU6_ISYS_SIZE_RECV_QUEUE 40 -#define IPU6_ISYS_SIZE_SEND_QUEUE 40 -#define IPU6_ISYS_SIZE_PROXY_RECV_QUEUE 5 -#define IPU6_ISYS_SIZE_PROXY_SEND_QUEUE 5 -#define IPU6_ISYS_NUM_RECV_QUEUE 1 - -#define IPU6_ISYS_MIN_WIDTH 2U -#define IPU6_ISYS_MIN_HEIGHT 1U -#define IPU6_ISYS_MAX_WIDTH 4672U -#define IPU6_ISYS_MAX_HEIGHT 3416U - -/* the threshold granularity is 2KB on IPU6 */ -#define IPU6_SRAM_GRANULARITY_SHIFT 11 -#define IPU6_SRAM_GRANULARITY_SIZE 2048 -/* the threshold granularity is 1KB on IPU6SE */ -#define IPU6SE_SRAM_GRANULARITY_SHIFT 10 -#define IPU6SE_SRAM_GRANULARITY_SIZE 1024 -/* IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6 */ -#define IPU6_MAX_SRAM_SIZE (200 << 10) -/* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE */ -#define IPU6SE_MAX_SRAM_SIZE (96 << 10) - -#define IPU6EP_LTR_VALUE 200 -#define IPU6EP_MIN_MEMOPEN_TH 0x4 -#define IPU6EP_MTL_LTR_VALUE 1023 -#define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc - -struct ltr_did { - union { - u32 value; - struct { - u8 val0; - u8 val1; - u8 val2; - u8 val3; - } bits; - } lut_ltr; - union { - u32 value; - struct { - u8 th0; - u8 th1; - u8 th2; - u8 th3; - } bits; - } lut_fill_time; -}; - -struct isys_iwake_watermark { - bool iwake_enabled; - bool force_iwake_disable; - u32 iwake_threshold; - u64 isys_pixelbuffer_datarate; - struct ltr_did ltrdid; - struct mutex mutex; /* protect whole struct */ - struct ipu6_isys *isys; - struct list_head video_list; -}; - -struct ipu6_isys_csi2_config { - u32 nlanes; - u32 port; -}; - -struct sensor_async_sd { - struct v4l2_async_connection asc; - struct ipu6_isys_csi2_config csi2; -}; - -/* - * struct ipu6_isys - * - * @media_dev: Media device - * @v4l2_dev: V4L2 device - * @adev: ISYS bus device - * @power: Is ISYS powered on or not? - * @isr_bits: Which bits does the ISR handle? 
- * @power_lock: Serialise access to power (power state in general) - * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers - * @streams_lock: serialise access to streams - * @streams: streams per firmware stream ID - * @fwcom: fw communication layer private pointer - * or optional external library private pointer - * @phy_termcal_val: the termination calibration value, only used for DWC PHY - * @need_reset: Isys requires d0i0->i3 transition - * @ref_count: total number of callers fw open - * @mutex: serialise access isys video open/release related operations - * @stream_mutex: serialise stream start and stop, queueing requests - * @pdata: platform data pointer - * @csi2: CSI-2 receivers - */ -struct ipu6_isys { - struct media_device media_dev; - struct v4l2_device v4l2_dev; - struct ipu6_bus_device *adev; - - int power; - spinlock_t power_lock; - u32 isr_csi2_bits; - u32 csi2_rx_ctrl_cached; - spinlock_t streams_lock; - struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS]; - int streams_ref_count[IPU6_ISYS_MAX_STREAMS]; - void *fwcom; - u32 phy_termcal_val; - bool need_reset; - bool icache_prefetch; - bool csi2_cse_ipc_not_supported; - unsigned int ref_count; - unsigned int stream_opened; - unsigned int sensor_type; - - struct mutex mutex; - struct mutex stream_mutex; - - struct ipu6_isys_pdata *pdata; - - int (*phy_set_power)(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); - - struct ipu6_isys_csi2 *csi2; - - struct pm_qos_request pm_qos; - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; - struct v4l2_async_notifier notifier; - struct isys_iwake_watermark iwake_watermark; -}; - -struct isys_fw_msgs { - union { - u64 dummy; - struct ipu6_fw_isys_frame_buff_set_abi frame; - struct ipu6_fw_isys_stream_cfg_data_abi stream; - } fw_msg; - struct list_head head; - dma_addr_t dma_addr; -}; - -struct isys_fw_msgs 
*ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); -void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data); -void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); - -extern const struct v4l2_ioctl_ops ipu6_isys_ioctl_ops; - -void isys_setup_hw(struct ipu6_isys *isys); -irqreturn_t isys_isr(struct ipu6_bus_device *adev); -void update_watermark_setting(struct ipu6_isys *isys); - -int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); - -int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); - -int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys, - struct ipu6_isys_csi2_config *cfg, - const struct ipu6_isys_csi2_timing *timing, - bool on); -#endif /* IPU6_ISYS_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c deleted file mode 100644 index 6d1c0b9..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.c +++ /dev/null @@ -1,806 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipu6.h" -#include "ipu6-dma.h" -#include "ipu6-mmu.h" -#include "ipu6-platform-regs.h" - -#define ISP_PAGE_SHIFT 12 -#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) -#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) - -#define ISP_L1PT_SHIFT 22 -#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) - -#define ISP_L2PT_SHIFT 12 -#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) - -#define ISP_L1PT_PTES 1024 -#define ISP_L2PT_PTES 1024 - -#define ISP_PADDR_SHIFT 12 - -#define REG_TLB_INVALIDATE 0x0000 - -#define REG_L1_PHYS 0x0004 /* 27-bit pfn */ -#define 
REG_INFO 0x0008 - -#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) - -static void tlb_invalidate(struct ipu6_mmu *mmu) -{ - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&mmu->ready_lock, flags); - if (!mmu->ready) { - spin_unlock_irqrestore(&mmu->ready_lock, flags); - return; - } - - for (i = 0; i < mmu->nr_mmus; i++) { - /* - * To avoid the HW bug induced dead lock in some of the IPU6 - * MMUs on successive invalidate calls, we need to first do a - * read to the page table base before writing the invalidate - * register. MMUs which need to implement this WA, will have - * the insert_read_before_invalidate flags set as true. - * Disregard the return value of the read. - */ - if (mmu->mmu_hw[i].insert_read_before_invalidate) - readl(mmu->mmu_hw[i].base + REG_L1_PHYS); - - writel(0xffffffff, mmu->mmu_hw[i].base + - REG_TLB_INVALIDATE); - /* - * The TLB invalidation is a "single cycle" (IOMMU clock cycles) - * When the actual MMIO write reaches the IPU6 TLB Invalidate - * register, wmb() will force the TLB invalidate out if the CPU - * attempts to update the IOMMU page table (or sooner). 
- */ - wmb(); - } - spin_unlock_irqrestore(&mmu->ready_lock, flags); -} - -#ifdef DEBUG -static void page_table_dump(struct ipu6_mmu_info *mmu_info) -{ - u32 l1_idx; - - dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n"); - - for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { - u32 l2_idx; - u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT; - phys_addr_t l2_phys; - - if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) - continue; - - l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx];) - dev_dbg(mmu_info->dev, - "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n", - l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys); - - for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { - u32 *l2_pt = mmu_info->l2_pts[l1_idx]; - u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); - - if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval) - continue; - - dev_dbg(mmu_info->dev, - "\tl2 entry %u; iova 0x%8.8x, phys %pa\n", - l2_idx, iova2, - TBL_PHYS_ADDR(l2_pt[l2_idx])); - } - } - - dev_dbg(mmu_info->dev, "end IOMMU page table dump\n"); -} -#endif /* DEBUG */ - -static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr) -{ - dma_addr_t dma; - - dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mmu_info->dev, dma)) - return 0; - - return dma; -} - -static int get_dummy_page(struct ipu6_mmu_info *mmu_info) -{ - void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - dma_addr_t dma; - - if (!pt) - return -ENOMEM; - - dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt); - - dma = map_single(mmu_info, pt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map dummy page\n"); - goto err_free_page; - } - - mmu_info->dummy_page = pt; - mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; - - return 0; - -err_free_page: - free_page((unsigned long)pt); - return -ENOMEM; -} - -static void free_dummy_page(struct ipu6_mmu_info *mmu_info) -{ - dma_unmap_single(mmu_info->dev, - TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), - 
PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->dummy_page); -} - -static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) -{ - u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - dma_addr_t dma; - unsigned int i; - - if (!pt) - return -ENOMEM; - - dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt); - - dma = map_single(mmu_info, pt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map l2pt page\n"); - goto err_free_page; - } - - for (i = 0; i < ISP_L2PT_PTES; i++) - pt[i] = mmu_info->dummy_page_pteval; - - mmu_info->dummy_l2_pt = pt; - mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; - - return 0; - -err_free_page: - free_page((unsigned long)pt); - return -ENOMEM; -} - -static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info) -{ - dma_unmap_single(mmu_info->dev, - TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->dummy_l2_pt); -} - -static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info) -{ - u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - dma_addr_t dma; - unsigned int i; - - if (!pt) - return NULL; - - dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt); - - for (i = 0; i < ISP_L1PT_PTES; i++) - pt[i] = mmu_info->dummy_l2_pteval; - - dma = map_single(mmu_info, pt); - if (!dma) { - dev_err(mmu_info->dev, "Failed to map l1pt page\n"); - goto err_free_page; - } - - mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; - dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma); - - return pt; - -err_free_page: - free_page((unsigned long)pt); - return NULL; -} - -static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info) -{ - u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - unsigned int i; - - if (!pt) - return NULL; - - dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt); - - for (i = 0; i < ISP_L1PT_PTES; i++) - pt[i] = mmu_info->dummy_page_pteval; - - return pt; -} - -static void l2_unmap(struct 
ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t dummy, size_t size) -{ - unsigned int l2_entries; - unsigned int l2_idx; - unsigned long flags; - u32 l1_idx; - u32 *l2_pt; - - spin_lock_irqsave(&mmu_info->lock, flags); - for (l1_idx = iova >> ISP_L1PT_SHIFT; - size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { - dev_dbg(mmu_info->dev, - "unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n", - l1_idx, iova); - - if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { - dev_err(mmu_info->dev, - "unmap not mapped iova 0x%8.8lx l1 index %u\n", - iova, l1_idx); - continue; - } - l2_pt = mmu_info->l2_pts[l1_idx]; - - l2_entries = 0; - for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; - size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { - phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]); - - dev_dbg(mmu_info->dev, - "unmap l2 index %u with pteval 0x%p\n", - l2_idx, &pteval); - l2_pt[l2_idx] = mmu_info->dummy_page_pteval; - - iova += ISP_PAGE_SIZE; - size -= ISP_PAGE_SIZE; - - l2_entries++; - } - - WARN_ON_ONCE(!l2_entries); - clflush_cache_range(&l2_pt[l2_idx - l2_entries], - sizeof(l2_pt[0]) * l2_entries); - } - - WARN_ON_ONCE(size); - spin_unlock_irqrestore(&mmu_info->lock, flags); -} - -static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size) -{ - struct device *dev = mmu_info->dev; - unsigned int l2_entries; - u32 *l2_pt, *l2_virt; - unsigned int l2_idx; - unsigned long flags; - size_t mapped = 0; - dma_addr_t dma; - u32 l1_entry; - u32 l1_idx; - int err = 0; - - spin_lock_irqsave(&mmu_info->lock, flags); - - paddr = ALIGN(paddr, ISP_PAGE_SIZE); - for (l1_idx = iova >> ISP_L1PT_SHIFT; - size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { - dev_dbg(dev, - "mapping l2 page table for l1 index %u (iova %8.8x)\n", - l1_idx, (u32)iova); - - l1_entry = mmu_info->l1_pt[l1_idx]; - if (l1_entry == mmu_info->dummy_l2_pteval) { - l2_virt = mmu_info->l2_pts[l1_idx]; - if (likely(!l2_virt)) { - l2_virt = 
alloc_l2_pt(mmu_info); - if (!l2_virt) { - err = -ENOMEM; - goto error; - } - } - - dma = map_single(mmu_info, l2_virt); - if (!dma) { - dev_err(dev, "Failed to map l2pt page\n"); - free_page((unsigned long)l2_virt); - err = -EINVAL; - goto error; - } - - l1_entry = dma >> ISP_PADDR_SHIFT; - - dev_dbg(dev, "page for l1_idx %u %p allocated\n", - l1_idx, l2_virt); - mmu_info->l1_pt[l1_idx] = l1_entry; - mmu_info->l2_pts[l1_idx] = l2_virt; - - clflush_cache_range(&mmu_info->l1_pt[l1_idx], - sizeof(mmu_info->l1_pt[l1_idx])); - } - - l2_pt = mmu_info->l2_pts[l1_idx]; - l2_entries = 0; - - for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; - size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { - l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; - - dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx, - l2_pt[l2_idx]); - - iova += ISP_PAGE_SIZE; - paddr += ISP_PAGE_SIZE; - mapped += ISP_PAGE_SIZE; - size -= ISP_PAGE_SIZE; - - l2_entries++; - } - - WARN_ON_ONCE(!l2_entries); - clflush_cache_range(&l2_pt[l2_idx - l2_entries], - sizeof(l2_pt[0]) * l2_entries); - } - - spin_unlock_irqrestore(&mmu_info->lock, flags); - - return 0; - -error: - spin_unlock_irqrestore(&mmu_info->lock, flags); - /* unroll mapping in case something went wrong */ - if (mapped) - l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped); - - return err; -} - -static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size) -{ - u32 iova_start = round_down(iova, ISP_PAGE_SIZE); - u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); - - dev_dbg(mmu_info->dev, - "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n", - iova_start, iova_end, size, &paddr); - - return l2_map(mmu_info, iova_start, paddr, size); -} - -static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, - unsigned long iova, size_t size) -{ - l2_unmap(mmu_info, iova, 0, size); -} - -static int allocate_trash_buffer(struct ipu6_mmu *mmu) -{ - unsigned int n_pages = 
PFN_UP(IPU6_MMUV2_TRASH_RANGE); - struct iova *iova; - unsigned int i; - dma_addr_t dma; - unsigned long iova_addr; - int ret; - - /* Allocate 8MB in iova range */ - iova = alloc_iova(&mmu->dmap->iovad, n_pages, - PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); - if (!iova) { - dev_err(mmu->dev, "cannot allocate iova range for trash\n"); - return -ENOMEM; - } - - dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) { - dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n"); - ret = -ENOMEM; - goto out_free_iova; - } - - mmu->pci_trash_page = dma; - - /* - * Map the 8MB iova address range to the same physical trash page - * mmu->trash_page which is already reserved at the probe - */ - iova_addr = iova->pfn_lo; - for (i = 0; i < n_pages; i++) { - ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), - mmu->pci_trash_page, PAGE_SIZE); - if (ret) { - dev_err(mmu->dev, - "mapping trash buffer range failed\n"); - goto out_unmap; - } - - iova_addr++; - } - - mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo); - dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n", - mmu->mmid, (unsigned int)mmu->iova_trash_page); - return 0; - -out_unmap: - ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page, - PAGE_SIZE, DMA_BIDIRECTIONAL); -out_free_iova: - __free_iova(&mmu->dmap->iovad, iova); - return ret; -} - -int ipu6_mmu_hw_init(struct ipu6_mmu *mmu) -{ - struct ipu6_mmu_info *mmu_info; - unsigned long flags; - unsigned int i; - - mmu_info = mmu->dmap->mmu_info; - - /* Initialise the each MMU HW block */ - for (i = 0; i < mmu->nr_mmus; i++) { - struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; - unsigned int j; - u16 block_addr; - - /* Write page table address per MMU */ - writel((phys_addr_t)mmu_info->l1_pt_dma, - mmu->mmu_hw[i].base + REG_L1_PHYS); - - /* Set info 
bits per MMU */ - writel(mmu->mmu_hw[i].info_bits, - mmu->mmu_hw[i].base + REG_INFO); - - /* Configure MMU TLB stream configuration for L1 */ - for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams; - block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) { - if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) { - dev_err(mmu->dev, "invalid L1 configuration\n"); - return -EINVAL; - } - - /* Write block start address for each streams */ - writel(block_addr, mmu_hw->base + - mmu_hw->l1_stream_id_reg_offset + 4 * j); - } - - /* Configure MMU TLB stream configuration for L2 */ - for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams; - block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) { - if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) { - dev_err(mmu->dev, "invalid L2 configuration\n"); - return -EINVAL; - } - - writel(block_addr, mmu_hw->base + - mmu_hw->l2_stream_id_reg_offset + 4 * j); - } - } - - if (!mmu->trash_page) { - int ret; - - mmu->trash_page = alloc_page(GFP_KERNEL); - if (!mmu->trash_page) { - dev_err(mmu->dev, "insufficient memory for trash buffer\n"); - return -ENOMEM; - } - - ret = allocate_trash_buffer(mmu); - if (ret) { - __free_page(mmu->trash_page); - mmu->trash_page = NULL; - dev_err(mmu->dev, "trash buffer allocation failed\n"); - return ret; - } - } - - spin_lock_irqsave(&mmu->ready_lock, flags); - mmu->ready = true; - spin_unlock_irqrestore(&mmu->ready_lock, flags); - - return 0; -} -EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, "INTEL_IPU6"); - -static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp) -{ - struct ipu6_mmu_info *mmu_info; - int ret; - - mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL); - if (!mmu_info) - return NULL; - - mmu_info->aperture_start = 0; - mmu_info->aperture_end = - (dma_addr_t)DMA_BIT_MASK(isp->secure_mode ? 
- IPU6_MMU_ADDR_BITS : - IPU6_MMU_ADDR_BITS_NON_SECURE); - mmu_info->pgsize_bitmap = SZ_4K; - mmu_info->dev = &isp->pdev->dev; - - ret = get_dummy_page(mmu_info); - if (ret) - goto err_free_info; - - ret = alloc_dummy_l2_pt(mmu_info); - if (ret) - goto err_free_dummy_page; - - mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts)); - if (!mmu_info->l2_pts) - goto err_free_dummy_l2_pt; - - /* - * We always map the L1 page table (a single page as well as - * the L2 page tables). - */ - mmu_info->l1_pt = alloc_l1_pt(mmu_info); - if (!mmu_info->l1_pt) - goto err_free_l2_pts; - - spin_lock_init(&mmu_info->lock); - - dev_dbg(mmu_info->dev, "domain initialised\n"); - - return mmu_info; - -err_free_l2_pts: - vfree(mmu_info->l2_pts); -err_free_dummy_l2_pt: - free_dummy_l2_pt(mmu_info); -err_free_dummy_page: - free_dummy_page(mmu_info); -err_free_info: - kfree(mmu_info); - - return NULL; -} - -void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu) -{ - unsigned long flags; - - spin_lock_irqsave(&mmu->ready_lock, flags); - mmu->ready = false; - spin_unlock_irqrestore(&mmu->ready_lock, flags); -} -EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, "INTEL_IPU6"); - -static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp) -{ - struct ipu6_dma_mapping *dmap; - - dmap = kzalloc(sizeof(*dmap), GFP_KERNEL); - if (!dmap) - return NULL; - - dmap->mmu_info = ipu6_mmu_alloc(isp); - if (!dmap->mmu_info) { - kfree(dmap); - return NULL; - } - - init_iova_domain(&dmap->iovad, SZ_4K, 1); - dmap->mmu_info->dmap = dmap; - - dev_dbg(&isp->pdev->dev, "alloc mapping\n"); - - iova_cache_get(); - - return dmap; -} - -phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, - dma_addr_t iova) -{ - phys_addr_t phy_addr; - unsigned long flags; - u32 *l2_pt; - - spin_lock_irqsave(&mmu_info->lock, flags); - l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT]; - phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]; - phy_addr <<= ISP_PAGE_SHIFT; - 
spin_unlock_irqrestore(&mmu_info->lock, flags); - - return phy_addr; -} - -void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, - size_t size) -{ - unsigned int min_pagesz; - - dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size); - - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); - - /* - * The virtual address and the size of the mapping must be - * aligned (at least) to the size of the smallest page supported - * by the hardware - */ - if (!IS_ALIGNED(iova | size, min_pagesz)) { - dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", - iova, size, min_pagesz); - return; - } - - __ipu6_mmu_unmap(mmu_info, iova, size); -} - -int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size) -{ - unsigned int min_pagesz; - - if (mmu_info->pgsize_bitmap == 0UL) - return -ENODEV; - - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); - - /* - * both the virtual address and the physical one, as well as - * the size of the mapping, must be aligned (at least) to the - * size of the smallest page supported by the hardware - */ - if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { - dev_err(mmu_info->dev, - "unaligned: iova %lx pa %pa size %zx min_pagesz %x\n", - iova, &paddr, size, min_pagesz); - return -EINVAL; - } - - dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n", - iova, &paddr, size); - - return __ipu6_mmu_map(mmu_info, iova, paddr, size); -} - -static void ipu6_mmu_destroy(struct ipu6_mmu *mmu) -{ - struct ipu6_dma_mapping *dmap = mmu->dmap; - struct ipu6_mmu_info *mmu_info = dmap->mmu_info; - struct iova *iova; - u32 l1_idx; - - if (mmu->iova_trash_page) { - iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page)); - if (iova) { - /* unmap and free the trash buffer iova */ - ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo), - PFN_PHYS(iova_size(iova))); - 
__free_iova(&dmap->iovad, iova); - } else { - dev_err(mmu->dev, "trash buffer iova not found.\n"); - } - - mmu->iova_trash_page = 0; - dma_unmap_page(mmu_info->dev, mmu->pci_trash_page, - PAGE_SIZE, DMA_BIDIRECTIONAL); - mmu->pci_trash_page = 0; - __free_page(mmu->trash_page); - } - - for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { - if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) { - dma_unmap_single(mmu_info->dev, - TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]), - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->l2_pts[l1_idx]); - } - } - - vfree(mmu_info->l2_pts); - free_dummy_page(mmu_info); - dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma), - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)mmu_info->dummy_l2_pt); - free_page((unsigned long)mmu_info->l1_pt); - kfree(mmu_info); -} - -struct ipu6_mmu *ipu6_mmu_init(struct device *dev, - void __iomem *base, int mmid, - const struct ipu6_hw_variants *hw) -{ - struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev)); - struct ipu6_mmu_pdata *pdata; - struct ipu6_mmu *mmu; - unsigned int i; - - if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES) - return ERR_PTR(-EINVAL); - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < hw->nr_mmus; i++) { - struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; - const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i]; - - if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS || - src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS) - return ERR_PTR(-EINVAL); - - *pdata_mmu = *src_mmu; - pdata_mmu->base = base + src_mmu->offset; - } - - mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL); - if (!mmu) - return ERR_PTR(-ENOMEM); - - mmu->mmid = mmid; - mmu->mmu_hw = pdata->mmu_hw; - mmu->nr_mmus = hw->nr_mmus; - mmu->tlb_invalidate = tlb_invalidate; - mmu->ready = false; - INIT_LIST_HEAD(&mmu->vma_list); - spin_lock_init(&mmu->ready_lock); - - mmu->dmap = alloc_dma_mapping(isp); 
- if (!mmu->dmap) { - dev_err(dev, "can't alloc dma mapping\n"); - return ERR_PTR(-ENOMEM); - } - - return mmu; -} - -void ipu6_mmu_cleanup(struct ipu6_mmu *mmu) -{ - struct ipu6_dma_mapping *dmap = mmu->dmap; - - ipu6_mmu_destroy(mmu); - mmu->dmap = NULL; - iova_cache_put(); - put_iova_domain(&dmap->iovad); - kfree(dmap); -} diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h deleted file mode 100644 index 8e40b4a..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-mmu.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013--2024 Intel Corporation */ - -#ifndef IPU6_MMU_H -#define IPU6_MMU_H - -#define ISYS_MMID 1 -#define PSYS_MMID 0 - -#include -#include -#include - -struct device; -struct page; -struct ipu6_hw_variants; - -struct ipu6_mmu_info { - struct device *dev; - - u32 *l1_pt; - u32 l1_pt_dma; - u32 **l2_pts; - - u32 *dummy_l2_pt; - u32 dummy_l2_pteval; - void *dummy_page; - u32 dummy_page_pteval; - - dma_addr_t aperture_start; - dma_addr_t aperture_end; - unsigned long pgsize_bitmap; - - spinlock_t lock; /* Serialize access to users */ - struct ipu6_dma_mapping *dmap; -}; - -struct ipu6_mmu { - struct list_head node; - - struct ipu6_mmu_hw *mmu_hw; - unsigned int nr_mmus; - unsigned int mmid; - - phys_addr_t pgtbl; - struct device *dev; - - struct ipu6_dma_mapping *dmap; - struct list_head vma_list; - - struct page *trash_page; - dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ - dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */ - - bool ready; - spinlock_t ready_lock; /* Serialize access to bool ready */ - - void (*tlb_invalidate)(struct ipu6_mmu *mmu); -}; - -struct ipu6_mmu *ipu6_mmu_init(struct device *dev, - void __iomem *base, int mmid, - const struct ipu6_hw_variants *hw); -void ipu6_mmu_cleanup(struct ipu6_mmu *mmu); -int ipu6_mmu_hw_init(struct ipu6_mmu *mmu); -void ipu6_mmu_hw_cleanup(struct ipu6_mmu 
*mmu); -int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, - phys_addr_t paddr, size_t size); -void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, - size_t size); -phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, - dma_addr_t iova); -#endif diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h deleted file mode 100644 index efd65e4..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h +++ /dev/null @@ -1,224 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2023--2024 Intel Corporation */ - -#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H -#define IPU6_PLATFORM_BUTTRESS_REGS_H - -#include - -/* IS_WORKPOINT_REQ */ -#define IPU6_BUTTRESS_REG_IS_FREQ_CTL 0x34 -/* PS_WORKPOINT_REQ */ -#define IPU6_BUTTRESS_REG_PS_FREQ_CTL 0x38 - -/* should be tuned for real silicon */ -#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO 0x08 -#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a -#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO 0x0d - -#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10 -#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708 - -#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 -#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK GENMASK(4, 3) - -#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 -#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK GENMASK(7, 6) - -#define IPU6_BUTTRESS_PWR_STATE_DN_DONE 0x0 -#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 -#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 -#define IPU6_BUTTRESS_PWR_STATE_UP_DONE 0x3 - -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0 0x270 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1 0x274 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2 0x278 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_4 0x280 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5 0x284 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6 0x288 -#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7 
0x28c - -#define BUTTRESS_REG_WDT 0x8 -#define BUTTRESS_REG_BTRS_CTRL 0xc -#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) -#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) -#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8) - -#define BUTTRESS_REG_FW_RESET_CTL 0x30 -#define BUTTRESS_FW_RESET_CTL_START BIT(0) -#define BUTTRESS_FW_RESET_CTL_DONE BIT(1) - -#define BUTTRESS_REG_IS_FREQ_CTL 0x34 -#define BUTTRESS_REG_PS_FREQ_CTL 0x38 - -#define BUTTRESS_FREQ_CTL_START BIT(31) -#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL GENMASK(19, 16) -#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK GENMASK(15, 8) -#define BUTTRESS_FREQ_CTL_RATIO_MASK GENMASK(7, 0) - -#define BUTTRESS_REG_PWR_STATE 0x5c - -#define BUTTRESS_PWR_STATE_RESET 0x0 -#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 -#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 -#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 - -#define BUTTRESS_PWR_STATE_HH_STATUS_MASK GENMASK(12, 11) - -enum { - BUTTRESS_PWR_STATE_HH_STATE_IDLE, - BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, - BUTTRESS_PWR_STATE_HH_STATE_DONE, - BUTTRESS_PWR_STATE_HH_STATE_ERR, -}; - -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK GENMASK(23, 19) - -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd -#define 
BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe -#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf - -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK GENMASK(28, 24) - -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 -#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 - -#define BUTTRESS_REG_SECURITY_CTL 0x300 -#define BUTTRESS_REG_SKU 0x314 -#define BUTTRESS_REG_SECURITY_TOUCH 0x318 -#define BUTTRESS_REG_CAMERA_MASK 0x84 - -#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) -#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) - -#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) -#define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) -#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) - -#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 -#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C -#define 
BUTTRESS_REG_FW_SOURCE_SIZE 0x80 - -#define BUTTRESS_REG_ISR_STATUS 0x90 -#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 -#define BUTTRESS_REG_ISR_ENABLE 0x98 -#define BUTTRESS_REG_ISR_CLEAR 0x9C - -#define BUTTRESS_ISR_IS_IRQ BIT(0) -#define BUTTRESS_ISR_PS_IRQ BIT(1) -#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) -#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) -#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) -#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) -#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) -#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) -#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) -#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) -#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) -#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) -#define BUTTRESS_ISR_HW_ASSERTION BIT(12) -#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13) -#define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14) -#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15) -#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16) -#define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17) -#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18) -#define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19) -#define BUTTRESS_ISR_UFI_ERROR BIT(20) - -#define BUTTRESS_REG_IU2CSEDB0 0x100 - -#define BUTTRESS_IU2CSEDB0_BUSY BIT(31) -#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 - -#define BUTTRESS_REG_IU2CSEDATA0 0x104 - -#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 -#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 -#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 -#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 - -#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) -#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) -#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) -#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) - -#define BUTTRESS_REG_IU2CSECSR 0x108 - -#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) -#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) -#define 
BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) -#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) -#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) -#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) - -#define BUTTRESS_REG_CSE2IUDB0 0x304 -#define BUTTRESS_REG_CSE2IUCSR 0x30C -#define BUTTRESS_REG_CSE2IUDATA0 0x308 - -/* 0x20 == NACK, 0xf == unknown command */ -#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 -#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0) - -#define BUTTRESS_REG_ISH2IUCSR 0x50 -#define BUTTRESS_REG_ISH2IUDB0 0x54 -#define BUTTRESS_REG_ISH2IUDATA0 0x58 - -#define BUTTRESS_REG_IU2ISHDB0 0x10C -#define BUTTRESS_REG_IU2ISHDATA0 0x110 -#define BUTTRESS_REG_IU2ISHDATA1 0x114 -#define BUTTRESS_REG_IU2ISHCSR 0x118 - -#define BUTTRESS_REG_FABRIC_CMD 0x88 - -#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) -#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) - -#define BUTTRESS_REG_TSW_CTL 0x120 -#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) - -#define BUTTRESS_REG_TSC_LO 0x164 -#define BUTTRESS_REG_TSC_HI 0x168 - -#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ - BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ - BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ) - -#define BUTTRESS_EVENT (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ - BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ - BUTTRESS_ISR_SAI_VIOLATION) -#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h deleted file mode 100644 index cc58377..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h +++ /dev/null @@ -1,172 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2023--2024 Intel Corporation */ - -#ifndef IPU6_PLATFORM_ISYS_CSI2_REG_H -#define IPU6_PLATFORM_ISYS_CSI2_REG_H - -#include - -#define CSI_REG_BASE 0x220000 -#define CSI_REG_PORT_BASE(id) (CSI_REG_BASE + 
(id) * 0x1000) - -/* CSI Port Genral Purpose Registers */ -#define CSI_REG_PORT_GPREG_SRST 0x0 -#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4 -#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8 - -/* - * Port IRQs mapping events: - * IRQ0 - CSI_FE event - * IRQ1 - CSI_SYNC - * IRQ2 - S2M_SIDS0TO7 - * IRQ3 - S2M_SIDS8TO15 - */ -#define CSI_PORT_REG_BASE_IRQ_CSI 0x80 -#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0 -#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0 -#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0 - -#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0 -#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4 -#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8 -#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc -#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10 -#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14 - -#define IPU6SE_CSI_RX_ERROR_IRQ_MASK GENMASK(18, 0) -#define IPU6_CSI_RX_ERROR_IRQ_MASK GENMASK(19, 0) - -#define CSI_RX_NUM_ERRORS_IN_IRQ 20 -#define CSI_RX_NUM_IRQ 32 - -#define IPU_CSI_RX_IRQ_FS_VC(chn) (1 << ((chn) * 2)) -#define IPU_CSI_RX_IRQ_FE_VC(chn) (2 << ((chn) * 2)) - -/* PPI2CSI */ -#define CSI_REG_PPI2CSI_ENABLE 0x200 -#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204 -#define PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK GENMASK(4, 3) -#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208 - -enum CSI_PPI2CSI_CTRL { - CSI_PPI2CSI_DISABLE = 0, - CSI_PPI2CSI_ENABLE = 1, -}; - -/* CSI_FE */ -#define CSI_REG_CSI_FE_ENABLE 0x280 -#define CSI_REG_CSI_FE_MODE 0x284 -#define CSI_REG_CSI_FE_MUX_CTRL 0x288 -#define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290 - -enum CSI_FE_ENABLE_TYPE { - CSI_FE_DISABLE = 0, - CSI_FE_ENABLE = 1, -}; - -enum CSI_FE_MODE_TYPE { - CSI_FE_DPHY_MODE = 0, - CSI_FE_CPHY_MODE = 1, -}; - -enum CSI_FE_INPUT_SELECTOR { - CSI_SENSOR_INPUT = 0, - CSI_MIPIGEN_INPUT = 1, -}; - -enum CSI_FE_SYNC_CNTR_SEL_TYPE { - CSI_CNTR_SENSOR_LINE_ID = BIT(0), - CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID, - CSI_CNTR_SENSOR_FRAME_ID = BIT(1), - 
CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID, -}; - -/* CSI HUB General Purpose Registers */ -#define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000) -#define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004) - -#define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4) -#define CSI_REG_HUB_FW_ACCESS_PORT_OFS 0x17000 -#define CSI_REG_HUB_FW_ACCESS_PORT_V6OFS 0x16000 -#define CSI_REG_HUB_FW_ACCESS_PORT(ofs, id) \ - (CSI_REG_BASE + (ofs) + (id) * 4) - -enum CSI_PORT_CLK_GATING_SWITCH { - CSI_PORT_CLK_GATING_OFF = 0, - CSI_PORT_CLK_GATING_ON = 1, -}; - -#define CSI_REG_BASE_HUB_IRQ 0x18200 - -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210 -#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214 - -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230 -#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234 - -/* MTL IPU6V6 irq ctrl0 & ctrl1 */ -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238700 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238704 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238708 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23870c -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238710 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238714 - -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238720 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238724 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238728 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23872c -#define 
IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238730 -#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238734 - -/* - * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits) - * [0] CSI_PORT.IRQ_CTRL0_csi - * [1] CSI_PORT.IRQ_CTRL1_csi_sync - * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7 - * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15 - */ -#define IPU6_ISYS_UNISPART_IRQ_CSI2(port) \ - (0x3 << ((port) * IPU6_CSI_IRQ_NUM_PER_PIPE)) - -/* - * ipu6se support 2 front ends, 2 port per front end, 4 ports 0..3 - * sip0 - 0, 1 - * sip1 - 2, 3 - * 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes - * all offset are base from isys base address - */ - -#define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4) -#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4) - -#define CSI2_HUB_GPREG_DPHY_TIMER_INCR 0x238040 -#define CSI2_HUB_GPREG_HPLL_FREQ 0x238044 -#define CSI2_HUB_GPREG_IS_CLK_RATIO 0x238048 -#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE 0x23804c -#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE 0x238058 -#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL 0x23805c -#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL 0x238088 -#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL 0x2380a4 -#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL 0x2380d0 - -#define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48) -#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48) -#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48) - -/* offset from port base */ -#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL 0x0 -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE 0x4 -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE 0x8 -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8) -#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8) - -#endif /* IPU6_ISYS_CSI2_REG_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h 
deleted file mode 100644 index b883385..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h +++ /dev/null @@ -1,179 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2018 - 2024 Intel Corporation */ - -#ifndef IPU6_PLATFORM_REGS_H -#define IPU6_PLATFORM_REGS_H - -#include - -/* - * IPU6 uses uniform address within IPU6, therefore all subsystem registers - * locates in one single space starts from 0 but in different sctions with - * different addresses, the subsystem offsets are defined to 0 as the - * register definition will have the address offset to 0. - */ -#define IPU6_UNIFIED_OFFSET 0 - -#define IPU6_ISYS_IOMMU0_OFFSET 0x2e0000 -#define IPU6_ISYS_IOMMU1_OFFSET 0x2e0500 -#define IPU6_ISYS_IOMMUI_OFFSET 0x2e0a00 - -#define IPU6_PSYS_IOMMU0_OFFSET 0x1b0000 -#define IPU6_PSYS_IOMMU1_OFFSET 0x1b0700 -#define IPU6_PSYS_IOMMU1R_OFFSET 0x1b0e00 -#define IPU6_PSYS_IOMMUI_OFFSET 0x1b1500 - -/* the offset from IOMMU base register */ -#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET 0x0c -#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET 0x4c -#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c - -#define IPU6_MMU_INFO_OFFSET 0x8 - -#define IPU6_ISYS_SPC_OFFSET 0x210000 - -#define IPU6SE_PSYS_SPC_OFFSET 0x110000 -#define IPU6_PSYS_SPC_OFFSET 0x118000 - -#define IPU6_ISYS_DMEM_OFFSET 0x200000 -#define IPU6_PSYS_DMEM_OFFSET 0x100000 - -#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000 -#define IPU6_REG_ISYS_UNISPART_IRQ_MASK 0x27c004 -#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008 -#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c -#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010 -#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014 -#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414 -#define IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418 -#define IPU6_ISYS_UNISPART_IRQ_CSI0 BIT(2) -#define IPU6_ISYS_UNISPART_IRQ_CSI1 BIT(3) -#define IPU6_ISYS_UNISPART_IRQ_SW BIT(22) - -#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200 -#define 
IPU6_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c -#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210 -#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214 - -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110 -#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114 - -/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ -#define IPU6_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4)) - -#define IPU6_CSI_IRQ_NUM_PER_PIPE 4 -#define IPU6SE_ISYS_CSI_PORT_NUM 4 -#define IPU6_ISYS_CSI_PORT_NUM 8 - -#define IPU6_ISYS_CSI_PORT_IRQ(irq_num) BIT(irq_num) - -/* PKG DIR OFFSET in IMR in secure mode */ -#define IPU6_PKG_DIR_IMR_OFFSET 0x40 - -#define IPU6_ISYS_REG_SPC_STATUS_CTRL 0x0 - -#define IPU6_ISYS_SPC_STATUS_START BIT(1) -#define IPU6_ISYS_SPC_STATUS_RUN BIT(3) -#define IPU6_ISYS_SPC_STATUS_READY BIT(5) -#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) -#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) - -#define IPU6_PSYS_REG_SPC_STATUS_CTRL 0x0 -#define IPU6_PSYS_REG_SPC_START_PC 0x4 -#define IPU6_PSYS_REG_SPC_ICACHE_BASE 0x10 -#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 - -#define IPU6_PSYS_SPC_STATUS_START BIT(1) -#define IPU6_PSYS_SPC_STATUS_RUN BIT(3) -#define IPU6_PSYS_SPC_STATUS_READY BIT(5) -#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) -#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) - -#define IPU6_PSYS_REG_SPP0_STATUS_CTRL 0x20000 - -#define IPU6_INFO_ENABLE_SNOOP BIT(0) -#define IPU6_INFO_DEC_FORCE_FLUSH BIT(1) -#define IPU6_INFO_DEC_PASS_THROUGH BIT(2) -#define IPU6_INFO_ZLW BIT(3) -#define IPU6_INFO_REQUEST_DESTINATION_IOSF BIT(9) -#define IPU6_INFO_IMR_BASE BIT(10) -#define IPU6_INFO_IMR_DESTINED 
BIT(11) - -#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF - -/* - * s2m_pixel_soc_pixel_remapping is dedicated for the enabling of the - * pixel s2m remp ability.Remap here means that s2m rearange the order - * of the pixels in each 4 pixels group. - * For examle, mirroring remping means that if input's 4 first pixels - * are 1 2 3 4 then in output we should see 4 3 2 1 in this 4 first pixels. - * 0xE4 is from s2m MAS document. It means no remapping. - */ -#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 -/* - * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping. - * This remapping is exactly like the stream2mmio remapping. - */ -#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4 - -#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 -#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 -#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) -#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) -#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) - -enum ipu6_device_ab_group1_target_id { - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, - IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, - 
IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, -}; - -enum nci_ab_access_mode { - NCI_AB_ACCESS_MODE_RW, /* read & write */ - NCI_AB_ACCESS_MODE_RO, /* read only */ - NCI_AB_ACCESS_MODE_WO, /* write only */ - NCI_AB_ACCESS_MODE_NA, /* No access at all */ -}; - -/* IRQ-related registers in PSYS */ -#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 -#define IPU6_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 -#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 -#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c -#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 -#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 -/* There are 8 FW interrupts, n = 0..7 */ -#define IPU6_PSYS_GPDEV_FWIRQ0 5 -#define IPU6_PSYS_GPDEV_FWIRQ1 6 -#define IPU6_PSYS_GPDEV_FWIRQ2 7 -#define IPU6_PSYS_GPDEV_FWIRQ3 8 -#define IPU6_PSYS_GPDEV_FWIRQ4 9 -#define IPU6_PSYS_GPDEV_FWIRQ5 10 -#define IPU6_PSYS_GPDEV_FWIRQ6 11 -#define IPU6_PSYS_GPDEV_FWIRQ7 12 -#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) -#define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) - -#endif /* IPU6_PLATFORM_REGS_H */ diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c deleted file mode 100644 index 1f4f20b..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c +++ /dev/null @@ -1,846 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013--2024 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "ipu6.h" -#include "ipu6-bus.h" -#include "ipu6-buttress.h" -#include "ipu6-cpd.h" -#include "ipu6-isys.h" -#include "ipu6-mmu.h" -#include "ipu6-platform-buttress-regs.h" -#include "ipu6-platform-isys-csi2-reg.h" -#include "ipu6-platform-regs.h" - -#define IPU6_PCI_BAR 0 - -struct ipu6_cell_program { - u32 magic_number; - - u32 blob_offset; - u32 blob_size; - - u32 start[3]; - - u32 icache_source; - u32 icache_target; - 
u32 icache_size; - - u32 pmem_source; - u32 pmem_target; - u32 pmem_size; - - u32 data_source; - u32 data_target; - u32 data_size; - - u32 bss_target; - u32 bss_size; - - u32 cell_id; - u32 regs_addr; - - u32 cell_pmem_data_bus_address; - u32 cell_dmem_data_bus_address; - u32 cell_pmem_control_bus_address; - u32 cell_dmem_control_bus_address; - - u32 next; - u32 dummy[2]; -}; - -static struct ipu6_isys_internal_pdata isys_ipdata = { - .hw_variant = { - .offset = IPU6_UNIFIED_OFFSET, - .nr_mmus = 3, - .mmu_hw = { - { - .offset = IPU6_ISYS_IOMMU0_OFFSET, - .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF, - .nr_l1streams = 16, - .l1_block_sz = { - 3, 8, 2, 2, 2, 2, 2, 2, 1, 1, - 1, 1, 1, 1, 1, 1 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_ISYS_IOMMU1_OFFSET, - .info_bits = 0, - .nr_l1streams = 16, - .l1_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 1, 1, 4 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_ISYS_IOMMUI_OFFSET, - .info_bits = 0, - .nr_l1streams = 0, - .nr_l2streams = 0, - .insert_read_before_invalidate = false, - }, - }, - .cdc_fifos = 3, - .cdc_fifo_threshold = {6, 8, 2}, - .dmem_offset = IPU6_ISYS_DMEM_OFFSET, - .spc_offset = IPU6_ISYS_SPC_OFFSET, - }, - .isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN, -}; - -static struct ipu6_psys_internal_pdata psys_ipdata = { - .hw_variant = { - .offset = IPU6_UNIFIED_OFFSET, - .nr_mmus = 4, - .mmu_hw = { - { - .offset = IPU6_PSYS_IOMMU0_OFFSET, - .info_bits = - IPU6_INFO_REQUEST_DESTINATION_IOSF, - 
.nr_l1streams = 16, - .l1_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_PSYS_IOMMU1_OFFSET, - .info_bits = 0, - .nr_l1streams = 32, - .l1_block_sz = { - 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 10, - 5, 4, 14, 6, 4, 14, 6, 4, 8, - 4, 2, 1, 1, 1, 1, 14 - }, - .nr_l2streams = 32, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_PSYS_IOMMU1R_OFFSET, - .info_bits = 0, - .nr_l1streams = 16, - .l1_block_sz = { - 1, 4, 4, 4, 4, 16, 8, 4, 32, - 16, 16, 2, 2, 2, 1, 12 - }, - .nr_l2streams = 16, - .l2_block_sz = { - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2 - }, - .insert_read_before_invalidate = false, - .l1_stream_id_reg_offset = - IPU6_MMU_L1_STREAM_ID_REG_OFFSET, - .l2_stream_id_reg_offset = - IPU6_MMU_L2_STREAM_ID_REG_OFFSET, - }, - { - .offset = IPU6_PSYS_IOMMUI_OFFSET, - .info_bits = 0, - .nr_l1streams = 0, - .nr_l2streams = 0, - .insert_read_before_invalidate = false, - }, - }, - .dmem_offset = IPU6_PSYS_DMEM_OFFSET, - }, -}; - -static const struct ipu6_buttress_ctrl isys_buttress_ctrl = { - .ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO, - .qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, - .freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL, - .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, - .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK, - .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, - .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, -}; - -static const struct 
ipu6_buttress_ctrl psys_buttress_ctrl = { - .ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO, - .qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, - .freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL, - .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, - .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK, - .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE, - .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE, -}; - -static void -ipu6_pkg_dir_configure_spc(struct ipu6_device *isp, - const struct ipu6_hw_variants *hw_variant, - int pkg_dir_idx, void __iomem *base, - u64 *pkg_dir, dma_addr_t pkg_dir_vied_address) -{ - struct ipu6_cell_program *prog; - void __iomem *spc_base; - u32 server_fw_addr; - dma_addr_t dma_addr; - u32 pg_offset; - - server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2)); - if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX) - dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl); - else - dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl); - - pg_offset = server_fw_addr - dma_addr; - prog = (struct ipu6_cell_program *)((uintptr_t)isp->cpd_fw->data + - pg_offset); - spc_base = base + prog->regs_addr; - if (spc_base != (base + hw_variant->spc_offset)) - dev_warn(&isp->pdev->dev, - "SPC reg addr %p not matching value from CPD %p\n", - base + hw_variant->spc_offset, spc_base); - writel(server_fw_addr + prog->blob_offset + - prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE); - writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, - spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); - writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC); - writel(pkg_dir_vied_address, base + hw_variant->dmem_offset); -} - -void ipu6_configure_spc(struct ipu6_device *isp, - const struct ipu6_hw_variants *hw_variant, - int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, - dma_addr_t pkg_dir_dma_addr) -{ - void __iomem *dmem_base = base + hw_variant->dmem_offset; - void __iomem *spc_regs_base = base + hw_variant->spc_offset; - u32 val; - - val = 
readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); - val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; - writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); - - if (isp->secure_mode) - writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base); - else - ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base, - pkg_dir, pkg_dir_dma_addr); -} -EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, "INTEL_IPU6"); - -#define IPU6_ISYS_CSI2_NPORTS 4 -#define IPU6SE_ISYS_CSI2_NPORTS 4 -#define IPU6_TGL_ISYS_CSI2_NPORTS 8 -#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6 - -static void ipu6_internal_pdata_init(struct ipu6_device *isp) -{ - u8 hw_ver = isp->hw_ver; - - isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS; - isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT; - isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE; - isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE; - isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START; - isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END; - isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS; - isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES; - isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX; - isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE; - isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS; - isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK; - isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; - isys_ipdata.csi2.ctrl0_irq_clear = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; - isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; - isys_ipdata.csi2.ctrl0_irq_enable = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; - isys_ipdata.csi2.ctrl0_irq_status = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; - isys_ipdata.csi2.ctrl0_irq_lnp = - IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; - isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver); - psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET; - isys_ipdata.csi2.fw_access_port_ofs = 
CSI_REG_HUB_FW_ACCESS_PORT_OFS; - - if (is_ipu6ep(hw_ver)) { - isys_ipdata.ltr = IPU6EP_LTR_VALUE; - isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH; - } - - if (is_ipu6_tgl(hw_ver)) - isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS; - - if (is_ipu6ep_mtl(hw_ver)) { - isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS; - - isys_ipdata.csi2.ctrl0_irq_edge = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE; - isys_ipdata.csi2.ctrl0_irq_clear = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; - isys_ipdata.csi2.ctrl0_irq_mask = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK; - isys_ipdata.csi2.ctrl0_irq_enable = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE; - isys_ipdata.csi2.ctrl0_irq_lnp = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE; - isys_ipdata.csi2.ctrl0_irq_status = - IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; - isys_ipdata.csi2.fw_access_port_ofs = - CSI_REG_HUB_FW_ACCESS_PORT_V6OFS; - isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE; - isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH; - } - - if (is_ipu6se(hw_ver)) { - isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS; - isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK; - isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS; - isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT; - isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE; - isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE; - isys_ipdata.sensor_type_start = - IPU6SE_FW_ISYS_SENSOR_TYPE_START; - isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END; - isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS; - isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES; - isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX; - isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE; - psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET; - } -} - -static struct ipu6_bus_device * -ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct 
ipu6_isys_internal_pdata *ipdata) -{ - struct device *dev = &pdev->dev; - struct ipu6_bus_device *isys_adev; - struct ipu6_isys_pdata *pdata; - int ret; - - ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); - if (ret) { - dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); - return ERR_PTR(ret); - } - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->base = base; - pdata->ipdata = ipdata; - - isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_ISYS_NAME); - if (IS_ERR(isys_adev)) { - kfree(pdata); - return dev_err_cast_probe(dev, isys_adev, - "ipu6_bus_initialize_device isys failed\n"); - } - - isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(isys_adev->mmu)) { - put_device(&isys_adev->auxdev.dev); - kfree(pdata); - return dev_err_cast_probe(dev, isys_adev->mmu, - "ipu6_mmu_init(isys_adev->mmu) failed\n"); - } - - isys_adev->mmu->dev = &isys_adev->auxdev.dev; - - ret = ipu6_bus_add_device(isys_adev); - if (ret) { - kfree(pdata); - return ERR_PTR(ret); - } - - return isys_adev; -} - -static struct ipu6_bus_device * -ipu6_psys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_psys_internal_pdata *ipdata) -{ - struct ipu6_bus_device *psys_adev; - struct ipu6_psys_pdata *pdata; - int ret; - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->base = base; - pdata->ipdata = ipdata; - - psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_PSYS_NAME); - if (IS_ERR(psys_adev)) { - kfree(pdata); - return dev_err_cast_probe(&pdev->dev, psys_adev, - "ipu6_bus_initialize_device psys failed\n"); - } - - psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(psys_adev->mmu)) { - put_device(&psys_adev->auxdev.dev); - kfree(pdata); - return dev_err_cast_probe(&pdev->dev, psys_adev->mmu, - 
"ipu6_mmu_init(psys_adev->mmu) failed\n"); - } - - psys_adev->mmu->dev = &psys_adev->auxdev.dev; - - ret = ipu6_bus_add_device(psys_adev); - if (ret) { - kfree(pdata); - return ERR_PTR(ret); - } - - return psys_adev; -} - -static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) -{ - int ret; - - /* No PCI msi capability for IPU6EP */ - if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { - /* likely do nothing as msi not enabled by default */ - pci_disable_msi(dev); - return 0; - } - - ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI); - if (ret < 0) - return dev_err_probe(&dev->dev, ret, "Request msi failed"); - - return 0; -} - -static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) -{ - u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); - - if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL) - val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; - else - val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; - - if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL) - val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; - else - val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; - - writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); -} - -static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; - struct device *dev = &pdev->dev; - void __iomem *isys_base = NULL; - void __iomem *psys_base = NULL; - struct ipu6_device *isp; - phys_addr_t phys; - u32 val, version, sku_id; - int ret; - - isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL); - if (!isp) - return -ENOMEM; - - isp->pdev = pdev; - INIT_LIST_HEAD(&isp->devices); - - ret = pcim_enable_device(pdev); - if (ret) - return dev_err_probe(dev, ret, "Enable PCI device failed\n"); - - phys = pci_resource_start(pdev, IPU6_PCI_BAR); - dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys); - - isp->base = pcim_iomap_region(pdev, IPU6_PCI_BAR, IPU6_NAME); - if (IS_ERR(isp->base)) - return 
dev_err_probe(dev, PTR_ERR(isp->base), - "Failed to I/O mem remapping\n"); - - pci_set_drvdata(pdev, isp); - pci_set_master(pdev); - - isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt); - switch (id->device) { - case PCI_DEVICE_ID_INTEL_IPU6: - isp->hw_ver = IPU6_VER_6; - isp->cpd_fw_name = IPU6_FIRMWARE_NAME; - break; - case PCI_DEVICE_ID_INTEL_IPU6SE: - isp->hw_ver = IPU6_VER_6SE; - isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME; - isp->cpd_metadata_cmpnt_size = - sizeof(struct ipu6se_cpd_metadata_cmpnt); - break; - case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP: - case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP: - isp->hw_ver = IPU6_VER_6EP; - isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME; - break; - case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN: - isp->hw_ver = IPU6_VER_6EP; - isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME; - break; - case PCI_DEVICE_ID_INTEL_IPU6EP_MTL: - isp->hw_ver = IPU6_VER_6EP_MTL; - isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME; - break; - default: - return dev_err_probe(dev, -ENODEV, - "Unsupported IPU6 device %x\n", - id->device); - } - - ipu6_internal_pdata_init(isp); - - isys_base = isp->base + isys_ipdata.hw_variant.offset; - psys_base = isp->base + psys_ipdata.hw_variant.offset; - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39)); - if (ret) - return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); - - dma_set_max_seg_size(dev, UINT_MAX); - - ret = ipu6_pci_config_setup(pdev, isp->hw_ver); - if (ret) - return ret; - - ret = ipu6_buttress_init(isp); - if (ret) - return ret; - - ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "Requesting signed firmware %s failed\n", - isp->cpd_fw_name); - goto buttress_exit; - } - - ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, - isp->cpd_fw->size); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "Failed to validate cpd\n"); - goto out_ipu6_bus_del_devices; - } - - isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, - 
sizeof(isys_buttress_ctrl), GFP_KERNEL); - if (!isys_ctrl) { - ret = -ENOMEM; - goto out_ipu6_bus_del_devices; - } - - isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, - &isys_ipdata); - if (IS_ERR(isp->isys)) { - ret = PTR_ERR(isp->isys); - goto out_ipu6_bus_del_devices; - } - - psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl, - sizeof(psys_buttress_ctrl), GFP_KERNEL); - if (!psys_ctrl) { - ret = -ENOMEM; - goto out_ipu6_bus_del_devices; - } - - isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl, - psys_base, &psys_ipdata); - if (IS_ERR(isp->psys)) { - ret = PTR_ERR(isp->psys); - goto out_ipu6_bus_del_devices; - } - - ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); - if (ret < 0) - goto out_ipu6_bus_del_devices; - - ret = ipu6_mmu_hw_init(isp->psys->mmu); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "Failed to set MMU hardware\n"); - goto out_ipu6_bus_del_devices; - } - - ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw, - &isp->psys->fw_sgt); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n"); - goto out_ipu6_bus_del_devices; - } - - ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "failed to create pkg dir\n"); - goto out_ipu6_bus_del_devices; - } - - ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr, - ipu6_buttress_isr_threaded, - IRQF_SHARED, IPU6_NAME, isp); - if (ret) { - dev_err_probe(dev, ret, "Requesting irq failed\n"); - goto out_ipu6_bus_del_devices; - } - - ret = ipu6_buttress_authenticate(isp); - if (ret) { - dev_err_probe(&isp->pdev->dev, ret, - "FW authentication failed\n"); - goto out_free_irq; - } - - ipu6_mmu_hw_cleanup(isp->psys->mmu); - pm_runtime_put(&isp->psys->auxdev.dev); - - /* Configure the arbitration mechanisms for VC requests */ - ipu6_configure_vc_mechanism(isp); - - val = readl(isp->base + BUTTRESS_REG_SKU); - sku_id = FIELD_GET(GENMASK(6, 4), val); - version = 
FIELD_GET(GENMASK(3, 0), val); - dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id, - pdev->device, isp->hw_ver); - - pm_runtime_put_noidle(dev); - pm_runtime_allow(dev); - - isp->bus_ready_to_probe = true; - - return 0; - -out_free_irq: - devm_free_irq(dev, pdev->irq, isp); -out_ipu6_bus_del_devices: - if (isp->psys) { - ipu6_cpd_free_pkg_dir(isp->psys); - ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); - } - if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) - ipu6_mmu_cleanup(isp->psys->mmu); - if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) - ipu6_mmu_cleanup(isp->isys->mmu); - ipu6_bus_del_devices(pdev); - release_firmware(isp->cpd_fw); -buttress_exit: - ipu6_buttress_exit(isp); - - return ret; -} - -static void ipu6_pci_remove(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - struct ipu6_mmu *isys_mmu = isp->isys->mmu; - struct ipu6_mmu *psys_mmu = isp->psys->mmu; - - devm_free_irq(&pdev->dev, pdev->irq, isp); - ipu6_cpd_free_pkg_dir(isp->psys); - - ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt); - ipu6_buttress_exit(isp); - - ipu6_bus_del_devices(pdev); - - pm_runtime_forbid(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); - - release_firmware(isp->cpd_fw); - - ipu6_mmu_cleanup(psys_mmu); - ipu6_mmu_cleanup(isys_mmu); -} - -static void ipu6_pci_reset_prepare(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - - pm_runtime_forbid(&isp->pdev->dev); -} - -static void ipu6_pci_reset_done(struct pci_dev *pdev) -{ - struct ipu6_device *isp = pci_get_drvdata(pdev); - - ipu6_buttress_restore(isp); - if (isp->secure_mode) - ipu6_buttress_reset_authentication(isp); - - isp->need_ipc_reset = true; - pm_runtime_allow(&isp->pdev->dev); -} - -/* - * PCI base driver code requires driver to provide these to enable - * PCI device level PM state transitions (D0<->D3) - */ -static int ipu6_suspend(struct device *dev) -{ - struct pci_dev *pdev 
= to_pci_dev(dev); - - synchronize_irq(pdev->irq); - return 0; -} - -static int ipu6_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct ipu6_device *isp = pci_get_drvdata(pdev); - struct ipu6_buttress *b = &isp->buttress; - int ret; - - /* Configure the arbitration mechanisms for VC requests */ - ipu6_configure_vc_mechanism(isp); - - isp->secure_mode = ipu6_buttress_get_secure_mode(isp); - dev_info(dev, "IPU6 in %s mode\n", - isp->secure_mode ? "secure" : "non-secure"); - - ipu6_buttress_restore(isp); - - ret = ipu6_buttress_ipc_reset(isp, &b->cse); - if (ret) - dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); - - ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev); - if (ret < 0) { - dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); - return 0; - } - - ret = ipu6_buttress_authenticate(isp); - if (ret) - dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret); - - pm_runtime_put(&isp->psys->auxdev.dev); - - return 0; -} - -static int ipu6_runtime_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct ipu6_device *isp = pci_get_drvdata(pdev); - int ret; - - ipu6_configure_vc_mechanism(isp); - ipu6_buttress_restore(isp); - - if (isp->need_ipc_reset) { - struct ipu6_buttress *b = &isp->buttress; - - isp->need_ipc_reset = false; - ret = ipu6_buttress_ipc_reset(isp, &b->cse); - if (ret) - dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); - } - - return 0; -} - -static const struct dev_pm_ops ipu6_pm_ops = { - SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume) - RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL) -}; - -MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl); - -static const struct pci_error_handlers pci_err_handlers = { - .reset_prepare = ipu6_pci_reset_prepare, - .reset_done = ipu6_pci_reset_done, -}; - -static struct pci_driver ipu6_pci_driver = { - .name = IPU6_NAME, - .id_table = ipu6_pci_tbl, - .probe = ipu6_pci_probe, - .remove = ipu6_pci_remove, - .driver = { - 
.pm = pm_ptr(&ipu6_pm_ops), - }, - .err_handler = &pci_err_handlers, -}; - -module_pci_driver(ipu6_pci_driver); - -MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); -MODULE_AUTHOR("Sakari Ailus "); -MODULE_AUTHOR("Tianshu Qiu "); -MODULE_AUTHOR("Bingbu Cao "); -MODULE_AUTHOR("Qingwu Zhang "); -MODULE_AUTHOR("Yunliang Ding "); -MODULE_AUTHOR("Hongju Wang "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel IPU6 PCI driver"); diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h deleted file mode 100644 index 92e3c34..0000000 --- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h +++ /dev/null @@ -1,342 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2013 - 2024 Intel Corporation */ - -#ifndef IPU6_H -#define IPU6_H - -#include -#include -#include - -#include "ipu6-buttress.h" - -struct firmware; -struct pci_dev; -struct ipu6_bus_device; - -#define IPU6_NAME "intel-ipu6" -#define IPU6_MEDIA_DEV_MODEL_NAME "ipu6" - -#define IPU6SE_FIRMWARE_NAME "intel/ipu/ipu6se_fw.bin" -#define IPU6EP_FIRMWARE_NAME "intel/ipu/ipu6ep_fw.bin" -#define IPU6_FIRMWARE_NAME "intel/ipu/ipu6_fw.bin" -#define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" -#define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" - -enum ipu6_version { - IPU6_VER_INVALID = 0, - IPU6_VER_6 = 1, - IPU6_VER_6SE = 3, - IPU6_VER_6EP = 5, - IPU6_VER_6EP_MTL = 6, -}; - -/* - * IPU6 - TGL - * IPU6SE - JSL - * IPU6EP - ADL/RPL - * IPU6EP_MTL - MTL - */ -static inline bool is_ipu6se(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6SE; -} - -static inline bool is_ipu6ep(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6EP; -} - -static inline bool is_ipu6ep_mtl(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6EP_MTL; -} - -static inline bool is_ipu6_tgl(u8 hw_ver) -{ - return hw_ver == IPU6_VER_6; -} - -/* - * ISYS DMA can overshoot. For higher resolutions over allocation is one line - * but it must be at minimum 1024 bytes. 
Value could be different in - * different versions / generations thus provide it via platform data. - */ -#define IPU6_ISYS_OVERALLOC_MIN 1024 - -/* Physical pages in GDA is 128, page size is 2K for IPU6, 1K for others */ -#define IPU6_DEVICE_GDA_NR_PAGES 128 - -/* Virtualization factor to calculate the available virtual pages */ -#define IPU6_DEVICE_GDA_VIRT_FACTOR 32 - -struct ipu6_device { - struct pci_dev *pdev; - struct list_head devices; - struct ipu6_bus_device *isys; - struct ipu6_bus_device *psys; - struct ipu6_buttress buttress; - - const struct firmware *cpd_fw; - const char *cpd_fw_name; - u32 cpd_metadata_cmpnt_size; - - void __iomem *base; - bool need_ipc_reset; - bool secure_mode; - u8 hw_ver; - bool bus_ready_to_probe; -}; - -#define IPU6_ISYS_NAME "isys" -#define IPU6_PSYS_NAME "psys" - -#define IPU6_MMU_MAX_DEVICES 4 -#define IPU6_MMU_ADDR_BITS 32 -/* The firmware is accessible within the first 2 GiB only in non-secure mode. */ -#define IPU6_MMU_ADDR_BITS_NON_SECURE 31 - -#define IPU6_MMU_MAX_TLB_L1_STREAMS 32 -#define IPU6_MMU_MAX_TLB_L2_STREAMS 32 -#define IPU6_MAX_LI_BLOCK_ADDR 128 -#define IPU6_MAX_L2_BLOCK_ADDR 64 - -#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX -#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX - -/* - * To maximize the IOSF utlization, IPU6 need to send requests in bursts. - * At the DMA interface with the buttress, there are CDC FIFOs with burst - * collection capability. CDC FIFO burst collectors have a configurable - * threshold and is configured based on the outcome of performance measurements. - * - * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 - * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 - * - * Threshold values are pre-defined and are arrived at after performance - * evaluations on a type of IPU6 - */ -#define IPU6_MAX_VC_IOSF_PORTS 4 - -/* - * IPU6 must configure correct arbitration mechanism related to the IOSF VC - * requests. 
There are two options per VC0 and VC1 - > 0 means rearbitrate on - * stall and 1 means stall until the request is completed. - */ -#define IPU6_BTRS_ARB_MODE_TYPE_REARB 0 -#define IPU6_BTRS_ARB_MODE_TYPE_STALL 1 - -/* Currently chosen arbitration mechanism for VC0 */ -#define IPU6_BTRS_ARB_STALL_MODE_VC0 \ - IPU6_BTRS_ARB_MODE_TYPE_REARB - -/* Currently chosen arbitration mechanism for VC1 */ -#define IPU6_BTRS_ARB_STALL_MODE_VC1 \ - IPU6_BTRS_ARB_MODE_TYPE_REARB - -/* - * MMU Invalidation HW bug workaround by ZLW mechanism - * - * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in - * wrong translation or replication of the translation. This will cause data - * corruption. So we cannot directly use the MMU V2 invalidation registers - * to invalidate the MMU. Instead, whenever an invalidate is called, we need to - * clear the TLB by evicting all the valid translations by filling it with trash - * buffer (which is guaranteed not to be used by any other processes). ZLW is - * used to fill the L1 and L2 caches with the trash buffer translations. ZLW - * or Zero length write, is pre-fetch mechanism to pre-fetch the pages in - * advance to the L1 and L2 caches without triggering any memory operations. - * - * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream - * One L1 block has 16 entries, hence points to 16 * 4K pages - * L2 -> 16 streams and 32 blocks. 2 blocks per streams - * One L2 block maps to 1024 L1 entries, hence points to 4MB address range - * 2 blocks per L2 stream means, 1 stream points to 8MB range - * - * As we need to clear the caches and 8MB being the biggest cache size, we need - * to have trash buffer which points to 8MB address range. As these trash - * buffers are not used for any memory transactions, we need only the least - * amount of physical memory. So we reserve 8MB IOVA address range but only - * one page is reserved from physical memory. 
Each of this 8MB IOVA address - * range is then mapped to the same physical memory page. - */ -/* One L2 entry maps 1024 L1 entries and one L1 entry per page */ -#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE) -/* Max L2 blocks per stream */ -#define IPU6_MMUV2_MAX_L2_BLOCKS 2 -/* Max L1 blocks per stream */ -#define IPU6_MMUV2_MAX_L1_BLOCKS 16 -#define IPU6_MMUV2_TRASH_RANGE (IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS) -/* Entries per L1 block */ -#define MMUV2_ENTRIES_PER_L1_BLOCK 16 -#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE) -#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU6_MMUV2_L2_RANGE - -/* - * In some of the IPU6 MMUs, there is provision to configure L1 and L2 page - * table caches. Both these L1 and L2 caches are divided into multiple sections - * called streams. There is maximum 16 streams for both caches. Each of these - * sections are subdivided into multiple blocks. When nr_l1streams = 0 and - * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support - * L1/L2 page table caches. - * - * L1 stream per block sizes are configurable and varies per usecase. - * L2 has constant block sizes - 2 blocks per stream. - * - * MMU1 support pre-fetching of the pages to have less cache lookup misses. To - * enable the pre-fetching, MMU1 AT (Address Translator) device registers - * need to be configured. - * - * There are four types of memory accesses which requires ZLW configuration. - * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU. - * - * 1. Sequential Access or 1D mode - * Set ZLW_EN -> 1 - * set ZLW_PAGE_CROSS_1D -> 1 - * Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where - * N is pre-defined and hardcoded in the platform data - * Set ZLW_2D -> 0 - * - * 2. ZLW 2D mode - * Set ZLW_EN -> 1 - * set ZLW_PAGE_CROSS_1D -> 1, - * Set ZLW_N -> 0 - * Set ZLW_2D -> 1 - * - * 3. 
ZLW Enable (no 1D or 2D mode) - * Set ZLW_EN -> 1 - * set ZLW_PAGE_CROSS_1D -> 0, - * Set ZLW_N -> 0 - * Set ZLW_2D -> 0 - * - * 4. ZLW disable - * Set ZLW_EN -> 0 - * set ZLW_PAGE_CROSS_1D -> 0, - * Set ZLW_N -> 0 - * Set ZLW_2D -> 0 - * - * To configure the ZLW for the above memory access, four registers are - * available. Hence to track these four settings, we have the following entries - * in the struct ipu6_mmu_hw. Each of these entries are per stream and - * available only for the L1 streams. - * - * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN) - * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary - * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted - * Insert ZLW request N pages ahead address. - * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D) - * - * - * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined - * as per the usecase specific calculations. Any change to this pre-defined - * table has to happen in sync with IPU6 FW. - */ -struct ipu6_mmu_hw { - union { - unsigned long offset; - void __iomem *base; - }; - u32 info_bits; - u8 nr_l1streams; - /* - * L1 has variable blocks per stream - total of 64 blocks and maximum of - * 16 blocks per stream. Configurable by using the block start address - * per stream. Block start address is calculated from the block size - */ - u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS]; - /* Is ZLW is enabled in each stream */ - bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS]; - bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; - u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS]; - bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS]; - - u32 l1_stream_id_reg_offset; - u32 l2_stream_id_reg_offset; - - u8 nr_l2streams; - /* - * L2 has fixed 2 blocks per stream. 
Block address is calculated - * from the block size - */ - u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS]; - /* flag to track if WA is needed for successive invalidate HW bug */ - bool insert_read_before_invalidate; -}; - -struct ipu6_mmu_pdata { - u32 nr_mmus; - struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; - int mmid; -}; - -struct ipu6_isys_csi2_pdata { - void __iomem *base; -}; - -struct ipu6_isys_internal_csi2_pdata { - u32 nports; - u32 irq_mask; - u32 ctrl0_irq_edge; - u32 ctrl0_irq_clear; - u32 ctrl0_irq_mask; - u32 ctrl0_irq_enable; - u32 ctrl0_irq_lnp; - u32 ctrl0_irq_status; - u32 fw_access_port_ofs; -}; - -struct ipu6_isys_internal_tpg_pdata { - u32 ntpgs; - u32 *offsets; - u32 *sels; -}; - -struct ipu6_hw_variants { - unsigned long offset; - u32 nr_mmus; - struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES]; - u8 cdc_fifos; - u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS]; - u32 dmem_offset; - u32 spc_offset; -}; - -struct ipu6_isys_internal_pdata { - struct ipu6_isys_internal_csi2_pdata csi2; - struct ipu6_hw_variants hw_variant; - u32 num_parallel_streams; - u32 isys_dma_overshoot; - u32 sram_gran_shift; - u32 sram_gran_size; - u32 max_sram_size; - u32 max_streams; - u32 max_send_queues; - u32 max_sram_blocks; - u32 max_devq_size; - u32 sensor_type_start; - u32 sensor_type_end; - u32 ltr; - u32 memopen_threshold; - bool enhanced_iwake; -}; - -struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; -}; - -struct ipu6_psys_internal_pdata { - struct ipu6_hw_variants hw_variant; -}; - -struct ipu6_psys_pdata { - void __iomem *base; - const struct ipu6_psys_internal_pdata *ipdata; -}; - -int ipu6_fw_authenticate(void *data, u64 val); -void ipu6_configure_spc(struct ipu6_device *isp, - const struct ipu6_hw_variants *hw_variant, - int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, - dma_addr_t pkg_dir_dma_addr); -#endif /* IPU6_H */ diff --git a/dkms.conf b/dkms.conf index 99e96b8..53fd52c 100644 --- a/dkms.conf +++ 
b/dkms.conf @@ -21,8 +21,8 @@ MAKE=" if [ -d ${KBASE}/drivers/media/v4l2-core ]; then rmdir ${KBASE}/drivers/m mkdir -p ${KBASE}/drivers/media/v4l2-core/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core/videodev.ko ${KBASE}/drivers/media/v4l2-core/ && \ make V=1 KERNELRELEASE=$kernelver DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' CFLAGS_MODULE=\"$(echo $MAKE_OPTS)\" KERNEL_SRC=$kernel_source_dir && \ if [ -d ${KBASE}/drivers/media/pci/intel/ipu6 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu6 ; fi &&\ - make V=1 -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ - mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/ && \ + make V=1 -C ${kernel_source_dir} M=${KBASE}/ipu6-drivers/drivers/media/pci/intel/ipu6 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ + mkdir -p ${KBASE}/drivers/media/pci/intel/ipu6/ && cp ${KBASE}/ipu6-drivers/drivers/media/pci/intel/ipu6/intel-ipu6.ko ${KBASE}/ipu6-drivers/drivers/media/pci/intel/ipu6/intel-ipu6-isys.ko ${KBASE}/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/intel-ipu6-psys.ko ${KBASE}/drivers/media/pci/intel/ipu6/&& \ if [ -d ${KBASE}/drivers/media/pci/intel/ipu7 ]; then rmdir ${KBASE}/drivers/media/pci/intel/ipu7 ; fi &&\ make V=1 -C ${kernel_source_dir} M=${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7 DRIVER_VERSION_SUFFIX='${VERSION_SUFFIX}' && \ mkdir -p ${KBASE}/drivers/media/pci/intel/ipu7/ && cp ${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7/intel-ipu7.ko ${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7/intel-ipu7-isys.ko ${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/intel-ipu7-psys.ko ${KBASE}/drivers/media/pci/intel/ipu7/" @@ -32,138 +32,44 @@ 
MAKE_MATCH[1]="^(6.1[278])" CLEAN="make V=1 KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean && \ make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/v4l2-core clean clean && \ make -C ${kernel_source_dir} M=${KBASE}/ipu7-drivers/drivers/media/pci/intel/ipu7 clean && \ - make -C ${kernel_source_dir} M=${KBASE}/${SELECTED_KERNEL}/drivers/media/pci/intel/ipu6 clean" + make -C ${kernel_source_dir} M=${KBASE}/ipu6-drivers/drivers/media/pci/intel/ipu6 clean" AUTOINSTALL="yes" BUILD_EXCLUSIVE_CONFIG="CONFIG_VIDEO_V4L2_I2C" -# Add non-upstreamed ISYS patches for Kernel 6.12 -PATCH[0]="0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch" -PATCH[1]="0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch" -PATCH[2]="0003-media-intel-ipu6-remove-buttress-ish-structure.patch" -PATCH[3]="0004-upstream-Use-module-parameter-to-set-isys-freq.patch" -PATCH[4]="0005-media-pci-Enable-ISYS-reset.patch" -PATCH[5]="0006-media-intel-ipu6-remove-buttress-ish-structure.patch" -PATCH[6]="0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch" -PATCH[7]="0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch" -PATCH[8]="0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch" -PATCH[9]="0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch" -PATCH[10]="0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch" -PATCH[11]="0012-media-pci-Set-the-correct-SOF-for-different-stream.patch" -PATCH[12]="0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch" -PATCH[13]="0014-media-intel-ipu6-fix-coverity-issue.patch" -PATCH[14]="0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch" -PATCH[15]="0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch" -PATCH[16]="0017-media-pci-add-missing-if-for-PDATA.patch" -PATCH[17]="0018-media-pci-refine-PDATA-related-config.patch" -PATCH[18]="0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch" -PATCH_MATCH[0]="^(6.1[2])" -PATCH_MATCH[1]="^(6.1[2])" 
-PATCH_MATCH[2]="^(6.1[2])" -PATCH_MATCH[3]="^(6.1[2])" -PATCH_MATCH[4]="^(6.1[2])" -PATCH_MATCH[5]="^(6.1[2])" -PATCH_MATCH[6]="^(6.1[2])" -PATCH_MATCH[7]="^(6.1[2])" -PATCH_MATCH[8]="^(6.1[2])" -PATCH_MATCH[9]="^(6.1[2])" -PATCH_MATCH[10]="^(6.1[2])" -PATCH_MATCH[11]="^(6.1[2])" -PATCH_MATCH[12]="^(6.1[2])" -PATCH_MATCH[13]="^(6.1[2])" -PATCH_MATCH[14]="^(6.1[2])" -PATCH_MATCH[15]="^(6.1[2])" -PATCH_MATCH[16]="^(6.1[2])" -PATCH_MATCH[17]="^(6.1[2])" -PATCH_MATCH[18]="^(6.1[2])" -# Add out-of-tree PSYS codebase -PATCH[19]="0020-upstream-Use-module-parameter-to-set-psys-freq.patch" -PATCH[20]="0021-media-pci-update-IPU6-PSYS-driver.patch" -PATCH[21]="0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch" -PATCH_MATCH[19]="^(6.1[2])" -PATCH_MATCH[20]="^(6.1[2])" -PATCH_MATCH[21]="^(6.1[2])" -# Make IPU V4L2-CORE out-of-tree -PATCH[22]="0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch" -PATCH[23]="0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" -PATCH_MATCH[22]="^(6.1[2])" -PATCH_MATCH[23]="^(6.1[2])" -# Make IPU ISYS+PSYS out-of-tree -PATCH[24]="0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch" -PATCH[25]="0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" -PATCH_MATCH[24]="^(6.1[2])" -PATCH_MATCH[25]="^(6.1[2])" -# Make IPU V4L2-CIC out-of-tree -PATCH[26]="0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch" -PATCH_MATCH[26]="^(6.1[2])" - -j=26 - -# Linux non-upstreamed patches for Linux 6.17 to 6.18 -PATCH[$((++j))]="0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0053-upstream-Use-module-parameter-to-set-isys-freq.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0054-media-pci-Enable-ISYS-reset.patch" 
-PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0060-media-pci-Set-the-correct-SOF-for-different-stream.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0061-media-pci-support-imx390-for-6.11.0-rc3.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0062-i2c-media-fix-cov-issue.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0063-media-add-ipu-acpi-module-to-linux-drivers.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0065-media-pci-add-missing-if-for-PDATA.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0066-media-pci-refine-PDATA-related-config.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0068-upstream-Use-module-parameter-to-set-psys-freq.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0069-media-pci-add-IPU6-PSYS-driver.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch" -PATCH_MATCH[$j]="^(6.1[78])" +j=0 + # Make IPU V4L2-CORE out-of-tree -PATCH[$((++j))]="0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch" -PATCH_MATCH[$j]="^(6.1[78])" 
-PATCH[$((++j))]="0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch" -PATCH_MATCH[$j]="^(6.1[78])" -# Make IPU ISYS+PSYS out-of-tree -PATCH[$((++j))]="0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch" -PATCH_MATCH[$j]="^(6.1[78])" -PATCH[$((++j))]="0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch" +# kernel patches for Linux 6.12 +PATCH[$((++j))]="0001-ipu6-dkms-v4l2-core-makefile-adaptation-6.12.patch" +PATCH_MATCH[$j]="^(6.1[2])" +PATCH[$((++j))]="0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[$j]="^(6.1[2])" +PATCH[$((++j))]="0003-media-v4l2-core-set-v4l2-cci-i2c-support.patch" +PATCH_MATCH[$j]="^(6.1[2])" +# kernel patches for Linux 6.17 +PATCH[$((++j))]="0004-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch" +PATCH_MATCH[$j]="^(6.1[7])" +PATCH[$((++j))]="0005-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[$j]="^(6.1[7])" +# kernel patches for Linux 6.18 +PATCH[$((++j))]="0006-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch" +PATCH_MATCH[$j]="^(6.1[8])" +PATCH[$((++j))]="0007-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch" +PATCH_MATCH[$j]="^(6.1[8])" +PATCH[$((++j))]="0008-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch" +PATCH_MATCH[$j]="^(6.1[8])" + +# Make IPU6 ISYS+PSYS out-of-tree +PATCH[$((++j))]="0050-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch" +PATCH_MATCH[$j]="^(6.1[278])" +PATCH[$((++j))]="0051-Revert-ipu6-isys-lt6911uxc-2-pads-linked-to-ipu-2-po.patch" +PATCH_MATCH[$j]="^(6.1[278])" +PATCH[$((++j))]="0052-module-Convert-symbol-namespace-to-string-literal.patch" PATCH_MATCH[$j]="^(6.1[78])" - -# Make 
IPU7 ISYS+PSYS out-of-tree patch from PTL release for iot on 2025-09-23 -PATCH[$((++j))]="0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch" +# Make IPU7 ISYS+PSYS out-of-tree patch from PTL release for iot on 2025-12-10 +PATCH[$((++j))]="0100-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch" PATCH_MATCH[$j]="^(6.1[278])" PATCH[$((++j))]="0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch" PATCH_MATCH[$j]="^(6.1[278])" @@ -171,7 +77,9 @@ PATCH[$((++j))]="0102-ipu7-isys-add-d4xx-pixel-format-support.patch" PATCH_MATCH[$j]="^(6.1[278])" PATCH[$((++j))]="0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch" PATCH_MATCH[$j]="^(6.1[278])" -PATCH[$((++j))]="0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch" + +#apply dummy chnage to avoid last miss patch +PATCH[$((++j))]="0104-ipu7-dkms-dummy-patch-to-void-last-match-miss.patch" PATCH_MATCH[$j]="^(6.1[278])" i=0 diff --git a/ipu6-drivers b/ipu6-drivers new file mode 160000 index 0000000..71e2e42 --- /dev/null +++ b/ipu6-drivers @@ -0,0 +1 @@ +Subproject commit 71e2e426f3e16f8bc42bf2f88a21fa95eeca5923 diff --git a/ipu7-drivers b/ipu7-drivers index 62a3704..3f014a1 160000 --- a/ipu7-drivers +++ b/ipu7-drivers @@ -1 +1 @@ -Subproject commit 62a3704433c35d3bdfa679fc4dee74e133ce815c +Subproject commit 3f014a183b770c8cf2f928a3f07396bdfb76609f diff --git a/patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch b/patches/0001-ipu6-dkms-v4l2-core-makefile-adaptation-6.12.patch similarity index 91% rename from patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch rename to patches/0001-ipu6-dkms-v4l2-core-makefile-adaptation-6.12.patch index 04a048c..fa4d50a 100644 --- a/patches/0023-ipu7-dkms-v4l2-core-makefile-adaptation-6.12.patch +++ b/patches/0001-ipu6-dkms-v4l2-core-makefile-adaptation-6.12.patch @@ -1,7 +1,7 @@ -From 8fe1a4467308a5bc10777ecbc95f5b660f566763 Mon Sep 17 00:00:00 2001 +From c3c5d9f0239c3776f82bbc90d6d126338c63e823 Mon Sep 17 
00:00:00 2001 From: florent pirou Date: Mon, 17 Nov 2025 07:44:33 -0700 -Subject: [PATCH 23/28] ipu7 dkms : v4l2-core makefile adaptation 6.12 +Subject: [PATCH 1/8] ipu6-dkms : v4l2-core makefile adaptation 6.12 Signed-off-by: florent pirou --- diff --git a/patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch b/patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch deleted file mode 100644 index 44cff75..0000000 --- a/patches/0001-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 459bc453270ba6fd0a77ff8d53a0f22641a1c55f Mon Sep 17 00:00:00 2001 -From: florent pirou -Date: Mon, 17 Nov 2025 07:44:33 -0700 -Subject: [PATCH 1/2] ipu7 dkms : v4l2-core makefile adaptation 6.17 - -Signed-off-by: florent pirou ---- - 6.17.0/drivers/media/v4l2-core/Makefile | 3 +++ - 6.17.0/drivers/media/v4l2-core/v4l2-dev.c | 1 + - 2 files changed, 4 insertions(+) - -diff --git a/6.17.0/drivers/media/v4l2-core/Makefile b/6.17.0/drivers/media/v4l2-core/Makefile -index 2177b9d63..82c235f19 100644 ---- a/6.17.0/drivers/media/v4l2-core/Makefile -+++ b/6.17.0/drivers/media/v4l2-core/Makefile -@@ -2,6 +2,9 @@ - # - # Makefile for the V4L2 core - # -+# ipu7-dkms appendix -+CC := ${CC} -I ${M}/../../../include-overrides -I ${M}/../tuners -I ${M}/../dvb-core -I ${M}/../dvb-frontends -+ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" - - ccflags-y += -I$(srctree)/drivers/media/dvb-frontends - ccflags-y += -I$(srctree)/drivers/media/tuners -diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c -index c36923511..199737025 100644 ---- a/6.17.0/drivers/media/v4l2-core/v4l2-dev.c -+++ b/6.17.0/drivers/media/v4l2-core/v4l2-dev.c -@@ -1245,3 +1245,4 @@ MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab , Bill Dirks, - MODULE_DESCRIPTION("Video4Linux2 core driver"); - MODULE_LICENSE("GPL"); - MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); --- -2.25.1 - diff 
--git a/patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch b/patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch deleted file mode 100644 index d2b21c4..0000000 --- a/patches/0001-media-intel-ipu6-Fix-dma-mask-for-non-secure-mode.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 61bd4d3d1182afbb7e658aef7c1e66d4f0c91205 Mon Sep 17 00:00:00 2001 -From: Stanislaw Gruszka -Date: Mon, 12 Jan 2026 05:06:31 -0700 -Subject: [PATCH 01/28] media: intel/ipu6: Fix dma mask for non-secure mode - -commit 0209916ebe2475079ce6d8dc4114afbc0ccad1c2 upstream. - -We use dma_get_mask() of auxdev device for calculate iova pfn limit. -This is always 32 bit mask as we do not initialize the mask (and we can -not do so, since dev->dev_mask is NULL anyways for auxdev). - -Since we need 31 bit mask for non-secure mode use mmu_info->aperture_end -which is properly initialized to correct mask for both modes. - -Fixes: daabc5c64703 ("media: ipu6: not override the dma_ops of device in driver") -Cc: stable@vger.kernel.org -Signed-off-by: Stanislaw Gruszka -Signed-off-by: Sakari Ailus -Signed-off-by: Hans Verkuil -Signed-off-by: Greg Kroah-Hartman ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c -index b71f66b..92d5136 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-dma.c -@@ -172,7 +172,7 @@ void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, - count = PHYS_PFN(size); - - iova = alloc_iova(&mmu->dmap->iovad, count, -- PHYS_PFN(dma_get_mask(dev)), 0); -+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); - if (!iova) - goto out_kfree; - -@@ -398,7 +398,7 @@ int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, - nents, npages); - - iova = alloc_iova(&mmu->dmap->iovad, npages, -- PHYS_PFN(dma_get_mask(dev)), 
0); -+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); - if (!iova) - return 0; - --- -2.43.0 - diff --git a/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch b/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch index 427599d..f8359ce 100644 --- a/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch +++ b/patches/0002-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -1,19 +1,19 @@ -From 20ee554282631a6b12eba6ebb60bdb7bf9fc9c35 Mon Sep 17 00:00:00 2001 +From 70aae79dcba097c7575e6f8e5a2dce576ffda35f Mon Sep 17 00:00:00 2001 From: florent pirou Date: Mon, 17 Nov 2025 07:45:23 -0700 -Subject: [PATCH 2/2] drivers: media: set v4l2_subdev_enable_streams_api=true - for WA +Subject: [PATCH 2/8] drivers: media: set v4l2_subdev_enable_streams_api=true + for WA 6.12 Signed-off-by: hepengpx Signed-off-by: florent pirou --- - 6.17.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- + 6.12.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c -index 4fd25fea3..60a05561f 100644 ---- a/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c -+++ b/6.17.0/drivers/media/v4l2-core/v4l2-subdev.c +diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c +index 3a4ba08..f535929 100644 +--- a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c @@ -32,7 +32,7 @@ * 'v4l2_subdev_enable_streams_api' to 1 below. 
*/ @@ -24,5 +24,5 @@ index 4fd25fea3..60a05561f 100644 /* -- -2.25.1 +2.43.0 diff --git a/patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch b/patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch deleted file mode 100644 index 5db8f87..0000000 --- a/patches/0002-media-ipu6-Remove-workaround-for-Meteor-Lake-ES2.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 94b81f0eec26ba7ee16abb58f895187f7b1b1cdf Mon Sep 17 00:00:00 2001 -From: Hao Yao -Date: Mon, 12 Jan 2026 05:00:33 -0700 -Subject: [PATCH 02/28] media: ipu6: Remove workaround for Meteor Lake ES2 - -commit d471fb06b21ae54bf76464731ae1dcb26ef1ca68 upstream. - -There was a hardware bug which need IPU6 driver to disable the ATS. This -workaround is not needed anymore as the bug was fixed in hardware level. - -Additionally, Arrow Lake has the same IPU6 PCI ID and x86 stepping but -does not have the bug. Removing the Meteor Lake workaround is also -required for the driver to function on Arrow Lake. - -Signed-off-by: Hao Yao -Reviewed-by: Stanislaw Gruszka -Fixes: 25fedc021985 ("media: intel/ipu6: add Intel IPU6 PCI device driver") -Cc: stable@vger.kernel.org -[Sakari Ailus: Added tags and explanation of what is fixed.] 
-Signed-off-by: Sakari Ailus -Signed-off-by: Hans Verkuil -Signed-off-by: Greg Kroah-Hartman ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 5 ----- - 1 file changed, 5 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 91718ea..1c5c38a 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -463,11 +463,6 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) - { - int ret; - -- /* disable IPU6 PCI ATS on mtl ES2 */ -- if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && -- pci_ats_supported(dev)) -- pci_disable_ats(dev); -- - /* No PCI msi capability for IPU6EP */ - if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { - /* likely do nothing as msi not enabled by default */ --- -2.43.0 - diff --git a/patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch b/patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch deleted file mode 100644 index 31f9366..0000000 --- a/patches/0003-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 11adf12607a6dab13805325def41b6c87184bb31 Mon Sep 17 00:00:00 2001 -From: florent pirou -Date: Thu, 4 Dec 2025 00:50:25 -0700 -Subject: [PATCH 3/3] ipu6-dkms: add isys makefile adaptation 6.17 - -Signed-off-by: florent pirou ---- - 6.17.0/drivers/media/pci/intel/ipu6/Makefile | 5 +++++ - 6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + - 6.17.0/drivers/media/pci/intel/ipu6/ipu6.c | 1 + - 3 files changed, 7 insertions(+) - -diff --git a/6.17.0/drivers/media/pci/intel/ipu6/Makefile b/6.17.0/drivers/media/pci/intel/ipu6/Makefile -index a821b0a..f15fd92 100644 ---- a/6.17.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.17.0/drivers/media/pci/intel/ipu6/Makefile -@@ -1,5 +1,10 @@ - # SPDX-License-Identifier: GPL-2.0-only - -+CC := ${CC} -I ${M}/../../../include-overrides -+ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" 
-+ -+export CONFIG_VIDEO_INTEL_IPU6 = m -+ - intel-ipu6-y := ipu6.o \ - ipu6-bus.o \ - ipu6-dma.o \ -diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index fc0ec0a..f048f05 100644 ---- a/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1378,3 +1378,4 @@ MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 input system driver"); - MODULE_IMPORT_NS("INTEL_IPU6"); - MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); -diff --git a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c -index 1f4f20b..abada2f 100644 ---- a/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.17.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -844,3 +844,4 @@ MODULE_AUTHOR("Yunliang Ding "); - MODULE_AUTHOR("Hongju Wang "); - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 PCI driver"); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); --- -2.43.0 - diff --git a/patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch b/patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch deleted file mode 100644 index c1d5de9..0000000 --- a/patches/0003-media-intel-ipu6-remove-buttress-ish-structure.patch +++ /dev/null @@ -1,125 +0,0 @@ -From a611ad3710a6c3d65051b5fc0d84fde15d11c4c1 Mon Sep 17 00:00:00 2001 -From: Stanislaw Gruszka -Date: Mon, 12 Jan 2026 05:25:13 -0700 -Subject: [PATCH 03/28] media: intel/ipu6: remove buttress ish structure - -The buttress ipc ish structure is not effectively used on IPU6 - data -is nullified on init. Remove the ish structure and handing of related -interrupts to cleanup the code. 
- -Signed-off-by: Stanislaw Gruszka -Signed-off-by: Sakari Ailus -Signed-off-by: Hans Verkuil ---- - .../media/pci/intel/ipu6/ipu6-buttress.c | 29 +++---------------- - 1 file changed, 4 insertions(+), 25 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index 1ee63ef..ffa6dff 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -215,20 +215,17 @@ static void ipu6_buttress_ipc_recv(struct ipu6_device *isp, - } - - static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp, -- enum ipu6_buttress_ipc_domain ipc_domain, - struct ipu6_ipc_buttress_bulk_msg *msgs, - u32 size) - { - unsigned long tx_timeout_jiffies, rx_timeout_jiffies; - unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; - struct ipu6_buttress *b = &isp->buttress; -- struct ipu6_buttress_ipc *ipc; -+ struct ipu6_buttress_ipc *ipc = &b->cse; - u32 val; - int ret; - int tout; - -- ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? 
&b->cse : &b->ish; -- - mutex_lock(&b->ipc_mutex); - - ret = ipu6_buttress_ipc_validity_open(isp, ipc); -@@ -306,7 +303,6 @@ out: - - static int - ipu6_buttress_ipc_send(struct ipu6_device *isp, -- enum ipu6_buttress_ipc_domain ipc_domain, - u32 ipc_msg, u32 size, bool require_resp, - u32 expected_resp) - { -@@ -317,7 +313,7 @@ ipu6_buttress_ipc_send(struct ipu6_device *isp, - .expected_resp = expected_resp, - }; - -- return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); -+ return ipu6_buttress_ipc_send_bulk(isp, &msg, 1); - } - - static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev) -@@ -386,25 +382,12 @@ irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr) - complete(&b->cse.recv_complete); - } - -- if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) { -- dev_dbg(&isp->pdev->dev, -- "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n"); -- ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data); -- complete(&b->ish.recv_complete); -- } -- - if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { - dev_dbg(&isp->pdev->dev, - "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); - complete(&b->cse.send_complete); - } - -- if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) { -- dev_dbg(&isp->pdev->dev, -- "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); -- complete(&b->ish.send_complete); -- } -- - if (irq_status & BUTTRESS_ISR_SAI_VIOLATION && - ipu6_buttress_get_secure_mode(isp)) - dev_err(&isp->pdev->dev, -@@ -666,7 +649,7 @@ int ipu6_buttress_authenticate(struct ipu6_device *isp) - */ - dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); - -- ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE, -+ ret = ipu6_buttress_ipc_send(isp, - BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, - 1, true, - BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); -@@ -708,7 +691,7 @@ int ipu6_buttress_authenticate(struct ipu6_device *isp) - * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as - */ - dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); -- ret = ipu6_buttress_ipc_send(isp, 
IPU6_BUTTRESS_IPC_CSE, -+ ret = ipu6_buttress_ipc_send(isp, - BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, - 1, true, - BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); -@@ -849,9 +832,7 @@ int ipu6_buttress_init(struct ipu6_device *isp) - mutex_init(&b->auth_mutex); - mutex_init(&b->cons_mutex); - mutex_init(&b->ipc_mutex); -- init_completion(&b->ish.send_complete); - init_completion(&b->cse.send_complete); -- init_completion(&b->ish.recv_complete); - init_completion(&b->cse.recv_complete); - - b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; -@@ -863,8 +844,6 @@ int ipu6_buttress_init(struct ipu6_device *isp) - b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; - b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; - -- /* no ISH on IPU6 */ -- memset(&b->ish, 0, sizeof(b->ish)); - INIT_LIST_HEAD(&b->constraints); - - isp->secure_mode = ipu6_buttress_get_secure_mode(isp); --- -2.43.0 - diff --git a/patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch b/patches/0003-media-v4l2-core-set-v4l2-cci-i2c-support.patch similarity index 93% rename from patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch rename to patches/0003-media-v4l2-core-set-v4l2-cci-i2c-support.patch index ff7cc69..f9460e5 100644 --- a/patches/0027-media-v4l2-core-set-v4l2-cci-i2c-support.patch +++ b/patches/0003-media-v4l2-core-set-v4l2-cci-i2c-support.patch @@ -1,7 +1,7 @@ -From 7f7bf005bc0108b96e9705f617fd1b391cf7ff2b Mon Sep 17 00:00:00 2001 +From 1f1be31e07f67b8c4a3432d86961b848463f7aca Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sun, 18 Jan 2026 13:46:44 -0700 -Subject: [PATCH 27/27] media: v4l2-core : set v4l2-cci i2c support +Subject: [PATCH 3/8] media: v4l2-core : set v4l2-cci i2c support Signed-off-by: Florent Pirou --- diff --git a/patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch b/patches/0004-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch similarity index 91% rename from patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch rename to 
patches/0004-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch index d7d2e93..ae77460 100644 --- a/patches/0072-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch +++ b/patches/0004-ipu7-dkms-v4l2-core-makefile-adaptation-6.17.patch @@ -1,7 +1,7 @@ -From ab4266d7926dd398e0ff9ffad8888b20c68fa663 Mon Sep 17 00:00:00 2001 +From 9097a7b8bcb983c7afc812af26eb7d313200f849 Mon Sep 17 00:00:00 2001 From: florent pirou Date: Mon, 17 Nov 2025 07:44:33 -0700 -Subject: [PATCH 72/77] ipu7 dkms : v4l2-core makefile adaptation 6.17 +Subject: [PATCH 4/8] ipu7 dkms : v4l2-core makefile adaptation 6.17 Signed-off-by: florent pirou --- diff --git a/patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch b/patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch deleted file mode 100644 index cc8f7df..0000000 --- a/patches/0004-upstream-Use-module-parameter-to-set-isys-freq.patch +++ /dev/null @@ -1,45 +0,0 @@ -From a3deded546b8560ada051dd94269c7afa24dd299 Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Mon, 12 Jan 2026 05:27:39 -0700 -Subject: [PATCH 04/28] upstream: Use module parameter to set isys freq - -Signed-off-by: Hongju Wang -Signed-off-by: Dongcheng Yan -Signed-off-by: zouxiaoh ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 1c5c38a..18f0dd2 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -33,6 +33,10 @@ - #include "ipu6-platform-isys-csi2-reg.h" - #include "ipu6-platform-regs.h" - -+static unsigned int isys_freq_override; -+module_param(isys_freq_override, uint, 0660); -+MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); -+ - #define IPU6_PCI_BAR 0 - - struct ipu6_cell_program { -@@ -387,6 +391,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - pdata->base = base; - pdata->ipdata = ipdata; - -+ 
/* Override the isys freq */ -+ if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && -+ isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { -+ ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; -+ dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", -+ isys_freq_override); -+ } -+ - isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_ISYS_NAME); - if (IS_ERR(isys_adev)) { --- -2.43.0 - diff --git a/patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch b/patches/0005-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch similarity index 83% rename from patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch rename to patches/0005-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch index b694a18..9d7099f 100644 --- a/patches/0073-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch +++ b/patches/0005-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -1,7 +1,7 @@ -From 92714a8bb27332674cebe3a5adc46ff8ba99fe12 Mon Sep 17 00:00:00 2001 +From 33750bc14545f84d7c26168b956e960b6427b005 Mon Sep 17 00:00:00 2001 From: florent pirou Date: Mon, 17 Nov 2025 07:45:23 -0700 -Subject: [PATCH 73/77] drivers: media: set v4l2_subdev_enable_streams_api=true +Subject: [PATCH 5/8] drivers: media: set v4l2_subdev_enable_streams_api=true for WA 6.17 Signed-off-by: hepengpx diff --git a/patches/0005-media-pci-Enable-ISYS-reset.patch b/patches/0005-media-pci-Enable-ISYS-reset.patch deleted file mode 100644 index c2bb0b1..0000000 --- a/patches/0005-media-pci-Enable-ISYS-reset.patch +++ /dev/null @@ -1,627 +0,0 @@ -From 20f1e38fca4e59b674ae67763bdaca560d55004b Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Mon, 12 Jan 2026 05:30:26 -0700 -Subject: [PATCH 05/28] media: pci: Enable ISYS reset - -Signed-off-by: zouxiaoh ---- - 6.12.0/drivers/media/pci/intel/ipu6/Kconfig | 9 + - .../media/pci/intel/ipu6/ipu6-fw-isys.h | 3 + - .../media/pci/intel/ipu6/ipu6-isys-queue.c | 251 ++++++++++++++++++ - 
.../media/pci/intel/ipu6/ipu6-isys-queue.h | 3 + - .../media/pci/intel/ipu6/ipu6-isys-video.c | 82 +++++- - .../media/pci/intel/ipu6/ipu6-isys-video.h | 8 + - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 10 + - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 5 + - 8 files changed, 370 insertions(+), 1 deletion(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig -index cd1c545..f48b833 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/Kconfig -+++ b/6.12.0/drivers/media/pci/intel/ipu6/Kconfig -@@ -16,3 +16,12 @@ config VIDEO_INTEL_IPU6 - - To compile this driver, say Y here! It contains 2 modules - - intel_ipu6 and intel_ipu6_isys. -+ -+config VIDEO_INTEL_IPU6_ISYS_RESET -+ depends on VIDEO_INTEL_IPU6 -+ bool "Enable isys reset" -+ default n -+ help -+ Support ISYS reset in IPU6. -+ -+ To compile this driver, say Y here! -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -index b60f020..e6ee161 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -@@ -66,6 +66,9 @@ struct ipu6_isys; - #define IPU6_ISYS_OPEN_RETRY 2000 - #define IPU6_ISYS_CLOSE_RETRY 2000 - #define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#define IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET msecs_to_jiffies(200) -+#endif - - enum ipu6_fw_isys_resp_type { - IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -index bbb66b5..69fde84 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -@@ -10,6 +10,9 @@ - #include - #include - #include -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#include -+#endif - - #include - #include -@@ -21,6 +24,9 @@ - #include "ipu6-fw-isys.h" - 
#include "ipu6-isys.h" - #include "ipu6-isys-video.h" -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#include "ipu6-cpd.h" -+#endif - - static int ipu6_isys_buf_init(struct vb2_buffer *vb) - { -@@ -218,6 +224,16 @@ static int buffer_list_get(struct ipu6_isys_stream *stream, - ib = list_last_entry(&aq->incoming, - struct ipu6_isys_buffer, head); - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); -+ -+ if (av->skipframe) { -+ atomic_set(&ib->skipframe_flag, 1); -+ av->skipframe--; -+ } else { -+ atomic_set(&ib->skipframe_flag, 0); -+ } -+#endif - dev_dbg(dev, "buffer: %s: buffer %u\n", - ipu6_isys_queue_to_video(aq)->vdev.name, - ipu6_isys_buffer_to_vb2_buffer(ib)->index); -@@ -372,6 +388,19 @@ static void buf_queue(struct vb2_buffer *vb) - list_add(&ib->head, &aq->incoming); - spin_unlock_irqrestore(&aq->lock, flags); - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_lock(&av->isys->reset_mutex); -+ while (av->isys->in_reset) { -+ mutex_unlock(&av->isys->reset_mutex); -+ dev_dbg(dev, "buffer: %s: wait for reset\n", av->vdev.name); -+ usleep_range(10000, 11000); -+ mutex_lock(&av->isys->reset_mutex); -+ } -+ mutex_unlock(&av->isys->reset_mutex); -+ /* ip may be cleared in ipu reset */ -+ stream = av->stream; -+ -+#endif - if (!media_pipe || !vb->vb2_queue->start_streaming_called) { - dev_dbg(dev, "media pipeline is not ready for %s\n", - av->vdev.name); -@@ -603,6 +632,9 @@ static int start_streaming(struct vb2_queue *q, unsigned int count) - - out: - mutex_unlock(&stream->mutex); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ av->start_streaming = 1; -+#endif - - return 0; - -@@ -624,12 +656,210 @@ out_return_buffers: - return ret; - } - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+static void reset_stop_streaming(struct ipu6_isys_video *av) -+{ -+ struct ipu6_isys_queue *aq = &av->aq; -+ struct ipu6_isys_stream *stream = av->stream; -+ -+ mutex_lock(&stream->mutex); -+ -+ 
ipu6_isys_update_stream_watermark(av, false); -+ -+ mutex_lock(&av->isys->stream_mutex); -+ if (stream->nr_streaming == stream->nr_queues && stream->streaming) -+ ipu6_isys_video_set_streaming(av, 0, NULL); -+ mutex_unlock(&av->isys->stream_mutex); -+ -+ stream->nr_streaming--; -+ list_del(&aq->node); -+ stream->streaming = 0; -+ mutex_unlock(&stream->mutex); -+ -+ ipu6_isys_stream_cleanup(av); -+ -+ return_buffers(aq, VB2_BUF_STATE_ERROR); -+ -+ ipu6_isys_fw_close(av->isys); -+} -+ -+static int reset_start_streaming(struct ipu6_isys_video *av) -+{ -+ struct ipu6_isys_queue *aq = &av->aq; -+ struct device *dev = &av->isys->adev->auxdev.dev; -+ unsigned long flags; -+ int ret; -+ -+ dev_dbg(dev, "%s: start streaming\n", av->vdev.name); -+ -+ spin_lock_irqsave(&aq->lock, flags); -+ while (!list_empty(&aq->active)) { -+ struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, -+ struct ipu6_isys_buffer, head); -+ -+ list_del(&ib->head); -+ list_add_tail(&ib->head, &aq->incoming); -+ } -+ spin_unlock_irqrestore(&aq->lock, flags); -+ -+ av->skipframe = 1; -+ ret = start_streaming(&aq->vbq, 0); -+ if (ret) { -+ dev_dbg(dev, -+ "%s: start streaming failed in reset ! 
set av->start_streaming = 0.\n", -+ av->vdev.name); -+ av->start_streaming = 0; -+ } else -+ av->start_streaming = 1; -+ -+ return ret; -+} -+ -+static int ipu_isys_reset(struct ipu6_isys_video *self_av, -+ struct ipu6_isys_stream *self_stream) -+{ -+ struct ipu6_isys *isys = self_av->isys; -+ struct ipu6_bus_device *adev = isys->adev; -+ struct ipu6_device *isp = adev->isp; -+ struct ipu6_isys_video *av = NULL; -+ struct ipu6_isys_stream *stream = NULL; -+ struct device *dev = &adev->auxdev.dev; -+ int ret, i, j; -+ int has_streaming = 0; -+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = -+ &isys->pdata->ipdata->csi2; -+ -+ -+ mutex_lock(&isys->reset_mutex); -+ if (isys->in_reset) { -+ mutex_unlock(&isys->reset_mutex); -+ return 0; -+ } -+ isys->in_reset = true; -+ -+ while (isys->in_stop_streaming) { -+ dev_dbg(dev, "isys reset: %s: wait for stop\n", -+ self_av->vdev.name); -+ mutex_unlock(&isys->reset_mutex); -+ usleep_range(10000, 11000); -+ mutex_lock(&isys->reset_mutex); -+ } -+ -+ mutex_unlock(&isys->reset_mutex); -+ -+ for (i = 0; i < csi2_pdata->nports; i++) { -+ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { -+ av = &isys->csi2[i].av[j]; -+ if (av == self_av) -+ continue; -+ -+ stream = av->stream; -+ if (!stream || stream == self_stream) -+ continue; -+ -+ if (!stream->streaming && !stream->nr_streaming) -+ continue; -+ -+ av->reset = true; -+ has_streaming = true; -+ reset_stop_streaming(av); -+ } -+ } -+ -+ if (!has_streaming) -+ goto end_of_reset; -+ -+ dev_dbg(dev, "ipu reset, power cycle\n"); -+ /* bus_pm_runtime_suspend() */ -+ /* isys_runtime_pm_suspend() */ -+ dev->bus->pm->runtime_suspend(dev); -+ -+ /* ipu_suspend */ -+ isp->pdev->driver->driver.pm->runtime_suspend(&isp->pdev->dev); -+ -+ /* ipu_runtime_resume */ -+ isp->pdev->driver->driver.pm->runtime_resume(&isp->pdev->dev); -+ -+ /* bus_pm_runtime_resume() */ -+ /* isys_runtime_pm_resume() */ -+ dev->bus->pm->runtime_resume(dev); -+ -+ ipu6_cleanup_fw_msg_bufs(isys); -+ -+ if 
(isys->fwcom) { -+ dev_err(dev, "Clearing old context\n"); -+ ipu6_fw_isys_cleanup(isys); -+ } -+ -+ ret = ipu6_fw_isys_init(av->isys, -+ isys->pdata->ipdata->num_parallel_streams); -+ if (ret < 0) -+ dev_err(dev, "ipu fw isys init failed\n"); -+ -+ dev_dbg(dev, "restart streams\n"); -+ -+ for (j = 0; j < csi2_pdata->nports; j++) { -+ for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { -+ av = &isys->csi2[j].av[i]; -+ if (!av->reset) -+ continue; -+ -+ av->reset = false; -+ reset_start_streaming(av); -+ } -+ } -+ -+end_of_reset: -+ mutex_lock(&isys->reset_mutex); -+ isys->in_reset = false; -+ mutex_unlock(&isys->reset_mutex); -+ dev_dbg(dev, "reset done\n"); -+ -+ return 0; -+} -+ -+#endif - static void stop_streaming(struct vb2_queue *q) - { - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_stream *stream = av->stream; - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ struct device *dev = &av->isys->adev->auxdev.dev; -+ -+ dev_dbg(dev, "stop: %s: enter\n", av->vdev.name); -+ -+ mutex_lock(&av->isys->reset_mutex); -+ while (av->isys->in_reset || av->isys->in_stop_streaming) { -+ mutex_unlock(&av->isys->reset_mutex); -+ dev_dbg(dev, "stop: %s: wait for in_reset = %d\n", -+ av->vdev.name, av->isys->in_reset); -+ dev_dbg(dev, "stop: %s: wait for in_stop = %d\n", -+ av->vdev.name, av->isys->in_stop_streaming); -+ usleep_range(10000, 11000); -+ mutex_lock(&av->isys->reset_mutex); -+ } -+ -+ if (!av->start_streaming) { -+ mutex_unlock(&av->isys->reset_mutex); -+ return; -+ } -+ -+ av->isys->in_stop_streaming = true; -+ mutex_unlock(&av->isys->reset_mutex); -+ -+ stream = av->stream; -+ if (!stream) { -+ dev_err(dev, "stop: %s: ip cleard!\n", av->vdev.name); -+ return_buffers(aq, VB2_BUF_STATE_ERROR); -+ mutex_lock(&av->isys->reset_mutex); -+ av->isys->in_stop_streaming = false; -+ mutex_unlock(&av->isys->reset_mutex); -+ return; -+ } -+#endif -+ - mutex_lock(&stream->mutex); - - 
ipu6_isys_update_stream_watermark(av, false); -@@ -649,6 +879,22 @@ static void stop_streaming(struct vb2_queue *q) - return_buffers(aq, VB2_BUF_STATE_ERROR); - - ipu6_isys_fw_close(av->isys); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ -+ av->start_streaming = 0; -+ mutex_lock(&av->isys->reset_mutex); -+ av->isys->in_stop_streaming = false; -+ mutex_unlock(&av->isys->reset_mutex); -+ -+ if (av->isys->need_reset) { -+ if (!stream->nr_streaming) -+ ipu_isys_reset(av, stream); -+ else -+ av->isys->need_reset = 0; -+ } -+ -+ dev_dbg(dev, "stop: %s: exit\n", av->vdev.name); -+#endif - } - - static unsigned int -@@ -732,6 +978,11 @@ void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) - * to the userspace when it is de-queued - */ - atomic_set(&ib->str2mmio_flag, 0); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ } else if (atomic_read(&ib->skipframe_flag)) { -+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); -+ atomic_set(&ib->skipframe_flag, 0); -+#endif - } else { - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - } -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h -index fe8fc79..78f3c2f 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h -@@ -33,6 +33,9 @@ struct ipu6_isys_queue { - struct ipu6_isys_buffer { - struct list_head head; - atomic_t str2mmio_flag; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ atomic_t skipframe_flag; -+#endif - }; - - struct ipu6_isys_video_buffer { -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index 48388c0..e060bae 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -17,6 +17,9 @@ - #include - #include - #include -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#include -+#endif - - #include - #include -@@ -112,6 +115,26 @@ static int 
video_open(struct file *file) - return v4l2_fh_open(file); - } - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+static int video_release(struct file *file) -+{ -+ struct ipu6_isys_video *av = video_drvdata(file); -+ -+ dev_dbg(&av->isys->adev->auxdev.dev, -+ "release: %s: enter\n", av->vdev.name); -+ mutex_lock(&av->isys->reset_mutex); -+ while (av->isys->in_reset) { -+ mutex_unlock(&av->isys->reset_mutex); -+ dev_dbg(&av->isys->adev->auxdev.dev, -+ "release: %s: wait for reset\n", av->vdev.name); -+ usleep_range(10000, 11000); -+ mutex_lock(&av->isys->reset_mutex); -+ } -+ mutex_unlock(&av->isys->reset_mutex); -+ return vb2_fop_release(file); -+} -+ -+#endif - const struct ipu6_isys_pixelformat * - ipu6_isys_get_isys_format(u32 pixelformat, u32 type) - { -@@ -596,7 +619,11 @@ static int start_stream_firmware(struct ipu6_isys_video *av, - } - - reinit_completion(&stream->stream_start_completion); -- -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; -+ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, -+ send_type); -+#else - if (bl) { - send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; - ipu6_fw_isys_dump_frame_buff_set(dev, buf, -@@ -609,6 +636,7 @@ static int start_stream_firmware(struct ipu6_isys_video *av, - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - send_type); - } -+#endif - - if (ret < 0) { - dev_err(dev, "can't start streaming (%d)\n", ret); -@@ -627,7 +655,25 @@ static int start_stream_firmware(struct ipu6_isys_video *av, - ret = -EIO; - goto out_stream_close; - } -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ if (bl) { -+ dev_dbg(dev, "start stream: capture\n"); -+ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; -+ ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); -+ ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, -+ buf, msg->dma_addr, -+ sizeof(*buf), -+ send_type); -+ -+ if (ret < 0) { -+ dev_err(dev, "can't queue buffers 
(%d)\n", ret); -+ goto out_stream_close; -+ } -+ } -+ -+#else - dev_dbg(dev, "start stream: complete\n"); -+#endif - - return 0; - -@@ -674,7 +720,11 @@ static void stop_streaming_firmware(struct ipu6_isys_video *av) - } - - tout = wait_for_completion_timeout(&stream->stream_stop_completion, -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); -+#else - IPU6_FW_CALL_TIMEOUT_JIFFIES); -+#endif - if (!tout) - dev_warn(dev, "stream stop time out\n"); - else if (stream->error) -@@ -699,7 +749,11 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) - } - - tout = wait_for_completion_timeout(&stream->stream_close_completion, -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); -+#else - IPU6_FW_CALL_TIMEOUT_JIFFIES); -+#endif - if (!tout) - dev_warn(dev, "stream close time out\n"); - else if (stream->error) -@@ -707,6 +761,12 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) - else - dev_dbg(dev, "close stream: complete\n"); - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ stream->last_sequence = atomic_read(&stream->sequence); -+ dev_dbg(dev, "IPU_ISYS_RESET: ip->last_sequence = %d\n", -+ stream->last_sequence); -+ -+#endif - put_stream_opened(av); - } - -@@ -721,7 +781,18 @@ int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, - return -EINVAL; - - stream->nr_queues = nr_queues; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ if (av->isys->in_reset) { -+ atomic_set(&stream->sequence, stream->last_sequence); -+ dev_dbg(&av->isys->adev->auxdev.dev, -+ "atomic_set : stream->last_sequence = %d\n", -+ stream->last_sequence); -+ } else { -+ atomic_set(&stream->sequence, 0); -+ } -+# else - atomic_set(&stream->sequence, 0); -+#endif - - stream->seq_index = 0; - memset(stream->seq, 0, sizeof(stream->seq)); -@@ -1089,7 +1160,11 @@ static const struct v4l2_file_operations isys_fops = { - .unlocked_ioctl = video_ioctl2, - .mmap = vb2_fop_mmap, - .open = video_open, -+#ifdef 
CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ .release = video_release, -+#else - .release = vb2_fop_release, -+#endif - }; - - int ipu6_isys_fw_open(struct ipu6_isys *isys) -@@ -1306,6 +1381,11 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) - av->pix_fmt = format.fmt.pix; - __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); - av->meta_fmt = format_meta.fmt.meta; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ av->reset = false; -+ av->skipframe = 0; -+ av->start_streaming = 0; -+#endif - - set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); - video_set_drvdata(&av->vdev, av); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h -index 1d945be..0dfa477 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h -@@ -51,6 +51,9 @@ struct ipu6_isys_stream { - struct mutex mutex; - struct media_entity *source_entity; - atomic_t sequence; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ int last_sequence; -+#endif - unsigned int seq_index; - struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; - int stream_source; -@@ -101,6 +104,11 @@ struct ipu6_isys_video { - u32 source_stream; - u8 vc; - u8 dt; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ unsigned int reset; -+ unsigned int skipframe; -+ unsigned int start_streaming; -+#endif - }; - - #define ipu6_isys_queue_to_video(__aq) \ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 17bc8ca..4958a28 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1084,6 +1084,10 @@ static int isys_probe(struct auxiliary_device *auxdev, - - mutex_init(&isys->mutex); - mutex_init(&isys->stream_mutex); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_init(&isys->reset_mutex); -+ isys->in_reset = false; -+#endif - - spin_lock_init(&isys->listlock); - 
INIT_LIST_HEAD(&isys->framebuflist); -@@ -1126,6 +1130,9 @@ static int isys_probe(struct auxiliary_device *auxdev, - if (ret) - goto free_fw_msg_bufs; - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_destroy(&isys->reset_mutex); -+#endif - ipu6_mmu_hw_cleanup(adev->mmu); - - return 0; -@@ -1180,6 +1187,9 @@ static void isys_remove(struct auxiliary_device *auxdev) - isys_iwake_watermark_cleanup(isys); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_destroy(&isys->reset_mutex); -+#endif - } - - struct fwmsg { -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 86dfc2e..982eb5d 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -167,6 +167,11 @@ struct ipu6_isys { - struct list_head framebuflist_fw; - struct v4l2_async_notifier notifier; - struct isys_iwake_watermark iwake_watermark; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ struct mutex reset_mutex; -+ bool in_reset; -+ bool in_stop_streaming; -+#endif - }; - - struct isys_fw_msgs { --- -2.43.0 - diff --git a/patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch b/patches/0006-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch similarity index 85% rename from patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch rename to patches/0006-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch index 196e208..2d1505a 100644 --- a/patches/0074-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch +++ b/patches/0006-ipu7-dkms-v4l2-core-makefile-adaptation-6.18.patch @@ -1,7 +1,7 @@ -From 52fec029544562294137641ca6cb48df710536f8 Mon Sep 17 00:00:00 2001 +From e110405c132f81a5bf2f6ead853b072e002692bc Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 03:20:05 -0700 -Subject: [PATCH 74/78] ipu7 dkms : v4l2-core makefile adaptation 6.18 +Subject: [PATCH 6/8] ipu7 dkms : v4l2-core 
makefile adaptation 6.18 Signed-off-by: Florent Pirou --- diff --git a/patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch b/patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch deleted file mode 100644 index 8a5c0fe..0000000 --- a/patches/0006-media-intel-ipu6-remove-buttress-ish-structure.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 394f85bc8316347626789f0e186403482da2c3da Mon Sep 17 00:00:00 2001 -From: Julian Chen -Date: Mon, 12 Jan 2026 05:32:38 -0700 -Subject: [PATCH 06/28] media: intel/ipu6: remove buttress ish structure - -missed content in remove buttress ish patch - -Signed-off-by: Julian Chen ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 6 ------ - 1 file changed, 6 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 9b6f569..482978c 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -46,18 +46,12 @@ struct ipu6_buttress_ipc { - struct ipu6_buttress { - struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; - struct ipu6_buttress_ipc cse; -- struct ipu6_buttress_ipc ish; - struct list_head constraints; - u32 wdt_cached_value; - bool force_suspend; - u32 ref_clk; - }; - --enum ipu6_buttress_ipc_domain { -- IPU6_BUTTRESS_IPC_CSE, -- IPU6_BUTTRESS_IPC_ISH, --}; -- - struct ipu6_ipc_buttress_bulk_msg { - u32 cmd; - u32 expected_resp; --- -2.43.0 - diff --git a/patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch b/patches/0007-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch similarity index 83% rename from patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch rename to patches/0007-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch index 512fab4..bd4f325 100644 --- a/patches/0075-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch +++ 
b/patches/0007-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch @@ -1,7 +1,7 @@ -From 6732cbe871d179380f48bbfae76abef6cd720e3a Mon Sep 17 00:00:00 2001 +From 0fb4ec54ab3fe0df40ee4bd96a8b05ca7ecce4aa Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 03:22:26 -0700 -Subject: [PATCH 75/78] drivers: media: set v4l2_subdev_enable_streams_api=true +Subject: [PATCH 7/8] drivers: media: set v4l2_subdev_enable_streams_api=true for WA 6.18 Signed-off-by: Florent Pirou diff --git a/patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch b/patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch deleted file mode 100644 index c93ff76..0000000 --- a/patches/0007-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch +++ /dev/null @@ -1,42 +0,0 @@ -From b1f999981313f65a89fa53539ef9d23bf380621b Mon Sep 17 00:00:00 2001 -From: Chen Meng J -Date: Mon, 12 Jan 2026 05:34:38 -0700 -Subject: [PATCH 07/28] media: intel-ipu6: use vc1 dma for MTL and ARL - -Signed-off-by: Chen Meng J ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h | 4 ++-- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -index e6ee161..545180c 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -@@ -54,8 +54,8 @@ struct ipu6_isys; - /* Max number of planes for frame formats supported by the FW */ - #define IPU6_PIN_PLANES_MAX 4 - --#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 --#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 -+#define IPU6_FW_ISYS_SENSOR_TYPE_START 1 -+#define IPU6_FW_ISYS_SENSOR_TYPE_END 10 - #define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 - #define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 - /* -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index 
e060bae..2d1391e 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -530,7 +530,7 @@ static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, - output_pin->csi_be_soc_pixel_remapping = - CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; - -- output_pin->snoopable = true; -+ output_pin->snoopable = false; - output_pin->error_handling_enable = false; - output_pin->sensor_type = isys->sensor_type++; - if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) --- -2.43.0 - diff --git a/patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch b/patches/0008-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch similarity index 95% rename from patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch rename to patches/0008-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch index 1861e78..d73d518 100644 --- a/patches/0076-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch +++ b/patches/0008-Revert-media-v4l2-subdev-Make-struct-v4l2_subdev_str.patch @@ -1,7 +1,7 @@ -From 560e0edd91435231530af29e668b682500a7d38e Mon Sep 17 00:00:00 2001 +From 235d0d27af9e511637a2a6ee681533594164f8a7 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Fri, 16 Jan 2026 10:10:16 -0700 -Subject: [PATCH 76/78] Revert "media: v4l2-subdev: Make struct +Subject: [PATCH 8/8] Revert "media: v4l2-subdev: Make struct v4l2_subdev_stream_config private" This reverts commit 5195b777552d2e2fa735c6cad75797efa132bd60. 
diff --git a/patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch b/patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch deleted file mode 100644 index 0c96ba2..0000000 --- a/patches/0008-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 8b60cd4f3bd091dee52e1e1d2a00c8a5326f09d9 Mon Sep 17 00:00:00 2001 -From: linya14x -Date: Mon, 12 Jan 2026 05:43:14 -0700 -Subject: [PATCH 08/28] media: ipu: Dma sync at buffer_prepare callback as DMA - is non-coherent - -Test Platform: -MTL ARL LT6911UXE - -Signed-off-by: linya14x -Signed-off-by: Bingbu Cao ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -index 69fde84..b931c43 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -@@ -84,7 +84,9 @@ static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, - static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) - { - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); -+ struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); -+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); - struct device *dev = &av->isys->adev->auxdev.dev; - u32 bytesperline = ipu6_isys_get_bytes_per_line(av); - u32 height = ipu6_isys_get_frame_height(av); -@@ -98,6 +100,9 @@ static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) - - vb2_set_plane_payload(vb, 0, bytesperline * height); - -+ /* assume IPU is not DMA coherent */ -+ ipu6_dma_sync_sgtable(isys->adev, sg); -+ - return 0; - } - --- -2.43.0 - diff --git a/patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch b/patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch deleted 
file mode 100644 index 8d50a21..0000000 --- a/patches/0009-media-intel-ipu6-support-IPU6-ISYS-FW-trace-dump-for.patch +++ /dev/null @@ -1,1432 +0,0 @@ -From 592e4eb78f74948ad2b0e6929d7fb2650b4cb501 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Mon, 12 Jan 2026 05:36:07 -0700 -Subject: [PATCH 09/28] media: intel-ipu6: support IPU6 ISYS FW trace dump for - upstream driverxsy - -[media]pci]Support IPU6 ISYS FW trace dump for upstream drive -Signed-off-by: hepengpx ---- - 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 3 +- - .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 2 + - .../media/pci/intel/ipu6/ipu6-fw-com.c | 12 +- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 58 +- - .../media/pci/intel/ipu6/ipu6-platform-regs.h | 40 + - .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 896 ++++++++++++++++++ - .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 147 +++ - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 48 + - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 5 + - 9 files changed, 1206 insertions(+), 5 deletions(-) - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -index a821b0a..e1e123b 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -@@ -6,7 +6,8 @@ intel-ipu6-y := ipu6.o \ - ipu6-mmu.o \ - ipu6-buttress.o \ - ipu6-cpd.o \ -- ipu6-fw-com.o -+ ipu6-fw-com.o \ -+ ipu6-trace.o - - obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -index bb4926d..c2a3f22 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -@@ -18,6 +18,7 @@ struct pci_dev; - #define IPU6_BUS_NAME IPU6_NAME "-bus" - - struct ipu6_buttress_ctrl; -+struct ipu_subsystem_trace_config; 
- - struct ipu6_bus_device { - struct auxiliary_device auxdev; -@@ -27,6 +28,7 @@ struct ipu6_bus_device { - void *pdata; - struct ipu6_mmu *mmu; - struct ipu6_device *isp; -+ struct ipu_subsystem_trace_config *trace_cfg; - struct ipu6_buttress_ctrl *ctrl; - u64 dma_mask; - const struct firmware *fw; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c -index 7d3d931..45738f3 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c -@@ -14,6 +14,7 @@ - #include "ipu6-bus.h" - #include "ipu6-dma.h" - #include "ipu6-fw-com.h" -+#include "ipu6-trace.h" - - /* - * FWCOM layer is a shared resource between FW and driver. It consist -@@ -265,8 +266,15 @@ EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6); - - int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) - { -- /* write magic pattern to disable the tunit trace */ -- writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); -+ dma_addr_t trace_buff = TUNIT_MAGIC_PATTERN; -+ -+ /* -+ * Write trace buff start addr to tunit cfg reg. -+ * This feature is used to enable tunit trace in secure mode. 
-+ */ -+ ipu_trace_buffer_dma_handle(&ctx->adev->auxdev.dev, &trace_buff); -+ writel(trace_buff, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); -+ - /* Check if SP is in valid state */ - if (!ctx->cell_ready(ctx->adev)) - return -EIO; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 4958a28..af03e2f 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -41,6 +41,7 @@ - #include "ipu6-platform-buttress-regs.h" - #include "ipu6-platform-isys-csi2-reg.h" - #include "ipu6-platform-regs.h" -+#include "ipu6-trace.h" - - #define IPU6_BUTTRESS_FABIC_CONTROL 0x68 - #define GDA_ENABLE_IWAKE_INDEX 2 -@@ -348,9 +349,11 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) - u32 status_sw, status_csi; - u32 ctrl0_status, ctrl0_clear; - -+ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); - spin_lock(&isys->power_lock); - if (!isys->power) { - spin_unlock(&isys->power_lock); -+ dev_dbg(&adev->auxdev.dev, "%s exit, no power", __func__); - return IRQ_NONE; - } - -@@ -399,6 +402,7 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) - - spin_unlock(&isys->power_lock); - -+ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); - return IRQ_HANDLED; - } - -@@ -1040,6 +1044,46 @@ void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data) - spin_unlock_irqrestore(&isys->listlock, flags); - } - -+struct ipu_trace_block isys_trace_blocks[] = { -+ { -+ .offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE, -+ .type = IPU_TRACE_BLOCK_TUN, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_SP_EVQ_BASE, -+ .type = IPU_TRACE_BLOCK_TM, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_SP_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_ISL_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_MMU_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ /* Note! 
this covers all 8 blocks */ -+ .offset = IPU_TRACE_REG_CSI2_TM_BASE(0), -+ .type = IPU_TRACE_CSI2, -+ }, -+ { -+ /* Note! this covers all 11 blocks */ -+ .offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0), -+ .type = IPU_TRACE_SIG2CIOS, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, -+ .type = IPU_TRACE_TIMER_RST, -+ }, -+ { -+ .type = IPU_TRACE_BLOCK_END, -+ } -+}; -+ - static int isys_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *auxdev_id) - { -@@ -1111,6 +1155,8 @@ static int isys_probe(struct auxiliary_device *auxdev, - goto remove_shared_buffer; - } - -+ ipu_trace_init(adev->isp, isys->pdata->base, &auxdev->dev, -+ isys_trace_blocks); - cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); - - ret = alloc_fw_msg_bufs(isys, 20); -@@ -1140,6 +1186,7 @@ static int isys_probe(struct auxiliary_device *auxdev, - free_fw_msg_bufs: - free_fw_msg_bufs(isys); - out_remove_pkg_dir_shared_buffer: -+ ipu_trace_uninit(&auxdev->dev); - cpu_latency_qos_remove_request(&isys->pm_qos); - if (!isp->secure_mode) - ipu6_cpd_free_pkg_dir(adev); -@@ -1185,6 +1232,7 @@ static void isys_remove(struct auxiliary_device *auxdev) - mutex_destroy(&isys->streams[i].mutex); - - isys_iwake_watermark_cleanup(isys); -+ ipu_trace_uninit(&auxdev->dev); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); - #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -@@ -1237,12 +1285,17 @@ static int isys_isr_one(struct ipu6_bus_device *adev) - u32 index; - u64 ts; - -- if (!isys->fwcom) -+ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); -+ if (!isys->fwcom) { -+ dev_dbg(&adev->auxdev.dev, "%s exit, fwcom is null", __func__); - return 1; -+ } - - resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); -- if (!resp) -+ if (!resp) { -+ dev_dbg(&adev->auxdev.dev, "%s exit, resp is null", __func__); - return 1; -+ } - - ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; - -@@ -1351,6 +1404,7 @@ static int 
isys_isr_one(struct ipu6_bus_device *adev) - ipu6_isys_put_stream(stream); - leave: - ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); -+ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); - return 0; - } - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h -index b883385..a1744d2 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h -@@ -176,4 +176,44 @@ enum nci_ab_access_mode { - #define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) - #define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) - -+/* Trace unit related register definitions */ -+#define TRACE_REG_MAX_ISYS_OFFSET 0xfffff -+#define TRACE_REG_MAX_PSYS_OFFSET 0xfffff -+#define IPU_ISYS_OFFSET IPU6_ISYS_DMEM_OFFSET -+#define IPU_PSYS_OFFSET IPU6_PSYS_DMEM_OFFSET -+/* ISYS trace unit registers */ -+/* Trace unit base offset */ -+#define IPU_TRACE_REG_IS_TRACE_UNIT_BASE 0x27d000 -+/* Trace monitors */ -+#define IPU_TRACE_REG_IS_SP_EVQ_BASE 0x211000 -+/* GPC blocks */ -+#define IPU_TRACE_REG_IS_SP_GPC_BASE 0x210800 -+#define IPU_TRACE_REG_IS_ISL_GPC_BASE 0x2b0a00 -+#define IPU_TRACE_REG_IS_MMU_GPC_BASE 0x2e0f00 -+/* each CSI2 port has a dedicated trace monitor, index 0..7 */ -+#define IPU_TRACE_REG_CSI2_TM_BASE(port) (0x220400 + 0x1000 * (port)) -+ -+/* Trace timers */ -+#define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x27c410 -+#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) -+ -+/* SIG2CIO */ -+#define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port) \ -+ (0x220e00 + (port) * 0x1000) -+ -+/* PSYS trace unit registers */ -+/* Trace unit base offset */ -+#define IPU_TRACE_REG_PS_TRACE_UNIT_BASE 0x1b4000 -+/* Trace monitors */ -+#define IPU_TRACE_REG_PS_SPC_EVQ_BASE 0x119000 -+#define IPU_TRACE_REG_PS_SPP0_EVQ_BASE 0x139000 -+ -+/* GPC blocks */ -+#define IPU_TRACE_REG_PS_SPC_GPC_BASE 0x118800 -+#define IPU_TRACE_REG_PS_SPP0_GPC_BASE 0x138800 
-+#define IPU_TRACE_REG_PS_MMU_GPC_BASE 0x1b1b00 -+ -+/* Trace timers */ -+#define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x1aa714 -+ - #endif /* IPU6_PLATFORM_REGS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -new file mode 100644 -index 0000000..adcee20 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -@@ -0,0 +1,896 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2014 - 2025 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ipu6.h" -+#include "ipu6-bus.h" -+#include "ipu6-platform-regs.h" -+#include "ipu6-dma.h" -+#include "ipu6-trace.h" -+ -+/* -+ * enabling ipu trace need a 96 MB buffer. -+ */ -+static bool ipu_trace_enable; -+module_param(ipu_trace_enable, bool, 0660); -+MODULE_PARM_DESC(ipu_trace_enable, "IPU trace enable"); -+ -+struct trace_register_range { -+ u32 start; -+ u32 end; -+}; -+ -+#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 96) -+#define TRACE_MESSAGE_SIZE 16 -+/* -+ * It looks that the trace unit sometimes writes outside the given buffer. -+ * To avoid memory corruption one extra page is reserved at the end -+ * of the buffer. Read also the extra area since it may contain valid data. 
-+ */ -+#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE -+#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD -+#define MAX_TRACE_REGISTERS 200 -+#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) -+#define TRACE_CONF_DATA_MAX_LEN (1024 * 4) -+#define WPT_TRACE_CONF_DATA_MAX_LEN (1024 * 64) -+ -+struct config_value { -+ u32 reg; -+ u32 value; -+}; -+ -+struct ipu_trace_buffer { -+ dma_addr_t dma_handle; -+ void *memory_buffer; -+}; -+ -+struct ipu_subsystem_wptrace_config { -+ bool open; -+ char *conf_dump_buffer; -+ int size_conf_dump; -+ unsigned int fill_level; -+ struct config_value config[MAX_TRACE_REGISTERS]; -+}; -+ -+struct ipu_subsystem_trace_config { -+ u32 offset; -+ void __iomem *base; -+ struct ipu_trace_buffer memory; /* ring buffer */ -+ struct device *dev; -+ struct ipu_trace_block *blocks; -+ unsigned int fill_level; /* Nbr of regs in config table below */ -+ bool running; -+ /* Cached register values */ -+ struct config_value config[MAX_TRACE_REGISTERS]; -+ /* watchpoint trace info */ -+ struct ipu_subsystem_wptrace_config wpt; -+}; -+ -+struct ipu_trace { -+ struct mutex lock; /* Protect ipu trace operations */ -+ bool open; -+ char *conf_dump_buffer; -+ int size_conf_dump; -+ -+ struct ipu_subsystem_trace_config isys; -+ struct ipu_subsystem_trace_config psys; -+}; -+ -+static void __ipu_trace_restore(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu6_device *isp = adev->isp; -+ struct ipu_trace *trace = isp->trace; -+ struct config_value *config; -+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; -+ struct ipu_trace_block *blocks; -+ u32 mapped_trace_buffer; -+ void __iomem *addr = NULL; -+ int i; -+ -+ if (trace->open) { -+ dev_info(dev, "Trace control file open. 
Skipping update\n"); -+ return; -+ } -+ -+ if (!sys) -+ return; -+ -+ /* leave if no trace configuration for this subsystem */ -+ if (sys->fill_level == 0) -+ return; -+ -+ /* Find trace unit base address */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_TUN) { -+ addr = sys->base + blocks->offset; -+ break; -+ } -+ blocks++; -+ } -+ if (!addr) -+ return; -+ -+ if (!sys->memory.memory_buffer) { -+ sys->memory.memory_buffer = -+ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, -+ &sys->memory.dma_handle, -+ GFP_KERNEL, 0); -+ } -+ -+ if (!sys->memory.memory_buffer) { -+ dev_err(dev, "No memory for tracing. Trace unit disabled\n"); -+ return; -+ } -+ -+ config = sys->config; -+ mapped_trace_buffer = sys->memory.dma_handle; -+ -+ /* ring buffer base */ -+ writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); -+ -+ /* ring buffer end */ -+ writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - -+ TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); -+ -+ /* Infobits for ddr trace */ -+ writel(IPU6_INFO_REQUEST_DESTINATION_PRIMARY, -+ addr + TRACE_REG_TUN_DDR_INFO_VAL); -+ -+ /* Find trace timer reset address */ -+ addr = NULL; -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_TIMER_RST) { -+ addr = sys->base + blocks->offset; -+ break; -+ } -+ blocks++; -+ } -+ if (!addr) { -+ dev_err(dev, "No trace reset addr\n"); -+ return; -+ } -+ -+ /* Remove reset from trace timers */ -+ writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); -+ -+ /* Register config received from userspace */ -+ for (i = 0; i < sys->fill_level; i++) { -+ dev_dbg(dev, -+ "Trace restore: reg 0x%08x, value 0x%08x\n", -+ config[i].reg, config[i].value); -+ writel(config[i].value, isp->base + config[i].reg); -+ } -+ -+ /* Register wpt config received from userspace, and only psys has wpt */ -+ config = sys->wpt.config; -+ for (i = 0; i < 
sys->wpt.fill_level; i++) { -+ dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n", -+ config[i].reg, config[i].value); -+ writel(config[i].value, isp->base + config[i].reg); -+ } -+ sys->running = true; -+} -+ -+void ipu_trace_restore(struct device *dev) -+{ -+ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; -+ -+ if (!trace) -+ return; -+ -+ mutex_lock(&trace->lock); -+ __ipu_trace_restore(dev); -+ mutex_unlock(&trace->lock); -+} -+EXPORT_SYMBOL_GPL(ipu_trace_restore); -+ -+static void __ipu_trace_stop(struct device *dev) -+{ -+ struct ipu_subsystem_trace_config *sys = -+ to_ipu6_bus_device(dev)->trace_cfg; -+ struct ipu_trace_block *blocks; -+ -+ if (!sys) -+ return; -+ -+ if (!sys->running) -+ return; -+ sys->running = false; -+ -+ /* Turn off all the gpc blocks */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_GPC) { -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_GPC_OVERALL_ENABLE); -+ } -+ blocks++; -+ } -+ -+ /* Turn off all the trace monitors */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_TM) { -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TM_TRACE_ENABLE_NPK); -+ -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TM_TRACE_ENABLE_DDR); -+ } -+ blocks++; -+ } -+ -+ /* Turn off trace units */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_TUN) { -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TUN_DDR_ENABLE); -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TUN_NPK_ENABLE); -+ } -+ blocks++; -+ } -+} -+ -+void ipu_trace_stop(struct device *dev) -+{ -+ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; -+ -+ if (!trace) -+ return; -+ -+ mutex_lock(&trace->lock); -+ __ipu_trace_stop(dev); -+ mutex_unlock(&trace->lock); -+} -+EXPORT_SYMBOL_GPL(ipu_trace_stop); -+ -+static int 
update_register_cache(struct ipu6_device *isp, u32 reg, u32 value) -+{ -+ struct ipu_trace *dctrl = isp->trace; -+ struct ipu_subsystem_trace_config *sys; -+ int rval = -EINVAL; -+ -+ if (dctrl->isys.offset == dctrl->psys.offset) { -+ /* For the IPU with uniform address space */ -+ if (reg >= IPU_ISYS_OFFSET && -+ reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) -+ sys = &dctrl->isys; -+ else if (reg >= IPU_PSYS_OFFSET && -+ reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) -+ sys = &dctrl->psys; -+ else -+ goto error; -+ } else { -+ if (dctrl->isys.offset && -+ reg >= dctrl->isys.offset && -+ reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) -+ sys = &dctrl->isys; -+ else if (dctrl->psys.offset && -+ reg >= dctrl->psys.offset && -+ reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) -+ sys = &dctrl->psys; -+ else -+ goto error; -+ } -+ -+ if (sys->fill_level < MAX_TRACE_REGISTERS) { -+ dev_dbg(sys->dev, -+ "Trace reg addr 0x%08x value 0x%08x\n", reg, value); -+ sys->config[sys->fill_level].reg = reg; -+ sys->config[sys->fill_level].value = value; -+ sys->fill_level++; -+ } else { -+ rval = -ENOMEM; -+ goto error; -+ } -+ return 0; -+error: -+ dev_info(&isp->pdev->dev, -+ "Trace register address 0x%08x ignored as invalid register\n", -+ reg); -+ return rval; -+} -+ -+static void traceconf_dump(struct ipu6_device *isp) -+{ -+ struct ipu_subsystem_trace_config *sys[2] = { -+ &isp->trace->isys, -+ &isp->trace->psys -+ }; -+ int i, j, rem_size; -+ char *out; -+ -+ isp->trace->size_conf_dump = 0; -+ out = isp->trace->conf_dump_buffer; -+ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; -+ -+ for (j = 0; j < ARRAY_SIZE(sys); j++) { -+ for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { -+ int bytes_print; -+ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", -+ sys[j]->config[i].reg, -+ sys[j]->config[i].value); -+ -+ bytes_print = min(n, rem_size - 1); -+ rem_size -= bytes_print; -+ out += bytes_print; -+ } -+ } -+ isp->trace->size_conf_dump = out - 
isp->trace->conf_dump_buffer; -+} -+ -+static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) -+{ -+ if (!sys || !sys->memory.memory_buffer) -+ return; -+ -+ memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_OVERREAD); -+ -+ dma_sync_single_for_device(sys->dev, -+ sys->memory.dma_handle, -+ MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); -+} -+ -+static int traceconf_open(struct inode *inode, struct file *file) -+{ -+ int ret; -+ struct ipu6_device *isp; -+ -+ if (!inode->i_private) -+ return -EACCES; -+ -+ isp = inode->i_private; -+ -+ ret = mutex_trylock(&isp->trace->lock); -+ if (!ret) -+ return -EBUSY; -+ -+ if (isp->trace->open) { -+ mutex_unlock(&isp->trace->lock); -+ return -EBUSY; -+ } -+ -+ file->private_data = isp; -+ isp->trace->open = 1; -+ if (file->f_mode & FMODE_WRITE) { -+ /* TBD: Allocate temp buffer for processing. -+ * Push validated buffer to active config -+ */ -+ -+ /* Forget old config if opened for write */ -+ isp->trace->isys.fill_level = 0; -+ isp->trace->psys.fill_level = 0; -+ isp->trace->psys.wpt.fill_level = 0; -+ } -+ -+ if (file->f_mode & FMODE_READ) { -+ isp->trace->conf_dump_buffer = -+ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); -+ if (!isp->trace->conf_dump_buffer) { -+ isp->trace->open = 0; -+ mutex_unlock(&isp->trace->lock); -+ return -ENOMEM; -+ } -+ traceconf_dump(isp); -+ } -+ mutex_unlock(&isp->trace->lock); -+ return 0; -+} -+ -+static ssize_t traceconf_read(struct file *file, char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu6_device *isp = file->private_data; -+ -+ return simple_read_from_buffer(buf, len, ppos, -+ isp->trace->conf_dump_buffer, -+ isp->trace->size_conf_dump); -+} -+ -+static ssize_t traceconf_write(struct file *file, const char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ int i; -+ struct ipu6_device *isp = file->private_data; -+ ssize_t bytes = 0; -+ char *ipu_trace_buffer = NULL; -+ size_t buffer_size = 0; -+ u32 
ipu_trace_number = 0; -+ struct config_value *cfg_buffer = NULL; -+ -+ if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) || -+ (len < sizeof(ipu_trace_number))) { -+ dev_info(&isp->pdev->dev, -+ "length is error, len:%ld, loff:%lld\n", -+ len, *ppos); -+ return -EINVAL; -+ } -+ -+ ipu_trace_buffer = vzalloc(len); -+ if (!ipu_trace_buffer) -+ return -ENOMEM; -+ -+ bytes = copy_from_user(ipu_trace_buffer, buf, len); -+ if (bytes != 0) { -+ vfree(ipu_trace_buffer); -+ return -EFAULT; -+ } -+ -+ memcpy(&ipu_trace_number, ipu_trace_buffer, sizeof(u32)); -+ buffer_size = ipu_trace_number * sizeof(struct config_value); -+ if ((buffer_size + sizeof(ipu_trace_number)) != len) { -+ dev_info(&isp->pdev->dev, -+ "File size is not right, len:%ld, buffer_size:%zu\n", -+ len, buffer_size); -+ vfree(ipu_trace_buffer); -+ return -EFAULT; -+ } -+ -+ mutex_lock(&isp->trace->lock); -+ cfg_buffer = (struct config_value *)(ipu_trace_buffer + sizeof(u32)); -+ for (i = 0; i < ipu_trace_number; i++) { -+ update_register_cache(isp, cfg_buffer[i].reg, -+ cfg_buffer[i].value); -+ } -+ mutex_unlock(&isp->trace->lock); -+ vfree(ipu_trace_buffer); -+ -+ return len; -+} -+ -+static int traceconf_release(struct inode *inode, struct file *file) -+{ -+ struct ipu6_device *isp = file->private_data; -+ struct device *psys_dev = isp->psys ? &isp->psys->auxdev.dev : NULL; -+ struct device *isys_dev = isp->isys ? &isp->isys->auxdev.dev : NULL; -+ int isys_pm_rval = -EINVAL; -+ int psys_pm_rval = -EINVAL; -+ -+ /* -+ * Turn devices on outside trace->lock mutex. PM transition may -+ * cause call to function which tries to take the same lock. -+ * Also do this before trace->open is set back to 0 to avoid -+ * double restore (one here and one in pm transition). We can't -+ * rely purely on the restore done by pm call backs since trace -+ * configuration can occur in any phase compared to other activity. 
-+ */ -+ -+ if (file->f_mode & FMODE_WRITE) { -+ if (isys_dev) -+ isys_pm_rval = pm_runtime_resume_and_get(isys_dev); -+ if (isys_pm_rval >= 0 && psys_dev) -+ psys_pm_rval = pm_runtime_resume_and_get(psys_dev); -+ } -+ -+ mutex_lock(&isp->trace->lock); -+ isp->trace->open = 0; -+ vfree(isp->trace->conf_dump_buffer); -+ isp->trace->conf_dump_buffer = NULL; -+ -+ /* Update new cfg to HW */ -+ if (isys_pm_rval >= 0) { -+ __ipu_trace_stop(isys_dev); -+ clear_trace_buffer(isp->isys->trace_cfg); -+ __ipu_trace_restore(isys_dev); -+ } -+ -+ if (psys_pm_rval >= 0) { -+ __ipu_trace_stop(psys_dev); -+ clear_trace_buffer(isp->psys->trace_cfg); -+ __ipu_trace_restore(psys_dev); -+ } -+ mutex_unlock(&isp->trace->lock); -+ -+ if (psys_pm_rval >= 0) -+ pm_runtime_put(psys_dev); -+ if (isys_pm_rval >= 0) -+ pm_runtime_put(isys_dev); -+ -+ return 0; -+} -+ -+static const struct file_operations ipu_traceconf_fops = { -+ .owner = THIS_MODULE, -+ .open = traceconf_open, -+ .release = traceconf_release, -+ .read = traceconf_read, -+ .write = traceconf_write, -+ .llseek = noop_llseek, -+}; -+ -+static void wptraceconf_dump(struct ipu6_device *isp) -+{ -+ struct ipu_subsystem_wptrace_config *sys = &isp->trace->psys.wpt; -+ int i, rem_size; -+ char *out; -+ -+ sys->size_conf_dump = 0; -+ out = sys->conf_dump_buffer; -+ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; -+ -+ for (i = 0; i < sys->fill_level && rem_size > 0; i++) { -+ int bytes_print; -+ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", -+ sys->config[i].reg, -+ sys->config[i].value); -+ -+ bytes_print = min(n, rem_size - 1); -+ rem_size -= bytes_print; -+ out += bytes_print; -+ } -+ sys->size_conf_dump = out - sys->conf_dump_buffer; -+} -+ -+static int wptraceconf_open(struct inode *inode, struct file *file) -+{ -+ int ret; -+ struct ipu6_device *isp; -+ -+ if (!inode->i_private) -+ return -EACCES; -+ -+ isp = inode->i_private; -+ ret = mutex_trylock(&isp->trace->lock); -+ if (!ret) -+ return -EBUSY; -+ -+ if 
(isp->trace->psys.wpt.open) { -+ mutex_unlock(&isp->trace->lock); -+ return -EBUSY; -+ } -+ -+ file->private_data = isp; -+ if (file->f_mode & FMODE_WRITE) { -+ /* TBD: Allocate temp buffer for processing. -+ * Push validated buffer to active config -+ */ -+ /* Forget old config if opened for write */ -+ isp->trace->psys.wpt.fill_level = 0; -+ } -+ -+ if (file->f_mode & FMODE_READ) { -+ isp->trace->psys.wpt.conf_dump_buffer = -+ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); -+ if (!isp->trace->psys.wpt.conf_dump_buffer) { -+ mutex_unlock(&isp->trace->lock); -+ return -ENOMEM; -+ } -+ wptraceconf_dump(isp); -+ } -+ mutex_unlock(&isp->trace->lock); -+ return 0; -+} -+ -+static ssize_t wptraceconf_read(struct file *file, char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu6_device *isp = file->private_data; -+ -+ return simple_read_from_buffer(buf, len, ppos, -+ isp->trace->psys.wpt.conf_dump_buffer, -+ isp->trace->psys.wpt.size_conf_dump); -+} -+ -+static ssize_t wptraceconf_write(struct file *file, const char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ int i; -+ struct ipu6_device *isp = file->private_data; -+ ssize_t bytes = 0; -+ char *wpt_info_buffer = NULL; -+ size_t buffer_size = 0; -+ u32 wp_node_number = 0; -+ struct config_value *wpt_buffer = NULL; -+ struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt; -+ -+ if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) || -+ (len < sizeof(wp_node_number))) { -+ dev_info(&isp->pdev->dev, -+ "length is error, len:%ld, loff:%lld\n", -+ len, *ppos); -+ return -EINVAL; -+ } -+ -+ wpt_info_buffer = vzalloc(len); -+ if (!wpt_info_buffer) -+ return -ENOMEM; -+ -+ bytes = copy_from_user(wpt_info_buffer, buf, len); -+ if (bytes != 0) { -+ vfree(wpt_info_buffer); -+ return -EFAULT; -+ } -+ -+ memcpy(&wp_node_number, wpt_info_buffer, sizeof(u32)); -+ buffer_size = wp_node_number * sizeof(struct config_value); -+ if ((buffer_size + sizeof(wp_node_number)) != len) { -+ dev_info(&isp->pdev->dev, -+ "File 
size is not right, len:%ld, buffer_size:%zu\n", -+ len, buffer_size); -+ vfree(wpt_info_buffer); -+ return -EFAULT; -+ } -+ -+ mutex_lock(&isp->trace->lock); -+ wpt_buffer = (struct config_value *)(wpt_info_buffer + sizeof(u32)); -+ for (i = 0; i < wp_node_number; i++) { -+ if (wpt->fill_level < MAX_TRACE_REGISTERS) { -+ wpt->config[wpt->fill_level].reg = wpt_buffer[i].reg; -+ wpt->config[wpt->fill_level].value = -+ wpt_buffer[i].value; -+ wpt->fill_level++; -+ } else { -+ dev_info(&isp->pdev->dev, -+ "Address 0x%08x ignored as invalid register\n", -+ wpt_buffer[i].reg); -+ break; -+ } -+ } -+ mutex_unlock(&isp->trace->lock); -+ vfree(wpt_info_buffer); -+ -+ return len; -+} -+ -+static int wptraceconf_release(struct inode *inode, struct file *file) -+{ -+ struct ipu6_device *isp = file->private_data; -+ -+ mutex_lock(&isp->trace->lock); -+ isp->trace->open = 0; -+ vfree(isp->trace->psys.wpt.conf_dump_buffer); -+ isp->trace->psys.wpt.conf_dump_buffer = NULL; -+ mutex_unlock(&isp->trace->lock); -+ -+ return 0; -+} -+ -+static const struct file_operations ipu_wptraceconf_fops = { -+ .owner = THIS_MODULE, -+ .open = wptraceconf_open, -+ .release = wptraceconf_release, -+ .read = wptraceconf_read, -+ .write = wptraceconf_write, -+ .llseek = noop_llseek, -+}; -+ -+static int gettrace_open(struct inode *inode, struct file *file) -+{ -+ struct ipu_subsystem_trace_config *sys = inode->i_private; -+ -+ if (!sys) -+ return -EACCES; -+ -+ if (!sys->memory.memory_buffer) -+ return -EACCES; -+ -+ dma_sync_single_for_cpu(sys->dev, -+ sys->memory.dma_handle, -+ MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); -+ -+ file->private_data = sys; -+ return 0; -+}; -+ -+static ssize_t gettrace_read(struct file *file, char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu_subsystem_trace_config *sys = file->private_data; -+ -+ return simple_read_from_buffer(buf, len, ppos, -+ sys->memory.memory_buffer, -+ MEMORY_RING_BUFFER_SIZE + -+ 
MEMORY_RING_BUFFER_OVERREAD); -+} -+ -+static ssize_t gettrace_write(struct file *file, const char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu_subsystem_trace_config *sys = file->private_data; -+ static const char str[] = "clear"; -+ char buffer[sizeof(str)] = { 0 }; -+ ssize_t ret; -+ -+ ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); -+ if (ret < 0) -+ return ret; -+ -+ if (ret < sizeof(str) - 1) -+ return -EINVAL; -+ -+ if (!strncmp(str, buffer, sizeof(str) - 1)) { -+ clear_trace_buffer(sys); -+ return len; -+ } -+ -+ return -EINVAL; -+} -+ -+static int gettrace_release(struct inode *inode, struct file *file) -+{ -+ return 0; -+} -+ -+static const struct file_operations ipu_gettrace_fops = { -+ .owner = THIS_MODULE, -+ .open = gettrace_open, -+ .release = gettrace_release, -+ .read = gettrace_read, -+ .write = gettrace_write, -+ .llseek = noop_llseek, -+}; -+ -+int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, -+ struct device *dev, struct ipu_trace_block *blocks) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_trace *trace = isp->trace; -+ struct ipu_subsystem_trace_config *sys; -+ int ret = 0; -+ -+ if (!isp->trace) -+ return 0; -+ -+ mutex_lock(&isp->trace->lock); -+ -+ if (dev == &isp->isys->auxdev.dev) { -+ sys = &trace->isys; -+ } else if (dev == &isp->psys->auxdev.dev) { -+ sys = &trace->psys; -+ } else { -+ ret = -EINVAL; -+ goto leave; -+ } -+ -+ adev->trace_cfg = sys; -+ sys->dev = dev; -+ sys->offset = base - isp->base; /* sub system offset */ -+ sys->base = base; -+ sys->blocks = blocks; -+ -+ sys->memory.memory_buffer = -+ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, -+ &sys->memory.dma_handle, -+ GFP_KERNEL, 0); -+ -+ if (!sys->memory.memory_buffer) -+ dev_err(dev, "failed alloc memory for tracing.\n"); -+ -+leave: -+ mutex_unlock(&isp->trace->lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(ipu_trace_init); -+ -+void 
ipu_trace_uninit(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu6_device *isp = adev->isp; -+ struct ipu_trace *trace = isp->trace; -+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; -+ -+ if (!trace || !sys) -+ return; -+ -+ mutex_lock(&trace->lock); -+ -+ if (sys->memory.memory_buffer) -+ ipu6_dma_free( -+ adev, -+ MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, -+ sys->memory.memory_buffer, sys->memory.dma_handle, 0); -+ -+ sys->dev = NULL; -+ sys->memory.memory_buffer = NULL; -+ -+ mutex_unlock(&trace->lock); -+} -+EXPORT_SYMBOL_GPL(ipu_trace_uninit); -+ -+int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir) -+{ -+ struct dentry *files[4]; -+ int i = 0; -+ -+ if (!ipu_trace_enable) -+ return 0; -+ -+ files[i] = debugfs_create_file("traceconf", 0644, -+ dir, isp, &ipu_traceconf_fops); -+ if (!files[i]) -+ return -ENOMEM; -+ i++; -+ -+ files[i] = debugfs_create_file("wptraceconf", 0644, -+ dir, isp, &ipu_wptraceconf_fops); -+ if (!files[i]) -+ goto error; -+ i++; -+ -+ files[i] = debugfs_create_file("getisystrace", 0444, -+ dir, -+ &isp->trace->isys, &ipu_gettrace_fops); -+ -+ if (!files[i]) -+ goto error; -+ i++; -+ -+ files[i] = debugfs_create_file("getpsystrace", 0444, -+ dir, -+ &isp->trace->psys, &ipu_gettrace_fops); -+ if (!files[i]) -+ goto error; -+ -+ return 0; -+ -+error: -+ for (; i > 0; i--) -+ debugfs_remove(files[i - 1]); -+ return -ENOMEM; -+} -+ -+int ipu_trace_add(struct ipu6_device *isp) -+{ -+ if (!ipu_trace_enable) -+ return 0; -+ -+ isp->trace = devm_kzalloc(&isp->pdev->dev, -+ sizeof(struct ipu_trace), GFP_KERNEL); -+ if (!isp->trace) -+ return -ENOMEM; -+ -+ mutex_init(&isp->trace->lock); -+ -+ dev_dbg(&isp->pdev->dev, "ipu trace enabled!"); -+ -+ return 0; -+} -+ -+void ipu_trace_release(struct ipu6_device *isp) -+{ -+ if (!isp->trace) -+ return; -+ mutex_destroy(&isp->trace->lock); -+} -+ -+int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t 
*dma_handle) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; -+ -+ if (!ipu_trace_enable) -+ return -EACCES; -+ -+ if (!sys || !sys->memory.memory_buffer) -+ return -EACCES; -+ -+ *dma_handle = sys->memory.dma_handle; -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); -+ -+MODULE_AUTHOR("Samu Onkalo "); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Intel ipu trace support"); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -new file mode 100644 -index 0000000..f66d889 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -@@ -0,0 +1,147 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2014 - 2025 Intel Corporation */ -+ -+#ifndef IPU_TRACE_H -+#define IPU_TRACE_H -+#include -+ -+/* Trace unit register offsets */ -+#define TRACE_REG_TUN_DDR_ENABLE 0x000 -+#define TRACE_REG_TUN_NPK_ENABLE 0x004 -+#define TRACE_REG_TUN_DDR_INFO_VAL 0x008 -+#define TRACE_REG_TUN_NPK_ADDR 0x00C -+#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 -+#define TRACE_REG_TUN_DRAM_END_ADDR 0x014 -+#define TRACE_REG_TUN_LOCAL_TIMER0 0x018 -+#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C -+#define TRACE_REG_TUN_WR_PTR 0x020 -+#define TRACE_REG_TUN_RD_PTR 0x024 -+ -+/* -+ * Following registers are left out on purpose: -+ * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR -+ * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR -+ */ -+ -+/* Trace monitor register offsets */ -+#define TRACE_REG_TM_TRACE_ADDR_A 0x0900 -+#define TRACE_REG_TM_TRACE_ADDR_B 0x0904 -+#define TRACE_REG_TM_TRACE_ADDR_C 0x0908 -+#define TRACE_REG_TM_TRACE_ADDR_D 0x090c -+#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 -+#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 -+#define TRACE_REG_TM_TRACE_PER_PC 0x0918 -+#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c -+#define TRACE_REG_TM_TRACE_HEADER 0x0920 -+#define TRACE_REG_TM_TRACE_CFG 0x0924 -+#define 
TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 -+#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c -+#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 -+#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 -+#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 -+#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c -+#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 -+#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 -+#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 -+#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c -+#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 -+#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 -+#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 -+#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c -+#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 -+#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 -+#define TRACE_REG_TM_FWTRACE_LAST 0x0A08 -+ -+/* -+ * Following exists only in (I)SP address space: -+ * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST -+ */ -+ -+#define TRACE_REG_GPC_RESET 0x000 -+#define TRACE_REG_GPC_OVERALL_ENABLE 0x004 -+#define TRACE_REG_GPC_TRACE_HEADER 0x008 -+#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C -+#define TRACE_REG_GPC_TRACE_NPK_EN 0x010 -+#define TRACE_REG_GPC_TRACE_DDR_EN 0x014 -+#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 -+#define TRACE_REG_GPC_TRACE_LPKT 0x01C -+ -+#define TRACE_REG_GPC_ENABLE_ID0 0x020 -+#define TRACE_REG_GPC_ENABLE_ID1 0x024 -+#define TRACE_REG_GPC_ENABLE_ID2 0x028 -+#define TRACE_REG_GPC_ENABLE_ID3 0x02c -+ -+#define TRACE_REG_GPC_VALUE_ID0 0x030 -+#define TRACE_REG_GPC_VALUE_ID1 0x034 -+#define TRACE_REG_GPC_VALUE_ID2 0x038 -+#define TRACE_REG_GPC_VALUE_ID3 0x03c -+ -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c -+ -+#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 -+#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 -+#define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 -+#define 
TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c -+ -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c -+ -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c -+ -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c -+ -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c -+ -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac -+ -+#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 -+#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 -+#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 -+#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc -+ -+struct ipu_trace; -+struct ipu_subsystem_trace_config; -+ -+enum ipu_trace_block_type { -+ IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ -+ IPU_TRACE_BLOCK_TM, /* Trace monitor */ -+ IPU_TRACE_BLOCK_GPC, /* General purpose control */ -+ IPU_TRACE_CSI2, /* CSI2 legacy receiver */ -+ IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ -+ IPU_TRACE_SIG2CIOS, -+ IPU_TRACE_TIMER_RST, /* Trace reset control timer */ -+ IPU_TRACE_BLOCK_END /* End of list */ -+}; -+ -+struct ipu_trace_block { -+ u32 offset; /* Offset to block inside subsystem */ -+ enum ipu_trace_block_type type; -+}; -+ -+int ipu_trace_add(struct ipu6_device *isp); -+int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir); -+void 
ipu_trace_release(struct ipu6_device *isp); -+int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, -+ struct device *dev, struct ipu_trace_block *blocks); -+void ipu_trace_restore(struct device *dev); -+void ipu_trace_uninit(struct device *dev); -+void ipu_trace_stop(struct device *dev); -+int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); -+#endif -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 18f0dd2..e8ce0c9 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -32,6 +32,7 @@ - #include "ipu6-platform-buttress-regs.h" - #include "ipu6-platform-isys-csi2-reg.h" - #include "ipu6-platform-regs.h" -+#include "ipu6-trace.h" - - static unsigned int isys_freq_override; - module_param(isys_freq_override, uint, 0660); -@@ -489,6 +490,37 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) - return 0; - } - -+#ifdef CONFIG_DEBUG_FS -+static int ipu_init_debugfs(struct ipu6_device *isp) -+{ -+ struct dentry *dir; -+ -+ dir = debugfs_create_dir(IPU6_NAME, NULL); -+ if (!dir) -+ return -ENOMEM; -+ -+ if (ipu_trace_debugfs_add(isp, dir)) -+ goto err; -+ -+ isp->ipu_dir = dir; -+ -+ return 0; -+err: -+ debugfs_remove_recursive(dir); -+ return -ENOMEM; -+} -+ -+static void ipu_remove_debugfs(struct ipu6_device *isp) -+{ -+ /* -+ * Since isys and psys debugfs dir will be created under ipu root dir, -+ * mark its dentry to NULL to avoid duplicate removal. 
-+ */ -+ debugfs_remove_recursive(isp->ipu_dir); -+ isp->ipu_dir = NULL; -+} -+#endif /* CONFIG_DEBUG_FS */ -+ - static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - { - u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); -@@ -597,6 +629,10 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto buttress_exit; - } - -+ ret = ipu_trace_add(isp); -+ if (ret) -+ dev_err(&isp->pdev->dev, "Trace support not available\n"); -+ - ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, - isp->cpd_fw->size); - if (ret) { -@@ -676,6 +712,13 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - ipu6_mmu_hw_cleanup(isp->psys->mmu); - pm_runtime_put(&isp->psys->auxdev.dev); - -+#ifdef CONFIG_DEBUG_FS -+ ret = ipu_init_debugfs(isp); -+ if (ret) -+ dev_err(&isp->pdev->dev, "Failed to initialize debugfs"); -+ -+#endif -+ - /* Configure the arbitration mechanisms for VC requests */ - ipu6_configure_vc_mechanism(isp); - -@@ -717,6 +760,11 @@ static void ipu6_pci_remove(struct pci_dev *pdev) - struct ipu6_mmu *isys_mmu = isp->isys->mmu; - struct ipu6_mmu *psys_mmu = isp->psys->mmu; - -+#ifdef CONFIG_DEBUG_FS -+ ipu_remove_debugfs(isp); -+#endif -+ ipu_trace_release(isp); -+ - devm_free_irq(&pdev->dev, pdev->irq, isp); - ipu6_cpd_free_pkg_dir(isp->psys); - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -index 92e3c34..d91a78e 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -82,10 +82,15 @@ struct ipu6_device { - u32 cpd_metadata_cmpnt_size; - - void __iomem *base; -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *ipu_dir; -+#endif - bool need_ipc_reset; - bool secure_mode; - u8 hw_ver; - bool bus_ready_to_probe; -+ -+ struct ipu_trace *trace; - }; - - #define IPU6_ISYS_NAME "isys" --- -2.43.0 - diff --git a/patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch 
b/patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch deleted file mode 100644 index 25b41d8..0000000 --- a/patches/0010-media-pci-The-order-of-return-buffers-should-be-FIFO.patch +++ /dev/null @@ -1,100 +0,0 @@ -From dbe7da034781bfd23399a8f4f71a3c8c0a18ab10 Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Mon, 12 Jan 2026 05:46:03 -0700 -Subject: [PATCH 10/28] media: pci: The order of return buffers should be FIFO. -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Change Description: -return_buffers : use "list_first_entry", will get these logs in hal: -“CamHAL[ERR] DeviceBase: dequeueBuffer, CamBuf index isn't same with -index used by kernel -CamHAL[ERR] CaptureUnit: Device:Generic grab frame failed:-22” - -The index order is changed from sequential to reverse after return_buffers. -The reason why the normal start&stop does not expose the problem is that -every Hal start will start the buffer index from 0 instead of continuing -to use the buffer index returned last stop. - -So need return_buffers from the list of tail, -and need use the "list_last_entry". - -Signed-off-by: linya14x -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-queue.c | 31 ++++++++++--------- - 1 file changed, 16 insertions(+), 15 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -index b931c43..14868b1 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -@@ -519,39 +519,40 @@ static void return_buffers(struct ipu6_isys_queue *aq, - unsigned long flags; - - spin_lock_irqsave(&aq->lock, flags); -- while (!list_empty(&aq->incoming)) { -+ -+ /* -+ * Something went wrong (FW crash / HW hang / not all buffers -+ * returned from isys) if there are still buffers queued in active -+ * queue. We have to clean up places a bit. 
-+ */ -+ while (!list_empty(&aq->active)) { - struct vb2_buffer *vb; - -- ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, -- head); -+ ib = list_last_entry(&aq->active, struct ipu6_isys_buffer, -+ head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); -+ - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); -+ need_reset = true; - } - -- /* -- * Something went wrong (FW crash / HW hang / not all buffers -- * returned from isys) if there are still buffers queued in active -- * queue. We have to clean up places a bit. -- */ -- while (!list_empty(&aq->active)) { -+ while (!list_empty(&aq->incoming)) { - struct vb2_buffer *vb; - -- ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, -- head); -+ ib = list_last_entry(&aq->incoming, struct ipu6_isys_buffer, -+ head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); -- - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); -- need_reset = true; - } - - spin_unlock_irqrestore(&aq->lock, flags); -@@ -699,8 +700,8 @@ static int reset_start_streaming(struct ipu6_isys_video *av) - - spin_lock_irqsave(&aq->lock, flags); - while (!list_empty(&aq->active)) { -- struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, -- struct ipu6_isys_buffer, head); -+ struct ipu6_isys_buffer *ib = list_last_entry(&aq->active, -+ struct ipu6_isys_buffer, head); - - list_del(&ib->head); - list_add_tail(&ib->head, &aq->incoming); --- -2.43.0 - diff --git a/patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch b/patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch deleted file mode 100644 index 55886a2..0000000 --- a/patches/0011-media-pci-Modify-enble-disable-stream-in-CSI2.patch +++ /dev/null @@ -1,338 +0,0 @@ -From b184c6f1f79f757ebdbf3b31098f7ed5f9336586 Mon Sep 17 00:00:00 2001 -From: Hongju Wang -Date: Mon, 12 Jan 2026 05:37:29 -0700 
-Subject: [PATCH 11/28] media: pci: Modify enble/disable stream in CSI2 - -Signed-off-by: Hongju Wang -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 105 ++++++++++++------ - .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 7 +- - .../media/pci/intel/ipu6/ipu6-isys-video.c | 78 +++++++------ - 3 files changed, 118 insertions(+), 72 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -index 051898c..7779c47 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -@@ -345,6 +345,12 @@ static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, - return ret; - } - -+/* -+ * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set -+ * of streams. -+ */ -+#define CSI2_SUBDEV_MAX_STREAM_ID 63 -+ - static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) -@@ -352,54 +358,81 @@ static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct ipu6_isys_csi2_timing timing = { }; -- struct v4l2_subdev *remote_sd; -- struct media_pad *remote_pad; -- u64 sink_streams; -- int ret; -- -- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); -+ u32 sink_pad, sink_stream; -+ struct v4l2_subdev *r_sd; -+ struct media_pad *r_pad; -+ int ret, i; -+ -+ if (!csi2->stream_count) { -+ ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); -+ if (ret) -+ return ret; -+ -+ ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, -+ true); -+ if (ret) -+ return ret; -+ } - -- sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, -- CSI2_PAD_SINK, -- &streams_mask); -+ for (i = 0; i <= 
CSI2_SUBDEV_MAX_STREAM_ID; i++) { -+ if (streams_mask & BIT_ULL(i)) -+ break; -+ } - -- ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); -+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, -+ &sink_pad, &sink_stream); - if (ret) - return ret; - -- ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); -- if (ret) -- return ret; -+ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -+ r_sd = media_entity_to_v4l2_subdev(r_pad->entity); - -- ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, -- sink_streams); -- if (ret) { -- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); -- return ret; -+ ret = v4l2_subdev_enable_streams(r_sd, r_pad->index, -+ BIT_ULL(sink_stream)); -+ if (!ret) { -+ csi2->stream_count++; -+ return 0; - } - -- return 0; -+ if (!csi2->stream_count) -+ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); -+ -+ return ret; - } - - static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) - { -- struct v4l2_subdev *remote_sd; -- struct media_pad *remote_pad; -- u64 sink_streams; -+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); -+ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); -+ u32 sink_pad, sink_stream; -+ struct v4l2_subdev *r_sd; -+ struct media_pad *r_pad; -+ int ret, i; - -- sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC, -- CSI2_PAD_SINK, -- &streams_mask); -+ for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) { -+ if (streams_mask & BIT_ULL(i)) -+ break; -+ } - -- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); -+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, -+ &sink_pad, &sink_stream); -+ if (ret) -+ return ret; - -- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); -+ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -+ r_sd = 
media_entity_to_v4l2_subdev(r_pad->entity); - -- v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); -+ v4l2_subdev_disable_streams(r_sd, r_pad->index, BIT_ULL(sink_stream)); -+ -+ if (--csi2->stream_count) -+ return 0; -+ -+ dev_dbg(&csi2->isys->adev->auxdev.dev, -+ "stream off CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes); -+ -+ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); - - return 0; - } -@@ -598,7 +631,8 @@ void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) - int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, -- struct v4l2_mbus_frame_desc_entry *entry) -+ struct v4l2_mbus_frame_desc_entry *entry, -+ int *nr_queues) - { - struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; - struct device *dev = &csi2->isys->adev->auxdev.dev; -@@ -645,5 +679,14 @@ int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - - *entry = *desc_entry; - -+ for (i = 0; i < desc.num_entries; i++) { -+ if (desc_entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc) -+ (*nr_queues)++; -+ } -+ -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ csi2->is_multiple = true; -+ dev_dbg(dev, "set csi2->is_multiple is true.\n"); -+#endif - return 0; - } -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h -index bc8594c..ea609dd 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h -@@ -45,6 +45,10 @@ struct ipu6_isys_csi2 { - u32 receiver_errors; - unsigned int nlanes; - unsigned int port; -+ unsigned int stream_count; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ bool is_multiple; -+#endif - }; - - struct ipu6_isys_csi2_timing { -@@ -75,6 +79,7 @@ void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); - int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, -- struct 
v4l2_mbus_frame_desc_entry *entry); -+ struct v4l2_mbus_frame_desc_entry *entry, -+ int *nr_queues); - - #endif /* IPU6_ISYS_CSI2_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index 2d1391e..d1a0a72 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -1036,35 +1036,40 @@ ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) - return stream; - } - --static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av) -+static u32 get_remote_pad_stream(struct media_pad *r_pad) - { -- struct media_pipeline *pipeline = -- media_entity_pipeline(&__av->vdev.entity); -+ struct v4l2_subdev_state *state; -+ struct v4l2_subdev *sd; -+ u32 stream_id = 0; - unsigned int i; -- u64 stream_mask = 0; - -- for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { -- struct ipu6_isys_video *av = &__av->csi2->av[i]; -+ sd = media_entity_to_v4l2_subdev(r_pad->entity); -+ state = v4l2_subdev_lock_and_get_active_state(sd); -+ if (!state) -+ return 0; - -- if (pipeline == media_entity_pipeline(&av->vdev.entity)) -- stream_mask |= BIT_ULL(av->source_stream); -+ for (i = 0; i < state->stream_configs.num_configs; i++) { -+ struct v4l2_subdev_stream_config *cfg = -+ &state->stream_configs.configs[i]; -+ if (cfg->pad == r_pad->index) { -+ stream_id = cfg->stream; -+ break; -+ } - } - -- return stream_mask; -+ v4l2_subdev_unlock_state(state); -+ -+ return stream_id; - } - - int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - struct ipu6_isys_buffer_list *bl) - { -- struct v4l2_subdev_krouting *routing; - struct ipu6_isys_stream *stream = av->stream; -- struct v4l2_subdev_state *subdev_state; - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_subdev *sd; - struct media_pad *r_pad; -- u32 sink_pad, sink_stream; -- u64 r_stream; -- u64 stream_mask = 0; -+ u32 r_stream = 0; - int ret = 0; - 
- dev_dbg(dev, "set stream: %d\n", state); -@@ -1074,31 +1079,22 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - - sd = &stream->asd->sd; - r_pad = media_pad_remote_pad_first(&av->pad); -- r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); -- -- subdev_state = v4l2_subdev_lock_and_get_active_state(sd); -- routing = &subdev_state->routing; -- ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, -- r_stream, &sink_pad, -- &sink_stream); -- v4l2_subdev_unlock_state(subdev_state); -- if (ret) -- return ret; -- -- stream_mask = get_stream_mask_by_pipeline(av); -+ r_stream = get_remote_pad_stream(r_pad); - if (!state) { - stop_streaming_firmware(av); - - /* stop sub-device which connects with video */ -- dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", -- sd->name, r_pad->index, stream_mask); -+ dev_dbg(dev, "disable streams %s pad:%d mask:0x%llx for %s\n", -+ sd->name, r_pad->index, BIT_ULL(r_stream), -+ stream->source_entity->name); - ret = v4l2_subdev_disable_streams(sd, r_pad->index, -- stream_mask); -+ BIT_ULL(r_stream)); - if (ret) { -- dev_err(dev, "stream off %s failed with %d\n", sd->name, -- ret); -+ dev_err(dev, "disable streams %s failed with %d\n", -+ sd->name, ret); - return ret; - } -+ - close_streaming_firmware(av); - } else { - ret = start_stream_firmware(av, bl); -@@ -1108,12 +1104,14 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - } - - /* start sub-device which connects with video */ -- dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, -- r_pad->index, stream_mask); -- ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); -+ dev_dbg(dev, "enable streams %s pad: %d mask:0x%llx for %s\n", -+ sd->name, r_pad->index, BIT_ULL(r_stream), -+ stream->source_entity->name); -+ ret = v4l2_subdev_enable_streams(sd, r_pad->index, -+ BIT_ULL(r_stream)); - if (ret) { -- dev_err(dev, "stream on %s failed with %d\n", sd->name, -- ret); -+ dev_err(dev, 
"enable streams %s failed with %d\n", -+ sd->name, ret); - goto out_media_entity_stop_streaming_firmware; - } - } -@@ -1277,8 +1275,6 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, - /* Find the root */ - state = v4l2_subdev_lock_and_get_active_state(remote_sd); - for_each_active_route(&state->routing, r) { -- (*nr_queues)++; -- - if (r->source_pad == remote_pad->index) - route = r; - } -@@ -1293,11 +1289,13 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, - - ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, - to_ipu6_isys_csi2(asd), -- *source_entity, &entry); -+ *source_entity, &entry, -+ nr_queues); - if (ret == -ENOIOCTLCMD) { - av->vc = 0; - av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); -- } else if (!ret) { -+ *nr_queues = 1; -+ } else if (*nr_queues && !ret) { - dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", - entry.stream, entry.length, entry.bus.csi2.vc, - entry.bus.csi2.dt); --- -2.43.0 - diff --git a/patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch b/patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch deleted file mode 100644 index 3f494da..0000000 --- a/patches/0012-media-pci-Set-the-correct-SOF-for-different-stream.patch +++ /dev/null @@ -1,24 +0,0 @@ -From fdf4f8d8cda8254118d673d053534ad59488c932 Mon Sep 17 00:00:00 2001 -From: Hongju Wang -Date: Mon, 12 Jan 2026 07:28:11 -0700 -Subject: [PATCH 12/28] media: pci: Set the correct SOF for different stream - ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -index 7779c47..70aaf8f 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -@@ -611,6 +611,7 @@ void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) - .type = V4L2_EVENT_FRAME_SYNC, - }; - -+ ev.id = 
stream->vc; - ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); - v4l2_event_queue(vdev, &ev); - --- -2.43.0 - diff --git a/patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch b/patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch deleted file mode 100644 index 31e8f46..0000000 --- a/patches/0013-media-intel-ipu6-support-imx390-for-6.11.0-rc3.patch +++ /dev/null @@ -1,981 +0,0 @@ -From 8e9a0b679a7a1dfef27eb9d9db8da768ae09e1b8 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Mon, 12 Jan 2026 07:30:57 -0700 -Subject: [PATCH 13/28] media: intel-ipu6: support imx390 for 6.11.0-rc3. - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-buttress.c | 42 +++ - .../media/pci/intel/ipu6/ipu6-buttress.h | 1 + - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 77 +++++ - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 324 +++++++++++++++++- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 85 +++++ - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 123 ++++++- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 15 +- - 7 files changed, 656 insertions(+), 11 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index ffa6dff..0cf42e4 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - - #include "ipu6.h" - #include "ipu6-bus.h" -@@ -776,6 +777,47 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) - } - EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); - -+/* -+ * The dev_id was hard code in platform data, as i2c bus number -+ * may change dynamiclly, we need to update this bus id -+ * accordingly. 
-+ * -+ * @adapter_id: hardware i2c adapter id, this was fixed in platform data -+ * return: i2c bus id registered in system -+ */ -+int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) -+{ -+ struct i2c_adapter *adapter; -+ char name[32]; -+ int i = 0; -+ -+ if (adapter_bdf) { -+ while ((adapter = i2c_get_adapter(i)) != NULL) { -+ struct device *parent = adapter->dev.parent; -+ struct device *pp = parent->parent; -+ -+ if (pp && !strncmp(adapter_bdf, dev_name(pp), bdf_len)) -+ return i; -+ i++; -+ } -+ } -+ -+ i = 0; -+ snprintf(name, sizeof(name), "i2c_designware.%d", adapter_id); -+ while ((adapter = i2c_get_adapter(i)) != NULL) { -+ struct device *parent = adapter->dev.parent; -+ -+ if (parent && !strncmp(name, dev_name(parent), sizeof(name))) -+ return i; -+ i++; -+ } -+ -+ /* Not found, should never happen! */ -+ WARN_ON_ONCE(1); -+ return -1; -+} -+EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, INTEL_IPU6); -+ - void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) - { - u32 tsc_hi_1, tsc_hi_2, tsc_lo; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 482978c..18d6e0b 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -83,4 +83,5 @@ void ipu6_buttress_exit(struct ipu6_device *isp); - void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); - void ipu6_buttress_restore(struct ipu6_device *isp); -+int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); - #endif /* IPU6_BUTTRESS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index 71aa500..f18e6fa 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -567,6 +567,27 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys 
*isys, u8 id) - return ret; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+{ -+ unsigned int phy_id; -+ void __iomem *phy_base; -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct ipu6_device *isp = adev->isp; -+ void __iomem *isp_base = isp->base; -+ unsigned int i; -+ -+ phy_id = cfg->port / 4; -+ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -+ -+ for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { -+ writel(common_init_regs[i].val, -+ phy_base + common_init_regs[i].reg); -+ } -+ -+ return 0; -+} -+#else - static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) - { - struct ipu6_bus_device *adev = isys->adev; -@@ -588,6 +609,7 @@ static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) - phy_base + common_init_regs[i].reg); - } - } -+#endif - - static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - { -@@ -619,6 +641,55 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - return ret; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+{ -+ unsigned int phy_port, phy_id; -+ void __iomem *phy_base; -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct ipu6_device *isp = adev->isp; -+ void __iomem *isp_base = isp->base; -+ const struct phy_reg **phy_config_regs; -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ int i; -+ -+ if (!spdata) { -+ dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -+ return -EINVAL; -+ } -+ -+ phy_id = cfg->port / 4; -+ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -+ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -+ sd_info = *subdevs; -+ if 
(!sd_info->csi2) -+ continue; -+ -+ phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); -+ if (phy_port < 0) { -+ dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", -+ cfg->port, cfg->nlanes); -+ return -ENXIO; -+ } -+ -+ if ((sd_info->csi2->port / 4) != phy_id) -+ continue; -+ -+ dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", -+ phy_port, phy_id, cfg->nlanes); -+ -+ phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; -+ -+ for (i = 0; phy_config_regs[phy_port][i].reg; i++) { -+ writel(phy_config_regs[phy_port][i].val, -+ phy_base + phy_config_regs[phy_port][i].reg); -+ } -+ } -+ -+ return 0; -+} -+#else - static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) - { - struct device *dev = &isys->adev->auxdev.dev; -@@ -658,6 +729,7 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) - - return 0; - } -+#endif - - #define CSI_MCD_PHY_NUM 2 - static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; -@@ -698,9 +770,14 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 0); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ ipu6_isys_mcd_phy_common_init(isys, cfg); -+ ret = ipu6_isys_mcd_phy_config(isys, cfg); -+#else - ipu6_isys_mcd_phy_common_init(isys); - - ret = ipu6_isys_mcd_phy_config(isys); -+#endif - if (ret) - return ret; - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index af03e2f..05ced8b 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -28,9 +28,14 @@ - #include - #include - #include --#include --#include -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #include -+#include -+#include -+#include -+#include -+#include -+#endif - - #include "ipu6-bus.h" - #include "ipu6-cpd.h" -@@ -100,6 +105,57 @@ enum ltr_did_type { - }; - - #define ISYS_PM_QOS_VALUE 300 -+#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+/* -+ * The param was passed from module to indicate if port -+ * could be optimized. -+ */ -+static bool csi2_port_optimized = true; -+module_param(csi2_port_optimized, bool, 0660); -+MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); -+#endif -+ -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+struct isys_i2c_test { -+ u8 bus_nr; -+ u16 addr; -+ struct i2c_client *client; -+}; -+ -+static int isys_i2c_test(struct device *dev, void *priv) -+{ -+ struct i2c_client *client = i2c_verify_client(dev); -+ struct isys_i2c_test *test = priv; -+ -+ if (!client) -+ return 0; -+ -+ if (i2c_adapter_id(client->adapter) != test->bus_nr || -+ client->addr != test->addr) -+ return 0; -+ -+ test->client = client; -+ -+ return 0; -+} -+ -+static struct -+i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, -+ struct ipu6_isys_subdev_info *sd_info) -+{ -+ struct i2c_board_info *info = &sd_info->i2c.board_info; -+ struct isys_i2c_test test = { -+ .bus_nr = i2c_adapter_id(adapter), -+ .addr = info->addr, -+ }; -+ int rval; -+ -+ rval = i2c_for_each_dev(&test, isys_i2c_test); -+ if (rval || !test.client) -+ return NULL; -+ return test.client; -+} -+#endif - - static int isys_isr_one(struct ipu6_bus_device *adev); - -@@ -142,6 +198,153 @@ unregister_subdev: - return ret; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+static int isys_register_ext_subdev(struct ipu6_isys *isys, -+ struct ipu6_isys_subdev_info *sd_info) -+{ -+ struct i2c_adapter *adapter; -+ struct v4l2_subdev *sd; -+ struct i2c_client *client; -+ int rval; -+ int bus; -+ -+ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf, -+ sizeof(sd_info->i2c.i2c_adapter_bdf)); -+ if (bus < 0) { -+ dev_err(&isys->adev->auxdev.dev, -+ "getting i2c bus id for adapter %d (bdf %s) failed", -+ sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf); -+ return -ENOENT; -+ } -+ 
dev_info(&isys->adev->auxdev.dev, -+ "got i2c bus id %d for adapter %d (bdf %s)", bus, -+ sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf); -+ adapter = i2c_get_adapter(bus); -+ if (!adapter) { -+ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); -+ return -ENOENT; -+ } -+ -+ dev_info(&isys->adev->auxdev.dev, -+ "creating new i2c subdev for %s (address %2.2x, bus %d)", -+ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, -+ bus); -+ -+ if (sd_info->csi2) { -+ dev_info(&isys->adev->auxdev.dev, "sensor device on CSI port: %d\n", -+ sd_info->csi2->port); -+ if (sd_info->csi2->port >= isys->pdata->ipdata->csi2.nports || -+ !isys->csi2[sd_info->csi2->port].isys) { -+ dev_warn(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", -+ sd_info->csi2->port); -+ rval = -EINVAL; -+ goto skip_put_adapter; -+ } -+ } else { -+ dev_info(&isys->adev->auxdev.dev, "non camera subdevice\n"); -+ } -+ -+ client = isys_find_i2c_subdev(adapter, sd_info); -+ if (client) { -+ dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); -+ rval = 0; -+ goto skip_put_adapter; -+ } -+ -+ sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, -+ &sd_info->i2c.board_info, NULL); -+ if (!sd) { -+ dev_warn(&isys->adev->auxdev.dev, "can't create new i2c subdev\n"); -+ rval = -EINVAL; -+ goto skip_put_adapter; -+ } -+ -+ if (!sd_info->csi2) -+ return 0; -+ -+ return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); -+ -+skip_put_adapter: -+ i2c_put_adapter(adapter); -+ -+ return rval; -+} -+ -+static int isys_unregister_ext_subdev(struct ipu6_isys *isys, -+ struct ipu6_isys_subdev_info *sd_info) -+{ -+ struct i2c_adapter *adapter; -+ struct i2c_client *client; -+ int bus; -+ -+ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf, -+ sizeof(sd_info->i2c.i2c_adapter_bdf)); -+ if (bus < 0) { -+ dev_err(&isys->adev->auxdev.dev, -+ "getting i2c bus id for adapter %d (bdf %s) failed\n", -+ sd_info->i2c.i2c_adapter_id, -+ 
sd_info->i2c.i2c_adapter_bdf); -+ return -ENOENT; -+ } -+ dev_dbg(&isys->adev->auxdev.dev, -+ "got i2c bus id %d for adapter %d (bdf %s)\n", bus, -+ sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf); -+ adapter = i2c_get_adapter(bus); -+ if (!adapter) { -+ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); -+ return -ENOENT; -+ } -+ -+ dev_dbg(&isys->adev->auxdev.dev, -+ "unregister i2c subdev for %s (address %2.2x, bus %d)\n", -+ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, -+ bus); -+ -+ client = isys_find_i2c_subdev(adapter, sd_info); -+ if (!client) { -+ dev_dbg(&isys->adev->auxdev.dev, "Device not exists\n"); -+ goto skip_put_adapter; -+ } -+ -+ i2c_unregister_device(client); -+ -+skip_put_adapter: -+ i2c_put_adapter(adapter); -+ -+ return 0; -+} -+ -+static void isys_register_ext_subdevs(struct ipu6_isys *isys) -+{ -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ -+ if (!spdata) { -+ dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -+ return; -+ } -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) -+ isys_register_ext_subdev(isys, *sd_info); -+} -+ -+static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) -+{ -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ -+ if (!spdata) -+ return; -+ -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) -+ isys_unregister_ext_subdev(isys, *sd_info); -+} -+#endif -+ - static void isys_stream_init(struct ipu6_isys *isys) - { - u32 i; -@@ -173,10 +376,43 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ DECLARE_BITMAP(csi2_enable, 32); -+#endif - unsigned int i; - 
int ret; - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ /* -+ * Here is somewhat a workaround, let each platform decide -+ * if csi2 port can be optimized, which means only registered -+ * port from pdata would be enabled. -+ */ -+ if (csi2_port_optimized && spdata) { -+ bitmap_zero(csi2_enable, 32); -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { -+ if ((*sd_info)->csi2) { -+ i = (*sd_info)->csi2->port; -+ if (i >= csi2_pdata->nports) { -+ dev_warn(&isys->adev->auxdev.dev, -+ "invalid csi2 port %u\n", i); -+ continue; -+ } -+ bitmap_set(csi2_enable, i, 1); -+ } -+ } -+ } else { -+ bitmap_fill(csi2_enable, 32); -+ } -+#endif -+ - for (i = 0; i < csi2_pdata->nports; i++) { -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ if (!test_bit(i, csi2_enable)) -+ continue; -+#endif - ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, - isys->pdata->base + - CSI_REG_PORT_BASE(i), i); -@@ -200,10 +436,43 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ DECLARE_BITMAP(csi2_enable, 32); -+#endif - unsigned int i, j; - int ret; - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ /* -+ * Here is somewhat a workaround, let each platform decide -+ * if csi2 port can be optimized, which means only registered -+ * port from pdata would be enabled. 
-+ */ -+ if (csi2_port_optimized && spdata) { -+ bitmap_zero(csi2_enable, 32); -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { -+ if ((*sd_info)->csi2) { -+ i = (*sd_info)->csi2->port; -+ if (i >= csi2_pdata->nports) { -+ dev_warn(&isys->adev->auxdev.dev, -+ "invalid csi2 port %u\n", i); -+ continue; -+ } -+ bitmap_set(csi2_enable, i, 1); -+ } -+ } -+ } else { -+ bitmap_fill(csi2_enable, 32); -+ } -+#endif -+ - for (i = 0; i < csi2_pdata->nports; i++) { -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ if (!test_bit(i, csi2_enable)) -+ continue; -+#endif - struct media_entity *sd = &isys->csi2[i].asd.sd.entity; - - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { -@@ -238,10 +507,43 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ DECLARE_BITMAP(csi2_enable, 32); -+#endif - unsigned int i, j; - int ret; - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ /* -+ * Here is somewhat a workaround, let each platform decide -+ * if csi2 port can be optimized, which means only registered -+ * port from pdata would be enabled. 
-+ */ -+ if (csi2_port_optimized && spdata) { -+ bitmap_zero(csi2_enable, 32); -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { -+ if ((*sd_info)->csi2) { -+ i = (*sd_info)->csi2->port; -+ if (i >= csi2_pdata->nports) { -+ dev_warn(&isys->adev->auxdev.dev, -+ "invalid csi2 port %u\n", i); -+ continue; -+ } -+ bitmap_set(csi2_enable, i, 1); -+ } -+ } -+ } else { -+ bitmap_fill(csi2_enable, 32); -+ } -+#endif -+ - for (i = 0; i < csi2_pdata->nports; i++) { -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ if (!test_bit(i, csi2_enable)) -+ continue; -+#endif - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { - struct ipu6_isys_video *av = &isys->csi2[i].av[j]; - -@@ -672,6 +974,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) - mutex_destroy(&iwake_watermark->mutex); - } - -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, -@@ -783,6 +1086,7 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) - v4l2_async_nf_unregister(&isys->notifier); - v4l2_async_nf_cleanup(&isys->notifier); - } -+#endif - - static int isys_register_devices(struct ipu6_isys *isys) - { -@@ -820,12 +1124,23 @@ static int isys_register_devices(struct ipu6_isys *isys) - if (ret) - goto out_isys_unregister_subdevices; - -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - ret = isys_notifier_init(isys); - if (ret) - goto out_isys_unregister_subdevices; -+#else -+ isys_register_ext_subdevs(isys); -+#endif -+ -+ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -+ if (ret) -+ goto out_isys_unregister_ext_subdevs; - - return 0; - -+out_isys_unregister_ext_subdevs: -+ isys_unregister_ext_subdevs(isys); -+ - out_isys_unregister_subdevices: - isys_csi2_unregister_subdevices(isys); - -@@ -848,6 +1163,9 @@ static void isys_unregister_devices(struct ipu6_isys *isys) - { - 
isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ isys_unregister_ext_subdevs(isys); -+#endif - v4l2_device_unregister(&isys->v4l2_dev); - media_device_unregister(&isys->media_dev); - media_device_cleanup(&isys->media_dev); -@@ -1218,7 +1536,9 @@ static void isys_remove(struct auxiliary_device *auxdev) - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - isys_notifier_cleanup(isys); -+#endif - - cpu_latency_qos_remove_request(&isys->pm_qos); - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 982eb5d..a956045 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -62,6 +63,15 @@ struct ipu6_bus_device; - #define IPU6EP_MTL_LTR_VALUE 1023 - #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc - -+#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -+ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+#define IPU6_SPDATA_NAME_LEN 20 -+#define IPU6_SPDATA_BDF_LEN 32 -+#define IPU6_SPDATA_GPIO_NUM 4 -+#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#endif -+ - struct ltr_did { - union { - u32 value; -@@ -165,7 +175,9 @@ struct ipu6_isys { - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - struct v4l2_async_notifier notifier; -+#endif - struct isys_iwake_watermark iwake_watermark; - #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET - struct mutex reset_mutex; -@@ -184,6 +196,79 @@ struct isys_fw_msgs { - dma_addr_t dma_addr; - }; - -+struct ipu6_isys_subdev_i2c_info { -+ struct i2c_board_info board_info; -+ int i2c_adapter_id; -+ char 
i2c_adapter_bdf[32]; -+}; -+ -+#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -+ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+#define IPU6_SPDATA_NAME_LEN 20 -+#define IPU6_SPDATA_BDF_LEN 32 -+#define IPU6_SPDATA_GPIO_NUM 4 -+#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#endif -+ -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+/** -+ * struct ipu6_spdata_rep - override subdev platform data -+ * -+ * @name: i2c_board_info.type -+ * @i2c_adapter_bdf_o: old i2c adapter bdf -+ * @slave_addr_o: old i2c slave address -+ * @i2c_adapter_bdf_n: new i2c adapter bdf -+ * @slave_addr_n: new i2c slave address -+ * -+ * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and -+ * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n -+ */ -+struct ipu6_spdata_rep { -+ /* i2c old information */ -+ char name[IPU6_SPDATA_NAME_LEN]; -+ unsigned int port_o; -+ char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; -+ uint32_t slave_addr_o; -+ -+ /* i2c new information */ -+ unsigned int port_n; -+ char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; -+ uint32_t slave_addr_n; -+ -+ /* sensor_platform */ -+ unsigned int lanes; -+ int gpios[IPU6_SPDATA_GPIO_NUM]; -+ int irq_pin; -+ unsigned int irq_pin_flags; -+ char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; -+ char suffix; -+}; -+#endif -+ -+struct ipu6_isys_subdev_info { -+ struct ipu6_isys_csi2_config *csi2; -+ struct ipu6_isys_subdev_i2c_info i2c; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ void (*fixup_spdata)(const void *spdata_rep, void *spdata); -+#endif -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ char *acpi_hid; -+#endif -+}; -+ -+struct ipu6_isys_clk_mapping { -+ struct clk_lookup clkdev_data; -+ char *platform_clock_name; -+}; -+ -+struct ipu6_isys_subdev_pdata { -+ struct 
ipu6_isys_subdev_info **subdevs; -+ struct ipu6_isys_clk_mapping *clk_map; -+}; -+ - struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); - void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data); - void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index e8ce0c9..6019705 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -34,6 +34,19 @@ - #include "ipu6-platform-regs.h" - #include "ipu6-trace.h" - -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+#include -+#endif -+ -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+static int isys_init_acpi_add_device(struct device *dev, void *priv, -+ struct ipu6_isys_csi2_config *csi2, -+ bool reprobe) -+{ -+ return 0; -+} -+#endif -+ - static unsigned int isys_freq_override; - module_param(isys_freq_override, uint, 0660); - MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); -@@ -372,18 +385,25 @@ static void ipu6_internal_pdata_init(struct ipu6_device *isp) - static struct ipu6_bus_device * - ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, -- const struct ipu6_isys_internal_pdata *ipdata) -+ const struct ipu6_isys_internal_pdata *ipdata, -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata -+#endif -+ ) - { - struct device *dev = &pdev->dev; - struct ipu6_bus_device *isys_adev; - struct ipu6_isys_pdata *pdata; -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ struct ipu6_isys_subdev_pdata *acpi_pdata; -+#endif - int ret; - -- ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -- if (ret) { -- dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -- return ERR_PTR(ret); -- } -+ // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -+ // if (ret) { -+ // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -+ // return ERR_PTR(ret); 
-+ // } - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) -@@ -391,7 +411,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - - pdata->base = base; - pdata->ipdata = ipdata; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ pdata->spdata = spdata; -+#endif - /* Override the isys freq */ - if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && - isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { -@@ -408,6 +430,19 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - "ipu6_bus_initialize_device isys failed\n"); - } - -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ if (!spdata) { -+ dev_dbg(&pdev->dev, "No subdevice info provided"); -+ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, -+ isys_init_acpi_add_device); -+ pdata->spdata = acpi_pdata; -+ } else { -+ dev_dbg(&pdev->dev, "Subdevice info found"); -+ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, -+ isys_init_acpi_add_device); -+ } -+#endif -+ - isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(isys_adev->mmu)) { -@@ -538,6 +573,59 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+static inline int match_spdata(struct ipu6_isys_subdev_info *sd, -+ const struct ipu6_spdata_rep *rep) -+{ -+ if (strcmp(sd->i2c.board_info.type, rep->name)) -+ return 0; -+ -+ if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) -+ return 0; -+ -+ if (sd->i2c.board_info.addr != rep->slave_addr_o) -+ return 0; -+ -+ if (sd->csi2->port != rep->port_o) -+ return 0; -+ -+ return 1; -+} -+ -+static void fixup_spdata(const void *spdata_rep, -+ struct ipu6_isys_subdev_pdata *spdata) -+{ -+ const struct ipu6_spdata_rep *rep = spdata_rep; -+ struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ 
-+ if (!spdata) -+ return; -+ -+ for (; rep->name[0]; rep++) { -+ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -+ sd_info = *subdevs; -+ -+ if (!sd_info->csi2) -+ continue; -+ -+ if (match_spdata(sd_info, rep)) { -+ strcpy(sd_info->i2c.i2c_adapter_bdf, -+ rep->i2c_adapter_bdf_n); -+ sd_info->i2c.board_info.addr = -+ rep->slave_addr_n; -+ sd_info->csi2->port = rep->port_n; -+ -+ if (sd_info->fixup_spdata) -+ sd_info->fixup_spdata(rep, -+ sd_info->i2c.board_info.platform_data); -+ } -+ } -+ } -+} -+#endif -+#endif -+ - static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { - struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; -@@ -641,6 +729,16 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto out_ipu6_bus_del_devices; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); -+ if (rval) -+ dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); -+ else -+ fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); -+#endif -+#endif -+ - isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, - sizeof(isys_buttress_ctrl), GFP_KERNEL); - if (!isys_ctrl) { -@@ -649,7 +747,11 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - } - - isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, -- &isys_ipdata); -+ &isys_ipdata, -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ pdev->dev.platform_data -+#endif -+ ); - if (IS_ERR(isp->isys)) { - ret = PTR_ERR(isp->isys); - goto out_ipu6_bus_del_devices; -@@ -748,6 +850,11 @@ out_ipu6_bus_del_devices: - ipu6_mmu_cleanup(isp->isys->mmu); - ipu6_bus_del_devices(pdev); - release_firmware(isp->cpd_fw); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ 
release_firmware(isp->spdata_fw); -+#endif -+#endif - buttress_exit: - ipu6_buttress_exit(isp); - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -index d91a78e..71d8cdf 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -23,6 +23,12 @@ struct ipu6_bus_device; - #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" - #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+/* array of struct ipu6_spdata_rep terminated by NULL */ -+#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" -+#endif -+ - enum ipu6_version { - IPU6_VER_INVALID = 0, - IPU6_VER_6 = 1, -@@ -80,7 +86,11 @@ struct ipu6_device { - const struct firmware *cpd_fw; - const char *cpd_fw_name; - u32 cpd_metadata_cmpnt_size; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ const struct firmware *spdata_fw; -+#endif -+#endif - void __iomem *base; - #ifdef CONFIG_DEBUG_FS - struct dentry *ipu_dir; -@@ -328,6 +338,9 @@ struct ipu6_isys_internal_pdata { - struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata; -+#endif - }; - - struct ipu6_psys_internal_pdata { --- -2.43.0 - diff --git a/patches/0014-media-intel-ipu6-fix-coverity-issue.patch b/patches/0014-media-intel-ipu6-fix-coverity-issue.patch deleted file mode 100644 index 10c5e2c..0000000 --- a/patches/0014-media-intel-ipu6-fix-coverity-issue.patch +++ /dev/null @@ -1,31 +0,0 @@ -From d17dcc21b65a37c86913dfed523184be691e4362 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Mon, 12 Jan 2026 07:33:57 -0700 -Subject: [PATCH 14/28] media: intel-ipu6: fix coverity issue - -check return value of 
v4l2_ctrl_handler_init; -change the type of phy_port to int; - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index f18e6fa..ff29130 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -644,7 +644,8 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) - { -- unsigned int phy_port, phy_id; -+ unsigned int phy_id; -+ int phy_port; - void __iomem *phy_base; - struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); - struct ipu6_device *isp = adev->isp; --- -2.43.0 - diff --git a/patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch b/patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch deleted file mode 100644 index 025ac56..0000000 --- a/patches/0015-media-intel-ipu6-move-ipu-acpi-module-to-linux-drive.patch +++ /dev/null @@ -1,253 +0,0 @@ -From 1d70488bb65f48cf6196bef78d8f546b28ee1738 Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Mon, 12 Jan 2026 07:37:10 -0700 -Subject: [PATCH 15/28] media: intel-ipu6: move ipu-acpi module to - linux-drivers - -The ipu-acpi module is responsible for obtaining ACPI device platform -data (pdata) and is universally applicable to both IPU6/7 and -upstream/non-upstream. To better align with its purpose and usage, the -ipu-acpi module is being moved out of the linux_kernel and into the -linux-drivers[android/platform/main]. 
- -Specific changes include: - -- Struct ipu_isys_subdev_pdata and its internal structs - ipu_isys_subdev_info/ipu_isys_clk_mapping: - solely related to ACPI pdata, not tied to any specific IPU. -- IPU_SPDATA_NAME_LEN and related macros: - used exclusively for ACPI device definition and configuration. - -Signed-off-by: Dongcheng Yan -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 4 +-- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 26 +++++++++---------- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 26 +++++++++---------- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 10 +++---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 2 +- - 5 files changed, 34 insertions(+), 34 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index ff29130..7ab02b7 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -651,8 +651,8 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; - const struct phy_reg **phy_config_regs; -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **subdevs, *sd_info; - int i; - - if (!spdata) { -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 05ced8b..bc40325 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -141,7 +141,7 @@ static int isys_i2c_test(struct device *dev, void *priv) - - static struct - i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, -- struct ipu6_isys_subdev_info *sd_info) -+ struct ipu_isys_subdev_info *sd_info) - { - 
struct i2c_board_info *info = &sd_info->i2c.board_info; - struct isys_i2c_test test = { -@@ -200,7 +200,7 @@ unregister_subdev: - - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - static int isys_register_ext_subdev(struct ipu6_isys *isys, -- struct ipu6_isys_subdev_info *sd_info) -+ struct ipu_isys_subdev_info *sd_info) - { - struct i2c_adapter *adapter; - struct v4l2_subdev *sd; -@@ -274,7 +274,7 @@ skip_put_adapter: - } - - static int isys_unregister_ext_subdev(struct ipu6_isys *isys, -- struct ipu6_isys_subdev_info *sd_info) -+ struct ipu_isys_subdev_info *sd_info) - { - struct i2c_adapter *adapter; - struct i2c_client *client; -@@ -321,8 +321,8 @@ skip_put_adapter: - - static void isys_register_ext_subdevs(struct ipu6_isys *isys) - { -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - - if (!spdata) { - dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -@@ -334,8 +334,8 @@ static void isys_register_ext_subdevs(struct ipu6_isys *isys) - - static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) - { -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - - if (!spdata) - return; -@@ -377,8 +377,8 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); - #endif - unsigned int i; -@@ -437,8 +437,8 @@ static int 
isys_csi2_create_media_links(struct ipu6_isys *isys) - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); - #endif - unsigned int i, j; -@@ -508,8 +508,8 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); - #endif - unsigned int i, j; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index a956045..a897650 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -66,10 +66,10 @@ struct ipu6_bus_device; - #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ - && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ - || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU6_SPDATA_NAME_LEN 20 --#define IPU6_SPDATA_BDF_LEN 32 --#define IPU6_SPDATA_GPIO_NUM 4 --#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#define IPU_SPDATA_NAME_LEN 20 -+#define IPU_SPDATA_BDF_LEN 32 -+#define IPU_SPDATA_GPIO_NUM 4 -+#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif - - struct ltr_did { -@@ -205,10 +205,10 @@ struct ipu6_isys_subdev_i2c_info { - #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ - && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ - || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define 
IPU6_SPDATA_NAME_LEN 20 --#define IPU6_SPDATA_BDF_LEN 32 --#define IPU6_SPDATA_GPIO_NUM 4 --#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#define IPU_SPDATA_NAME_LEN 20 -+#define IPU_SPDATA_BDF_LEN 32 -+#define IPU_SPDATA_GPIO_NUM 4 -+#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif - - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -@@ -247,7 +247,7 @@ struct ipu6_spdata_rep { - }; - #endif - --struct ipu6_isys_subdev_info { -+struct ipu_isys_subdev_info { - struct ipu6_isys_csi2_config *csi2; - struct ipu6_isys_subdev_i2c_info i2c; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -@@ -259,14 +259,14 @@ struct ipu6_isys_subdev_info { - #endif - }; - --struct ipu6_isys_clk_mapping { -+struct ipu_isys_clk_mapping { - struct clk_lookup clkdev_data; - char *platform_clock_name; - }; - --struct ipu6_isys_subdev_pdata { -- struct ipu6_isys_subdev_info **subdevs; -- struct ipu6_isys_clk_mapping *clk_map; -+struct ipu_isys_subdev_pdata { -+ struct ipu_isys_subdev_info **subdevs; -+ struct ipu_isys_clk_mapping *clk_map; - }; - - struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 6019705..5456801 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -387,7 +387,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_isys_internal_pdata *ipdata, - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata -+ struct ipu_isys_subdev_pdata *spdata - #endif - ) - { -@@ -395,7 +395,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_bus_device *isys_adev; - struct ipu6_isys_pdata *pdata; - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- struct ipu6_isys_subdev_pdata *acpi_pdata; -+ struct ipu_isys_subdev_pdata *acpi_pdata; - #endif - 
int ret; - -@@ -575,7 +575,7 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --static inline int match_spdata(struct ipu6_isys_subdev_info *sd, -+static inline int match_spdata(struct ipu_isys_subdev_info *sd, - const struct ipu6_spdata_rep *rep) - { - if (strcmp(sd->i2c.board_info.type, rep->name)) -@@ -594,10 +594,10 @@ static inline int match_spdata(struct ipu6_isys_subdev_info *sd, - } - - static void fixup_spdata(const void *spdata_rep, -- struct ipu6_isys_subdev_pdata *spdata) -+ struct ipu_isys_subdev_pdata *spdata) - { - const struct ipu6_spdata_rep *rep = spdata_rep; -- struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ struct ipu_isys_subdev_info **subdevs, *sd_info; - - if (!spdata) - return; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -index 71d8cdf..786494d 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -339,7 +339,7 @@ struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata; -+ struct ipu_isys_subdev_pdata *spdata; - #endif - }; - --- -2.43.0 - diff --git a/patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch b/patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch deleted file mode 100644 index 391eff2..0000000 --- a/patches/0016-media-pci-unregister-i2c-device-to-complete-ext_subd.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 03e22286960e89231418c7ad45676b991a3655af Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Mon, 12 Jan 2026 07:40:01 -0700 -Subject: [PATCH 16/28] media: pci: unregister i2c device to complete - ext_subdev_register - -Signed-off-by: Dongcheng Yan -Signed-off-by: zouxiaoh - -u# Please enter the 
commit message for your changes. Lines starting ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 4 ++-- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 7 ++++--- - 2 files changed, 6 insertions(+), 5 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index bc40325..e61b5b4 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -250,8 +250,8 @@ static int isys_register_ext_subdev(struct ipu6_isys *isys, - client = isys_find_i2c_subdev(adapter, sd_info); - if (client) { - dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); -- rval = 0; -- goto skip_put_adapter; -+ i2c_unregister_device(client); -+ dev_dbg(&isys->adev->auxdev.dev, "Unregister device"); - } - - sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 5456801..406ff21 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -433,16 +433,17 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!spdata) { - dev_dbg(&pdev->dev, "No subdevice info provided"); -- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, - isys_init_acpi_add_device); - pdata->spdata = acpi_pdata; - } else { - dev_dbg(&pdev->dev, "Subdevice info found"); -- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, - isys_init_acpi_add_device); - } -+ if (ret) -+ return ERR_PTR(ret); - #endif -- - isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(isys_adev->mmu)) { --- -2.43.0 - diff --git 
a/patches/0017-media-pci-add-missing-if-for-PDATA.patch b/patches/0017-media-pci-add-missing-if-for-PDATA.patch deleted file mode 100644 index ccfda66..0000000 --- a/patches/0017-media-pci-add-missing-if-for-PDATA.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 5b761df0eab492439ec7045320ea2e28016073b1 Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Mon, 12 Jan 2026 07:46:33 -0700 -Subject: [PATCH 17/28] media: pci: add missing #if for PDATA - ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c | 7 ++++--- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 2 ++ - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 8 ++++---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 7 ++++--- - 4 files changed, 14 insertions(+), 10 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index 0cf42e4..0ba908c 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -21,8 +21,9 @@ - #include - #include - #include -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #include -- -+#endif - #include "ipu6.h" - #include "ipu6-bus.h" - #include "ipu6-dma.h" -@@ -776,7 +777,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) - return -ETIMEDOUT; - } - EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - /* - * The dev_id was hard code in platform data, as i2c bus number - * may change dynamiclly, we need to update this bus id -@@ -817,7 +818,7 @@ int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) - return -1; - } - EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, INTEL_IPU6); -- -+#endif - void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) - { - u32 tsc_hi_1, tsc_hi_2, tsc_lo; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 
18d6e0b..99fa266 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -83,5 +83,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); - void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); - void ipu6_buttress_restore(struct ipu6_device *isp); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); -+#endif - #endif /* IPU6_BUTTRESS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index e61b5b4..a74a3a0 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1131,16 +1131,16 @@ static int isys_register_devices(struct ipu6_isys *isys) - #else - isys_register_ext_subdevs(isys); - #endif -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); - if (ret) - goto out_isys_unregister_ext_subdevs; -- -+#endif - return 0; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - out_isys_unregister_ext_subdevs: - isys_unregister_ext_subdevs(isys); -- -+#endif - out_isys_unregister_subdevices: - isys_csi2_unregister_subdevices(isys); - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index a897650..80e1a0f 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -10,8 +10,9 @@ - #include - #include - #include -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #include -- -+#endif - #include - #include - #include -@@ -195,13 +196,13 @@ struct isys_fw_msgs { - struct list_head head; - dma_addr_t dma_addr; - }; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - struct ipu6_isys_subdev_i2c_info { - struct i2c_board_info board_info; - int 
i2c_adapter_id; - char i2c_adapter_bdf[32]; - }; -- -+#endif - #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ - && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ - || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --- -2.43.0 - diff --git a/patches/0018-media-pci-refine-PDATA-related-config.patch b/patches/0018-media-pci-refine-PDATA-related-config.patch deleted file mode 100644 index 3de098d..0000000 --- a/patches/0018-media-pci-refine-PDATA-related-config.patch +++ /dev/null @@ -1,526 +0,0 @@ -From 99ab4097d616470ca9a5410d89345f9f931a4b30 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Mon, 12 Jan 2026 07:51:32 -0700 -Subject: [PATCH 18/28] media: pci: refine PDATA related config - -remove dynamic pdata related code; -replace CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA -with CONFIG_INTEL_IPU6_ACPI; - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-buttress.c | 4 +- - .../media/pci/intel/ipu6/ipu6-buttress.h | 2 +- - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 6 +- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 38 +++++----- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 60 ++------------- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 74 +------------------ - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.h | 13 +--- - 7 files changed, 34 insertions(+), 163 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index 0ba908c..32fa979 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -21,7 +21,7 @@ - #include - #include - #include --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #endif - #include "ipu6.h" -@@ -777,7 +777,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) - return -ETIMEDOUT; - } - EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); --#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * The dev_id was hard code in platform data, as i2c bus number - * may change dynamiclly, we need to update this bus id -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 99fa266..34c0da0 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -83,7 +83,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); - void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); - void ipu6_buttress_restore(struct ipu6_device *isp); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); - #endif - #endif /* IPU6_BUTTRESS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index 7ab02b7..1cf33b5 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -567,7 +567,7 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) - return ret; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) - { - unsigned int phy_id; -@@ -641,7 +641,7 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - return ret; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) - { - unsigned int phy_id; -@@ -771,7 +771,7 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - return ret; - - 
ipu6_isys_mcd_phy_reset(isys, phy_id, 0); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - ipu6_isys_mcd_phy_common_init(isys, cfg); - ret = ipu6_isys_mcd_phy_config(isys, cfg); - #else -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index a74a3a0..eebcffc 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -28,7 +28,7 @@ - #include - #include - #include --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #include - #include -@@ -105,7 +105,7 @@ enum ltr_did_type { - }; - - #define ISYS_PM_QOS_VALUE 300 --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * The param was passed from module to indicate if port - * could be optimized. -@@ -113,9 +113,7 @@ enum ltr_did_type { - static bool csi2_port_optimized = true; - module_param(csi2_port_optimized, bool, 0660); - MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); --#endif - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - struct isys_i2c_test { - u8 bus_nr; - u16 addr; -@@ -198,7 +196,7 @@ unregister_subdev: - return ret; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int isys_register_ext_subdev(struct ipu6_isys *isys, - struct ipu_isys_subdev_info *sd_info) - { -@@ -376,7 +374,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; - struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); -@@ -384,7 +382,7 @@ static int 
isys_csi2_register_subdevices(struct ipu6_isys *isys) - unsigned int i; - int ret; - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * Here is somewhat a workaround, let each platform decide - * if csi2 port can be optimized, which means only registered -@@ -409,7 +407,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - #endif - - for (i = 0; i < csi2_pdata->nports; i++) { --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!test_bit(i, csi2_enable)) - continue; - #endif -@@ -436,7 +434,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; - struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); -@@ -444,7 +442,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - unsigned int i, j; - int ret; - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * Here is somewhat a workaround, let each platform decide - * if csi2 port can be optimized, which means only registered -@@ -469,7 +467,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - #endif - - for (i = 0; i < csi2_pdata->nports; i++) { --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!test_bit(i, csi2_enable)) - continue; - #endif -@@ -507,7 +505,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct 
ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; - struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); -@@ -515,7 +513,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - unsigned int i, j; - int ret; - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * Here is somewhat a workaround, let each platform decide - * if csi2 port can be optimized, which means only registered -@@ -540,7 +538,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - #endif - - for (i = 0; i < csi2_pdata->nports; i++) { --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!test_bit(i, csi2_enable)) - continue; - #endif -@@ -974,7 +972,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) - mutex_destroy(&iwake_watermark->mutex); - } - --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, -@@ -1124,20 +1122,20 @@ static int isys_register_devices(struct ipu6_isys *isys) - if (ret) - goto out_isys_unregister_subdevices; - --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - ret = isys_notifier_init(isys); - if (ret) - goto out_isys_unregister_subdevices; - #else - isys_register_ext_subdevs(isys); - #endif --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); - if (ret) - goto out_isys_unregister_ext_subdevs; - #endif - return 0; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - out_isys_unregister_ext_subdevs: - isys_unregister_ext_subdevs(isys); - #endif -@@ -1163,7 +1161,7 @@ static void 
isys_unregister_devices(struct ipu6_isys *isys) - { - isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - isys_unregister_ext_subdevs(isys); - #endif - v4l2_device_unregister(&isys->v4l2_dev); -@@ -1536,7 +1534,7 @@ static void isys_remove(struct auxiliary_device *auxdev) - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - isys_notifier_cleanup(isys); - #endif - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 80e1a0f..1db940a 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -10,7 +10,7 @@ - #include - #include - #include --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #endif - #include -@@ -64,11 +64,7 @@ struct ipu6_bus_device; - #define IPU6EP_MTL_LTR_VALUE 1023 - #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc - --#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU_SPDATA_NAME_LEN 20 --#define IPU_SPDATA_BDF_LEN 32 -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #define IPU_SPDATA_GPIO_NUM 4 - #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif -@@ -176,7 +172,7 @@ struct ipu6_isys { - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct v4l2_async_notifier notifier; - #endif - struct isys_iwake_watermark iwake_watermark; -@@ -196,66 +192,22 @@ struct isys_fw_msgs { - struct list_head head; - dma_addr_t dma_addr; - }; --#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu6_isys_subdev_i2c_info { - struct i2c_board_info board_info; - int i2c_adapter_id; - char i2c_adapter_bdf[32]; - }; - #endif --#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU_SPDATA_NAME_LEN 20 --#define IPU_SPDATA_BDF_LEN 32 -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #define IPU_SPDATA_GPIO_NUM 4 - #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --/** -- * struct ipu6_spdata_rep - override subdev platform data -- * -- * @name: i2c_board_info.type -- * @i2c_adapter_bdf_o: old i2c adapter bdf -- * @slave_addr_o: old i2c slave address -- * @i2c_adapter_bdf_n: new i2c adapter bdf -- * @slave_addr_n: new i2c slave address -- * -- * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and -- * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n -- */ --struct ipu6_spdata_rep { -- /* i2c old information */ -- char name[IPU6_SPDATA_NAME_LEN]; -- unsigned int port_o; -- char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; -- uint32_t slave_addr_o; -- -- /* i2c new information */ -- unsigned int port_n; -- char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; -- uint32_t slave_addr_n; -- -- /* sensor_platform */ -- unsigned int lanes; -- int gpios[IPU6_SPDATA_GPIO_NUM]; -- int irq_pin; -- unsigned int irq_pin_flags; -- char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; -- char suffix; --}; --#endif -- - struct ipu_isys_subdev_info { - struct ipu6_isys_csi2_config *csi2; -- struct ipu6_isys_subdev_i2c_info i2c; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- void (*fixup_spdata)(const void *spdata_rep, void *spdata); --#endif - #if 
IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ struct ipu6_isys_subdev_i2c_info i2c; - char *acpi_hid; - #endif - }; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 406ff21..57ab6a5 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -386,7 +386,7 @@ static struct ipu6_bus_device * - ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_isys_internal_pdata *ipdata, --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata - #endif - ) -@@ -411,7 +411,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - - pdata->base = base; - pdata->ipdata = ipdata; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - pdata->spdata = spdata; - #endif - /* Override the isys freq */ -@@ -574,59 +574,6 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --static inline int match_spdata(struct ipu_isys_subdev_info *sd, -- const struct ipu6_spdata_rep *rep) --{ -- if (strcmp(sd->i2c.board_info.type, rep->name)) -- return 0; -- -- if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) -- return 0; -- -- if (sd->i2c.board_info.addr != rep->slave_addr_o) -- return 0; -- -- if (sd->csi2->port != rep->port_o) -- return 0; -- -- return 1; --} -- --static void fixup_spdata(const void *spdata_rep, -- struct ipu_isys_subdev_pdata *spdata) --{ -- const struct ipu6_spdata_rep *rep = spdata_rep; -- struct ipu_isys_subdev_info **subdevs, *sd_info; -- -- if (!spdata) -- return; -- -- for (; rep->name[0]; rep++) { -- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -- 
sd_info = *subdevs; -- -- if (!sd_info->csi2) -- continue; -- -- if (match_spdata(sd_info, rep)) { -- strcpy(sd_info->i2c.i2c_adapter_bdf, -- rep->i2c_adapter_bdf_n); -- sd_info->i2c.board_info.addr = -- rep->slave_addr_n; -- sd_info->csi2->port = rep->port_n; -- -- if (sd_info->fixup_spdata) -- sd_info->fixup_spdata(rep, -- sd_info->i2c.board_info.platform_data); -- } -- } -- } --} --#endif --#endif -- - static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { - struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; -@@ -730,16 +677,6 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto out_ipu6_bus_del_devices; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); -- if (rval) -- dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); -- else -- fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); --#endif --#endif -- - isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, - sizeof(isys_buttress_ctrl), GFP_KERNEL); - if (!isys_ctrl) { -@@ -749,7 +686,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - - isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, - &isys_ipdata, --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - pdev->dev.platform_data - #endif - ); -@@ -851,11 +788,6 @@ out_ipu6_bus_del_devices: - ipu6_mmu_cleanup(isp->isys->mmu); - ipu6_bus_del_devices(pdev); - release_firmware(isp->cpd_fw); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- release_firmware(isp->spdata_fw); --#endif --#endif - buttress_exit: - ipu6_buttress_exit(isp); - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -index 
786494d..f464a7b 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -23,12 +23,6 @@ struct ipu6_bus_device; - #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" - #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --/* array of struct ipu6_spdata_rep terminated by NULL */ --#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" --#endif -- - enum ipu6_version { - IPU6_VER_INVALID = 0, - IPU6_VER_6 = 1, -@@ -86,11 +80,6 @@ struct ipu6_device { - const struct firmware *cpd_fw; - const char *cpd_fw_name; - u32 cpd_metadata_cmpnt_size; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- const struct firmware *spdata_fw; --#endif --#endif - void __iomem *base; - #ifdef CONFIG_DEBUG_FS - struct dentry *ipu_dir; -@@ -338,7 +327,7 @@ struct ipu6_isys_internal_pdata { - struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata; - #endif - }; --- -2.43.0 - diff --git a/patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch b/patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch deleted file mode 100644 index 84ae2d8..0000000 --- a/patches/0019-media-intel-ipu6-align-ACPI-PDATA-and-ACPI-fwnode-bu.patch +++ /dev/null @@ -1,385 +0,0 @@ -From a7d31333a164a84a02df565a3f0f9620a9a0098b Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Mon, 12 Jan 2026 07:53:37 -0700 -Subject: [PATCH 19/28] media: intel-ipu6: align ACPI PDATA and ACPI fwnode - build for ECG - -* align ACPI PDATA and ACPI fwnode build for ECG - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 
170 ++++++++---------- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 29 ++- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 4 - - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 14 +- - 4 files changed, 96 insertions(+), 121 deletions(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index 1cf33b5..1be7d75 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -567,49 +567,46 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) - return ret; - } - --#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+static void ipu6_isys_write_mcd_phy_common_init_regs(struct ipu6_isys *isys, -+ u32 port) - { -- unsigned int phy_id; -- void __iomem *phy_base; -- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct ipu6_bus_device *adev = isys->adev; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; -+ void __iomem *phy_base; - unsigned int i; -+ u8 phy_id; - -- phy_id = cfg->port / 4; -+ phy_id = port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); - -- for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { -+ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) - writel(common_init_regs[i].val, -- phy_base + common_init_regs[i].reg); -- } -- -- return 0; -+ phy_base + common_init_regs[i].reg); - } --#else --static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) -+ -+static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ , struct ipu6_isys_csi2_config *csi2_cfg -+#endif -+) - { -- struct ipu6_bus_device *adev = isys->adev; -- struct ipu6_device *isp = adev->isp; -- void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct v4l2_async_connection *asc; -- void 
__iomem *phy_base; -- unsigned int i; -- u8 phy_id; - -- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -- s_asd = container_of(asc, struct sensor_async_sd, asc); -- phy_id = s_asd->csi2.port / 4; -- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -- -- for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) -- writel(common_init_regs[i].val, -- phy_base + common_init_regs[i].reg); -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ if (isys->pdata->spdata) { -+ ipu6_isys_write_mcd_phy_common_init_regs(isys, csi2_cfg->port); -+ } else { -+#endif -+ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -+ s_asd = container_of(asc, struct sensor_async_sd, asc); -+ ipu6_isys_write_mcd_phy_common_init_regs(isys, s_asd->csi2.port); -+ } -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - } --} - #endif -+} - - static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - { -@@ -641,96 +638,79 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - return ret; - } - --#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+static int ipu6_isys_write_mcd_phy_config_regs(struct ipu6_isys *isys, -+ struct ipu6_isys_csi2_config *cfg) - { -- unsigned int phy_id; -- int phy_port; -- void __iomem *phy_base; -- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct device *dev = &isys->adev->auxdev.dev; -+ struct ipu6_bus_device *adev = isys->adev; -+ const struct phy_reg **phy_config_regs; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; -- const struct phy_reg **phy_config_regs; -- struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu_isys_subdev_info **subdevs, *sd_info; -- int i; -+ void __iomem *phy_base; -+ int phy_port, phy_id; -+ unsigned int i; - -- if (!spdata) { -- dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -- return -EINVAL; 
-+ phy_port = ipu6_isys_driver_port_to_phy_port(cfg); -+ if (phy_port < 0) { -+ dev_err(dev, "invalid port %d for lane %d", cfg->port, -+ cfg->nlanes); -+ return -ENXIO; - } - - phy_id = cfg->port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -- sd_info = *subdevs; -- if (!sd_info->csi2) -- continue; -- -- phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); -- if (phy_port < 0) { -- dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", -- cfg->port, cfg->nlanes); -- return -ENXIO; -- } -- -- if ((sd_info->csi2->port / 4) != phy_id) -- continue; -- -- dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", -- phy_port, phy_id, cfg->nlanes); -+ dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg->port, phy_id, cfg->nlanes); - -- phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; -- -- for (i = 0; phy_config_regs[phy_port][i].reg; i++) { -- writel(phy_config_regs[phy_port][i].val, -- phy_base + phy_config_regs[phy_port][i].reg); -- } -- } -+ phy_config_regs = config_regs[cfg->nlanes / 2]; -+ for (i = 0; phy_config_regs[phy_port][i].reg; i++) -+ writel(phy_config_regs[phy_port][i].val, -+ phy_base + phy_config_regs[phy_port][i].reg); - - return 0; - } --#else --static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) -+ -+static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ , struct ipu6_isys_csi2_config *csi2_cfg -+#endif -+) - { -- struct device *dev = &isys->adev->auxdev.dev; -- struct ipu6_bus_device *adev = isys->adev; -- const struct phy_reg **phy_config_regs; -- struct ipu6_device *isp = adev->isp; -- void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct ipu6_isys_csi2_config cfg; - struct v4l2_async_connection *asc; -- int phy_port, phy_id; -- unsigned int i; -- void __iomem *phy_base; -+ int ret = 0; - -- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -- s_asd = 
container_of(asc, struct sensor_async_sd, asc); -- cfg.port = s_asd->csi2.port; -- cfg.nlanes = s_asd->csi2.nlanes; -- phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); -- if (phy_port < 0) { -- dev_err(dev, "invalid port %d for lane %d", cfg.port, -- cfg.nlanes); -- return -ENXIO; -- } -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **subdevs, *sd_info; - -- phy_id = cfg.port / 4; -- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -- dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, -- cfg.nlanes); -+ if (spdata) { -+ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -+ sd_info = *subdevs; -+ if (!sd_info->csi2 || -+ (sd_info->csi2->port / 4) != (csi2_cfg->port / 4)) -+ continue; - -- phy_config_regs = config_regs[cfg.nlanes / 2]; -- cfg.port = phy_port; -- for (i = 0; phy_config_regs[cfg.port][i].reg; i++) -- writel(phy_config_regs[cfg.port][i].val, -- phy_base + phy_config_regs[cfg.port][i].reg); -+ ret = ipu6_isys_write_mcd_phy_config_regs(isys, sd_info->csi2); -+ if (ret) -+ return ret; -+ } -+ } else { -+#endif -+ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -+ s_asd = container_of(asc, struct sensor_async_sd, asc); -+ cfg.port = s_asd->csi2.port; -+ cfg.nlanes = s_asd->csi2.nlanes; -+ ret = ipu6_isys_write_mcd_phy_config_regs(isys, &cfg); -+ if (ret) -+ return ret; -+ } -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - } -+#endif - - return 0; - } --#endif - - #define CSI_MCD_PHY_NUM 2 - static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index eebcffc..caf29f7 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -28,14 +28,12 @@ - #include - #include - #include --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #include - #include - #include - #include 
- #include --#endif - - #include "ipu6-bus.h" - #include "ipu6-cpd.h" -@@ -972,7 +970,6 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) - mutex_destroy(&iwake_watermark->mutex); - } - --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, -@@ -1084,7 +1081,6 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) - v4l2_async_nf_unregister(&isys->notifier); - v4l2_async_nf_cleanup(&isys->notifier); - } --#endif - - static int isys_register_devices(struct ipu6_isys *isys) - { -@@ -1122,17 +1118,19 @@ static int isys_register_devices(struct ipu6_isys *isys) - if (ret) - goto out_isys_unregister_subdevices; - --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- ret = isys_notifier_init(isys); -- if (ret) -- goto out_isys_unregister_subdevices; --#else -- isys_register_ext_subdevs(isys); -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ if (!isys->pdata->spdata) { - #endif -+ ret = isys_notifier_init(isys); -+ if (ret) -+ goto out_isys_unregister_subdevices; - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -- if (ret) -- goto out_isys_unregister_ext_subdevs; -+ } else { -+ isys_register_ext_subdevs(isys); -+ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -+ if (ret) -+ goto out_isys_unregister_ext_subdevs; -+ } - #endif - return 0; - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -@@ -1162,7 +1160,8 @@ static void isys_unregister_devices(struct ipu6_isys *isys) - isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- isys_unregister_ext_subdevs(isys); -+ if (isys->pdata->spdata) -+ isys_unregister_ext_subdevs(isys); - #endif - v4l2_device_unregister(&isys->v4l2_dev); - media_device_unregister(&isys->media_dev); -@@ -1534,9 +1533,7 @@ static void isys_remove(struct auxiliary_device *auxdev) - 
free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - isys_notifier_cleanup(isys); --#endif - - cpu_latency_qos_remove_request(&isys->pm_qos); - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 1db940a..57df01c 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -172,9 +172,7 @@ struct ipu6_isys { - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct v4l2_async_notifier notifier; --#endif - struct isys_iwake_watermark iwake_watermark; - #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET - struct mutex reset_mutex; -@@ -198,8 +196,6 @@ struct ipu6_isys_subdev_i2c_info { - int i2c_adapter_id; - char i2c_adapter_bdf[32]; - }; --#endif --#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #define IPU_SPDATA_GPIO_NUM 4 - #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 57ab6a5..5684dac 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -399,11 +399,11 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - #endif - int ret; - -- // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -- // if (ret) { -- // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -- // return ERR_PTR(ret); -- // } -+ ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -+ if (ret) { -+ dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -+ return ERR_PTR(ret); -+ } - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) -@@ -435,7 +435,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - dev_dbg(&pdev->dev, "No subdevice info provided"); - ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, 
- isys_init_acpi_add_device); -- pdata->spdata = acpi_pdata; -+ if (acpi_pdata && (*acpi_pdata->subdevs)) { -+ pdata->spdata = acpi_pdata; -+ } - } else { - dev_dbg(&pdev->dev, "Subdevice info found"); - ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, --- -2.43.0 - diff --git a/patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch b/patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch deleted file mode 100644 index 43fa627..0000000 --- a/patches/0020-upstream-Use-module-parameter-to-set-psys-freq.patch +++ /dev/null @@ -1,43 +0,0 @@ -From ea56f3f2c08dc6ef45ff26221f92f45562b149b9 Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Mon, 12 Jan 2026 08:06:36 -0700 -Subject: [PATCH 20/28] upstream: Use module parameter to set psys freq - ---- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 5684dac..c9513c5 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -51,6 +51,10 @@ static unsigned int isys_freq_override; - module_param(isys_freq_override, uint, 0660); - MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); - -+static unsigned int psys_freq_override; -+module_param(psys_freq_override, uint, 0660); -+MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); -+ - #define IPU6_PCI_BAR 0 - - struct ipu6_cell_program { -@@ -482,6 +486,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, - pdata->base = base; - pdata->ipdata = ipdata; - -+ /* Override the isys freq */ -+ if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && -+ psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { -+ ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; -+ ctrl->qos_floor = psys_freq_override; -+ dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", -+ 
psys_freq_override); -+ } -+ - psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_PSYS_NAME); - if (IS_ERR(psys_adev)) { --- -2.43.0 - diff --git a/patches/0021-media-pci-update-IPU6-PSYS-driver.patch b/patches/0021-media-pci-update-IPU6-PSYS-driver.patch deleted file mode 100644 index 67a0d9e..0000000 --- a/patches/0021-media-pci-update-IPU6-PSYS-driver.patch +++ /dev/null @@ -1,8335 +0,0 @@ -From d55eab67d68fc04677229cf8d410f344b2d60b04 Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Mon, 12 Jan 2026 08:09:05 -0700 -Subject: [PATCH 21/28] media: pci: update IPU6 PSYS driver - -Signed-off-by: zouxiaoh -Signed-off-by: Florent Pirou ---- - 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 1 + - .../media/pci/intel/ipu6/psys/Makefile | 26 + - .../media/pci/intel/ipu6/psys/ipu-fw-psys.c | 433 +++++ - .../media/pci/intel/ipu6/psys/ipu-fw-psys.h | 382 ++++ - .../pci/intel/ipu6/psys/ipu-fw-resources.c | 103 ++ - .../pci/intel/ipu6/psys/ipu-platform-psys.h | 78 + - .../intel/ipu6/psys/ipu-platform-resources.h | 103 ++ - .../pci/intel/ipu6/psys/ipu-psys-compat32.c | 222 +++ - .../media/pci/intel/ipu6/psys/ipu-psys.c | 1599 +++++++++++++++++ - .../media/pci/intel/ipu6/psys/ipu-psys.h | 324 ++++ - .../media/pci/intel/ipu6/psys/ipu-resources.c | 861 +++++++++ - .../pci/intel/ipu6/psys/ipu6-fw-resources.c | 607 +++++++ - .../pci/intel/ipu6/psys/ipu6-l-scheduler.c | 621 +++++++ - .../intel/ipu6/psys/ipu6-platform-resources.h | 194 ++ - .../media/pci/intel/ipu6/psys/ipu6-ppg.c | 556 ++++++ - .../media/pci/intel/ipu6/psys/ipu6-ppg.h | 38 + - .../media/pci/intel/ipu6/psys/ipu6-psys-gpc.c | 209 +++ - .../media/pci/intel/ipu6/psys/ipu6-psys.c | 1056 +++++++++++ - .../pci/intel/ipu6/psys/ipu6ep-fw-resources.c | 393 ++++ - .../ipu6/psys/ipu6ep-platform-resources.h | 42 + - .../pci/intel/ipu6/psys/ipu6se-fw-resources.c | 194 ++ - .../ipu6/psys/ipu6se-platform-resources.h | 103 ++ - 22 files changed, 8145 insertions(+) - create mode 100644 
6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c - create mode 100644 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -index e1e123b..67c13c9 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -@@ -22,3 +22,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ - ipu6-isys-dwc-phy.o - - 
obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o -+obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile -new file mode 100644 -index 0000000..299cb04 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/Makefile -@@ -0,0 +1,26 @@ -+# SPDX-License-Identifier: GPL-2.0-only -+ -+is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) -+ifeq ($(is_kernel_lt_6_10), 1) -+ifneq ($(EXTERNAL_BUILD), 1) -+src := $(srctree)/$(src) -+endif -+endif -+ -+intel-ipu6-psys-objs += ipu-psys.o \ -+ ipu-fw-psys.o \ -+ ipu-fw-resources.o \ -+ ipu-resources.o \ -+ ipu6-psys.o \ -+ ipu6-ppg.o \ -+ ipu6-l-scheduler.o \ -+ ipu6-fw-resources.o \ -+ ipu6se-fw-resources.o \ -+ ipu6ep-fw-resources.o -+ -+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-psys.o -+ -+ifeq ($(is_kernel_lt_6_10), 1) -+ccflags-y += -I$(src)/../ipu6/ -+endif -+ccflags-y += -I$(src)/../ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c -new file mode 100644 -index 0000000..c89cd0c ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c -@@ -0,0 +1,433 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2016 - 2024 Intel Corporation -+ -+#include -+ -+#include -+ -+#include -+#include "ipu6-fw-com.h" -+#include "ipu-fw-psys.h" -+#include "ipu-psys.h" -+ -+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) -+{ -+ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED; -+ return 0; -+} -+ -+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_fw_psys_cmd *psys_cmd; -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ int ret = 0; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ 
goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ int ret = 0; -+ -+ /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 1); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 1); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ int ret = 0; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ int ret = 0; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; -+ psys_cmd->msg = 0; -+ 
psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) -+{ -+ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; -+ return 0; -+} -+ -+int ipu_fw_psys_rcv_event(struct ipu_psys *psys, -+ struct ipu_fw_psys_event *event) -+{ -+ void *rcv; -+ -+ rcv = ipu6_recv_get_token(psys->fwcom, 0); -+ if (!rcv) -+ return 0; -+ -+ memcpy(event, rcv, sizeof(*event)); -+ ipu6_recv_put_token(psys->fwcom, 0); -+ return 1; -+} -+ -+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, -+ struct ipu_psys_kcmd *kcmd, -+ u32 buffer, unsigned int size) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ u32 type; -+ u32 buffer_state; -+ -+ type = terminal->terminal_type; -+ -+ switch (type) { -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: -+ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: -+ buffer_state = IPU_FW_PSYS_BUFFER_FULL; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: -+ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; -+ break; -+ default: -+ dev_err(dev, "unknown terminal type: 0x%x\n", type); -+ return -EAGAIN; -+ } -+ -+ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || -+ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { -+ struct ipu_fw_psys_data_terminal *dterminal = -+ (struct ipu_fw_psys_data_terminal *)terminal; -+ 
dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY; -+ dterminal->frame.data_bytes = size; -+ if (!ipu_fw_psys_pg_get_protocol(kcmd)) -+ dterminal->frame.data = buffer; -+ else -+ dterminal->frame.data_index = terminal_idx; -+ dterminal->frame.buffer_state = buffer_state; -+ } else { -+ struct ipu_fw_psys_param_terminal *pterminal = -+ (struct ipu_fw_psys_param_terminal *)terminal; -+ if (!ipu_fw_psys_pg_get_protocol(kcmd)) -+ pterminal->param_payload.buffer = buffer; -+ else -+ pterminal->param_payload.terminal_index = terminal_idx; -+ } -+ return 0; -+} -+ -+void ipu_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note) -+{ -+ ipu6_fw_psys_pg_dump(psys, kcmd, note); -+} -+ -+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->ID; -+} -+ -+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->terminal_count; -+} -+ -+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->size; -+} -+ -+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, -+ dma_addr_t vaddress) -+{ -+ kcmd->kpg->pg->ipu_virtual_address = vaddress; -+ return 0; -+} -+ -+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd -+ *kcmd, int index) -+{ -+ struct ipu_fw_psys_terminal *terminal; -+ u16 *terminal_offset_table; -+ -+ terminal_offset_table = -+ (uint16_t *)((char *)kcmd->kpg->pg + -+ kcmd->kpg->pg->terminals_offset); -+ terminal = (struct ipu_fw_psys_terminal *) -+ ((char *)kcmd->kpg->pg + terminal_offset_table[index]); -+ return terminal; -+} -+ -+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) -+{ -+ kcmd->kpg->pg->token = (u64)token; -+} -+ -+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->token; -+} -+ -+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->protocol_version; -+} -+ -+int 
ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, -+ struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, u32 buffer) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set; -+ u32 buffer_state; -+ u32 *buffer_ptr; -+ u32 type; -+ -+ type = terminal->terminal_type; -+ -+ switch (type) { -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: -+ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: -+ buffer_state = IPU_FW_PSYS_BUFFER_FULL; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: -+ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; -+ break; -+ default: -+ dev_err(dev, "unknown terminal type: 0x%x\n", type); -+ return -EAGAIN; -+ } -+ -+ buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) + -+ terminal_idx * sizeof(*buffer_ptr)); -+ -+ *buffer_ptr = buffer; -+ -+ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || -+ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { -+ struct ipu_fw_psys_data_terminal *dterminal = -+ (struct ipu_fw_psys_data_terminal *)terminal; -+ dterminal->frame.buffer_state = buffer_state; -+ } -+ -+ return 0; -+} -+ -+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd) -+{ -+ return (sizeof(struct ipu_fw_psys_buffer_set) + -+ kcmd->kpg->pg->terminal_count * sizeof(u32)); -+} -+ -+int -+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 vaddress) -+{ -+ 
buf_set->ipu_virtual_address = vaddress; -+ return 0; -+} -+ -+int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 *kernel_enable_bitmap) -+{ -+ memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap, -+ sizeof(buf_set->kernel_enable_bitmap)); -+ return 0; -+} -+ -+struct ipu_fw_psys_buffer_set * -+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, -+ void *kaddr, u32 frame_counter) -+{ -+ struct ipu_fw_psys_buffer_set *buffer_set = NULL; -+ unsigned int i; -+ -+ buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr; -+ -+ /* -+ * Set base struct members -+ */ -+ buffer_set->ipu_virtual_address = 0; -+ buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address; -+ buffer_set->frame_counter = frame_counter; -+ buffer_set->terminal_count = kcmd->kpg->pg->terminal_count; -+ -+ /* -+ * Initialize adjacent buffer addresses -+ */ -+ for (i = 0; i < buffer_set->terminal_count; i++) { -+ u32 *buffer = -+ (u32 *)((char *)buffer_set + -+ sizeof(*buffer_set) + sizeof(u32) * i); -+ -+ *buffer = 0; -+ } -+ -+ return buffer_set; -+} -+ -+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ unsigned int queue_id; -+ int ret = 0; -+ unsigned int size; -+ -+ if (ipu_ver == IPU6_VER_6SE) -+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ else -+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ queue_id = kcmd->kpg->pg->base_queue_id; -+ -+ if (queue_id >= size) -+ return -EINVAL; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, queue_id); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ return -ENODATA; -+ } -+ -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address; -+ -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, queue_id); -+ -+ return ret; -+} -+ -+u8 
ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->base_queue_id; -+} -+ -+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id) -+{ -+ kcmd->kpg->pg->base_queue_id = queue_id; -+} -+ -+int ipu_fw_psys_open(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int retry = IPU_PSYS_OPEN_RETRY, retval; -+ -+ retval = ipu6_fw_com_open(psys->fwcom); -+ if (retval) { -+ dev_err(dev, "fw com open failed.\n"); -+ return retval; -+ } -+ -+ do { -+ usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, -+ IPU_PSYS_OPEN_TIMEOUT_US + 10); -+ retval = ipu6_fw_com_ready(psys->fwcom); -+ if (retval) { -+ dev_dbg(dev, "psys port open ready!\n"); -+ break; -+ } -+ retry--; -+ } while (retry > 0); -+ -+ if (!retry) { -+ dev_err(dev, "psys port open ready failed\n"); -+ ipu6_fw_com_close(psys->fwcom); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int ipu_fw_psys_close(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int retval; -+ -+ retval = ipu6_fw_com_close(psys->fwcom); -+ if (retval) { -+ dev_err(dev, "fw com close failed.\n"); -+ return retval; -+ } -+ return retval; -+} -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h -new file mode 100644 -index 0000000..492a9ea ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h -@@ -0,0 +1,382 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2016 - 2024 Intel Corporation */ -+ -+#ifndef IPU_FW_PSYS_H -+#define IPU_FW_PSYS_H -+ -+#include "ipu6-platform-resources.h" -+#include "ipu6se-platform-resources.h" -+#include "ipu6ep-platform-resources.h" -+ -+#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20 -+#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40 -+ -+#define IPU_FW_PSYS_CMD_BITS 64 -+#define IPU_FW_PSYS_EVENT_BITS 128 -+ -+enum { -+ IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0, -+ IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, -+ 
IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13 -+}; -+ -+enum { -+ IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID, -+ IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID -+}; -+ -+enum { -+ IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0, -+ IPU_FW_PSYS_PROCESS_GROUP_CREATED, -+ IPU_FW_PSYS_PROCESS_GROUP_READY, -+ IPU_FW_PSYS_PROCESS_GROUP_BLOCKED, -+ IPU_FW_PSYS_PROCESS_GROUP_STARTED, -+ IPU_FW_PSYS_PROCESS_GROUP_RUNNING, -+ IPU_FW_PSYS_PROCESS_GROUP_STALLED, -+ IPU_FW_PSYS_PROCESS_GROUP_STOPPED, -+ IPU_FW_PSYS_N_PROCESS_GROUP_STATES -+}; -+ -+enum { -+ IPU_FW_PSYS_CONNECTION_MEMORY = 0, -+ IPU_FW_PSYS_CONNECTION_MEMORY_STREAM, -+ IPU_FW_PSYS_CONNECTION_STREAM, -+ IPU_FW_PSYS_N_CONNECTION_TYPES -+}; -+ -+enum { -+ IPU_FW_PSYS_BUFFER_NULL = 0, -+ IPU_FW_PSYS_BUFFER_UNDEFINED, -+ IPU_FW_PSYS_BUFFER_EMPTY, -+ IPU_FW_PSYS_BUFFER_NONEMPTY, -+ IPU_FW_PSYS_BUFFER_FULL, -+ IPU_FW_PSYS_N_BUFFER_STATES -+}; -+ -+enum { -+ IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0, -+ IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT, -+ 
IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM, -+ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, -+ IPU_FW_PSYS_N_TERMINAL_TYPES -+}; -+ -+enum { -+ IPU_FW_PSYS_COL_DIMENSION = 0, -+ IPU_FW_PSYS_ROW_DIMENSION = 1, -+ IPU_FW_PSYS_N_DATA_DIMENSION = 2 -+}; -+ -+enum { -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_START, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, -+ IPU_FW_PSYS_N_PROCESS_GROUP_CMDS -+}; -+ -+enum { -+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, -+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG, -+ IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS -+}; -+ -+struct __packed ipu_fw_psys_process_group { -+ u64 token; -+ u64 private_token; -+ u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; -+ u32 kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS]; -+ u32 size; -+ u32 psys_server_init_cycles; -+ u32 pg_load_start_ts; -+ u32 pg_load_cycles; -+ u32 pg_init_cycles; -+ u32 pg_processing_cycles; -+ u32 pg_next_frame_init_cycles; -+ u32 pg_complete_cycles; -+ u32 ID; -+ u32 state; -+ u32 ipu_virtual_address; -+ u32 resource_bitmap; -+ u16 fragment_count; -+ u16 fragment_state; -+ u16 fragment_limit; -+ u16 processes_offset; -+ u16 terminals_offset; -+ u8 process_count; -+ u8 terminal_count; -+ u8 subgraph_count; -+ u8 protocol_version; -+ u8 base_queue_id; -+ u8 num_queues; -+ u8 mask_irq; -+ u8 error_handling_enable; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; -+}; -+ -+struct ipu_fw_psys_srv_init { -+ void *host_ddr_pkg_dir; -+ u32 ddr_pkg_dir_address; -+ u32 pkg_dir_size; -+ -+ u32 icache_prefetch_sp; -+ u32 icache_prefetch_isp; -+}; -+ -+struct __packed ipu_fw_psys_cmd { -+ u16 command; -+ 
u16 msg; -+ u32 context_handle; -+}; -+ -+struct __packed ipu_fw_psys_event { -+ u16 status; -+ u16 command; -+ u32 context_handle; -+ u64 token; -+}; -+ -+struct ipu_fw_psys_terminal { -+ u32 terminal_type; -+ s16 parent_offset; -+ u16 size; -+ u16 tm_index; -+ u8 ID; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; -+}; -+ -+struct ipu_fw_psys_param_payload { -+ u64 host_buffer; -+ u32 buffer; -+ u32 terminal_index; -+}; -+ -+struct ipu_fw_psys_param_terminal { -+ struct ipu_fw_psys_terminal base; -+ struct ipu_fw_psys_param_payload param_payload; -+ u16 param_section_desc_offset; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; -+}; -+ -+struct ipu_fw_psys_frame { -+ u32 buffer_state; -+ u32 access_type; -+ u32 pointer_state; -+ u32 access_scope; -+ u32 data; -+ u32 data_index; -+ u32 data_bytes; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; -+}; -+ -+struct ipu_fw_psys_frame_descriptor { -+ u32 frame_format_type; -+ u32 plane_count; -+ u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; -+ u32 stride[1]; -+ u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; -+ u16 dimension[2]; -+ u16 size; -+ u8 bpp; -+ u8 bpe; -+ u8 is_compressed; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; -+}; -+ -+struct ipu_fw_psys_stream { -+ u64 dummy; -+}; -+ -+struct ipu_fw_psys_data_terminal { -+ struct ipu_fw_psys_terminal base; -+ struct ipu_fw_psys_frame_descriptor frame_descriptor; -+ struct ipu_fw_psys_frame frame; -+ struct ipu_fw_psys_stream stream; -+ u32 reserved; -+ u32 connection_type; -+ u16 fragment_descriptor_offset; -+ u8 kernel_id; -+ u8 subgraph_id; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; -+}; -+ -+struct ipu_fw_psys_buffer_set { -+ u64 token; -+ u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS]; -+ 
u32 ipu_virtual_address; -+ u32 process_group_handle; -+ u16 terminal_count; -+ u8 frame_counter; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; -+}; -+ -+struct ipu_fw_psys_pgm { -+ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 ID; -+ u16 program_manifest_offset; -+ u16 terminal_manifest_offset; -+ u16 private_data_offset; -+ u16 rbm_manifest_offset; -+ u16 size; -+ u8 alignment; -+ u8 kernel_count; -+ u8 program_count; -+ u8 terminal_count; -+ u8 subgraph_count; -+ u8 reserved[5]; -+}; -+ -+struct ipu_fw_generic_program_manifest { -+ u16 *dev_chn_size; -+ u16 *dev_chn_offset; -+ u16 *ext_mem_size; -+ u16 *ext_mem_offset; -+ u8 cell_id; -+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; -+ u8 cell_type_id; -+ u8 *is_dfm_relocatable; -+ u32 *dfm_port_bitmap; -+ u32 *dfm_active_port_bitmap; -+}; -+ -+struct ipu_fw_resource_definitions { -+ u32 num_cells; -+ u32 num_cells_type; -+ const u8 *cells; -+ u32 num_dev_channels; -+ const u16 *dev_channels; -+ -+ u32 num_ext_mem_types; -+ u32 num_ext_mem_ids; -+ const u16 *ext_mem_ids; -+ -+ u32 num_dfm_ids; -+ const u16 *dfms; -+ -+ u32 cell_mem_row; -+ const u8 *cell_mem; -+}; -+ -+struct ipu6_psys_hw_res_variant { -+ unsigned int queue_num; -+ unsigned int cell_num; -+ int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value); -+ int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, u32 active_bitmap); -+ int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset); -+ int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process); -+}; -+ -+struct ipu_psys_kcmd; -+struct ipu_psys; -+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); -+int 
ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_rcv_event(struct ipu_psys *psys, -+ struct ipu_fw_psys_event *event); -+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, -+ struct ipu_psys_kcmd *kcmd, -+ u32 buffer, unsigned int size); -+void ipu_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note); -+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, -+ dma_addr_t vaddress); -+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd -+ *kcmd, int index); -+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); -+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, -+ struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, u32 buffer); -+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd); -+int -+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 vaddress); -+int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 *kernel_enable_bitmap); -+struct ipu_fw_psys_buffer_set * -+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, -+ void *kaddr, u32 frame_counter); -+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd); -+u8 
ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd); -+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id); -+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_open(struct ipu_psys *psys); -+int ipu_fw_psys_close(struct ipu_psys *psys); -+ -+/* common resource interface for both abi and api mode */ -+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, -+ u8 value); -+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); -+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); -+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value); -+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset); -+int -+ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process); -+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value); -+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap); -+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset); -+int -+ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process); -+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note); -+void ipu6_psys_hw_res_variant_init(void); -+#endif /* IPU_FW_PSYS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c -new file mode 100644 -index 0000000..b6af0b7 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c -@@ -0,0 +1,103 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// 
Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6-platform-resources.h" -+#include "ipu6se-platform-resources.h" -+ -+/********** Generic resource handling **********/ -+ -+/* -+ * Extension library gives byte offsets to its internal structures. -+ * use those offsets to update fields. Without extension lib access -+ * structures directly. -+ */ -+static const struct ipu6_psys_hw_res_variant *var = &hw_var; -+ -+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, -+ u8 value) -+{ -+ struct ipu_fw_psys_process_group *parent = -+ (struct ipu_fw_psys_process_group *)((char *)ptr + -+ ptr->parent_offset); -+ -+ ptr->cells[index] = value; -+ parent->resource_bitmap |= 1 << value; -+ -+ return 0; -+} -+ -+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) -+{ -+ return ptr->cells[index]; -+} -+ -+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) -+{ -+ struct ipu_fw_psys_process_group *parent; -+ u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); -+ int retval = -1; -+ u8 value; -+ -+ parent = (struct ipu_fw_psys_process_group *)((char *)ptr + -+ ptr->parent_offset); -+ -+ value = var->cell_num; -+ if ((1 << cell_id) != 0 && -+ ((1 << cell_id) & parent->resource_bitmap)) { -+ ipu_fw_psys_set_process_cell_id(ptr, 0, value); -+ parent->resource_bitmap &= ~(1 << cell_id); -+ retval = 0; -+ } -+ -+ return retval; -+} -+ -+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value) -+{ -+ if (var->set_proc_dev_chn) -+ return var->set_proc_dev_chn(ptr, offset, value); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap) -+{ -+ if (var->set_proc_dfm_bitmap) -+ return var->set_proc_dfm_bitmap(ptr, id, bitmap, -+ active_bitmap); -+ -+ 
WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset) -+{ -+ if (var->set_proc_ext_mem) -+ return var->set_proc_ext_mem(ptr, type_id, mem_id, offset); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -+int -+ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process) -+{ -+ if (var->get_pgm_by_proc) -+ return var->get_pgm_by_proc(gen_pm, pg_manifest, process); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h -new file mode 100644 -index 0000000..dc2cae3 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h -@@ -0,0 +1,78 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2020 - 2024 Intel Corporation */ -+ -+#ifndef IPU_PLATFORM_PSYS_H -+#define IPU_PLATFORM_PSYS_H -+ -+#include "ipu-psys.h" -+#include -+ -+#define IPU_PSYS_BUF_SET_POOL_SIZE 8 -+#define IPU_PSYS_BUF_SET_MAX_SIZE 1024 -+ -+struct ipu_fw_psys_buffer_set; -+ -+enum ipu_psys_cmd_state { -+ KCMD_STATE_PPG_NEW, -+ KCMD_STATE_PPG_START, -+ KCMD_STATE_PPG_ENQUEUE, -+ KCMD_STATE_PPG_STOP, -+ KCMD_STATE_PPG_COMPLETE -+}; -+ -+struct ipu_psys_scheduler { -+ struct list_head ppgs; -+ struct mutex bs_mutex; /* Protects buf_set field */ -+ struct list_head buf_sets; -+}; -+ -+enum ipu_psys_ppg_state { -+ PPG_STATE_START = (1 << 0), -+ PPG_STATE_STARTING = (1 << 1), -+ PPG_STATE_STARTED = (1 << 2), -+ PPG_STATE_RUNNING = (1 << 3), -+ PPG_STATE_SUSPEND = (1 << 4), -+ PPG_STATE_SUSPENDING = (1 << 5), -+ PPG_STATE_SUSPENDED = (1 << 6), -+ PPG_STATE_RESUME = (1 << 7), -+ PPG_STATE_RESUMING = (1 << 8), -+ PPG_STATE_RESUMED = (1 << 
9), -+ PPG_STATE_STOP = (1 << 10), -+ PPG_STATE_STOPPING = (1 << 11), -+ PPG_STATE_STOPPED = (1 << 12), -+}; -+ -+struct ipu_psys_ppg { -+ struct ipu_psys_pg *kpg; -+ struct ipu_psys_fh *fh; -+ struct list_head list; -+ struct list_head sched_list; -+ u64 token; -+ void *manifest; -+ struct mutex mutex; /* Protects kcmd and ppg state field */ -+ struct list_head kcmds_new_list; -+ struct list_head kcmds_processing_list; -+ struct list_head kcmds_finished_list; -+ enum ipu_psys_ppg_state state; -+ u32 pri_base; -+ int pri_dynamic; -+}; -+ -+struct ipu_psys_buffer_set { -+ struct list_head list; -+ struct ipu_fw_psys_buffer_set *buf_set; -+ size_t size; -+ size_t buf_set_size; -+ dma_addr_t dma_addr; -+ void *kaddr; -+ struct ipu_psys_kcmd *kcmd; -+}; -+ -+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd); -+void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, -+ struct ipu_psys_kcmd *kcmd, -+ int error); -+int ipu_psys_fh_init(struct ipu_psys_fh *fh); -+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh); -+ -+#endif /* IPU_PLATFORM_PSYS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h -new file mode 100644 -index 0000000..1f064dc ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h -@@ -0,0 +1,103 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2018 - 2024 Intel Corporation */ -+ -+#ifndef IPU_PLATFORM_RESOURCES_COMMON_H -+#define IPU_PLATFORM_RESOURCES_COMMON_H -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 0 -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 2 -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT 2 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 -+ -+#define 
IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 -+#define IPU_FW_PSYS_N_FRAME_PLANES 6 -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 1 -+ -+#define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES 4 -+#define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES 4 -+ -+#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 -+#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 4 -+#define IPU_FW_PSYS_RBM_NOF_ELEMS 5 -+#define IPU_FW_PSYS_KBM_NOF_ELEMS 4 -+ -+struct ipu_fw_psys_process { -+ s16 parent_offset; -+ u8 size; -+ u8 cell_dependencies_offset; -+ u8 terminal_dependencies_offset; -+ u8 process_extension_offset; -+ u8 ID; -+ u8 program_idx; -+ u8 state; -+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; -+ u8 cell_dependency_count; -+ u8 terminal_dependency_count; -+}; -+ -+struct ipu_fw_psys_program_manifest { -+ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ s16 parent_offset; -+ u8 program_dependency_offset; -+ u8 terminal_dependency_offset; -+ u8 size; -+ u8 program_extension_offset; -+ u8 program_type; -+ u8 ID; -+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; -+ u8 cell_type_id; -+ u8 program_dependency_count; -+ u8 terminal_dependency_count; -+}; -+ -+/* platform specific resource interface */ -+struct ipu_psys_resource_pool; -+struct ipu_psys_resource_alloc; -+struct ipu_fw_psys_process_group; -+int ipu_psys_allocate_resources(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *pool); -+int ipu_psys_move_resources(const struct device *dev, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *source_pool, -+ struct ipu_psys_resource_pool *target_pool); -+ -+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, -+ struct ipu_psys_resource_pool *dest); -+ -+int ipu_psys_try_allocate_resources(struct device *dev, -+ struct 
ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_pool *pool); -+ -+void ipu_psys_reset_process_cell(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ int process_count); -+void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *pool); -+ -+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap); -+ -+int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool); -+void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, -+ u8 queue_id); -+ -+extern const struct ipu_fw_resource_definitions *ipu6_res_defs; -+extern const struct ipu_fw_resource_definitions *ipu6se_res_defs; -+extern const struct ipu_fw_resource_definitions *ipu6ep_res_defs; -+extern struct ipu6_psys_hw_res_variant hw_var; -+#endif /* IPU_PLATFORM_RESOURCES_COMMON_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c -new file mode 100644 -index 0000000..32350ca ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c -@@ -0,0 +1,222 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2013 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "ipu-psys.h" -+ -+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long ret = -ENOTTY; -+ -+ if (file->f_op->unlocked_ioctl) -+ ret = file->f_op->unlocked_ioctl(file, cmd, arg); -+ -+ return ret; -+} -+ -+struct ipu_psys_buffer32 { -+ u64 len; -+ union { -+ int fd; -+ compat_uptr_t userptr; -+ u64 reserved; -+ } base; -+ u32 data_offset; -+ u32 bytes_used; -+ u32 flags; -+ u32 reserved[2]; -+} __packed; -+ -+struct ipu_psys_command32 { -+ u64 issue_id; -+ u64 user_token; -+ u32 priority; -+ compat_uptr_t pg_manifest; -+ compat_uptr_t buffers; -+ int pg; -+ u32 
pg_manifest_size; -+ u32 bufcount; -+ u32 min_psys_freq; -+ u32 frame_counter; -+ u32 reserved[2]; -+} __packed; -+ -+struct ipu_psys_manifest32 { -+ u32 index; -+ u32 size; -+ compat_uptr_t manifest; -+ u32 reserved[5]; -+} __packed; -+ -+static int -+get_ipu_psys_command32(struct ipu_psys_command *kp, -+ struct ipu_psys_command32 __user *up) -+{ -+ compat_uptr_t pgm, bufs; -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_command32)); -+ if (!access_ok || get_user(kp->issue_id, &up->issue_id) || -+ get_user(kp->user_token, &up->user_token) || -+ get_user(kp->priority, &up->priority) || -+ get_user(pgm, &up->pg_manifest) || -+ get_user(bufs, &up->buffers) || -+ get_user(kp->pg, &up->pg) || -+ get_user(kp->pg_manifest_size, &up->pg_manifest_size) || -+ get_user(kp->bufcount, &up->bufcount) || -+ get_user(kp->min_psys_freq, &up->min_psys_freq) || -+ get_user(kp->frame_counter, &up->frame_counter) -+ ) -+ return -EFAULT; -+ -+ kp->pg_manifest = compat_ptr(pgm); -+ kp->buffers = compat_ptr(bufs); -+ -+ return 0; -+} -+ -+static int -+get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, -+ struct ipu_psys_buffer32 __user *up) -+{ -+ compat_uptr_t ptr; -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); -+ if (!access_ok || get_user(kp->len, &up->len) || -+ get_user(ptr, &up->base.userptr) || -+ get_user(kp->data_offset, &up->data_offset) || -+ get_user(kp->bytes_used, &up->bytes_used) || -+ get_user(kp->flags, &up->flags)) -+ return -EFAULT; -+ -+ kp->base.userptr = compat_ptr(ptr); -+ -+ return 0; -+} -+ -+static int -+put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, -+ struct ipu_psys_buffer32 __user *up) -+{ -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); -+ if (!access_ok || put_user(kp->len, &up->len) || -+ put_user(kp->base.fd, &up->base.fd) || -+ put_user(kp->data_offset, &up->data_offset) || -+ put_user(kp->bytes_used, &up->bytes_used) || -+ put_user(kp->flags, 
&up->flags)) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+static int -+get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, -+ struct ipu_psys_manifest32 __user *up) -+{ -+ compat_uptr_t ptr; -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); -+ if (!access_ok || get_user(kp->index, &up->index) || -+ get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) -+ return -EFAULT; -+ -+ kp->manifest = compat_ptr(ptr); -+ -+ return 0; -+} -+ -+static int -+put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, -+ struct ipu_psys_manifest32 __user *up) -+{ -+ compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); -+ if (!access_ok || put_user(kp->index, &up->index) || -+ put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) -+#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) -+#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) -+#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) -+#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) -+ -+long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ union { -+ struct ipu_psys_buffer buf; -+ struct ipu_psys_command cmd; -+ struct ipu_psys_event ev; -+ struct ipu_psys_manifest m; -+ } karg; -+ int compatible_arg = 1; -+ int err = 0; -+ void __user *up = compat_ptr(arg); -+ -+ switch (cmd) { -+ case IPU_IOC_GETBUF32: -+ cmd = IPU_IOC_GETBUF; -+ break; -+ case IPU_IOC_PUTBUF32: -+ cmd = IPU_IOC_PUTBUF; -+ break; -+ case IPU_IOC_QCMD32: -+ cmd = IPU_IOC_QCMD; -+ break; -+ case IPU_IOC_GET_MANIFEST32: -+ cmd = IPU_IOC_GET_MANIFEST; -+ break; -+ } -+ -+ switch (cmd) { -+ case IPU_IOC_GETBUF: -+ case IPU_IOC_PUTBUF: -+ err = get_ipu_psys_buffer32(&karg.buf, up); -+ compatible_arg = 0; -+ 
break; -+ case IPU_IOC_QCMD: -+ err = get_ipu_psys_command32(&karg.cmd, up); -+ compatible_arg = 0; -+ break; -+ case IPU_IOC_GET_MANIFEST: -+ err = get_ipu_psys_manifest32(&karg.m, up); -+ compatible_arg = 0; -+ break; -+ } -+ if (err) -+ return err; -+ -+ if (compatible_arg) { -+ err = native_ioctl(file, cmd, (unsigned long)up); -+ } else { -+ err = native_ioctl(file, cmd, (unsigned long)&karg); -+ } -+ -+ if (err) -+ return err; -+ -+ switch (cmd) { -+ case IPU_IOC_GETBUF: -+ err = put_ipu_psys_buffer32(&karg.buf, up); -+ break; -+ case IPU_IOC_GET_MANIFEST: -+ err = put_ipu_psys_manifest32(&karg.m, up); -+ break; -+ } -+ return err; -+} -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -new file mode 100644 -index 0000000..9f36749 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -@@ -0,0 +1,1599 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2013 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ipu6.h" -+#include "ipu6-mmu.h" -+#include "ipu6-bus.h" -+#include "ipu6-buttress.h" -+#include "ipu6-cpd.h" -+#include "ipu-fw-psys.h" -+#include "ipu-psys.h" -+#include "ipu6-platform-regs.h" -+#include "ipu6-fw-com.h" -+ -+static bool async_fw_init; -+module_param(async_fw_init, bool, 0664); -+MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); -+ -+#define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7 -+ -+#define IPU_PSYS_NUM_DEVICES 4 -+ -+#define IPU_PSYS_MAX_NUM_DESCS 1024 -+#define IPU_PSYS_MAX_NUM_BUFS 1024 -+#define IPU_PSYS_MAX_NUM_BUFS_LRU 12 -+ -+static int psys_runtime_pm_resume(struct device *dev); -+static int psys_runtime_pm_suspend(struct device *dev); -+ -+static dev_t ipu_psys_dev_t; -+static 
DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); -+static DEFINE_MUTEX(ipu_psys_mutex); -+ -+static struct fw_init_task { -+ struct delayed_work work; -+ struct ipu_psys *psys; -+} fw_init_task; -+ -+static void ipu6_psys_remove(struct auxiliary_device *auxdev); -+ -+static const struct bus_type ipu_psys_bus = { -+ .name = "intel-ipu6-psys", -+}; -+#define PKG_DIR_ENT_LEN_FOR_PSYS 2 -+#define PKG_DIR_SIZE_MASK_FOR_PSYS GENMASK(23, 0) -+ -+enum ipu6_version ipu_ver; -+ -+static u32 ipu6_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) -+{ -+ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS]; -+} -+ -+static u32 ipu6_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) -+{ -+ return pkg_dir[1]; -+} -+ -+static u32 ipu6_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) -+{ -+ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] & -+ PKG_DIR_SIZE_MASK_FOR_PSYS; -+} -+ -+#define PKG_DIR_ID_SHIFT 48 -+#define PKG_DIR_ID_MASK 0x7f -+ -+static u32 ipu6_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) -+{ -+ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] >> -+ PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; -+} -+ -+/* -+ * These are some trivial wrappers that save us from open-coding some -+ * common patterns and also that's were we have some checking (for the -+ * time being) -+ */ -+static void ipu_desc_add(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) -+{ -+ fh->num_descs++; -+ -+ WARN_ON_ONCE(fh->num_descs >= IPU_PSYS_MAX_NUM_DESCS); -+ list_add(&desc->list, &fh->descs_list); -+} -+ -+static void ipu_desc_del(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) -+{ -+ fh->num_descs--; -+ list_del_init(&desc->list); -+} -+ -+static void ipu_buffer_add(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs++; -+ -+ WARN_ON_ONCE(fh->num_bufs >= IPU_PSYS_MAX_NUM_BUFS); -+ list_add(&kbuf->list, &fh->bufs_list); -+} -+ -+static void ipu_buffer_del(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer 
*kbuf) -+{ -+ fh->num_bufs--; -+ list_del_init(&kbuf->list); -+} -+ -+static void ipu_buffer_lru_add(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs_lru++; -+ list_add_tail(&kbuf->list, &fh->bufs_lru); -+} -+ -+static void ipu_buffer_lru_del(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs_lru--; -+ list_del_init(&kbuf->list); -+} -+ -+static struct ipu_psys_kbuffer *ipu_psys_kbuffer_alloc(void) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); -+ if (!kbuf) -+ return NULL; -+ -+ atomic_set(&kbuf->map_count, 0); -+ INIT_LIST_HEAD(&kbuf->list); -+ return kbuf; -+} -+ -+static struct ipu_psys_desc *ipu_psys_desc_alloc(int fd) -+{ -+ struct ipu_psys_desc *desc; -+ -+ desc = kzalloc(sizeof(*desc), GFP_KERNEL); -+ if (!desc) -+ return NULL; -+ -+ desc->fd = fd; -+ INIT_LIST_HEAD(&desc->list); -+ return desc; -+} -+ -+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) -+{ -+ struct ipu_psys_pg *kpg; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ list_for_each_entry(kpg, &psys->pgs, list) { -+ if (!kpg->pg_size && kpg->size >= pg_size) { -+ kpg->pg_size = pg_size; -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ return kpg; -+ } -+ } -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ /* no big enough buffer available, allocate new one */ -+ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); -+ if (!kpg) -+ return NULL; -+ -+ kpg->pg = ipu6_dma_alloc(psys->adev, pg_size, &kpg->pg_dma_addr, -+ GFP_KERNEL, 0); -+ if (!kpg->pg) { -+ kfree(kpg); -+ return NULL; -+ } -+ -+ kpg->pg_size = pg_size; -+ kpg->size = pg_size; -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ list_add(&kpg->list, &psys->pgs); -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ -+ return kpg; -+} -+ -+static struct ipu_psys_desc *psys_desc_lookup(struct ipu_psys_fh *fh, int fd) -+{ -+ struct ipu_psys_desc *desc; -+ -+ list_for_each_entry(desc, &fh->descs_list, 
list) { -+ if (desc->fd == fd) -+ return desc; -+ } -+ -+ return NULL; -+} -+ -+static bool dmabuf_cmp(struct dma_buf *lb, struct dma_buf *rb) -+{ -+ return lb == rb && lb->size == rb->size; -+} -+ -+static struct ipu_psys_kbuffer *psys_buf_lookup(struct ipu_psys_fh *fh, int fd) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ struct dma_buf *dma_buf; -+ -+ dma_buf = dma_buf_get(fd); -+ if (IS_ERR(dma_buf)) -+ return NULL; -+ -+ /* -+ * First lookup so-called `active` list, that is the list of -+ * referenced buffers -+ */ -+ list_for_each_entry(kbuf, &fh->bufs_list, list) { -+ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { -+ dma_buf_put(dma_buf); -+ return kbuf; -+ } -+ } -+ -+ /* -+ * We didn't find anything on the `active` list, try the LRU list -+ * (list of unreferenced buffers) and possibly resurrect a buffer -+ */ -+ list_for_each_entry(kbuf, &fh->bufs_lru, list) { -+ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { -+ dma_buf_put(dma_buf); -+ ipu_buffer_lru_del(fh, kbuf); -+ ipu_buffer_add(fh, kbuf); -+ return kbuf; -+ } -+ } -+ -+ dma_buf_put(dma_buf); -+ return NULL; -+} -+ -+struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) -+{ -+ struct ipu_psys_desc *desc; -+ -+ desc = psys_desc_lookup(fh, fd); -+ if (!desc) -+ return NULL; -+ -+ return desc->kbuf; -+} -+ -+struct ipu_psys_kbuffer * -+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) -+{ -+ struct ipu_psys_kbuffer *kbuffer; -+ -+ list_for_each_entry(kbuffer, &fh->bufs_list, list) { -+ if (kbuffer->kaddr == kaddr) -+ return kbuffer; -+ } -+ -+ return NULL; -+} -+ -+static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) -+{ -+ struct vm_area_struct *vma; -+ unsigned long start, end; -+ int npages, array_size; -+ struct page **pages; -+ struct sg_table *sgt; -+ int ret = -ENOMEM; -+ int nr = 0; -+ u32 flags; -+ -+ start = attach->userptr; -+ end = PAGE_ALIGN(start + attach->len); -+ npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; -+ array_size = npages * 
sizeof(struct page *); -+ -+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); -+ if (!sgt) -+ return -ENOMEM; -+ -+ WARN_ON_ONCE(attach->npages); -+ -+ pages = kvzalloc(array_size, GFP_KERNEL); -+ if (!pages) -+ goto free_sgt; -+ -+ mmap_read_lock(current->mm); -+ vma = vma_lookup(current->mm, start); -+ if (unlikely(!vma)) { -+ ret = -EFAULT; -+ goto error_up_read; -+ } -+ mmap_read_unlock(current->mm); -+ -+ flags = FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM; -+ nr = pin_user_pages_fast(start & PAGE_MASK, npages, -+ flags, pages); -+ if (nr < npages) -+ goto error; -+ -+ attach->pages = pages; -+ attach->npages = npages; -+ -+ ret = sg_alloc_table_from_pages(sgt, pages, npages, -+ start & ~PAGE_MASK, attach->len, -+ GFP_KERNEL); -+ if (ret < 0) -+ goto error; -+ -+ attach->sgt = sgt; -+ -+ return 0; -+ -+error_up_read: -+ mmap_read_unlock(current->mm); -+error: -+ if (nr) -+ unpin_user_pages(pages, nr); -+ kvfree(pages); -+free_sgt: -+ kfree(sgt); -+ -+ pr_err("failed to get userpages:%d\n", ret); -+ -+ return ret; -+} -+ -+static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) -+{ -+ if (!attach || !attach->userptr || !attach->sgt) -+ return; -+ -+ unpin_user_pages(attach->pages, attach->npages); -+ kvfree(attach->pages); -+ -+ sg_free_table(attach->sgt); -+ kfree(attach->sgt); -+ attach->sgt = NULL; -+} -+ -+static int ipu_dma_buf_attach(struct dma_buf *dbuf, -+ struct dma_buf_attachment *attach) -+{ -+ struct ipu_psys_kbuffer *kbuf = dbuf->priv; -+ struct ipu_dma_buf_attach *ipu_attach; -+ int ret; -+ -+ ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); -+ if (!ipu_attach) -+ return -ENOMEM; -+ -+ ipu_attach->len = kbuf->len; -+ ipu_attach->userptr = kbuf->userptr; -+ -+ ret = ipu_psys_get_userpages(ipu_attach); -+ if (ret) { -+ kfree(ipu_attach); -+ return ret; -+ } -+ -+ attach->priv = ipu_attach; -+ return 0; -+} -+ -+static void ipu_dma_buf_detach(struct dma_buf *dbuf, -+ struct dma_buf_attachment *attach) -+{ -+ struct ipu_dma_buf_attach 
*ipu_attach = attach->priv; -+ -+ ipu_psys_put_userpages(ipu_attach); -+ kfree(ipu_attach); -+ attach->priv = NULL; -+} -+ -+static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, -+ enum dma_data_direction dir) -+{ -+ struct ipu_dma_buf_attach *ipu_attach = attach->priv; -+ struct pci_dev *pdev = to_pci_dev(attach->dev); -+ struct ipu6_device *isp = pci_get_drvdata(pdev); -+ struct ipu6_bus_device *adev = isp->psys; -+ unsigned long attrs; -+ int ret; -+ -+ attrs = DMA_ATTR_SKIP_CPU_SYNC; -+ ret = dma_map_sgtable(&pdev->dev, ipu_attach->sgt, dir, attrs); -+ if (ret) { -+ dev_err(attach->dev, "pci buf map failed\n"); -+ return ERR_PTR(-EIO); -+ } -+ -+ dma_sync_sgtable_for_device(&pdev->dev, ipu_attach->sgt, dir); -+ -+ ret = ipu6_dma_map_sgtable(adev, ipu_attach->sgt, dir, 0); -+ if (ret) { -+ dev_err(attach->dev, "ipu6 buf map failed\n"); -+ return ERR_PTR(-EIO); -+ } -+ -+ ipu6_dma_sync_sgtable(adev, ipu_attach->sgt); -+ -+ return ipu_attach->sgt; -+} -+ -+static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, -+ struct sg_table *sgt, enum dma_data_direction dir) -+{ -+ struct pci_dev *pdev = to_pci_dev(attach->dev); -+ struct ipu6_device *isp = pci_get_drvdata(pdev); -+ struct ipu6_bus_device *adev = isp->psys; -+ -+ ipu6_dma_unmap_sgtable(adev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); -+ dma_unmap_sgtable(&pdev->dev, sgt, dir, 0); -+} -+ -+static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) -+{ -+ return -ENOTTY; -+} -+ -+static void ipu_dma_buf_release(struct dma_buf *buf) -+{ -+ struct ipu_psys_kbuffer *kbuf = buf->priv; -+ -+ if (!kbuf) -+ return; -+ -+ if (kbuf->db_attach) -+ ipu_psys_put_userpages(kbuf->db_attach->priv); -+ -+ kfree(kbuf); -+} -+ -+static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, -+ enum dma_data_direction dir) -+{ -+ return -ENOTTY; -+} -+ -+static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) -+{ -+ struct dma_buf_attachment *attach; -+ struct 
ipu_dma_buf_attach *ipu_attach; -+ -+ if (list_empty(&dmabuf->attachments)) -+ return -EINVAL; -+ -+ attach = list_last_entry(&dmabuf->attachments, -+ struct dma_buf_attachment, node); -+ ipu_attach = attach->priv; -+ -+ if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) -+ return -EINVAL; -+ -+ map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); -+ map->is_iomem = false; -+ if (!map->vaddr) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) -+{ -+ struct dma_buf_attachment *attach; -+ struct ipu_dma_buf_attach *ipu_attach; -+ -+ if (WARN_ON(list_empty(&dmabuf->attachments))) -+ return; -+ -+ attach = list_last_entry(&dmabuf->attachments, -+ struct dma_buf_attachment, node); -+ ipu_attach = attach->priv; -+ -+ if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) -+ return; -+ -+ vm_unmap_ram(map->vaddr, ipu_attach->npages); -+} -+ -+static const struct dma_buf_ops ipu_dma_buf_ops = { -+ .attach = ipu_dma_buf_attach, -+ .detach = ipu_dma_buf_detach, -+ .map_dma_buf = ipu_dma_buf_map, -+ .unmap_dma_buf = ipu_dma_buf_unmap, -+ .release = ipu_dma_buf_release, -+ .begin_cpu_access = ipu_dma_buf_begin_cpu_access, -+ .mmap = ipu_dma_buf_mmap, -+ .vmap = ipu_dma_buf_vmap, -+ .vunmap = ipu_dma_buf_vunmap, -+}; -+ -+static int ipu_psys_open(struct inode *inode, struct file *file) -+{ -+ struct ipu_psys *psys = inode_to_ipu_psys(inode); -+ struct ipu_psys_fh *fh; -+ int rval; -+ -+ fh = kzalloc(sizeof(*fh), GFP_KERNEL); -+ if (!fh) -+ return -ENOMEM; -+ -+ fh->psys = psys; -+ -+ file->private_data = fh; -+ -+ mutex_init(&fh->mutex); -+ INIT_LIST_HEAD(&fh->bufs_list); -+ INIT_LIST_HEAD(&fh->descs_list); -+ INIT_LIST_HEAD(&fh->bufs_lru); -+ init_waitqueue_head(&fh->wait); -+ -+ rval = ipu_psys_fh_init(fh); -+ if (rval) -+ goto open_failed; -+ -+ mutex_lock(&psys->mutex); -+ list_add_tail(&fh->list, &psys->fhs); -+ mutex_unlock(&psys->mutex); -+ -+ return 0; -+ 
-+open_failed: -+ mutex_destroy(&fh->mutex); -+ kfree(fh); -+ return rval; -+} -+ -+static inline void ipu_psys_kbuf_unmap(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ if (!kbuf) -+ return; -+ -+ kbuf->valid = false; -+ if (kbuf->kaddr) { -+ struct iosys_map dmap; -+ -+ iosys_map_set_vaddr(&dmap, kbuf->kaddr); -+ dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); -+ } -+ -+ if (!kbuf->userptr) -+ ipu6_dma_unmap_sgtable(fh->psys->adev, kbuf->sgt, -+ DMA_BIDIRECTIONAL, 0); -+ -+ if (!IS_ERR_OR_NULL(kbuf->sgt)) -+ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, -+ kbuf->sgt, -+ DMA_BIDIRECTIONAL); -+ if (!IS_ERR_OR_NULL(kbuf->db_attach)) -+ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); -+ dma_buf_put(kbuf->dbuf); -+ -+ kbuf->db_attach = NULL; -+ kbuf->dbuf = NULL; -+ kbuf->sgt = NULL; -+} -+ -+static void __ipu_psys_unmapbuf(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ /* From now on it is not safe to use this kbuffer */ -+ ipu_psys_kbuf_unmap(fh, kbuf); -+ ipu_buffer_del(fh, kbuf); -+ if (!kbuf->userptr) -+ kfree(kbuf); -+} -+ -+static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kbuffer *kbuf; -+ struct ipu_psys_desc *desc; -+ -+ desc = psys_desc_lookup(fh, fd); -+ if (WARN_ON_ONCE(!desc)) { -+ dev_err(dev, "descriptor not found: %d\n", fd); -+ return -EINVAL; -+ } -+ -+ kbuf = desc->kbuf; -+ /* descriptor is gone now */ -+ ipu_desc_del(fh, desc); -+ kfree(desc); -+ -+ if (WARN_ON_ONCE(!kbuf || !kbuf->dbuf)) { -+ dev_err(dev, -+ "descriptor with no buffer: %d\n", fd); -+ return -EINVAL; -+ } -+ -+ /* Wait for final UNMAP */ -+ if (!atomic_dec_and_test(&kbuf->map_count)) -+ return 0; -+ -+ __ipu_psys_unmapbuf(fh, kbuf); -+ -+ return 0; -+} -+ -+static int ipu_psys_release(struct inode *inode, struct file *file) -+{ -+ struct ipu_psys *psys = inode_to_ipu_psys(inode); -+ struct ipu_psys_fh *fh = 
file->private_data; -+ -+ mutex_lock(&fh->mutex); -+ while (!list_empty(&fh->descs_list)) { -+ struct ipu_psys_desc *desc; -+ -+ desc = list_first_entry(&fh->descs_list, -+ struct ipu_psys_desc, -+ list); -+ -+ ipu_desc_del(fh, desc); -+ kfree(desc); -+ } -+ -+ while (!list_empty(&fh->bufs_lru)) { -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = list_first_entry(&fh->bufs_lru, -+ struct ipu_psys_kbuffer, -+ list); -+ -+ ipu_buffer_lru_del(fh, kbuf); -+ __ipu_psys_unmapbuf(fh, kbuf); -+ } -+ -+ while (!list_empty(&fh->bufs_list)) { -+ struct dma_buf_attachment *db_attach; -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = list_first_entry(&fh->bufs_list, -+ struct ipu_psys_kbuffer, -+ list); -+ -+ ipu_buffer_del(fh, kbuf); -+ db_attach = kbuf->db_attach; -+ -+ /* Unmap and release buffers */ -+ if (kbuf->dbuf && db_attach) { -+ ipu_psys_kbuf_unmap(fh, kbuf); -+ } else { -+ if (db_attach) -+ ipu_psys_put_userpages(db_attach->priv); -+ kfree(kbuf); -+ } -+ } -+ mutex_unlock(&fh->mutex); -+ -+ mutex_lock(&psys->mutex); -+ list_del(&fh->list); -+ -+ mutex_unlock(&psys->mutex); -+ ipu_psys_fh_deinit(fh); -+ -+ mutex_lock(&psys->mutex); -+ if (list_empty(&psys->fhs)) -+ psys->power_gating = 0; -+ mutex_unlock(&psys->mutex); -+ mutex_destroy(&fh->mutex); -+ kfree(fh); -+ -+ return 0; -+} -+ -+static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_desc *desc; -+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info); -+ struct dma_buf *dbuf; -+ int ret; -+ -+ if (!buf->base.userptr) { -+ dev_err(dev, "Buffer allocation not supported\n"); -+ return -EINVAL; -+ } -+ -+ kbuf = ipu_psys_kbuffer_alloc(); -+ if (!kbuf) -+ return -ENOMEM; -+ -+ kbuf->len = buf->len; -+ kbuf->userptr = (unsigned long)buf->base.userptr; -+ kbuf->flags = buf->flags; -+ -+ exp_info.ops = &ipu_dma_buf_ops; -+ exp_info.size = kbuf->len; -+ exp_info.flags = 
O_RDWR; -+ exp_info.priv = kbuf; -+ -+ dbuf = dma_buf_export(&exp_info); -+ if (IS_ERR(dbuf)) { -+ kfree(kbuf); -+ return PTR_ERR(dbuf); -+ } -+ -+ ret = dma_buf_fd(dbuf, 0); -+ if (ret < 0) { -+ dma_buf_put(dbuf); -+ return ret; -+ } -+ -+ buf->base.fd = ret; -+ buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; -+ buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; -+ kbuf->flags = buf->flags; -+ -+ desc = ipu_psys_desc_alloc(ret); -+ if (!desc) { -+ dma_buf_put(dbuf); -+ return -ENOMEM; -+ } -+ -+ kbuf->dbuf = dbuf; -+ -+ mutex_lock(&fh->mutex); -+ ipu_desc_add(fh, desc); -+ ipu_buffer_add(fh, kbuf); -+ mutex_unlock(&fh->mutex); -+ -+ dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", -+ buf->base.userptr, buf->len, buf->base.fd); -+ -+ return 0; -+} -+ -+static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) -+{ -+ return 0; -+} -+ -+static void ipu_psys_kbuffer_lru(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ ipu_buffer_del(fh, kbuf); -+ ipu_buffer_lru_add(fh, kbuf); -+ -+ while (fh->num_bufs_lru > IPU_PSYS_MAX_NUM_BUFS_LRU) { -+ kbuf = list_first_entry(&fh->bufs_lru, -+ struct ipu_psys_kbuffer, -+ list); -+ -+ ipu_buffer_lru_del(fh, kbuf); -+ __ipu_psys_unmapbuf(fh, kbuf); -+ } -+} -+ -+struct ipu_psys_kbuffer *ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->isp->pdev->dev; -+ int ret; -+ struct ipu_psys_kbuffer *kbuf; -+ struct ipu_psys_desc *desc; -+ struct dma_buf *dbuf; -+ struct iosys_map dmap; -+ -+ dbuf = dma_buf_get(fd); -+ if (IS_ERR(dbuf)) -+ return NULL; -+ -+ desc = psys_desc_lookup(fh, fd); -+ if (!desc) { -+ desc = ipu_psys_desc_alloc(fd); -+ if (!desc) -+ goto desc_alloc_fail; -+ ipu_desc_add(fh, desc); -+ } -+ -+ kbuf = psys_buf_lookup(fh, fd); -+ if (!kbuf) { -+ kbuf = ipu_psys_kbuffer_alloc(); -+ if (!kbuf) -+ goto buf_alloc_fail; -+ ipu_buffer_add(fh, kbuf); -+ } -+ -+ /* If this descriptor references no buffer or new buffer 
*/ -+ if (desc->kbuf != kbuf) { -+ if (desc->kbuf) { -+ /* -+ * Un-reference old buffer and possibly put it on -+ * the LRU list -+ */ -+ if (atomic_dec_and_test(&desc->kbuf->map_count)) -+ ipu_psys_kbuffer_lru(fh, desc->kbuf); -+ } -+ -+ /* Grab reference of the new buffer */ -+ atomic_inc(&kbuf->map_count); -+ } -+ -+ desc->kbuf = kbuf; -+ -+ if (kbuf->sgt) { -+ dev_dbg(dev, "fd %d has been mapped!\n", fd); -+ dma_buf_put(dbuf); -+ goto mapbuf_end; -+ } -+ -+ kbuf->dbuf = dbuf; -+ -+ if (kbuf->len == 0) -+ kbuf->len = kbuf->dbuf->size; -+ -+ kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); -+ if (IS_ERR(kbuf->db_attach)) { -+ dev_dbg(dev, "dma buf attach failed\n"); -+ goto attach_fail; -+ } -+ -+ kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, -+ DMA_BIDIRECTIONAL); -+ if (IS_ERR_OR_NULL(kbuf->sgt)) { -+ kbuf->sgt = NULL; -+ dev_dbg(dev, "dma buf map attachment failed\n"); -+ goto kbuf_map_fail; -+ } -+ -+ if (!kbuf->userptr) { -+ ret = ipu6_dma_map_sgtable(psys->adev, kbuf->sgt, -+ DMA_BIDIRECTIONAL, 0); -+ if (ret) { -+ dev_dbg(dev, "ipu6 buf map failed\n"); -+ goto kbuf_map_fail; -+ } -+ } -+ -+ kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); -+ -+ dmap.is_iomem = false; -+ if (dma_buf_vmap_unlocked(kbuf->dbuf, &dmap)) { -+ dev_dbg(dev, "dma buf vmap failed\n"); -+ goto kbuf_map_fail; -+ } -+ kbuf->kaddr = dmap.vaddr; -+ -+mapbuf_end: -+ dev_dbg(dev, "%s %s kbuf %p fd %d with len %llu mapped\n", -+ __func__, kbuf->userptr ? 
"private" : "imported", kbuf, fd, -+ kbuf->len); -+ -+ kbuf->valid = true; -+ return kbuf; -+ -+kbuf_map_fail: -+ ipu_buffer_del(fh, kbuf); -+ if (!IS_ERR_OR_NULL(kbuf->sgt)) { -+ if (!kbuf->userptr) -+ ipu6_dma_unmap_sgtable(psys->adev, kbuf->sgt, -+ DMA_BIDIRECTIONAL, 0); -+ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, -+ DMA_BIDIRECTIONAL); -+ } -+ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); -+attach_fail: -+ ipu_buffer_del(fh, kbuf); -+ dbuf = ERR_PTR(-EINVAL); -+ if (!kbuf->userptr) -+ kfree(kbuf); -+ -+buf_alloc_fail: -+ ipu_desc_del(fh, desc); -+ kfree(desc); -+ -+desc_alloc_fail: -+ if (!IS_ERR(dbuf)) -+ dma_buf_put(dbuf); -+ return NULL; -+} -+ -+static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ -+ dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF\n"); -+ -+ mutex_lock(&fh->mutex); -+ kbuf = ipu_psys_mapbuf_locked(fd, fh); -+ mutex_unlock(&fh->mutex); -+ -+ return kbuf ? 0 : -EINVAL; -+} -+ -+static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) -+{ -+ struct device *dev = &fh->psys->adev->auxdev.dev; -+ long ret; -+ -+ dev_dbg(dev, "IOC_UNMAPBUF\n"); -+ -+ mutex_lock(&fh->mutex); -+ ret = ipu_psys_unmapbuf_locked(fd, fh); -+ mutex_unlock(&fh->mutex); -+ -+ return ret; -+} -+ -+static __poll_t ipu_psys_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ struct ipu_psys_fh *fh = file->private_data; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ __poll_t ret = 0; -+ -+ dev_dbg(dev, "ipu psys poll\n"); -+ -+ poll_wait(file, &fh->wait, wait); -+ -+ if (ipu_get_completed_kcmd(fh)) -+ ret = POLLIN; -+ -+ dev_dbg(dev, "ipu psys poll ret %u\n", ret); -+ -+ return ret; -+} -+ -+static long ipu_get_manifest(struct ipu_psys_manifest *manifest, -+ struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu6_bus_device *adev = psys->adev; -+ struct ipu6_device *isp = 
adev->isp; -+ struct ipu6_cpd_client_pkg_hdr *client_pkg; -+ u32 entries; -+ void *host_fw_data; -+ dma_addr_t dma_fw_data; -+ u32 client_pkg_offset; -+ -+ host_fw_data = (void *)isp->cpd_fw->data; -+ dma_fw_data = sg_dma_address(adev->fw_sgt.sgl); -+ entries = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); -+ if (!manifest || manifest->index > entries - 1) { -+ dev_err(dev, "invalid argument\n"); -+ return -EINVAL; -+ } -+ -+ if (!ipu6_cpd_pkg_dir_get_size(adev->pkg_dir, manifest->index) || -+ ipu6_cpd_pkg_dir_get_type(adev->pkg_dir, manifest->index) < -+ IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE) { -+ dev_dbg(dev, "invalid pkg dir entry\n"); -+ return -ENOENT; -+ } -+ -+ client_pkg_offset = ipu6_cpd_pkg_dir_get_address(adev->pkg_dir, -+ manifest->index); -+ client_pkg_offset -= dma_fw_data; -+ -+ client_pkg = host_fw_data + client_pkg_offset; -+ manifest->size = client_pkg->pg_manifest_size; -+ -+ if (!manifest->manifest) -+ return 0; -+ -+ if (copy_to_user(manifest->manifest, -+ (uint8_t *)client_pkg + client_pkg->pg_manifest_offs, -+ manifest->size)) { -+ return -EFAULT; -+ } -+ -+ return 0; -+} -+ -+static long ipu_psys_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ union { -+ struct ipu_psys_buffer buf; -+ struct ipu_psys_command cmd; -+ struct ipu_psys_event ev; -+ struct ipu_psys_capability caps; -+ struct ipu_psys_manifest m; -+ } karg; -+ struct ipu_psys_fh *fh = file->private_data; -+ long err = 0; -+ void __user *up = (void __user *)arg; -+ bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); -+ -+ if (copy) { -+ if (_IOC_SIZE(cmd) > sizeof(karg)) -+ return -ENOTTY; -+ -+ if (_IOC_DIR(cmd) & _IOC_WRITE) { -+ err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); -+ if (err) -+ return -EFAULT; -+ } -+ } -+ -+ switch (cmd) { -+ case IPU_IOC_MAPBUF: -+ err = ipu_psys_mapbuf(arg, fh); -+ break; -+ case IPU_IOC_UNMAPBUF: -+ err = ipu_psys_unmapbuf(arg, fh); -+ break; -+ case IPU_IOC_QUERYCAP: -+ karg.caps = fh->psys->caps; -+ break; -+ 
case IPU_IOC_GETBUF: -+ err = ipu_psys_getbuf(&karg.buf, fh); -+ break; -+ case IPU_IOC_PUTBUF: -+ err = ipu_psys_putbuf(&karg.buf, fh); -+ break; -+ case IPU_IOC_QCMD: -+ err = ipu_psys_kcmd_new(&karg.cmd, fh); -+ break; -+ case IPU_IOC_DQEVENT: -+ err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); -+ break; -+ case IPU_IOC_GET_MANIFEST: -+ err = ipu_get_manifest(&karg.m, fh); -+ break; -+ default: -+ err = -ENOTTY; -+ break; -+ } -+ -+ if (err) -+ return err; -+ -+ if (copy && _IOC_DIR(cmd) & _IOC_READ) -+ if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+static const struct file_operations ipu_psys_fops = { -+ .open = ipu_psys_open, -+ .release = ipu_psys_release, -+ .unlocked_ioctl = ipu_psys_ioctl, -+ .poll = ipu_psys_poll, -+ .owner = THIS_MODULE, -+}; -+ -+static void ipu_psys_dev_release(struct device *dev) -+{ -+} -+ -+static int psys_runtime_pm_resume(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ unsigned long flags; -+ int retval; -+ -+ if (!psys) -+ return 0; -+ -+ spin_lock_irqsave(&psys->ready_lock, flags); -+ if (psys->ready) { -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ return 0; -+ } -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ -+ retval = ipu6_mmu_hw_init(adev->mmu); -+ if (retval) -+ return retval; -+ -+ if (async_fw_init && !psys->fwcom) { -+ dev_err(dev, "async firmware init not finished, skipping\n"); -+ return 0; -+ } -+ -+ if (!ipu6_buttress_auth_done(adev->isp)) { -+ dev_dbg(dev, "fw not yet authenticated, skipping\n"); -+ return 0; -+ } -+ -+ ipu_psys_setup_hw(psys); -+ -+ ipu_psys_subdomains_power(psys, 1); -+ ipu6_configure_spc(adev->isp, -+ &psys->pdata->ipdata->hw_variant, -+ IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX, -+ psys->pdata->base, adev->pkg_dir, -+ adev->pkg_dir_dma_addr); -+ -+ retval = ipu_fw_psys_open(psys); -+ if (retval) { -+ dev_err(dev, "Failed to open abi.\n"); -+ return 
retval; -+ } -+ -+ spin_lock_irqsave(&psys->ready_lock, flags); -+ psys->ready = 1; -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ -+ return 0; -+} -+ -+static int psys_runtime_pm_suspend(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ unsigned long flags; -+ int rval; -+ -+ if (!psys) -+ return 0; -+ -+ if (!psys->ready) -+ return 0; -+ -+ spin_lock_irqsave(&psys->ready_lock, flags); -+ psys->ready = 0; -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ -+ /* -+ * We can trace failure but better to not return an error. -+ * At suspend we are progressing towards psys power gated state. -+ * Any hang / failure inside psys will be forgotten soon. -+ */ -+ rval = ipu_fw_psys_close(psys); -+ if (rval) -+ dev_err(dev, "Device close failure: %d\n", rval); -+ -+ ipu_psys_subdomains_power(psys, 0); -+ -+ ipu6_mmu_hw_cleanup(adev->mmu); -+ -+ return 0; -+} -+ -+/* The following PM callbacks are needed to enable runtime PM in IPU PCI -+ * device resume, otherwise, runtime PM can't work in PCI resume from -+ * S3 state. 
-+ */ -+static int psys_resume(struct device *dev) -+{ -+ return 0; -+} -+ -+static int psys_suspend(struct device *dev) -+{ -+ return 0; -+} -+ -+static const struct dev_pm_ops psys_pm_ops = { -+ .runtime_suspend = psys_runtime_pm_suspend, -+ .runtime_resume = psys_runtime_pm_resume, -+ .suspend = psys_suspend, -+ .resume = psys_resume, -+}; -+ -+static int ipu_psys_sched_cmd(void *ptr) -+{ -+ struct ipu_psys *psys = ptr; -+ size_t pending = 0; -+ -+ while (1) { -+ wait_event_interruptible(psys->sched_cmd_wq, -+ (kthread_should_stop() || -+ (pending = -+ atomic_read(&psys->wakeup_count)))); -+ -+ if (kthread_should_stop()) -+ break; -+ -+ if (pending == 0) -+ continue; -+ -+ mutex_lock(&psys->mutex); -+ atomic_set(&psys->wakeup_count, 0); -+ ipu_psys_run_next(psys); -+ mutex_unlock(&psys->mutex); -+ } -+ -+ return 0; -+} -+ -+static void start_sp(struct ipu6_bus_device *adev) -+{ -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ void __iomem *spc_regs_base = psys->pdata->base + -+ psys->pdata->ipdata->hw_variant.spc_offset; -+ u32 val = 0; -+ -+ val |= IPU6_PSYS_SPC_STATUS_START | -+ IPU6_PSYS_SPC_STATUS_RUN | -+ IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; -+ val |= psys->icache_prefetch_sp ? 
-+ IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; -+ writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); -+} -+ -+static int query_sp(struct ipu6_bus_device *adev) -+{ -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ void __iomem *spc_regs_base = psys->pdata->base + -+ psys->pdata->ipdata->hw_variant.spc_offset; -+ u32 val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); -+ -+ /* return true when READY == 1, START == 0 */ -+ val &= IPU6_PSYS_SPC_STATUS_READY | IPU6_PSYS_SPC_STATUS_START; -+ -+ return val == IPU6_PSYS_SPC_STATUS_READY; -+} -+ -+static int ipu_psys_fw_init(struct ipu_psys *psys) -+{ -+ struct ipu6_fw_syscom_queue_config *queue_cfg; -+ struct device *dev = &psys->adev->auxdev.dev; -+ unsigned int size; -+ struct ipu6_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { -+ { -+ IPU_FW_PSYS_EVENT_QUEUE_SIZE, -+ sizeof(struct ipu_fw_psys_event) -+ } -+ }; -+ struct ipu_fw_psys_srv_init server_init = { -+ .ddr_pkg_dir_address = 0, -+ .host_ddr_pkg_dir = NULL, -+ .pkg_dir_size = 0, -+ .icache_prefetch_sp = psys->icache_prefetch_sp, -+ .icache_prefetch_isp = psys->icache_prefetch_isp, -+ }; -+ struct ipu6_fw_com_cfg fwcom = { -+ .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, -+ .output = fw_psys_event_queue_cfg, -+ .specific_addr = &server_init, -+ .specific_size = sizeof(server_init), -+ .cell_start = start_sp, -+ .cell_ready = query_sp, -+ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET, -+ }; -+ int i; -+ -+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || -+ ipu_ver == IPU6_VER_6EP_MTL) -+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ -+ queue_cfg = devm_kzalloc(dev, sizeof(*queue_cfg) * size, -+ GFP_KERNEL); -+ if (!queue_cfg) -+ return -ENOMEM; -+ -+ for (i = 0; i < size; i++) { -+ queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE; -+ queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd); -+ } -+ -+ fwcom.input = queue_cfg; -+ fwcom.num_input_queues = 
size; -+ fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; -+ -+ psys->fwcom = ipu6_fw_com_prepare(&fwcom, psys->adev, -+ psys->pdata->base); -+ if (!psys->fwcom) { -+ dev_err(dev, "psys fw com prepare failed\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static void run_fw_init_work(struct work_struct *work) -+{ -+ struct fw_init_task *task = (struct fw_init_task *)work; -+ struct ipu_psys *psys = task->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ int rval; -+ -+ rval = ipu_psys_fw_init(psys); -+ -+ if (rval) { -+ dev_err(dev, "FW init failed(%d)\n", rval); -+ ipu6_psys_remove(&psys->adev->auxdev); -+ } else { -+ dev_info(dev, "FW init done\n"); -+ } -+} -+ -+static int ipu6_psys_probe(struct auxiliary_device *auxdev, -+ const struct auxiliary_device_id *auxdev_id) -+{ -+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); -+ struct device *dev = &auxdev->dev; -+ struct ipu_psys_pg *kpg, *kpg0; -+ struct ipu_psys *psys; -+ unsigned int minor; -+ int i, rval = -E2BIG; -+ -+ if (!adev->isp->bus_ready_to_probe) -+ return -EPROBE_DEFER; -+ -+ if (!adev->pkg_dir) -+ return -EPROBE_DEFER; -+ -+ ipu_ver = adev->isp->hw_ver; -+ -+ rval = ipu6_mmu_hw_init(adev->mmu); -+ if (rval) -+ return rval; -+ -+ mutex_lock(&ipu_psys_mutex); -+ -+ minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); -+ if (minor == IPU_PSYS_NUM_DEVICES) { -+ dev_err(dev, "too many devices\n"); -+ goto out_unlock; -+ } -+ -+ psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); -+ if (!psys) { -+ rval = -ENOMEM; -+ goto out_unlock; -+ } -+ -+ adev->auxdrv_data = -+ (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; -+ adev->auxdrv = to_auxiliary_drv(dev->driver); -+ -+ psys->adev = adev; -+ psys->pdata = adev->pdata; -+ psys->icache_prefetch_sp = 0; -+ -+ psys->power_gating = 0; -+ -+ spin_lock_init(&psys->ready_lock); -+ spin_lock_init(&psys->pgs_lock); -+ psys->ready = 0; -+ psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; -+ -+ 
mutex_init(&psys->mutex); -+ INIT_LIST_HEAD(&psys->fhs); -+ INIT_LIST_HEAD(&psys->pgs); -+ INIT_LIST_HEAD(&psys->started_kcmds_list); -+ -+ init_waitqueue_head(&psys->sched_cmd_wq); -+ atomic_set(&psys->wakeup_count, 0); -+ /* -+ * Create a thread to schedule commands sent to IPU firmware. -+ * The thread reduces the coupling between the command scheduler -+ * and queueing commands from the user to driver. -+ */ -+ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, -+ "psys_sched_cmd"); -+ -+ if (IS_ERR(psys->sched_cmd_thread)) { -+ psys->sched_cmd_thread = NULL; -+ mutex_destroy(&psys->mutex); -+ goto out_unlock; -+ } -+ -+ dev_set_drvdata(dev, psys); -+ -+ rval = ipu_psys_res_pool_init(&psys->res_pool_running); -+ if (rval < 0) { -+ dev_err(&psys->dev, -+ "unable to alloc process group resources\n"); -+ goto out_mutex_destroy; -+ } -+ -+ ipu6_psys_hw_res_variant_init(); -+ -+ /* allocate and map memory for process groups */ -+ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { -+ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); -+ if (!kpg) -+ goto out_free_pgs; -+ kpg->pg = ipu6_dma_alloc(adev, IPU_PSYS_PG_MAX_SIZE, -+ &kpg->pg_dma_addr, -+ GFP_KERNEL, 0); -+ if (!kpg->pg) { -+ kfree(kpg); -+ goto out_free_pgs; -+ } -+ kpg->size = IPU_PSYS_PG_MAX_SIZE; -+ list_add(&kpg->list, &psys->pgs); -+ } -+ -+ psys->caps.pg_count = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); -+ -+ dev_info(dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); -+ if (async_fw_init) { -+ INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, -+ run_fw_init_work); -+ fw_init_task.psys = psys; -+ schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); -+ } else { -+ rval = ipu_psys_fw_init(psys); -+ } -+ -+ if (rval) { -+ dev_err(dev, "FW init failed(%d)\n", rval); -+ goto out_free_pgs; -+ } -+ -+ psys->dev.bus = &ipu_psys_bus; -+ psys->dev.parent = dev; -+ psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); -+ psys->dev.release = ipu_psys_dev_release; -+ 
dev_set_name(&psys->dev, "ipu-psys%d", minor); -+ device_initialize(&psys->dev); -+ -+ cdev_init(&psys->cdev, &ipu_psys_fops); -+ psys->cdev.owner = ipu_psys_fops.owner; -+ -+ rval = cdev_device_add(&psys->cdev, &psys->dev); -+ if (rval < 0) { -+ dev_err(dev, "psys device_register failed\n"); -+ goto out_release_fw_com; -+ } -+ -+ set_bit(minor, ipu_psys_devices); -+ -+ /* Add the hw stepping information to caps */ -+ strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, -+ sizeof(psys->caps.dev_model)); -+ -+ mutex_unlock(&ipu_psys_mutex); -+ -+ dev_info(dev, "psys probe minor: %d\n", minor); -+ -+ ipu6_mmu_hw_cleanup(adev->mmu); -+ -+ return 0; -+ -+out_release_fw_com: -+ ipu6_fw_com_release(psys->fwcom, 1); -+out_free_pgs: -+ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { -+ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); -+ kfree(kpg); -+ } -+ -+ ipu_psys_res_pool_cleanup(&psys->res_pool_running); -+out_mutex_destroy: -+ mutex_destroy(&psys->mutex); -+ if (psys->sched_cmd_thread) { -+ kthread_stop(psys->sched_cmd_thread); -+ psys->sched_cmd_thread = NULL; -+ } -+out_unlock: -+ /* Safe to call even if the init is not called */ -+ mutex_unlock(&ipu_psys_mutex); -+ ipu6_mmu_hw_cleanup(adev->mmu); -+ return rval; -+} -+ -+static void ipu6_psys_remove(struct auxiliary_device *auxdev) -+{ -+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); -+ struct device *dev = &auxdev->dev; -+ struct ipu_psys *psys = dev_get_drvdata(&auxdev->dev); -+ struct ipu_psys_pg *kpg, *kpg0; -+ -+ if (psys->sched_cmd_thread) { -+ kthread_stop(psys->sched_cmd_thread); -+ psys->sched_cmd_thread = NULL; -+ } -+ -+ mutex_lock(&ipu_psys_mutex); -+ -+ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { -+ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); -+ kfree(kpg); -+ } -+ -+ if (psys->fwcom && ipu6_fw_com_release(psys->fwcom, 1)) -+ dev_err(dev, "fw com release failed.\n"); -+ -+ kfree(psys->server_init); -+ kfree(psys->syscom_config); -+ -+ 
ipu_psys_res_pool_cleanup(&psys->res_pool_running); -+ -+ cdev_device_del(&psys->cdev, &psys->dev); -+ -+ clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); -+ -+ mutex_unlock(&ipu_psys_mutex); -+ -+ mutex_destroy(&psys->mutex); -+ -+ dev_info(dev, "removed\n"); -+} -+ -+static irqreturn_t psys_isr_threaded(struct ipu6_bus_device *adev) -+{ -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ struct device *dev = &psys->adev->auxdev.dev; -+ void __iomem *base = psys->pdata->base; -+ u32 status; -+ int r; -+ -+ mutex_lock(&psys->mutex); -+ r = pm_runtime_get_if_in_use(dev); -+ if (!r || WARN_ON_ONCE(r < 0)) { -+ mutex_unlock(&psys->mutex); -+ return IRQ_NONE; -+ } -+ -+ status = readl(base + IPU6_REG_PSYS_GPDEV_IRQ_STATUS); -+ writel(status, base + IPU6_REG_PSYS_GPDEV_IRQ_CLEAR); -+ -+ if (status & IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU6_PSYS_GPDEV_FWIRQ0)) { -+ writel(0, base + IPU6_REG_PSYS_GPDEV_FWIRQ(0)); -+ ipu_psys_handle_events(psys); -+ } -+ -+ pm_runtime_put(dev); -+ mutex_unlock(&psys->mutex); -+ -+ return status ? 
IRQ_HANDLED : IRQ_NONE; -+} -+ -+static const struct ipu6_auxdrv_data ipu6_psys_auxdrv_data = { -+ .isr_threaded = psys_isr_threaded, -+ .wake_isr_thread = true, -+}; -+ -+static const struct auxiliary_device_id ipu6_psys_id_table[] = { -+ { -+ .name = "intel_ipu6.psys", -+ .driver_data = (kernel_ulong_t)&ipu6_psys_auxdrv_data, -+ }, -+ { } -+}; -+MODULE_DEVICE_TABLE(auxiliary, ipu6_psys_id_table); -+ -+static struct auxiliary_driver ipu6_psys_aux_driver = { -+ .name = IPU6_PSYS_NAME, -+ .probe = ipu6_psys_probe, -+ .remove = ipu6_psys_remove, -+ .id_table = ipu6_psys_id_table, -+ .driver = { -+ .pm = &psys_pm_ops, -+ }, -+}; -+ -+static int __init ipu_psys_init(void) -+{ -+ int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, -+ IPU_PSYS_NUM_DEVICES, ipu_psys_bus.name); -+ if (rval) { -+ pr_err("can't alloc psys chrdev region (%d)\n", rval); -+ return rval; -+ } -+ -+ rval = bus_register(&ipu_psys_bus); -+ if (rval) { -+ pr_err("can't register psys bus (%d)\n", rval); -+ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); -+ return rval; -+ } -+ -+ auxiliary_driver_register(&ipu6_psys_aux_driver); -+ return 0; -+} -+module_init(ipu_psys_init); -+ -+static void __exit ipu_psys_exit(void) -+{ -+ auxiliary_driver_unregister(&ipu6_psys_aux_driver); -+ bus_unregister(&ipu_psys_bus); -+ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); -+} -+module_exit(ipu_psys_exit); -+ -+MODULE_AUTHOR("Bingbu Cao "); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Intel IPU6 processing system driver"); -+MODULE_IMPORT_NS(DMA_BUF); -+MODULE_IMPORT_NS(INTEL_IPU6); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h -new file mode 100644 -index 0000000..8af6551 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h -@@ -0,0 +1,324 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2013 - 2024 Intel Corporation */ -+ -+#ifndef IPU_PSYS_H -+#define IPU_PSYS_H -+ 
-+#include -+#include -+ -+#include -+#include "ipu6.h" -+#include "ipu6-bus.h" -+#include "ipu6-dma.h" -+#include "ipu-fw-psys.h" -+#include "ipu-platform-psys.h" -+ -+/* PSYS Info bits*/ -+#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2c + ((a) * 12)) -+#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5c + ((a) * 12)) -+ -+#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 -+#define IPU_PSYS_REG_SPC_START_PC 0x4 -+#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 -+#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 -+ -+#define IPU_PSYS_SPC_STATUS_START BIT(1) -+#define IPU_PSYS_SPC_STATUS_RUN BIT(3) -+#define IPU_PSYS_SPC_STATUS_READY BIT(5) -+#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) -+#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) -+ -+#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 -+ -+#define IPU_INFO_ENABLE_SNOOP BIT(0) -+#define IPU_INFO_DEC_FORCE_FLUSH BIT(1) -+#define IPU_INFO_DEC_PASS_THRU BIT(2) -+#define IPU_INFO_ZLW BIT(3) -+#define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1f) << 4) -+#define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9) -+#define IPU_INFO_IMR_BASE BIT(10) -+#define IPU_INFO_IMR_DESTINED BIT(11) -+ -+#define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF -+ -+#define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 -+#define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 -+#define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) -+#define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) -+#define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) -+ -+enum ipu_device_ab_group1_target_id { -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, -+ 
IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, -+}; -+ -+/* IRQ-related registers in PSYS */ -+#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 -+#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 -+#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 -+#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c -+#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 -+#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 -+/* There are 8 FW interrupts, n = 0..7 */ -+#define IPU_PSYS_GPDEV_FWIRQ0 5 -+#define IPU_PSYS_GPDEV_FWIRQ1 6 -+#define IPU_PSYS_GPDEV_FWIRQ2 7 -+#define IPU_PSYS_GPDEV_FWIRQ3 8 -+#define IPU_PSYS_GPDEV_FWIRQ4 9 -+#define IPU_PSYS_GPDEV_FWIRQ5 10 -+#define IPU_PSYS_GPDEV_FWIRQ6 11 -+#define IPU_PSYS_GPDEV_FWIRQ7 12 -+#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n)) -+#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) -+ -+/* -+ * psys subdomains power request regs -+ */ -+enum ipu_device_buttress_psys_domain_pos { -+ IPU_PSYS_SUBDOMAIN_ISA = 0, -+ IPU_PSYS_SUBDOMAIN_PSA = 1, -+ IPU_PSYS_SUBDOMAIN_BB = 2, -+ IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */ -+ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */ -+}; -+ -+#define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \ -+ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \ -+ BIT(IPU_PSYS_SUBDOMAIN_BB)) -+ -+#define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0 -+#define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4 -+ -+#define IPU_PSYS_CMD_TIMEOUT_MS 2000 -+#define IPU_PSYS_OPEN_TIMEOUT_US 50 -+#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) -+ -+#define IPU_PSYS_PG_POOL_SIZE 16 
-+#define IPU_PSYS_PG_MAX_SIZE 8192 -+#define IPU_MAX_PSYS_CMD_BUFFERS 32 -+#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS -+#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS -+#define IPU_PSYS_CLOSE_TIMEOUT_US 50 -+#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US) -+#define IPU_MAX_RESOURCES 128 -+ -+extern enum ipu6_version ipu_ver; -+ -+/* Opaque structure. Do not access fields. */ -+struct ipu_resource { -+ u32 id; -+ int elements; /* Number of elements available to allocation */ -+ unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ -+}; -+ -+enum ipu_resource_type { -+ IPU_RESOURCE_DEV_CHN = 0, -+ IPU_RESOURCE_EXT_MEM, -+ IPU_RESOURCE_DFM -+}; -+ -+/* Allocation of resource(s) */ -+/* Opaque structure. Do not access fields. */ -+struct ipu_resource_alloc { -+ enum ipu_resource_type type; -+ struct ipu_resource *resource; -+ int elements; -+ int pos; -+}; -+ -+/* -+ * This struct represents all of the currently allocated -+ * resources from IPU model. It is used also for allocating -+ * resources for the next set of PGs to be run on IPU -+ * (ie. those PGs which are not yet being run and which don't -+ * yet reserve real IPU resources). -+ * Use larger array to cover existing resource quantity -+ */ -+ -+/* resource size may need expand for new resource model */ -+struct ipu_psys_resource_pool { -+ u32 cells; /* Bitmask of cells allocated */ -+ struct ipu_resource dev_channels[16]; -+ struct ipu_resource ext_memory[32]; -+ struct ipu_resource dfms[16]; -+ DECLARE_BITMAP(cmd_queues, 32); -+ /* Protects cmd_queues bitmap */ -+ spinlock_t queues_lock; -+}; -+ -+/* -+ * This struct keeps book of the resources allocated for a specific PG. -+ * It is used for freeing up resources from struct ipu_psys_resources -+ * when the PG is released from IPU (or model of IPU). 
-+ */ -+struct ipu_psys_resource_alloc { -+ u32 cells; /* Bitmask of cells needed */ -+ struct ipu_resource_alloc -+ resource_alloc[IPU_MAX_RESOURCES]; -+ int resources; -+}; -+ -+struct task_struct; -+struct ipu_psys { -+ struct ipu_psys_capability caps; -+ struct cdev cdev; -+ struct device dev; -+ -+ struct mutex mutex; /* Psys various */ -+ int ready; /* psys fw status */ -+ bool icache_prefetch_sp; -+ bool icache_prefetch_isp; -+ spinlock_t ready_lock; /* protect psys firmware state */ -+ spinlock_t pgs_lock; /* Protect pgs list access */ -+ struct list_head fhs; -+ struct list_head pgs; -+ struct list_head started_kcmds_list; -+ struct ipu6_psys_pdata *pdata; -+ struct ipu6_bus_device *adev; -+ struct ia_css_syscom_context *dev_ctx; -+ struct ia_css_syscom_config *syscom_config; -+ struct ia_css_psys_server_init *server_init; -+ struct task_struct *sched_cmd_thread; -+ wait_queue_head_t sched_cmd_wq; -+ atomic_t wakeup_count; /* Psys schedule thread wakeup count */ -+ -+ /* Resources needed to be managed for process groups */ -+ struct ipu_psys_resource_pool res_pool_running; -+ -+ const struct firmware *fw; -+ struct sg_table fw_sgt; -+ u64 *pkg_dir; -+ dma_addr_t pkg_dir_dma_addr; -+ unsigned int pkg_dir_size; -+ unsigned long timeout; -+ -+ int active_kcmds, started_kcmds; -+ void *fwcom; -+ -+ int power_gating; -+}; -+ -+struct ipu_psys_fh { -+ struct ipu_psys *psys; -+ struct mutex mutex;/* Protects bufs_list & kcmds fields */ -+ struct list_head list; -+ /* Holds all buffers that this fh owns */ -+ struct list_head bufs_list; -+ /* Holds all descriptors (fd:kbuffer associations) */ -+ struct list_head descs_list; -+ struct list_head bufs_lru; -+ wait_queue_head_t wait; -+ struct ipu_psys_scheduler sched; -+ -+ u32 num_bufs; -+ u32 num_descs; -+ u32 num_bufs_lru; -+}; -+ -+struct ipu_psys_pg { -+ struct ipu_fw_psys_process_group *pg; -+ size_t size; -+ size_t pg_size; -+ dma_addr_t pg_dma_addr; -+ struct list_head list; -+ struct ipu_psys_resource_alloc 
resource_alloc; -+}; -+ -+struct ipu6_psys_constraint { -+ struct list_head list; -+ unsigned int min_freq; -+}; -+ -+struct ipu_psys_kcmd { -+ struct ipu_psys_fh *fh; -+ struct list_head list; -+ struct ipu_psys_buffer_set *kbuf_set; -+ enum ipu_psys_cmd_state state; -+ void *pg_manifest; -+ size_t pg_manifest_size; -+ struct ipu_psys_kbuffer **kbufs; -+ struct ipu_psys_buffer *buffers; -+ size_t nbuffers; -+ struct ipu_fw_psys_process_group *pg_user; -+ struct ipu_psys_pg *kpg; -+ u64 user_token; -+ u64 issue_id; -+ u32 priority; -+ u32 kernel_enable_bitmap[4]; -+ u32 terminal_enable_bitmap[4]; -+ u32 routing_enable_bitmap[4]; -+ u32 rbm[5]; -+ struct ipu6_psys_constraint constraint; -+ struct ipu_psys_event ev; -+ struct timer_list watchdog; -+}; -+ -+struct ipu_dma_buf_attach { -+ struct device *dev; -+ u64 len; -+ unsigned long userptr; -+ struct sg_table *sgt; -+ struct page **pages; -+ size_t npages; -+}; -+ -+struct ipu_psys_kbuffer { -+ u64 len; -+ unsigned long userptr; -+ void *kaddr; -+ struct list_head list; -+ dma_addr_t dma_addr; -+ struct sg_table *sgt; -+ struct dma_buf_attachment *db_attach; -+ struct dma_buf *dbuf; -+ u32 flags; -+ atomic_t map_count; /* The number of times this buffer is mapped */ -+ bool valid; /* True when buffer is usable */ -+}; -+ -+struct ipu_psys_desc { -+ struct ipu_psys_kbuffer *kbuf; -+ struct list_head list; -+ u32 fd; -+}; -+ -+#define inode_to_ipu_psys(inode) \ -+ container_of((inode)->i_cdev, struct ipu_psys, cdev) -+ -+void ipu_psys_setup_hw(struct ipu_psys *psys); -+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on); -+void ipu_psys_handle_events(struct ipu_psys *psys); -+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); -+void ipu_psys_run_next(struct ipu_psys *psys); -+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); -+struct ipu_psys_kbuffer * -+ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); -+struct ipu_psys_kbuffer * 
-+ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh); -+struct ipu_psys_kbuffer * -+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); -+int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool); -+void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool); -+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh); -+long ipu_ioctl_dqevent(struct ipu_psys_event *event, -+ struct ipu_psys_fh *fh, unsigned int f_flags); -+ -+#endif /* IPU_PSYS_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c -new file mode 100644 -index 0000000..4068e34 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c -@@ -0,0 +1,861 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ipu-fw-psys.h" -+#include "ipu-psys.h" -+ -+struct ipu6_psys_hw_res_variant hw_var; -+void ipu6_psys_hw_res_variant_init(void) -+{ -+ if (ipu_ver == IPU6_VER_6SE) { -+ hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID; -+ } else if (ipu_ver == IPU6_VER_6) { -+ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID; -+ } else if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) { -+ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID; -+ } else { -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ } -+ -+ hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn; -+ hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap; -+ hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem; -+ hw_var.get_pgm_by_proc = ipu6_fw_psys_get_pgm_by_process; -+} -+ -+static const struct ipu_fw_resource_definitions *get_res(void) -+{ -+ if (ipu_ver == IPU6_VER_6SE) -+ return 
ipu6se_res_defs; -+ -+ if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) -+ return ipu6ep_res_defs; -+ -+ return ipu6_res_defs; -+} -+ -+static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) -+{ -+ if (elements <= 0) { -+ res->bitmap = NULL; -+ return 0; -+ } -+ -+ res->bitmap = bitmap_zalloc(elements, GFP_KERNEL); -+ if (!res->bitmap) -+ return -ENOMEM; -+ res->elements = elements; -+ res->id = id; -+ return 0; -+} -+ -+static unsigned long -+ipu_resource_alloc_with_pos(struct ipu_resource *res, int n, -+ int pos, -+ struct ipu_resource_alloc *alloc, -+ enum ipu_resource_type type) -+{ -+ unsigned long p; -+ -+ if (n <= 0) { -+ alloc->elements = 0; -+ return 0; -+ } -+ -+ if (!res->bitmap || pos >= res->elements) -+ return (unsigned long)(-ENOSPC); -+ -+ p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0); -+ alloc->resource = NULL; -+ -+ if (p != pos) -+ return (unsigned long)(-ENOSPC); -+ bitmap_set(res->bitmap, p, n); -+ alloc->resource = res; -+ alloc->elements = n; -+ alloc->pos = p; -+ alloc->type = type; -+ -+ return pos; -+} -+ -+static unsigned long -+ipu_resource_alloc(struct ipu_resource *res, int n, -+ struct ipu_resource_alloc *alloc, -+ enum ipu_resource_type type) -+{ -+ unsigned long p; -+ -+ if (n <= 0) { -+ alloc->elements = 0; -+ return 0; -+ } -+ -+ if (!res->bitmap) -+ return (unsigned long)(-ENOSPC); -+ -+ p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); -+ alloc->resource = NULL; -+ -+ if (p >= res->elements) -+ return (unsigned long)(-ENOSPC); -+ bitmap_set(res->bitmap, p, n); -+ alloc->resource = res; -+ alloc->elements = n; -+ alloc->pos = p; -+ alloc->type = type; -+ -+ return p; -+} -+ -+static void ipu_resource_free(struct ipu_resource_alloc *alloc) -+{ -+ if (alloc->elements <= 0) -+ return; -+ -+ if (alloc->type == IPU_RESOURCE_DFM) -+ *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); -+ else -+ bitmap_clear(alloc->resource->bitmap, alloc->pos, -+ 
alloc->elements); -+ alloc->resource = NULL; -+} -+ -+static void ipu_resource_cleanup(struct ipu_resource *res) -+{ -+ bitmap_free(res->bitmap); -+ res->bitmap = NULL; -+} -+ -+/********** IPU PSYS-specific resource handling **********/ -+int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool) -+{ -+ int i, j, k, ret; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ res_defs = get_res(); -+ -+ spin_lock_init(&pool->queues_lock); -+ pool->cells = 0; -+ -+ for (i = 0; i < res_defs->num_dev_channels; i++) { -+ ret = ipu_resource_init(&pool->dev_channels[i], i, -+ res_defs->dev_channels[i]); -+ if (ret) -+ goto error; -+ } -+ -+ for (j = 0; j < res_defs->num_ext_mem_ids; j++) { -+ ret = ipu_resource_init(&pool->ext_memory[j], j, -+ res_defs->ext_mem_ids[j]); -+ if (ret) -+ goto memory_error; -+ } -+ -+ for (k = 0; k < res_defs->num_dfm_ids; k++) { -+ ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); -+ if (ret) -+ goto dfm_error; -+ } -+ -+ spin_lock(&pool->queues_lock); -+ if (ipu_ver == IPU6_VER_6SE) -+ bitmap_zero(pool->cmd_queues, -+ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID); -+ else -+ bitmap_zero(pool->cmd_queues, -+ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID); -+ spin_unlock(&pool->queues_lock); -+ -+ return 0; -+ -+dfm_error: -+ for (k--; k >= 0; k--) -+ ipu_resource_cleanup(&pool->dfms[k]); -+ -+memory_error: -+ for (j--; j >= 0; j--) -+ ipu_resource_cleanup(&pool->ext_memory[j]); -+ -+error: -+ for (i--; i >= 0; i--) -+ ipu_resource_cleanup(&pool->dev_channels[i]); -+ return ret; -+} -+ -+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, -+ struct ipu_psys_resource_pool *dest) -+{ -+ int i; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ res_defs = get_res(); -+ -+ dest->cells = src->cells; -+ for (i = 0; i < res_defs->num_dev_channels; i++) -+ *dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap; -+ -+ for (i = 0; i < res_defs->num_ext_mem_ids; i++) -+ *dest->ext_memory[i].bitmap = 
*src->ext_memory[i].bitmap; -+ -+ for (i = 0; i < res_defs->num_dfm_ids; i++) -+ *dest->dfms[i].bitmap = *src->dfms[i].bitmap; -+} -+ -+void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool) -+{ -+ u32 i; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ res_defs = get_res(); -+ for (i = 0; i < res_defs->num_dev_channels; i++) -+ ipu_resource_cleanup(&pool->dev_channels[i]); -+ -+ for (i = 0; i < res_defs->num_ext_mem_ids; i++) -+ ipu_resource_cleanup(&pool->ext_memory[i]); -+ -+ for (i = 0; i < res_defs->num_dfm_ids; i++) -+ ipu_resource_cleanup(&pool->dfms[i]); -+} -+ -+static int __alloc_one_resrc(const struct device *dev, -+ struct ipu_fw_psys_process *process, -+ struct ipu_resource *resource, -+ struct ipu_fw_generic_program_manifest *pm, -+ u32 resource_id, -+ struct ipu_psys_resource_alloc *alloc) -+{ -+ const u16 resource_req = pm->dev_chn_size[resource_id]; -+ const u16 resource_offset_req = pm->dev_chn_offset[resource_id]; -+ unsigned long retl; -+ -+ if (!resource_req) -+ return -ENXIO; -+ -+ if (alloc->resources >= IPU_MAX_RESOURCES) { -+ dev_err(dev, "out of resource handles\n"); -+ return -ENOSPC; -+ } -+ if (resource_offset_req != (u16)(-1)) -+ retl = ipu_resource_alloc_with_pos -+ (resource, -+ resource_req, -+ resource_offset_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_DEV_CHN); -+ else -+ retl = ipu_resource_alloc -+ (resource, resource_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_DEV_CHN); -+ if (IS_ERR_VALUE(retl)) { -+ dev_dbg(dev, "out of device channel resources\n"); -+ return (int)retl; -+ } -+ alloc->resources++; -+ -+ return 0; -+} -+ -+static int ipu_psys_allocate_one_dfm(const struct device *dev, -+ struct ipu_fw_psys_process *process, -+ struct ipu_resource *resource, -+ struct ipu_fw_generic_program_manifest *pm, -+ u32 resource_id, -+ struct ipu_psys_resource_alloc *alloc) -+{ -+ u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id]; -+ u32 active_dfm_bitmap_req = 
pm->dfm_active_port_bitmap[resource_id]; -+ const u8 is_relocatable = pm->is_dfm_relocatable[resource_id]; -+ struct ipu_resource_alloc *alloc_resource; -+ unsigned long p = 0; -+ -+ if (!dfm_bitmap_req) -+ return -ENXIO; -+ -+ if (alloc->resources >= IPU_MAX_RESOURCES) { -+ dev_err(dev, "out of resource handles\n"); -+ return -ENOSPC; -+ } -+ -+ if (!resource->bitmap) -+ return -ENOSPC; -+ -+ if (!is_relocatable) { -+ if (*resource->bitmap & dfm_bitmap_req) { -+ dev_warn(dev, -+ "out of dfm resources, req 0x%x, get 0x%lx\n", -+ dfm_bitmap_req, *resource->bitmap); -+ return -ENOSPC; -+ } -+ *resource->bitmap |= dfm_bitmap_req; -+ } else { -+ unsigned int n = hweight32(dfm_bitmap_req); -+ -+ p = bitmap_find_next_zero_area(resource->bitmap, -+ resource->elements, 0, n, 0); -+ -+ if (p >= resource->elements) -+ return -ENOSPC; -+ -+ bitmap_set(resource->bitmap, p, n); -+ dfm_bitmap_req = dfm_bitmap_req << p; -+ active_dfm_bitmap_req = active_dfm_bitmap_req << p; -+ } -+ -+ alloc_resource = &alloc->resource_alloc[alloc->resources]; -+ alloc_resource->resource = resource; -+ /* Using elements to indicate the bitmap */ -+ alloc_resource->elements = dfm_bitmap_req; -+ alloc_resource->pos = p; -+ alloc_resource->type = IPU_RESOURCE_DFM; -+ -+ alloc->resources++; -+ -+ return 0; -+} -+ -+/* -+ * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM) -+ * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.) 
-+ */ -+static int __alloc_mem_resrc(const struct device *dev, -+ struct ipu_fw_psys_process *process, -+ struct ipu_resource *resource, -+ struct ipu_fw_generic_program_manifest *pm, -+ u32 ext_mem_type_id, u32 ext_mem_bank_id, -+ struct ipu_psys_resource_alloc *alloc) -+{ -+ const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id]; -+ const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id]; -+ -+ unsigned long retl; -+ -+ if (!memory_resource_req) -+ return -ENXIO; -+ -+ if (alloc->resources >= IPU_MAX_RESOURCES) { -+ dev_err(dev, "out of resource handles\n"); -+ return -ENOSPC; -+ } -+ if (memory_offset_req != (u16)(-1)) -+ retl = ipu_resource_alloc_with_pos -+ (resource, -+ memory_resource_req, memory_offset_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_EXT_MEM); -+ else -+ retl = ipu_resource_alloc -+ (resource, memory_resource_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_EXT_MEM); -+ if (IS_ERR_VALUE(retl)) { -+ dev_dbg(dev, "out of memory resources\n"); -+ return (int)retl; -+ } -+ -+ alloc->resources++; -+ -+ return 0; -+} -+ -+int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool) -+{ -+ unsigned long p; -+ int size, start; -+ -+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; -+ -+ if (ipu_ver == IPU6_VER_6SE) { -+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; -+ } -+ -+ spin_lock(&pool->queues_lock); -+ /* find available cmd queue from ppg0_cmd_id */ -+ p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0); -+ -+ if (p >= size) { -+ spin_unlock(&pool->queues_lock); -+ return -ENOSPC; -+ } -+ -+ bitmap_set(pool->cmd_queues, p, 1); -+ spin_unlock(&pool->queues_lock); -+ -+ return p; -+} -+ -+void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, -+ u8 queue_id) -+{ -+ spin_lock(&pool->queues_lock); -+ bitmap_clear(pool->cmd_queues, queue_id, 1); -+ 
spin_unlock(&pool->queues_lock); -+} -+ -+int ipu_psys_try_allocate_resources(struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_pool *pool) -+{ -+ u32 id, idx; -+ u32 mem_type_id; -+ int ret, i; -+ u16 *process_offset_table; -+ u8 processes; -+ u32 cells = 0; -+ struct ipu_psys_resource_alloc *alloc; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ if (!pg) -+ return -EINVAL; -+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); -+ processes = pg->process_count; -+ -+ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); -+ if (!alloc) -+ return -ENOMEM; -+ -+ res_defs = get_res(); -+ for (i = 0; i < processes; i++) { -+ u32 cell; -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[i]); -+ struct ipu_fw_generic_program_manifest pm; -+ -+ memset(&pm, 0, sizeof(pm)); -+ -+ if (!process) { -+ dev_err(dev, "can not get process\n"); -+ ret = -ENOENT; -+ goto free_out; -+ } -+ -+ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); -+ if (ret < 0) { -+ dev_err(dev, "can not get manifest\n"); -+ goto free_out; -+ } -+ -+ if (pm.cell_id == res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type) { -+ cell = res_defs->num_cells; -+ } else if ((pm.cell_id != res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type)) { -+ cell = pm.cell_id; -+ } else { -+ /* Find a free cell of desired type */ -+ u32 type = pm.cell_type_id; -+ -+ for (cell = 0; cell < res_defs->num_cells; cell++) -+ if (res_defs->cells[cell] == type && -+ ((pool->cells | cells) & (1 << cell)) == 0) -+ break; -+ if (cell >= res_defs->num_cells) { -+ dev_dbg(dev, "no free cells of right type\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ } -+ if (cell < res_defs->num_cells) -+ cells |= 1 << cell; -+ if (pool->cells & cells) { -+ dev_dbg(dev, "out of cell resources\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ -+ if 
(pm.dev_chn_size) { -+ for (id = 0; id < res_defs->num_dev_channels; id++) { -+ ret = __alloc_one_resrc(dev, process, -+ &pool->dev_channels[id], -+ &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.dfm_port_bitmap) { -+ for (id = 0; id < res_defs->num_dfm_ids; id++) { -+ ret = ipu_psys_allocate_one_dfm -+ (dev, process, -+ &pool->dfms[id], &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.ext_mem_size) { -+ for (mem_type_id = 0; -+ mem_type_id < res_defs->num_ext_mem_types; -+ mem_type_id++) { -+ u32 bank = res_defs->num_ext_mem_ids; -+ -+ if (cell != res_defs->num_cells) { -+ idx = res_defs->cell_mem_row * cell + -+ mem_type_id; -+ bank = res_defs->cell_mem[idx]; -+ } -+ -+ if (bank == res_defs->num_ext_mem_ids) -+ continue; -+ -+ ret = __alloc_mem_resrc(dev, process, -+ &pool->ext_memory[bank], -+ &pm, mem_type_id, bank, -+ alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ } -+ } -+ } -+ -+ pool->cells |= cells; -+ -+ kfree(alloc); -+ return 0; -+ -+free_out: -+ dev_dbg(dev, "failed to try_allocate resource\n"); -+ kfree(alloc); -+ return ret; -+} -+ -+/* -+ * Allocate resources for pg from `pool'. Mark the allocated -+ * resources into `alloc'. Returns 0 on success, -ENOSPC -+ * if there are no enough resources, in which cases resources -+ * are not allocated at all, or some other error on other conditions. 
-+ */ -+int ipu_psys_allocate_resources(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *pool) -+{ -+ u32 id; -+ u32 mem_type_id; -+ int ret, i; -+ u16 *process_offset_table; -+ u8 processes; -+ u32 cells = 0; -+ int p, idx; -+ u32 bmp, a_bmp; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ if (!pg) -+ return -EINVAL; -+ -+ res_defs = get_res(); -+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); -+ processes = pg->process_count; -+ -+ for (i = 0; i < processes; i++) { -+ u32 cell; -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[i]); -+ struct ipu_fw_generic_program_manifest pm; -+ -+ memset(&pm, 0, sizeof(pm)); -+ if (!process) { -+ dev_err(dev, "can not get process\n"); -+ ret = -ENOENT; -+ goto free_out; -+ } -+ -+ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); -+ if (ret < 0) { -+ dev_err(dev, "can not get manifest\n"); -+ goto free_out; -+ } -+ -+ if (pm.cell_id == res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type) { -+ cell = res_defs->num_cells; -+ } else if ((pm.cell_id != res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type)) { -+ cell = pm.cell_id; -+ } else { -+ /* Find a free cell of desired type */ -+ u32 type = pm.cell_type_id; -+ -+ for (cell = 0; cell < res_defs->num_cells; cell++) -+ if (res_defs->cells[cell] == type && -+ ((pool->cells | cells) & (1 << cell)) == 0) -+ break; -+ if (cell >= res_defs->num_cells) { -+ dev_dbg(dev, "no free cells of right type\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); -+ if (ret) -+ goto free_out; -+ } -+ if (cell < res_defs->num_cells) -+ cells |= 1 << cell; -+ if (pool->cells & cells) { -+ dev_dbg(dev, "out of cell resources\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ -+ if 
(pm.dev_chn_size) { -+ for (id = 0; id < res_defs->num_dev_channels; id++) { -+ ret = __alloc_one_resrc(dev, process, -+ &pool->dev_channels[id], -+ &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ -+ idx = alloc->resources - 1; -+ p = alloc->resource_alloc[idx].pos; -+ ret = ipu_fw_psys_set_proc_dev_chn(process, id, -+ p); -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.dfm_port_bitmap) { -+ for (id = 0; id < res_defs->num_dfm_ids; id++) { -+ ret = ipu_psys_allocate_one_dfm(dev, process, -+ &pool->dfms[id], -+ &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ -+ idx = alloc->resources - 1; -+ p = alloc->resource_alloc[idx].pos; -+ bmp = pm.dfm_port_bitmap[id]; -+ bmp = bmp << p; -+ a_bmp = pm.dfm_active_port_bitmap[id]; -+ a_bmp = a_bmp << p; -+ ret = ipu_fw_psys_set_proc_dfm_bitmap(process, -+ id, bmp, -+ a_bmp); -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.ext_mem_size) { -+ for (mem_type_id = 0; -+ mem_type_id < res_defs->num_ext_mem_types; -+ mem_type_id++) { -+ u32 bank = res_defs->num_ext_mem_ids; -+ -+ if (cell != res_defs->num_cells) { -+ idx = res_defs->cell_mem_row * cell + -+ mem_type_id; -+ bank = res_defs->cell_mem[idx]; -+ } -+ if (bank == res_defs->num_ext_mem_ids) -+ continue; -+ -+ ret = __alloc_mem_resrc(dev, process, -+ &pool->ext_memory[bank], -+ &pm, mem_type_id, -+ bank, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ -+ /* no return value check here because fw api -+ * will do some checks, and would return -+ * non-zero except mem_type_id == 0. -+ * This maybe caused by that above flow of -+ * allocating mem_bank_id is improper. 
-+ */ -+ idx = alloc->resources - 1; -+ p = alloc->resource_alloc[idx].pos; -+ ipu_fw_psys_set_process_ext_mem(process, -+ mem_type_id, -+ bank, p); -+ } -+ } -+ } -+ alloc->cells |= cells; -+ pool->cells |= cells; -+ return 0; -+ -+free_out: -+ dev_err(dev, "failed to allocate resources, ret %d\n", ret); -+ ipu_psys_reset_process_cell(dev, pg, pg_manifest, i + 1); -+ ipu_psys_free_resources(alloc, pool); -+ return ret; -+} -+ -+int ipu_psys_move_resources(const struct device *dev, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool -+ *source_pool, struct ipu_psys_resource_pool -+ *target_pool) -+{ -+ int i; -+ -+ if (target_pool->cells & alloc->cells) { -+ dev_dbg(dev, "out of cell resources\n"); -+ return -ENOSPC; -+ } -+ -+ for (i = 0; i < alloc->resources; i++) { -+ unsigned long bitmap = 0; -+ unsigned int id = alloc->resource_alloc[i].resource->id; -+ unsigned long fbit, end; -+ -+ switch (alloc->resource_alloc[i].type) { -+ case IPU_RESOURCE_DEV_CHN: -+ bitmap_set(&bitmap, alloc->resource_alloc[i].pos, -+ alloc->resource_alloc[i].elements); -+ if (*target_pool->dev_channels[id].bitmap & bitmap) -+ return -ENOSPC; -+ break; -+ case IPU_RESOURCE_EXT_MEM: -+ end = alloc->resource_alloc[i].elements + -+ alloc->resource_alloc[i].pos; -+ -+ fbit = find_next_bit(target_pool->ext_memory[id].bitmap, -+ end, alloc->resource_alloc[i].pos); -+ /* if find_next_bit returns "end" it didn't find 1bit */ -+ if (end != fbit) -+ return -ENOSPC; -+ break; -+ case IPU_RESOURCE_DFM: -+ bitmap = alloc->resource_alloc[i].elements; -+ if (*target_pool->dfms[id].bitmap & bitmap) -+ return -ENOSPC; -+ break; -+ default: -+ dev_err(dev, "Illegal resource type\n"); -+ return -EINVAL; -+ } -+ } -+ -+ for (i = 0; i < alloc->resources; i++) { -+ u32 id = alloc->resource_alloc[i].resource->id; -+ -+ switch (alloc->resource_alloc[i].type) { -+ case IPU_RESOURCE_DEV_CHN: -+ bitmap_set(target_pool->dev_channels[id].bitmap, -+ alloc->resource_alloc[i].pos, -+ 
alloc->resource_alloc[i].elements); -+ ipu_resource_free(&alloc->resource_alloc[i]); -+ alloc->resource_alloc[i].resource = -+ &target_pool->dev_channels[id]; -+ break; -+ case IPU_RESOURCE_EXT_MEM: -+ bitmap_set(target_pool->ext_memory[id].bitmap, -+ alloc->resource_alloc[i].pos, -+ alloc->resource_alloc[i].elements); -+ ipu_resource_free(&alloc->resource_alloc[i]); -+ alloc->resource_alloc[i].resource = -+ &target_pool->ext_memory[id]; -+ break; -+ case IPU_RESOURCE_DFM: -+ *target_pool->dfms[id].bitmap |= -+ alloc->resource_alloc[i].elements; -+ *alloc->resource_alloc[i].resource->bitmap &= -+ ~(alloc->resource_alloc[i].elements); -+ alloc->resource_alloc[i].resource = -+ &target_pool->dfms[id]; -+ break; -+ default: -+ /* -+ * Just keep compiler happy. This case failed already -+ * in above loop. -+ */ -+ break; -+ } -+ } -+ -+ target_pool->cells |= alloc->cells; -+ source_pool->cells &= ~alloc->cells; -+ -+ return 0; -+} -+ -+void ipu_psys_reset_process_cell(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ int process_count) -+{ -+ int i; -+ u16 *process_offset_table; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ if (!pg) -+ return; -+ -+ res_defs = get_res(); -+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); -+ for (i = 0; i < process_count; i++) { -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[i]); -+ struct ipu_fw_generic_program_manifest pm; -+ int ret; -+ -+ if (!process) -+ break; -+ -+ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); -+ if (ret < 0) { -+ dev_err(dev, "can not get manifest\n"); -+ break; -+ } -+ if ((pm.cell_id != res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type)) -+ continue; -+ /* no return value check here because if finding free cell -+ * failed, process cell would not set then calling clear_cell -+ * will return non-zero. 
-+ */ -+ ipu_fw_psys_clear_process_cell(process); -+ } -+} -+ -+/* Free resources marked in `alloc' from `resources' */ -+void ipu_psys_free_resources(struct ipu_psys_resource_alloc -+ *alloc, struct ipu_psys_resource_pool *pool) -+{ -+ unsigned int i; -+ -+ pool->cells &= ~alloc->cells; -+ alloc->cells = 0; -+ for (i = 0; i < alloc->resources; i++) -+ ipu_resource_free(&alloc->resource_alloc[i]); -+ alloc->resources = 0; -+} -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c -new file mode 100644 -index 0000000..fe65a0a ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c -@@ -0,0 +1,607 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6-platform-resources.h" -+ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+static const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = { -+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, -+ IPU6_FW_PSYS_VP_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* 
ICA_MEDIUM */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ -+ IPU6_FW_PSYS_GDC_TYPE_ID, -+ IPU6_FW_PSYS_TNR_TYPE_ID, -+}; -+ -+static const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, -+}; -+ -+static const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { -+ IPU6_FW_PSYS_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, -+ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM2_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM3_MAX_SIZE, -+ IPU6_FW_PSYS_PMEM0_MAX_SIZE -+}; -+ -+static const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { -+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, -+}; -+ -+static const u8 -+ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { -+ { -+ /* IPU6_FW_PSYS_SP0_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM0_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_SP1_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_VP0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_DMEM3_ID, -+ 
IPU6_FW_PSYS_VMEM0_ID, -+ IPU6_FW_PSYS_BAMEM0_ID, -+ IPU6_FW_PSYS_PMEM0_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC1_ID BNLM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC2_ID DM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC3_ID ACM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC9_ID GLTM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC10_ID XNR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_ICA_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_LSC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_DPC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, 
-+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AWB_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AE_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_DOL_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_PAF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ } -+}; -+ -+static const struct ipu_fw_resource_definitions ipu6_defs = { -+ .cells = ipu6_fw_psys_cell_types, -+ .num_cells = IPU6_FW_PSYS_N_CELL_ID, -+ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, -+ -+ .dev_channels = ipu6_fw_num_dev_channels, -+ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, -+ -+ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, -+ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, -+ .ext_mem_ids = ipu6_fw_psys_mem_size, -+ -+ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, -+ -+ .dfms = ipu6_fw_psys_dfms, -+ -+ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, -+ .cell_mem = &ipu6_fw_psys_c_mem[0][0], -+}; -+ -+const struct ipu_fw_resource_definitions 
*ipu6_res_defs = &ipu6_defs; -+ -+/********** Generic resource handling **********/ -+ -+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value) -+{ -+ struct ipu6_fw_psys_process_ext *pm_ext; -+ u8 ps_ext_offset; -+ -+ ps_ext_offset = ptr->process_extension_offset; -+ if (!ps_ext_offset) -+ return -EINVAL; -+ -+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); -+ -+ pm_ext->dev_chn_offset[offset] = value; -+ -+ return 0; -+} -+ -+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap) -+{ -+ struct ipu6_fw_psys_process_ext *pm_ext; -+ u8 ps_ext_offset; -+ -+ ps_ext_offset = ptr->process_extension_offset; -+ if (!ps_ext_offset) -+ return -EINVAL; -+ -+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); -+ -+ pm_ext->dfm_port_bitmap[id] = bitmap; -+ pm_ext->dfm_active_port_bitmap[id] = active_bitmap; -+ -+ return 0; -+} -+ -+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset) -+{ -+ struct ipu6_fw_psys_process_ext *pm_ext; -+ u8 ps_ext_offset; -+ -+ ps_ext_offset = ptr->process_extension_offset; -+ if (!ps_ext_offset) -+ return -EINVAL; -+ -+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); -+ -+ pm_ext->ext_mem_offset[type_id] = offset; -+ pm_ext->ext_mem_id[type_id] = mem_id; -+ -+ return 0; -+} -+ -+static struct ipu_fw_psys_program_manifest * -+get_program_manifest(const struct ipu_fw_psys_pgm *manifest, -+ const unsigned int program_index) -+{ -+ struct ipu_fw_psys_program_manifest *prg_manifest_base; -+ u8 *program_manifest = NULL; -+ u8 program_count; -+ unsigned int i; -+ -+ program_count = manifest->program_count; -+ -+ prg_manifest_base = (struct ipu_fw_psys_program_manifest *) -+ ((char *)manifest + manifest->program_manifest_offset); -+ if (program_index < program_count) { -+ program_manifest = (u8 *)prg_manifest_base; -+ for (i = 0; 
i < program_index; i++) -+ program_manifest += -+ ((struct ipu_fw_psys_program_manifest *) -+ program_manifest)->size; -+ } -+ -+ return (struct ipu_fw_psys_program_manifest *)program_manifest; -+} -+ -+int -+ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process) -+{ -+ u32 program_id = process->program_idx; -+ struct ipu_fw_psys_program_manifest *pm; -+ struct ipu6_fw_psys_program_manifest_ext *pm_ext; -+ -+ pm = get_program_manifest(pg_manifest, program_id); -+ -+ if (!pm) -+ return -ENOENT; -+ -+ if (pm->program_extension_offset) { -+ pm_ext = (struct ipu6_fw_psys_program_manifest_ext *) -+ ((u8 *)pm + pm->program_extension_offset); -+ -+ gen_pm->dev_chn_size = pm_ext->dev_chn_size; -+ gen_pm->dev_chn_offset = pm_ext->dev_chn_offset; -+ gen_pm->ext_mem_size = pm_ext->ext_mem_size; -+ gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset; -+ gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable; -+ gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap; -+ gen_pm->dfm_active_port_bitmap = -+ pm_ext->dfm_active_port_bitmap; -+ } -+ -+ memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells)); -+ gen_pm->cell_id = pm->cells[0]; -+ gen_pm->cell_type_id = pm->cell_type_id; -+ -+ return 0; -+} -+ -+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \ -+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) -+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note) -+{ -+ struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; -+ struct device *dev = &psys->adev->auxdev.dev; -+ u32 pgid = pg->ID; -+ u8 processes = pg->process_count; -+ u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); -+ unsigned int p, chn, mem, mem_id; -+ unsigned int mem_type, max_mem_id, dev_chn; -+ -+ if (ipu_ver == IPU6_VER_6SE) { -+ mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID; -+ max_mem_id = 
IPU6SE_FW_PSYS_N_MEM_ID; -+ dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID; -+ } else if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || -+ ipu_ver == IPU6_VER_6EP_MTL) { -+ mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID; -+ max_mem_id = IPU6_FW_PSYS_N_MEM_ID; -+ dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID; -+ } else { -+ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); -+ return; -+ } -+ -+ dev_dbg(dev, "%s %s pgid %i has %i processes:\n", -+ __func__, note, pgid, processes); -+ -+ for (p = 0; p < processes; p++) { -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[p]); -+ struct ipu6_fw_psys_process_ext *pm_ext = -+ (struct ipu6_fw_psys_process_ext *)((u8 *)process -+ + process->process_extension_offset); -+ dev_dbg(dev, "\t process %i size=%u", p, process->size); -+ if (!process->process_extension_offset) -+ continue; -+ -+ for (mem = 0; mem < mem_type; mem++) { -+ mem_id = pm_ext->ext_mem_id[mem]; -+ if (mem_id != max_mem_id) -+ dev_dbg(dev, "\t mem type %u id %d offset=0x%x", -+ mem, mem_id, -+ pm_ext->ext_mem_offset[mem]); -+ } -+ for (chn = 0; chn < dev_chn; chn++) { -+ if (pm_ext->dev_chn_offset[chn] != (u16)(-1)) -+ dev_dbg(dev, "\t dev_chn[%u]=0x%x\n", -+ chn, pm_ext->dev_chn_offset[chn]); -+ } -+ } -+} -+#else -+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note) -+{ -+ if (ipu_ver == IPU6_VER_6SE || ipu_ver == IPU6_VER_6 || -+ ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) -+ return; -+ -+ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); -+} -+#endif -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c -new file mode 100644 -index 0000000..5d95843 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c -@@ -0,0 +1,621 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ 
-+#include "ipu-psys.h" -+#include "ipu6-ppg.h" -+ -+extern bool enable_power_gating; -+ -+struct sched_list { -+ struct list_head list; -+ /* to protect the list */ -+ struct mutex lock; -+}; -+ -+static struct sched_list start_list = { -+ .list = LIST_HEAD_INIT(start_list.list), -+ .lock = __MUTEX_INITIALIZER(start_list.lock), -+}; -+ -+static struct sched_list stop_list = { -+ .list = LIST_HEAD_INIT(stop_list.list), -+ .lock = __MUTEX_INITIALIZER(stop_list.lock), -+}; -+ -+static struct sched_list *get_sc_list(enum SCHED_LIST type) -+{ -+ /* for debug purposes */ -+ WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST); -+ -+ if (type == SCHED_START_LIST) -+ return &start_list; -+ return &stop_list; -+} -+ -+static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head) -+{ -+ struct ipu_psys_ppg *tmp; -+ -+ list_for_each_entry(tmp, head, sched_list) { -+ if (kppg == tmp) -+ return true; -+ } -+ -+ return false; -+} -+ -+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type) -+{ -+ struct sched_list *sc_list = get_sc_list(type); -+ struct ipu_psys_ppg *tmp0, *tmp1; -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ -+ mutex_lock(&sc_list->lock); -+ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { -+ if (tmp0 == kppg) { -+ dev_dbg(dev, "remove from %s list, kppg(%d 0x%p) state %d\n", -+ type == SCHED_START_LIST ? 
"start" : "stop", -+ kppg->kpg->pg->ID, kppg, kppg->state); -+ list_del_init(&kppg->sched_list); -+ } -+ } -+ mutex_unlock(&sc_list->lock); -+} -+ -+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type) -+{ -+ int cur_pri = kppg->pri_base + kppg->pri_dynamic; -+ struct sched_list *sc_list = get_sc_list(type); -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *tmp0, *tmp1; -+ -+ dev_dbg(dev, -+ "add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n", -+ type == SCHED_START_LIST ? "start" : "stop", -+ kppg->kpg->pg->ID, kppg, kppg->state, -+ kppg->pri_base, kppg->pri_dynamic, kppg->fh); -+ -+ mutex_lock(&sc_list->lock); -+ if (list_empty(&sc_list->list)) { -+ list_add(&kppg->sched_list, &sc_list->list); -+ goto out; -+ } -+ -+ if (is_kppg_in_list(kppg, &sc_list->list)) { -+ dev_dbg(dev, "kppg already in list\n"); -+ goto out; -+ } -+ -+ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { -+ int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic; -+ -+ dev_dbg(dev, -+ "found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n", -+ tmp0->kpg->pg->ID, tmp0, tmp0->state, -+ tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh); -+ -+ if (type == SCHED_START_LIST && tmp_pri > cur_pri) { -+ list_add(&kppg->sched_list, tmp0->sched_list.prev); -+ goto out; -+ } else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) { -+ list_add(&kppg->sched_list, tmp0->sched_list.prev); -+ goto out; -+ } -+ } -+ -+ list_add_tail(&kppg->sched_list, &sc_list->list); -+out: -+ mutex_unlock(&sc_list->lock); -+} -+ -+static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_resource_pool *try_res_pool; -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ int ret = 0; -+ int state; -+ -+ try_res_pool = kzalloc(sizeof(*try_res_pool), GFP_KERNEL); -+ if (IS_ERR_OR_NULL(try_res_pool)) -+ return -ENOMEM; -+ -+ 
mutex_lock(&kppg->mutex); -+ state = kppg->state; -+ mutex_unlock(&kppg->mutex); -+ if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING || -+ state == PPG_STATE_RESUMED) -+ goto exit; -+ -+ ret = ipu_psys_res_pool_init(try_res_pool); -+ if (ret < 0) { -+ dev_err(dev, "unable to alloc pg resources\n"); -+ WARN_ON(1); -+ goto exit; -+ } -+ -+ ipu_psys_resource_copy(&psys->res_pool_running, try_res_pool); -+ ret = ipu_psys_try_allocate_resources(dev, kppg->kpg->pg, -+ kppg->manifest, -+ try_res_pool); -+ -+ ipu_psys_res_pool_cleanup(try_res_pool); -+exit: -+ kfree(try_res_pool); -+ -+ return ret; -+} -+ -+static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping) -+{ -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_START || -+ kppg->state == PPG_STATE_RESUME) { -+ ipu_psys_scheduler_add_kppg(kppg, -+ SCHED_START_LIST); -+ } else if (kppg->state == PPG_STATE_RUNNING) { -+ ipu_psys_scheduler_add_kppg(kppg, -+ SCHED_STOP_LIST); -+ } else if (kppg->state == PPG_STATE_SUSPENDING || -+ kppg->state == PPG_STATE_STOPPING) { -+ /* there are some suspending/stopping ppgs */ -+ *stopping = true; -+ } else if (kppg->state == PPG_STATE_RESUMING || -+ kppg->state == PPG_STATE_STARTING) { -+ /* how about kppg are resuming/starting? 
*/ -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -+ -+static void ipu_psys_scheduler_update_start_ppg_priority(void) -+{ -+ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); -+ struct ipu_psys_ppg *kppg, *tmp; -+ -+ mutex_lock(&sc_list->lock); -+ if (!list_empty(&sc_list->list)) -+ list_for_each_entry_safe(kppg, tmp, &sc_list->list, sched_list) -+ kppg->pri_dynamic--; -+ mutex_unlock(&sc_list->lock); -+} -+ -+static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys) -+{ -+ struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST); -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg; -+ bool resched = false; -+ -+ mutex_lock(&sc_list->lock); -+ if (list_empty(&sc_list->list)) { -+ /* some ppgs are RESUMING/STARTING */ -+ dev_dbg(dev, "no candidated stop ppg\n"); -+ mutex_unlock(&sc_list->lock); -+ return false; -+ } -+ kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg, -+ sched_list); -+ mutex_unlock(&sc_list->lock); -+ -+ mutex_lock(&kppg->mutex); -+ if (!(kppg->state & PPG_STATE_STOP)) { -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, kppg->state, PPG_STATE_SUSPEND); -+ kppg->state = PPG_STATE_SUSPEND; -+ resched = true; -+ } -+ mutex_unlock(&kppg->mutex); -+ -+ return resched; -+} -+ -+/* -+ * search all kppgs and sort them into start_list and stop_list, always start -+ * first kppg(high priority) in start_list; -+ * if there is resource contention, it would switch kppgs in stop_list -+ * to suspend state one by one -+ */ -+static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys) -+{ -+ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg, *kppg0; -+ bool stopping_existed = false; -+ int ret; -+ -+ ipu_psys_scheduler_ppg_sort(psys, &stopping_existed); -+ -+ mutex_lock(&sc_list->lock); -+ if (list_empty(&sc_list->list)) { -+ dev_dbg(dev, "no ppg to start\n"); -+ 
mutex_unlock(&sc_list->lock); -+ return false; -+ } -+ -+ list_for_each_entry_safe(kppg, kppg0, -+ &sc_list->list, sched_list) { -+ mutex_unlock(&sc_list->lock); -+ -+ ret = ipu_psys_detect_resource_contention(kppg); -+ if (ret < 0) { -+ dev_dbg(dev, "ppg %d resource detect failed(%d)\n", -+ kppg->kpg->pg->ID, ret); -+ /* -+ * switch out other ppg in 2 cases: -+ * 1. resource contention -+ * 2. no suspending/stopping ppg -+ */ -+ if (ret == -ENOSPC) { -+ if (!stopping_existed && -+ ipu_psys_scheduler_switch_ppg(psys)) { -+ return true; -+ } -+ dev_dbg(dev, "ppg is suspending/stopping\n"); -+ } else { -+ dev_err(dev, "detect resource error %d\n", ret); -+ } -+ } else { -+ kppg->pri_dynamic = 0; -+ -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_START) -+ ipu_psys_ppg_start(kppg); -+ else -+ ipu_psys_ppg_resume(kppg); -+ mutex_unlock(&kppg->mutex); -+ -+ ipu_psys_scheduler_remove_kppg(kppg, -+ SCHED_START_LIST); -+ ipu_psys_scheduler_update_start_ppg_priority(); -+ } -+ mutex_lock(&sc_list->lock); -+ } -+ mutex_unlock(&sc_list->lock); -+ -+ return false; -+} -+ -+static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_fh *fh; -+ bool resched = false; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry(kppg, &sched->ppgs, list) { -+ if (ipu_psys_ppg_enqueue_bufsets(kppg)) -+ resched = true; -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return resched; -+} -+ -+/* -+ * This function will check all kppgs within fhs, and if kppg state -+ * is STOP or SUSPEND, l-scheduler will call ppg function to stop -+ * or suspend it and update stop list -+ */ -+ -+static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct 
ipu_psys_fh *fh; -+ bool stopping_exit = false; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state & PPG_STATE_STOP) { -+ ipu_psys_ppg_stop(kppg); -+ ipu_psys_scheduler_remove_kppg(kppg, -+ SCHED_STOP_LIST); -+ } else if (kppg->state == PPG_STATE_SUSPEND && -+ list_empty(&kppg->kcmds_processing_list)) { -+ ipu_psys_ppg_suspend(kppg); -+ ipu_psys_scheduler_remove_kppg(kppg, -+ SCHED_STOP_LIST); -+ } else if (kppg->state == PPG_STATE_SUSPENDING || -+ kppg->state == PPG_STATE_STOPPING) { -+ stopping_exit = true; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ return stopping_exit; -+} -+ -+static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys, -+ struct ipu_psys_ppg *kppg, -+ struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int old_ppg_state = kppg->state; -+ -+ /* -+ * Respond kcmd when ppg is in stable state: -+ * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED -+ */ -+ if (kppg->state == PPG_STATE_STARTED || -+ kppg->state == PPG_STATE_RESUMED || -+ kppg->state == PPG_STATE_RUNNING) { -+ if (kcmd->state == KCMD_STATE_PPG_START) -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ else if (kcmd->state == KCMD_STATE_PPG_STOP) -+ kppg->state = PPG_STATE_STOP; -+ } else if (kppg->state == PPG_STATE_SUSPENDED) { -+ if (kcmd->state == KCMD_STATE_PPG_START) -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ else if (kcmd->state == KCMD_STATE_PPG_STOP) -+ /* -+ * Record the previous state -+ * because here need resume at first -+ */ -+ kppg->state |= PPG_STATE_STOP; -+ else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) -+ kppg->state = PPG_STATE_RESUME; -+ } else if (kppg->state == PPG_STATE_STOPPED) { -+ if (kcmd->state == KCMD_STATE_PPG_START) { -+ kppg->state = 
PPG_STATE_START; -+ } else if (kcmd->state == KCMD_STATE_PPG_STOP) { -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ } else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) { -+ dev_err(dev, "ppg %p stopped!\n", kppg); -+ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); -+ } -+ } -+ -+ if (old_ppg_state != kppg->state) -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, old_ppg_state, kppg->state); -+} -+ -+static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys) -+{ -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (list_empty(&kppg->kcmds_new_list)) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ }; -+ -+ kcmd = list_first_entry(&kppg->kcmds_new_list, -+ struct ipu_psys_kcmd, list); -+ ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd); -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -+ -+static bool is_ready_to_enter_power_gating(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (!list_empty(&kppg->kcmds_new_list) || -+ !list_empty(&kppg->kcmds_processing_list)) { -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return false; -+ } -+ if (!(kppg->state == PPG_STATE_RUNNING || -+ kppg->state == PPG_STATE_STOPPED || -+ kppg->state == PPG_STATE_SUSPENDED)) { -+ mutex_unlock(&kppg->mutex); -+ 
mutex_unlock(&fh->mutex); -+ return false; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return true; -+} -+ -+static bool has_pending_kcmd(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (!list_empty(&kppg->kcmds_new_list) || -+ !list_empty(&kppg->kcmds_processing_list)) { -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return true; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return false; -+} -+ -+static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ -+ /* Assume power gating process can be aborted directly during START */ -+ if (psys->power_gating == PSYS_POWER_GATED) { -+ dev_dbg(dev, "powergating: exit ---\n"); -+ ipu_psys_exit_power_gating(psys); -+ } -+ psys->power_gating = PSYS_POWER_NORMAL; -+ return false; -+} -+ -+static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ if (!enable_power_gating) -+ return false; -+ -+ if (psys->power_gating == PSYS_POWER_NORMAL && -+ is_ready_to_enter_power_gating(psys)) { -+ /* Enter power gating */ -+ dev_dbg(dev, "powergating: enter +++\n"); -+ psys->power_gating = PSYS_POWER_GATING; -+ } -+ -+ if (psys->power_gating != PSYS_POWER_GATING) -+ return false; -+ -+ /* Suspend ppgs one by one */ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ 
mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_RUNNING) { -+ kppg->state = PPG_STATE_SUSPEND; -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return true; -+ } -+ -+ if (kppg->state != PPG_STATE_SUSPENDED && -+ kppg->state != PPG_STATE_STOPPED) { -+ /* Can't enter power gating */ -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ /* Need re-run l-scheduler to suspend ppg? */ -+ return (kppg->state & PPG_STATE_STOP || -+ kppg->state == PPG_STATE_SUSPEND); -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ psys->power_gating = PSYS_POWER_GATED; -+ ipu_psys_enter_power_gating(psys); -+ -+ return false; -+} -+ -+void ipu_psys_run_next(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ /* Wake up scheduler due to unfinished work */ -+ bool need_trigger = false; -+ /* Wait FW callback if there are stopping/suspending/running ppg */ -+ bool wait_fw_finish = false; -+ /* -+ * Code below will crash if fhs is empty. Normally this -+ * shouldn't happen. 
-+ */ -+ if (list_empty(&psys->fhs)) { -+ WARN_ON(1); -+ return; -+ } -+ -+ /* Abort power gating process */ -+ if (psys->power_gating != PSYS_POWER_NORMAL && -+ has_pending_kcmd(psys)) -+ need_trigger = ipu_psys_scheduler_exit_power_gating(psys); -+ -+ /* Handle kcmd and related ppg switch */ -+ if (psys->power_gating == PSYS_POWER_NORMAL) { -+ ipu_psys_scheduler_kcmd_set(psys); -+ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); -+ need_trigger |= ipu_psys_scheduler_ppg_start(psys); -+ need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys); -+ } -+ if (!(need_trigger || wait_fw_finish)) { -+ /* Nothing to do, enter power gating */ -+ need_trigger = ipu_psys_scheduler_enter_power_gating(psys); -+ if (psys->power_gating == PSYS_POWER_GATING) -+ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); -+ } -+ -+ if (need_trigger && !wait_fw_finish) { -+ dev_dbg(dev, "scheduler: wake up\n"); -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ } -+} -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h -new file mode 100644 -index 0000000..1b67956 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h -@@ -0,0 +1,194 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2018 - 2024 Intel Corporation */ -+ -+#ifndef IPU6_PLATFORM_RESOURCES_H -+#define IPU6_PLATFORM_RESOURCES_H -+ -+#include "ipu-platform-resources.h" -+ -+#define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 0 -+ -+enum { -+ IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, -+ IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID, -+ 
IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID, -+ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID, -+ IPU6_FW_PSYS_LB_VMEM_TYPE_ID, -+ IPU6_FW_PSYS_DMEM_TYPE_ID, -+ IPU6_FW_PSYS_VMEM_TYPE_ID, -+ IPU6_FW_PSYS_BAMEM_TYPE_ID, -+ IPU6_FW_PSYS_PMEM_TYPE_ID, -+ IPU6_FW_PSYS_N_MEM_TYPE_ID -+}; -+ -+enum ipu6_mem_id { -+ IPU6_FW_PSYS_VMEM0_ID = 0, /* ISP0 VMEM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, /* TRANSFER VMEM 0 */ -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, /* TRANSFER VMEM 1 */ -+ IPU6_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ -+ IPU6_FW_PSYS_BAMEM0_ID, /* ISP0 BAMEM */ -+ IPU6_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ -+ IPU6_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ -+ IPU6_FW_PSYS_DMEM2_ID, /* SPP1 Dmem */ -+ IPU6_FW_PSYS_DMEM3_ID, /* ISP0 Dmem */ -+ IPU6_FW_PSYS_PMEM0_ID, /* ISP0 PMEM */ -+ IPU6_FW_PSYS_N_MEM_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, -+ 
IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, -+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID, -+ IPU6_FW_PSYS_N_DEV_CHN_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0, -+ IPU6_FW_PSYS_SP_SERVER_TYPE_ID, -+ IPU6_FW_PSYS_VP_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_GDC_TYPE_ID, -+ IPU6_FW_PSYS_TNR_TYPE_ID, -+ IPU6_FW_PSYS_N_CELL_TYPE_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_SP0_ID = 0, -+ IPU6_FW_PSYS_VP0_ID, -+ IPU6_FW_PSYS_PSA_ACC_BNLM_ID, -+ IPU6_FW_PSYS_PSA_ACC_DM_ID, -+ IPU6_FW_PSYS_PSA_ACC_ACM_ID, -+ IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID, -+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, -+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, -+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, -+ IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID, -+ IPU6_FW_PSYS_PSA_ACC_GLTM_ID, -+ IPU6_FW_PSYS_PSA_ACC_XNR_ID, -+ IPU6_FW_PSYS_PSA_VCSC_ID, /* VCSC */ -+ IPU6_FW_PSYS_ISA_ICA_ID, -+ IPU6_FW_PSYS_ISA_LSC_ID, -+ IPU6_FW_PSYS_ISA_DPC_ID, -+ IPU6_FW_PSYS_ISA_SIS_A_ID, -+ IPU6_FW_PSYS_ISA_SIS_B_ID, -+ IPU6_FW_PSYS_ISA_B2B_ID, -+ IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID, -+ IPU6_FW_PSYS_ISA_R2I_DS_A_ID, -+ IPU6_FW_PSYS_ISA_R2I_DS_B_ID, -+ IPU6_FW_PSYS_ISA_AWB_ID, -+ IPU6_FW_PSYS_ISA_AE_ID, -+ IPU6_FW_PSYS_ISA_AF_ID, -+ IPU6_FW_PSYS_ISA_DOL_ID, -+ IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID, -+ IPU6_FW_PSYS_ISA_X2B_MD_ID, -+ IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, -+ IPU6_FW_PSYS_ISA_PAF_ID, -+ IPU6_FW_PSYS_BB_ACC_GDC0_ID, -+ IPU6_FW_PSYS_BB_ACC_TNR_ID, -+ IPU6_FW_PSYS_N_CELL_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0, -+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID, -+}; -+ -+/* Excluding PMEM */ -+#define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6_FW_PSYS_N_MEM_TYPE_ID - 1) -+#define IPU6_FW_PSYS_N_DEV_DFM_ID \ -+ (IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1) -+ -+#define IPU6_FW_PSYS_VMEM0_MAX_SIZE 0x0800 -+/* 
Transfer VMEM0 words, ref HAS Transfer*/ -+#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 -+/* Transfer VMEM1 words, ref HAS Transfer*/ -+#define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE 0x0800 -+#define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ -+#define IPU6_FW_PSYS_BAMEM0_MAX_SIZE 0x0800 -+#define IPU6_FW_PSYS_DMEM0_MAX_SIZE 0x4000 -+#define IPU6_FW_PSYS_DMEM1_MAX_SIZE 0x1000 -+#define IPU6_FW_PSYS_DMEM2_MAX_SIZE 0x1000 -+#define IPU6_FW_PSYS_DMEM3_MAX_SIZE 0x1000 -+#define IPU6_FW_PSYS_PMEM0_MAX_SIZE 0x0500 -+ -+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 -+#define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 0 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 43 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 8 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 -+ -+#define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 -+ -+struct ipu6_fw_psys_program_manifest_ext { -+ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+ u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID]; -+ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; -+ u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; -+ u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; -+ u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; -+ u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; -+ u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT]; -+}; -+ -+struct ipu6_fw_psys_process_ext { -+ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; -+ u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+}; -+ -+#endif /* IPU6_PLATFORM_RESOURCES_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c -new file mode 100644 -index 0000000..b6a90f8 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c -@@ -0,0 +1,556 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+ -+#include "ipu6-dma.h" -+#include "ipu6-ppg.h" -+ -+static bool enable_suspend_resume; -+module_param(enable_suspend_resume, bool, 0664); -+MODULE_PARM_DESC(enable_suspend_resume, "enable fw ppg suspend/resume api"); -+ -+static struct ipu_psys_kcmd * -+ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state) -+{ -+ struct ipu_psys_kcmd *kcmd; -+ -+ if (list_empty(&kppg->kcmds_new_list)) -+ return NULL; -+ -+ list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) { -+ if (kcmd->state == state) -+ return kcmd; -+ } -+ -+ return NULL; -+} -+ -+struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_kcmd *kcmd; -+ -+ WARN(!mutex_is_locked(&kppg->mutex), "ppg locking error"); -+ -+ if (list_empty(&kppg->kcmds_processing_list)) -+ return NULL; -+ -+ list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) { -+ if (kcmd->state == KCMD_STATE_PPG_STOP) -+ return kcmd; -+ } -+ -+ return NULL; -+} -+ -+static struct ipu_psys_buffer_set * -+__get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size) -+{ -+ struct ipu6_bus_device *adev = fh->psys->adev; -+ struct ipu_psys_buffer_set *kbuf_set; -+ struct 
ipu_psys_scheduler *sched = &fh->sched; -+ -+ mutex_lock(&sched->bs_mutex); -+ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { -+ if (!kbuf_set->buf_set_size && -+ kbuf_set->size >= buf_set_size) { -+ kbuf_set->buf_set_size = buf_set_size; -+ mutex_unlock(&sched->bs_mutex); -+ return kbuf_set; -+ } -+ } -+ -+ mutex_unlock(&sched->bs_mutex); -+ /* no suitable buffer available, allocate new one */ -+ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); -+ if (!kbuf_set) -+ return NULL; -+ -+ kbuf_set->kaddr = ipu6_dma_alloc(adev, buf_set_size, -+ &kbuf_set->dma_addr, GFP_KERNEL, 0); -+ if (!kbuf_set->kaddr) { -+ kfree(kbuf_set); -+ return NULL; -+ } -+ -+ kbuf_set->buf_set_size = buf_set_size; -+ kbuf_set->size = buf_set_size; -+ mutex_lock(&sched->bs_mutex); -+ list_add(&kbuf_set->list, &sched->buf_sets); -+ mutex_unlock(&sched->bs_mutex); -+ -+ return kbuf_set; -+} -+ -+static struct ipu_psys_buffer_set * -+ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd, -+ struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_buffer_set *kbuf_set; -+ size_t buf_set_size; -+ u32 *keb; -+ -+ buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd); -+ -+ kbuf_set = __get_buf_set(fh, buf_set_size); -+ if (!kbuf_set) { -+ dev_err(dev, "failed to create buffer set\n"); -+ return NULL; -+ } -+ -+ kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd, -+ kbuf_set->kaddr, -+ 0); -+ -+ ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set, -+ kbuf_set->dma_addr); -+ keb = kcmd->kernel_enable_bitmap; -+ ipu_fw_psys_ppg_buffer_set_set_keb(kbuf_set->buf_set, keb); -+ ipu6_dma_sync_single(psys->adev, kbuf_set->dma_addr, buf_set_size); -+ -+ return kbuf_set; -+} -+ -+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, -+ struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct 
ipu_psys_buffer_set *kbuf_set; -+ unsigned int i; -+ int ret; -+ -+ kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg); -+ if (!kbuf_set) { -+ ret = -EINVAL; -+ goto error; -+ } -+ kcmd->kbuf_set = kbuf_set; -+ kbuf_set->kcmd = kcmd; -+ -+ for (i = 0; i < kcmd->nbuffers; i++) { -+ struct ipu_fw_psys_terminal *terminal; -+ u32 buffer; -+ -+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); -+ if (!terminal) -+ continue; -+ -+ buffer = (u32)kcmd->kbufs[i]->dma_addr + -+ kcmd->buffers[i].data_offset; -+ -+ ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer); -+ if (ret) { -+ dev_err(dev, "Unable to set bufset\n"); -+ goto error; -+ } -+ } -+ -+ return 0; -+ -+error: -+ dev_err(dev, "failed to get buffer set\n"); -+ return ret; -+} -+ -+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg) -+{ -+ struct device *dev; -+ u8 queue_id; -+ int old_ppg_state; -+ -+ if (!psys || !kppg) -+ return; -+ -+ dev = &psys->adev->auxdev.dev; -+ -+ mutex_lock(&kppg->mutex); -+ old_ppg_state = kppg->state; -+ if (kppg->state == PPG_STATE_STOPPING) { -+ struct ipu_psys_kcmd tmp_kcmd = { -+ .kpg = kppg->kpg, -+ }; -+ -+ kppg->state = PPG_STATE_STOPPED; -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd); -+ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, queue_id); -+ pm_runtime_put(dev); -+ } else { -+ if (kppg->state == PPG_STATE_SUSPENDING) { -+ kppg->state = PPG_STATE_SUSPENDED; -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ } else if (kppg->state == PPG_STATE_STARTED || -+ kppg->state == PPG_STATE_RESUMED) { -+ kppg->state = PPG_STATE_RUNNING; -+ } -+ -+ /* Kick l-scheduler thread for FW callback, -+ * also for checking if need to enter power gating -+ */ -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ } -+ if (old_ppg_state != kppg->state) -+ dev_dbg(dev, "s_change:%s: %p %d 
-> %d\n", __func__, kppg, -+ old_ppg_state, kppg->state); -+ -+ mutex_unlock(&kppg->mutex); -+} -+ -+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, -+ KCMD_STATE_PPG_START); -+ unsigned int i; -+ int ret; -+ -+ if (!kcmd) { -+ dev_err(dev, "failed to find start kcmd!\n"); -+ return -EINVAL; -+ } -+ -+ dev_dbg(dev, "start ppg id %d, addr 0x%p\n", -+ ipu_fw_psys_pg_get_id(kcmd), kppg); -+ -+ kppg->state = PPG_STATE_STARTING; -+ for (i = 0; i < kcmd->nbuffers; i++) { -+ struct ipu_fw_psys_terminal *terminal; -+ -+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); -+ if (!terminal) -+ continue; -+ -+ ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0, -+ kcmd->buffers[i].len); -+ if (ret) { -+ dev_err(dev, "Unable to set terminal\n"); -+ return ret; -+ } -+ } -+ -+ ret = ipu_fw_psys_pg_submit(kcmd); -+ if (ret) { -+ dev_err(dev, "failed to submit kcmd!\n"); -+ return ret; -+ } -+ -+ ret = ipu_psys_allocate_resources(dev, kcmd->kpg->pg, kcmd->pg_manifest, -+ &kcmd->kpg->resource_alloc, -+ &psys->res_pool_running); -+ if (ret) { -+ dev_err(dev, "alloc resources failed!\n"); -+ return ret; -+ } -+ -+ ret = pm_runtime_get_sync(dev); -+ if (ret < 0) { -+ dev_err(dev, "failed to power on psys\n"); -+ goto error; -+ } -+ -+ ret = ipu_psys_kcmd_start(psys, kcmd); -+ if (ret) { -+ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); -+ goto error; -+ } -+ -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, kppg->state, PPG_STATE_STARTED); -+ kppg->state = PPG_STATE_STARTED; -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ -+ return 0; -+ -+error: -+ pm_runtime_put_noidle(dev); -+ ipu_psys_reset_process_cell(dev, kcmd->kpg->pg, kcmd->pg_manifest, -+ kcmd->kpg->pg->process_count); -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ -+ dev_err(dev, "failed to start ppg\n"); -+ return ret; -+} 
-+ -+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd tmp_kcmd = { -+ .kpg = kppg->kpg, -+ .fh = kppg->fh, -+ }; -+ int ret; -+ -+ dev_dbg(dev, "resume ppg id %d, addr 0x%p\n", -+ ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg); -+ -+ kppg->state = PPG_STATE_RESUMING; -+ if (enable_suspend_resume) { -+ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, -+ kppg->manifest, -+ &kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ if (ret) { -+ dev_err(dev, "failed to allocate res\n"); -+ return -EIO; -+ } -+ -+ ret = ipu_fw_psys_ppg_resume(&tmp_kcmd); -+ if (ret) { -+ dev_err(dev, "failed to resume ppg\n"); -+ goto error; -+ } -+ } else { -+ kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY; -+ ret = ipu_fw_psys_pg_submit(&tmp_kcmd); -+ if (ret) { -+ dev_err(dev, "failed to submit kcmd!\n"); -+ return ret; -+ } -+ -+ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, -+ kppg->manifest, -+ &kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ if (ret) { -+ dev_err(dev, "failed to allocate res\n"); -+ return ret; -+ } -+ -+ ret = ipu_psys_kcmd_start(psys, &tmp_kcmd); -+ if (ret) { -+ dev_err(dev, "failed to start kcmd!\n"); -+ goto error; -+ } -+ } -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, kppg->state, PPG_STATE_RESUMED); -+ kppg->state = PPG_STATE_RESUMED; -+ -+ return 0; -+ -+error: -+ ipu_psys_reset_process_cell(dev, kppg->kpg->pg, kppg->manifest, -+ kppg->kpg->pg->process_count); -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ -+ return ret; -+} -+ -+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, -+ KCMD_STATE_PPG_STOP); -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd kcmd_temp; -+ int ppg_id, ret = 0; -+ -+ if (kcmd) { -+ 
list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); -+ } else { -+ dev_dbg(dev, "Exceptional stop happened!\n"); -+ kcmd_temp.kpg = kppg->kpg; -+ kcmd_temp.fh = kppg->fh; -+ kcmd = &kcmd_temp; -+ /* delete kppg in stop list to avoid this ppg resuming */ -+ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); -+ } -+ -+ ppg_id = ipu_fw_psys_pg_get_id(kcmd); -+ dev_dbg(dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg); -+ -+ if (kppg->state & PPG_STATE_SUSPENDED) { -+ if (enable_suspend_resume) { -+ dev_dbg(dev, "need resume before stop!\n"); -+ kcmd_temp.kpg = kppg->kpg; -+ kcmd_temp.fh = kppg->fh; -+ ret = ipu_fw_psys_ppg_resume(&kcmd_temp); -+ if (ret) -+ dev_err(dev, "ppg(%d) failed to resume\n", -+ ppg_id); -+ } else if (kcmd != &kcmd_temp) { -+ u8 queue_id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); -+ -+ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, -+ queue_id); -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, -+ kppg, kppg->state, PPG_STATE_STOPPED); -+ pm_runtime_put(dev); -+ kppg->state = PPG_STATE_STOPPED; -+ -+ return 0; -+ } else { -+ return 0; -+ } -+ } -+ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, -+ PPG_STATE_STOPPING); -+ kppg->state = PPG_STATE_STOPPING; -+ ret = ipu_fw_psys_pg_abort(kcmd); -+ if (ret) -+ dev_err(dev, "ppg(%d) failed to abort\n", ppg_id); -+ -+ return ret; -+} -+ -+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd tmp_kcmd = { -+ .kpg = kppg->kpg, -+ .fh = kppg->fh, -+ }; -+ int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd); -+ int ret = 0; -+ -+ dev_dbg(dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg); -+ -+ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, -+ PPG_STATE_SUSPENDING); -+ kppg->state = PPG_STATE_SUSPENDING; -+ if (enable_suspend_resume) -+ ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd); -+ else -+ 
ret = ipu_fw_psys_pg_abort(&tmp_kcmd); -+ if (ret) -+ dev_err(dev, "failed to %s ppg(%d)\n", -+ enable_suspend_resume ? "suspend" : "stop", ret); -+ -+ return ret; -+} -+ -+static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg) -+{ -+ return !list_empty(&kppg->kcmds_new_list); -+} -+ -+/* -+ * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware -+ * Sometimes, if the ppg is at suspended state, this function will return true -+ * to reschedule and let the resume command scheduled before the buffer sets -+ * enqueuing. -+ */ -+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_kcmd *kcmd, *kcmd0; -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ bool need_resume = false; -+ -+ mutex_lock(&kppg->mutex); -+ -+ if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED | -+ PPG_STATE_RUNNING)) { -+ if (ipu_psys_ppg_is_bufset_existing(kppg)) { -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_new_list, list) { -+ int ret; -+ -+ if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) { -+ need_resume = true; -+ break; -+ } -+ -+ ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd); -+ if (ret) { -+ dev_err(dev, -+ "kppg 0x%p fail to qbufset %d", -+ kppg, ret); -+ break; -+ } -+ list_move_tail(&kcmd->list, -+ &kppg->kcmds_processing_list); -+ dev_dbg(dev, -+ "kppg %d %p queue kcmd 0x%p fh 0x%p\n", -+ ipu_fw_psys_pg_get_id(kcmd), -+ kppg, kcmd, kcmd->fh); -+ } -+ } -+ } -+ -+ mutex_unlock(&kppg->mutex); -+ return need_resume; -+} -+ -+void ipu_psys_enter_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ 
mutex_lock(&kppg->mutex); -+ /* -+ * Only for SUSPENDED kppgs, STOPPED kppgs has already -+ * power down and new kppgs might come now. -+ */ -+ if (kppg->state != PPG_STATE_SUSPENDED) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ } -+ pm_runtime_put(dev); -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -+ -+void ipu_psys_exit_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ int ret = 0; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ /* Only for SUSPENDED kppgs */ -+ if (kppg->state != PPG_STATE_SUSPENDED) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ } -+ -+ ret = pm_runtime_get_sync(dev); -+ if (ret < 0) { -+ dev_err(dev, "failed to power gating\n"); -+ pm_runtime_put_noidle(dev); -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h -new file mode 100644 -index 0000000..676a4ea ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h -@@ -0,0 +1,38 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2020 - 2024 Intel Corporation -+ */ -+ -+#ifndef IPU6_PPG_H -+#define IPU6_PPG_H -+ -+#include "ipu-psys.h" -+/* starting from '2' in case of someone passes true or false */ -+enum SCHED_LIST { -+ SCHED_START_LIST = 2, -+ SCHED_STOP_LIST -+}; -+ -+enum ipu_psys_power_gating_state { -+ PSYS_POWER_NORMAL = 0, -+ PSYS_POWER_GATING, -+ PSYS_POWER_GATED -+}; -+ -+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, -+ struct ipu_psys_ppg *kppg); -+struct ipu_psys_kcmd 
*ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg); -+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type); -+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type); -+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg); -+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg); -+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg); -+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg); -+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg); -+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg); -+void ipu_psys_enter_power_gating(struct ipu_psys *psys); -+void ipu_psys_exit_power_gating(struct ipu_psys *psys); -+ -+#endif /* IPU6_PPG_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c -new file mode 100644 -index 0000000..ff19256 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c -@@ -0,0 +1,209 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#ifdef CONFIG_DEBUG_FS -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-platform-regs.h" -+ -+/* -+ * GPC (Gerneral Performance Counters) -+ */ -+#define IPU_PSYS_GPC_NUM 16 -+ -+#ifndef CONFIG_PM -+#define pm_runtime_get_sync(d) 0 -+#define pm_runtime_put(d) 0 -+#endif -+ -+struct ipu_psys_gpc { -+ bool enable; -+ unsigned int route; -+ unsigned int source; -+ unsigned int sense; -+ unsigned int gpcindex; -+ void *prit; -+}; -+ -+struct ipu_psys_gpcs { -+ bool gpc_enable; -+ struct ipu_psys_gpc gpc[IPU_PSYS_GPC_NUM]; -+ void *prit; -+}; -+ -+static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val) -+{ -+ struct ipu_psys_gpcs *psys_gpcs = data; -+ struct ipu_psys *psys = psys_gpcs->prit; -+ -+ mutex_lock(&psys->mutex); -+ -+ *val = psys_gpcs->gpc_enable; -+ -+ mutex_unlock(&psys->mutex); -+ return 0; -+} -+ -+static int ipu6_psys_gpc_global_enable_set(void 
*data, u64 val) -+{ -+ struct ipu_psys_gpcs *psys_gpcs = data; -+ struct ipu_psys *psys = psys_gpcs->prit; -+ void __iomem *base; -+ int idx, res; -+ -+ if (val != 0 && val != 1) -+ return -EINVAL; -+ -+ if (!psys || !psys->pdata || !psys->pdata->base) -+ return -EINVAL; -+ -+ mutex_lock(&psys->mutex); -+ -+ base = psys->pdata->base + IPU_GPC_BASE; -+ -+ res = pm_runtime_get_sync(&psys->adev->dev); -+ if (res < 0) { -+ pm_runtime_put(&psys->adev->dev); -+ mutex_unlock(&psys->mutex); -+ return res; -+ } -+ -+ if (val == 0) { -+ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); -+ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); -+ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); -+ psys_gpcs->gpc_enable = false; -+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { -+ psys_gpcs->gpc[idx].enable = 0; -+ psys_gpcs->gpc[idx].sense = 0; -+ psys_gpcs->gpc[idx].route = 0; -+ psys_gpcs->gpc[idx].source = 0; -+ } -+ pm_runtime_put(&psys->adev->dev); -+ } else { -+ /* Set gpc reg and start all gpc here. -+ * RST free running local timer. -+ */ -+ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); -+ writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST); -+ -+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { -+ /* Enable */ -+ writel(psys_gpcs->gpc[idx].enable, -+ base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx); -+ /* Setting (route/source/sense) */ -+ writel((psys_gpcs->gpc[idx].sense -+ << IPU_GPC_SENSE_OFFSET) -+ + (psys_gpcs->gpc[idx].route -+ << IPU_GPC_ROUTE_OFFSET) -+ + (psys_gpcs->gpc[idx].source -+ << IPU_GPC_SOURCE_OFFSET), -+ base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx); -+ } -+ -+ /* Soft reset and Overall Enable. 
*/ -+ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); -+ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); -+ writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); -+ -+ psys_gpcs->gpc_enable = true; -+ } -+ -+ mutex_unlock(&psys->mutex); -+ return 0; -+} -+ -+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_globe_enable_fops, -+ ipu6_psys_gpc_global_enable_get, -+ ipu6_psys_gpc_global_enable_set, "%llu\n"); -+ -+static int ipu6_psys_gpc_count_get(void *data, u64 *val) -+{ -+ struct ipu_psys_gpc *psys_gpc = data; -+ struct ipu_psys *psys = psys_gpc->prit; -+ void __iomem *base; -+ int res; -+ -+ if (!psys || !psys->pdata || !psys->pdata->base) -+ return -EINVAL; -+ -+ mutex_lock(&psys->mutex); -+ -+ base = psys->pdata->base + IPU_GPC_BASE; -+ -+ res = pm_runtime_get_sync(&psys->adev->dev); -+ if (res < 0) { -+ pm_runtime_put(&psys->adev->dev); -+ mutex_unlock(&psys->mutex); -+ return res; -+ } -+ -+ *val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex); -+ -+ mutex_unlock(&psys->mutex); -+ return 0; -+} -+ -+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops, -+ ipu6_psys_gpc_count_get, -+ NULL, "%llu\n"); -+ -+int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys) -+{ -+ struct dentry *gpcdir; -+ struct dentry *dir; -+ struct dentry *file; -+ int idx; -+ char gpcname[10]; -+ struct ipu_psys_gpcs *psys_gpcs; -+ -+ psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL); -+ if (!psys_gpcs) -+ return -ENOMEM; -+ -+ gpcdir = debugfs_create_dir("gpc", psys->debugfsdir); -+ if (IS_ERR(gpcdir)) -+ return -ENOMEM; -+ -+ psys_gpcs->prit = psys; -+ file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs, -+ &psys_gpc_globe_enable_fops); -+ if (IS_ERR(file)) -+ goto err; -+ -+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { -+ sprintf(gpcname, "gpc%d", idx); -+ dir = debugfs_create_dir(gpcname, gpcdir); -+ if (IS_ERR(dir)) -+ goto err; -+ -+ debugfs_create_bool("enable", 0600, dir, -+ &psys_gpcs->gpc[idx].enable); -+ -+ debugfs_create_u32("source", 0600, dir, -+ 
&psys_gpcs->gpc[idx].source); -+ -+ debugfs_create_u32("route", 0600, dir, -+ &psys_gpcs->gpc[idx].route); -+ -+ debugfs_create_u32("sense", 0600, dir, -+ &psys_gpcs->gpc[idx].sense); -+ -+ psys_gpcs->gpc[idx].gpcindex = idx; -+ psys_gpcs->gpc[idx].prit = psys; -+ file = debugfs_create_file("count", 0400, dir, -+ &psys_gpcs->gpc[idx], -+ &psys_gpc_count_fops); -+ if (IS_ERR(file)) -+ goto err; -+ } -+ -+ return 0; -+ -+err: -+ debugfs_remove_recursive(gpcdir); -+ return -ENOMEM; -+} -+#endif -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c -new file mode 100644 -index 0000000..46c6bc8 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c -@@ -0,0 +1,1056 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ipu6.h" -+#include "ipu6-dma.h" -+#include "ipu-psys.h" -+#include "ipu6-ppg.h" -+#include "ipu6-platform-regs.h" -+#include "ipu6-platform-buttress-regs.h" -+ -+MODULE_IMPORT_NS(DMA_BUF); -+ -+static bool early_pg_transfer; -+module_param(early_pg_transfer, bool, 0664); -+MODULE_PARM_DESC(early_pg_transfer, -+ "Copy PGs back to user after resource allocation"); -+ -+bool enable_power_gating = true; -+module_param(enable_power_gating, bool, 0664); -+MODULE_PARM_DESC(enable_power_gating, "enable power gating"); -+ -+static void ipu6_set_sp_info_bits(void __iomem *base) -+{ -+ int i; -+ -+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, -+ base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); -+ -+ for (i = 0; i < 4; i++) -+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, -+ base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); -+ for (i = 0; i < 4; i++) -+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, -+ base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); -+} -+ -+#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000 
-+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ unsigned int i; -+ u32 val; -+ -+ /* power domain req */ -+ dev_dbg(dev, "power %s psys sub-domains", on ? "UP" : "DOWN"); -+ if (on) -+ writel(IPU_PSYS_SUBDOMAINS_POWER_MASK, -+ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); -+ else -+ writel(0x0, -+ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); -+ -+ i = 0; -+ do { -+ usleep_range(10, 20); -+ val = readl(psys->adev->isp->base + -+ IPU_PSYS_SUBDOMAINS_POWER_STATUS); -+ if (!(val & BIT(31))) { -+ dev_dbg(dev, "PS sub-domains req done with status 0x%x", -+ val); -+ break; -+ } -+ i++; -+ } while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT); -+ -+ if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT) -+ dev_warn(dev, "Psys sub-domains %s req timeout!", -+ on ? "UP" : "DOWN"); -+} -+ -+void ipu_psys_setup_hw(struct ipu_psys *psys) -+{ -+ void __iomem *base = psys->pdata->base; -+ void __iomem *spc_regs_base = -+ base + psys->pdata->ipdata->hw_variant.spc_offset; -+ void __iomem *psys_iommu0_ctrl; -+ u32 irqs; -+ const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG; -+ const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR; -+ const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL; -+ -+ if (!psys->adev->isp->secure_mode) { -+ /* configure access blocker for non-secure mode */ -+ writel(NCI_AB_ACCESS_MODE_RW, -+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + -+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3)); -+ writel(NCI_AB_ACCESS_MODE_RW, -+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + -+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4)); -+ writel(NCI_AB_ACCESS_MODE_RW, -+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + -+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5)); -+ } -+ psys_iommu0_ctrl = base + -+ psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + -+ IPU6_MMU_INFO_OFFSET; -+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl); -+ -+ 
ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); -+ ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); -+ -+ /* Enable FW interrupt #0 */ -+ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); -+ irqs = IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); -+ writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); -+} -+ -+static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_scheduler *sched = &kcmd->fh->sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ -+ mutex_lock(&kcmd->fh->mutex); -+ if (list_empty(&sched->ppgs)) -+ goto not_found; -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ if (ipu_fw_psys_pg_get_token(kcmd) -+ != kppg->token) -+ continue; -+ mutex_unlock(&kcmd->fh->mutex); -+ return kppg; -+ } -+ -+not_found: -+ mutex_unlock(&kcmd->fh->mutex); -+ return NULL; -+} -+ -+/* -+ * Called to free up all resources associated with a kcmd. -+ * After this the kcmd doesn't anymore exist in the driver. 
-+ */ -+static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_scheduler *sched; -+ -+ if (!kcmd) -+ return; -+ -+ kppg = ipu_psys_identify_kppg(kcmd); -+ sched = &kcmd->fh->sched; -+ -+ if (kcmd->kbuf_set) { -+ mutex_lock(&sched->bs_mutex); -+ kcmd->kbuf_set->buf_set_size = 0; -+ mutex_unlock(&sched->bs_mutex); -+ kcmd->kbuf_set = NULL; -+ } -+ -+ if (kppg) { -+ mutex_lock(&kppg->mutex); -+ if (!list_empty(&kcmd->list)) -+ list_del(&kcmd->list); -+ mutex_unlock(&kppg->mutex); -+ } -+ -+ kfree(kcmd->pg_manifest); -+ kfree(kcmd->kbufs); -+ kfree(kcmd->buffers); -+ kfree(kcmd); -+} -+ -+static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, -+ struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_psys_kbuffer *kpgbuf; -+ unsigned int i; -+ int ret, prevfd, fd; -+ -+ fd = -1; -+ prevfd = -1; -+ -+ if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) -+ return NULL; -+ -+ if (!cmd->pg_manifest_size) -+ return NULL; -+ -+ kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); -+ if (!kcmd) -+ return NULL; -+ -+ kcmd->state = KCMD_STATE_PPG_NEW; -+ kcmd->fh = fh; -+ INIT_LIST_HEAD(&kcmd->list); -+ -+ mutex_lock(&fh->mutex); -+ fd = cmd->pg; -+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); -+ if (!kpgbuf || !kpgbuf->sgt) { -+ dev_err(dev, "%s kbuf %p with fd %d not found.\n", -+ __func__, kpgbuf, fd); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ -+ /* check and remap if possible */ -+ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); -+ if (!kpgbuf || !kpgbuf->sgt) { -+ dev_err(dev, "%s remap failed\n", __func__); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ mutex_unlock(&fh->mutex); -+ -+ kcmd->pg_user = kpgbuf->kaddr; -+ kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); -+ if (!kcmd->kpg) -+ goto error; -+ -+ memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); -+ -+ kcmd->pg_manifest = 
kzalloc(cmd->pg_manifest_size, GFP_KERNEL); -+ if (!kcmd->pg_manifest) -+ goto error; -+ -+ ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, -+ cmd->pg_manifest_size); -+ if (ret) -+ goto error; -+ -+ kcmd->pg_manifest_size = cmd->pg_manifest_size; -+ -+ kcmd->user_token = cmd->user_token; -+ kcmd->issue_id = cmd->issue_id; -+ kcmd->priority = cmd->priority; -+ if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) -+ goto error; -+ -+ /* -+ * Kernel enable bitmap be used only. -+ */ -+ memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap, -+ sizeof(cmd->kernel_enable_bitmap)); -+ -+ kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); -+ kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), -+ GFP_KERNEL); -+ if (!kcmd->buffers) -+ goto error; -+ -+ kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), -+ GFP_KERNEL); -+ if (!kcmd->kbufs) -+ goto error; -+ -+ /* should be stop cmd for ppg */ -+ if (!cmd->buffers) { -+ kcmd->state = KCMD_STATE_PPG_STOP; -+ return kcmd; -+ } -+ -+ if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) -+ goto error; -+ -+ ret = copy_from_user(kcmd->buffers, cmd->buffers, -+ kcmd->nbuffers * sizeof(*kcmd->buffers)); -+ if (ret) -+ goto error; -+ -+ for (i = 0; i < kcmd->nbuffers; i++) { -+ struct ipu_fw_psys_terminal *terminal; -+ -+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); -+ if (!terminal) -+ continue; -+ -+ if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) { -+ kcmd->state = KCMD_STATE_PPG_START; -+ continue; -+ } -+ if (kcmd->state == KCMD_STATE_PPG_START) { -+ dev_err(dev, "buffer.flags & DMA_HANDLE must be 0\n"); -+ goto error; -+ } -+ -+ mutex_lock(&fh->mutex); -+ fd = kcmd->buffers[i].base.fd; -+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); -+ if (!kpgbuf || !kpgbuf->sgt) { -+ dev_err(dev, -+ "%s kcmd->buffers[%d] %p fd %d not found.\n", -+ __func__, i, kpgbuf, fd); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ -+ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); -+ if (!kpgbuf || 
!kpgbuf->sgt) { -+ dev_err(dev, "%s remap failed\n", -+ __func__); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ -+ mutex_unlock(&fh->mutex); -+ kcmd->kbufs[i] = kpgbuf; -+ if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || -+ kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) -+ goto error; -+ -+ if ((kcmd->kbufs[i]->flags & IPU_BUFFER_FLAG_NO_FLUSH) || -+ (kcmd->buffers[i].flags & IPU_BUFFER_FLAG_NO_FLUSH) || -+ prevfd == kcmd->buffers[i].base.fd) -+ continue; -+ -+ prevfd = kcmd->buffers[i].base.fd; -+ -+ /* -+ * TODO: remove exported buffer sync here as the cache -+ * coherency should be done by the exporter -+ */ -+ if (kcmd->kbufs[i]->kaddr) -+ clflush_cache_range(kcmd->kbufs[i]->kaddr, -+ kcmd->kbufs[i]->len); -+ } -+ -+ if (kcmd->state != KCMD_STATE_PPG_START) -+ kcmd->state = KCMD_STATE_PPG_ENQUEUE; -+ -+ return kcmd; -+error: -+ ipu_psys_kcmd_free(kcmd); -+ -+ dev_dbg(dev, "failed to copy cmd\n"); -+ -+ return NULL; -+} -+ -+static struct ipu_psys_buffer_set * -+ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr) -+{ -+ struct ipu_psys_fh *fh; -+ struct ipu_psys_buffer_set *kbuf_set; -+ struct ipu_psys_scheduler *sched; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ sched = &fh->sched; -+ mutex_lock(&sched->bs_mutex); -+ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { -+ if (kbuf_set->buf_set && -+ kbuf_set->buf_set->ipu_virtual_address == addr) { -+ mutex_unlock(&sched->bs_mutex); -+ return kbuf_set; -+ } -+ } -+ mutex_unlock(&sched->bs_mutex); -+ } -+ -+ return NULL; -+} -+ -+static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys, -+ dma_addr_t pg_addr) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ sched = &fh->sched; -+ mutex_lock(&fh->mutex); -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ if (pg_addr != 
kppg->kpg->pg_dma_addr) -+ continue; -+ mutex_unlock(&fh->mutex); -+ return kppg; -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return NULL; -+} -+ -+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 -+static void ipu_buttress_set_psys_ratio(struct ipu6_device *isp, -+ unsigned int psys_divisor, -+ unsigned int psys_qos_floor) -+{ -+ struct ipu6_buttress_ctrl *ctrl = isp->psys->ctrl; -+ -+ mutex_lock(&isp->buttress.power_mutex); -+ -+ if (ctrl->ratio == psys_divisor && ctrl->qos_floor == psys_qos_floor) -+ goto out_mutex_unlock; -+ -+ ctrl->ratio = psys_divisor; -+ ctrl->qos_floor = psys_qos_floor; -+ -+ if (ctrl->started) { -+ /* -+ * According to documentation driver initiates DVFS -+ * transition by writing wanted ratio, floor ratio and start -+ * bit. No need to stop PS first -+ */ -+ writel(BUTTRESS_FREQ_CTL_START | -+ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | -+ psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); -+ } -+ -+out_mutex_unlock: -+ mutex_unlock(&isp->buttress.power_mutex); -+} -+ -+static void ipu_buttress_set_psys_freq(struct ipu6_device *isp, -+ unsigned int freq) -+{ -+ unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; -+ -+ dev_dbg(&isp->psys->auxdev.dev, "freq:%u\n", freq); -+ -+ ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); -+} -+ -+static void -+ipu_buttress_add_psys_constraint(struct ipu6_device *isp, -+ struct ipu6_psys_constraint *constraint) -+{ -+ struct ipu6_buttress *b = &isp->buttress; -+ -+ mutex_lock(&b->cons_mutex); -+ list_add(&constraint->list, &b->constraints); -+ mutex_unlock(&b->cons_mutex); -+} -+ -+static void -+ipu_buttress_remove_psys_constraint(struct ipu6_device *isp, -+ struct ipu6_psys_constraint *constraint) -+{ -+ struct ipu6_buttress *b = &isp->buttress; -+ struct ipu6_psys_constraint *c; -+ unsigned int min_freq = 0; -+ -+ mutex_lock(&b->cons_mutex); -+ list_del(&constraint->list); -+ -+ list_for_each_entry(c, &b->constraints, list) -+ if (c->min_freq > min_freq) -+ min_freq = 
c->min_freq; -+ -+ ipu_buttress_set_psys_freq(isp, min_freq); -+ mutex_unlock(&b->cons_mutex); -+} -+ -+/* -+ * Move kcmd into completed state (due to running finished or failure). -+ * Fill up the event struct and notify waiters. -+ */ -+void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, -+ struct ipu_psys_kcmd *kcmd, int error) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ -+ kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; -+ kcmd->ev.user_token = kcmd->user_token; -+ kcmd->ev.issue_id = kcmd->issue_id; -+ kcmd->ev.error = error; -+ list_move_tail(&kcmd->list, &kppg->kcmds_finished_list); -+ -+ if (kcmd->constraint.min_freq) -+ ipu_buttress_remove_psys_constraint(psys->adev->isp, -+ &kcmd->constraint); -+ -+ if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, -+ kcmd->pg_user); -+ if (kbuf && kbuf->valid) -+ memcpy(kcmd->pg_user, -+ kcmd->kpg->pg, kcmd->kpg->pg_size); -+ else -+ dev_dbg(dev, "Skipping unmapped buffer\n"); -+ } -+ -+ kcmd->state = KCMD_STATE_PPG_COMPLETE; -+ wake_up_interruptible(&fh->wait); -+} -+ -+/* -+ * Submit kcmd into psys queue. If running fails, complete the kcmd -+ * with an error. -+ * -+ * Found a runnable PG. Move queue to the list tail for round-robin -+ * scheduling and run the PG. Start the watchdog timer if the PG was -+ * started successfully. Enable PSYS power if requested. 
-+ */ -+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int ret; -+ -+ if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) -+ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); -+ -+ ret = ipu_fw_psys_pg_start(kcmd); -+ if (ret) { -+ dev_err(dev, "failed to start kcmd!\n"); -+ return ret; -+ } -+ -+ ipu_fw_psys_pg_dump(psys, kcmd, "run"); -+ -+ ret = ipu_fw_psys_pg_disown(kcmd); -+ if (ret) { -+ dev_err(dev, "failed to start kcmd!\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_resource_pool *rpr; -+ int queue_id; -+ int ret; -+ -+ rpr = &psys->res_pool_running; -+ -+ kppg = kzalloc(sizeof(*kppg), GFP_KERNEL); -+ if (!kppg) -+ return -ENOMEM; -+ -+ kppg->fh = fh; -+ kppg->kpg = kcmd->kpg; -+ kppg->state = PPG_STATE_START; -+ kppg->pri_base = kcmd->priority; -+ kppg->pri_dynamic = 0; -+ INIT_LIST_HEAD(&kppg->list); -+ -+ mutex_init(&kppg->mutex); -+ INIT_LIST_HEAD(&kppg->kcmds_new_list); -+ INIT_LIST_HEAD(&kppg->kcmds_processing_list); -+ INIT_LIST_HEAD(&kppg->kcmds_finished_list); -+ INIT_LIST_HEAD(&kppg->sched_list); -+ -+ kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL); -+ if (!kppg->manifest) { -+ kfree(kppg); -+ return -ENOMEM; -+ } -+ memcpy(kppg->manifest, kcmd->pg_manifest, -+ kcmd->pg_manifest_size); -+ -+ queue_id = ipu_psys_allocate_cmd_queue_res(rpr); -+ if (queue_id == -ENOSPC) { -+ dev_err(dev, "no available queue\n"); -+ kfree(kppg->manifest); -+ kfree(kppg); -+ mutex_unlock(&psys->mutex); -+ return -ENOMEM; -+ } -+ -+ /* -+ * set token as start cmd will immediately be followed by a -+ * enqueue cmd so that kppg could be retrieved. 
-+ */ -+ kppg->token = (u64)kcmd->kpg; -+ ipu_fw_psys_pg_set_token(kcmd, kppg->token); -+ ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id); -+ ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, -+ kcmd->kpg->pg_dma_addr); -+ if (ret) { -+ ipu_psys_free_cmd_queue_res(rpr, queue_id); -+ -+ kfree(kppg->manifest); -+ kfree(kppg); -+ return -EIO; -+ } -+ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); -+ -+ mutex_lock(&fh->mutex); -+ list_add_tail(&kppg->list, &sched->ppgs); -+ mutex_unlock(&fh->mutex); -+ -+ mutex_lock(&kppg->mutex); -+ list_add(&kcmd->list, &kppg->kcmds_new_list); -+ mutex_unlock(&kppg->mutex); -+ -+ dev_dbg(dev, "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n", -+ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id); -+ -+ /* Kick l-scheduler thread */ -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ -+ return 0; -+} -+ -+static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_resource_pool *rpr; -+ unsigned long flags; -+ u8 id; -+ bool resche = true; -+ -+ rpr = &psys->res_pool_running; -+ if (kcmd->state == KCMD_STATE_PPG_START) -+ return ipu_psys_kcmd_send_to_ppg_start(kcmd); -+ -+ kppg = ipu_psys_identify_kppg(kcmd); -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ kcmd->kpg->pg_size = 0; -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ if (!kppg) { -+ dev_err(dev, "token not match\n"); -+ return -EINVAL; -+ } -+ -+ kcmd->kpg = kppg->kpg; -+ -+ dev_dbg(dev, "%s ppg(%d, 0x%p) kcmd %p\n", -+ (kcmd->state == KCMD_STATE_PPG_STOP) ? 
"STOP" : "ENQUEUE", -+ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd); -+ -+ if (kcmd->state == KCMD_STATE_PPG_STOP) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_STOPPED) { -+ dev_dbg(dev, "kppg 0x%p stopped!\n", kppg); -+ id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); -+ ipu_psys_free_cmd_queue_res(rpr, id); -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ pm_runtime_put(dev); -+ resche = false; -+ } else { -+ list_add(&kcmd->list, &kppg->kcmds_new_list); -+ } -+ mutex_unlock(&kppg->mutex); -+ } else { -+ int ret; -+ -+ ret = ipu_psys_ppg_get_bufset(kcmd, kppg); -+ if (ret) -+ return ret; -+ -+ mutex_lock(&kppg->mutex); -+ list_add_tail(&kcmd->list, &kppg->kcmds_new_list); -+ mutex_unlock(&kppg->mutex); -+ } -+ -+ if (resche) { -+ /* Kick l-scheduler thread */ -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ } -+ return 0; -+} -+ -+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ size_t pg_size; -+ int ret; -+ -+ kcmd = ipu_psys_copy_cmd(cmd, fh); -+ if (!kcmd) -+ return -EINVAL; -+ -+ pg_size = ipu_fw_psys_pg_get_size(kcmd); -+ if (pg_size > kcmd->kpg->pg_size) { -+ dev_dbg(dev, "pg size mismatch %lu %lu\n", pg_size, -+ kcmd->kpg->pg_size); -+ ret = -EINVAL; -+ goto error; -+ } -+ -+ if (ipu_fw_psys_pg_get_protocol(kcmd) != -+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG) { -+ dev_err(dev, "No support legacy pg now\n"); -+ ret = -EINVAL; -+ goto error; -+ } -+ -+ if (cmd->min_psys_freq) { -+ kcmd->constraint.min_freq = cmd->min_psys_freq; -+ ipu_buttress_add_psys_constraint(psys->adev->isp, -+ &kcmd->constraint); -+ } -+ -+ ret = ipu_psys_kcmd_send_to_ppg(kcmd); -+ if (ret) -+ goto error; -+ -+ dev_dbg(dev, "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", -+ cmd->user_token, cmd->issue_id, cmd->priority); -+ -+ return 0; -+ -+error: -+ ipu_psys_kcmd_free(kcmd); -+ -+ 
return ret; -+} -+ -+static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_fh *fh; -+ struct ipu_psys_kcmd *kcmd0; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_scheduler *sched; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ sched = &fh->sched; -+ mutex_lock(&fh->mutex); -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ list_for_each_entry(kcmd0, -+ &kppg->kcmds_processing_list, -+ list) { -+ if (kcmd0 == kcmd) { -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return true; -+ } -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return false; -+} -+ -+void ipu_psys_handle_events(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_fw_psys_event event; -+ struct ipu_psys_ppg *kppg; -+ bool error; -+ u32 hdl; -+ u16 cmd, status; -+ int res; -+ -+ do { -+ memset(&event, 0, sizeof(event)); -+ if (!ipu_fw_psys_rcv_event(psys, &event)) -+ break; -+ -+ if (!event.context_handle) -+ break; -+ -+ dev_dbg(dev, "ppg event: 0x%x, %d, status %d\n", -+ event.context_handle, event.command, event.status); -+ -+ error = false; -+ /* -+ * event.command == CMD_RUN shows this is fw processing frame -+ * done as pPG mode, and event.context_handle should be pointer -+ * of buffer set; so we make use of this pointer to lookup -+ * kbuffer_set and kcmd -+ */ -+ hdl = event.context_handle; -+ cmd = event.command; -+ status = event.status; -+ -+ kppg = NULL; -+ kcmd = NULL; -+ if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) { -+ struct ipu_psys_buffer_set *kbuf_set; -+ /* -+ * Need change ppg state when the 1st running is done -+ * (after PPG started/resumed) -+ */ -+ kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl); -+ if (kbuf_set) -+ kcmd = kbuf_set->kcmd; -+ if (!kbuf_set || !kcmd) 
-+ error = true; -+ else -+ kppg = ipu_psys_identify_kppg(kcmd); -+ } else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP || -+ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND || -+ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) { -+ /* -+ * STOP/SUSPEND/RESUME cmd event would run this branch; -+ * only stop cmd queued by user has stop_kcmd and need -+ * to notify user to dequeue. -+ */ -+ kppg = ipu_psys_lookup_ppg(psys, hdl); -+ if (kppg) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_STOPPING) { -+ kcmd = ipu_psys_ppg_get_stop_kcmd(kppg); -+ if (!kcmd) -+ error = true; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ } else { -+ dev_err(dev, "invalid event\n"); -+ continue; -+ } -+ -+ if (error || !kppg) { -+ dev_err(dev, "event error, command %d\n", cmd); -+ break; -+ } -+ -+ dev_dbg(dev, "event to kppg 0x%p, kcmd 0x%p\n", kppg, kcmd); -+ -+ ipu_psys_ppg_complete(psys, kppg); -+ -+ if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) { -+ res = (status == IPU_PSYS_EVENT_CMD_COMPLETE || -+ status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ? 
-+ 0 : -EIO; -+ mutex_lock(&kppg->mutex); -+ ipu_psys_kcmd_complete(kppg, kcmd, res); -+ mutex_unlock(&kppg->mutex); -+ } -+ } while (1); -+} -+ -+int ipu_psys_fh_init(struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp; -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ int i; -+ -+ mutex_init(&sched->bs_mutex); -+ INIT_LIST_HEAD(&sched->buf_sets); -+ INIT_LIST_HEAD(&sched->ppgs); -+ -+ /* allocate and map memory for buf_sets */ -+ for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) { -+ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); -+ if (!kbuf_set) -+ goto out_free_buf_sets; -+ kbuf_set->kaddr = ipu6_dma_alloc(psys->adev, -+ IPU_PSYS_BUF_SET_MAX_SIZE, -+ &kbuf_set->dma_addr, -+ GFP_KERNEL, 0); -+ if (!kbuf_set->kaddr) { -+ kfree(kbuf_set); -+ goto out_free_buf_sets; -+ } -+ kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE; -+ list_add(&kbuf_set->list, &sched->buf_sets); -+ } -+ -+ return 0; -+ -+out_free_buf_sets: -+ list_for_each_entry_safe(kbuf_set, kbuf_set_tmp, -+ &sched->buf_sets, list) { -+ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, -+ kbuf_set->dma_addr, 0); -+ list_del(&kbuf_set->list); -+ kfree(kbuf_set); -+ } -+ mutex_destroy(&sched->bs_mutex); -+ -+ return -ENOMEM; -+} -+ -+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg, *kppg0; -+ struct ipu_psys_kcmd *kcmd, *kcmd0; -+ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0; -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ struct ipu_psys_resource_pool *rpr; -+ struct ipu_psys_resource_alloc *alloc; -+ u8 id; -+ -+ mutex_lock(&fh->mutex); -+ if (!list_empty(&sched->ppgs)) { -+ list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) { -+ unsigned long flags; -+ -+ mutex_lock(&kppg->mutex); -+ if (!(kppg->state & -+ (PPG_STATE_STOPPED | -+ PPG_STATE_STOPPING))) { -+ struct ipu_psys_kcmd tmp = { -+ .kpg = 
kppg->kpg, -+ }; -+ -+ rpr = &psys->res_pool_running; -+ alloc = &kppg->kpg->resource_alloc; -+ id = ipu_fw_psys_ppg_get_base_queue_id(&tmp); -+ ipu_psys_ppg_stop(kppg); -+ ipu_psys_free_resources(alloc, rpr); -+ ipu_psys_free_cmd_queue_res(rpr, id); -+ dev_dbg(dev, -+ "s_change:%s %p %d -> %d\n", -+ __func__, kppg, kppg->state, -+ PPG_STATE_STOPPED); -+ kppg->state = PPG_STATE_STOPPED; -+ if (psys->power_gating != PSYS_POWER_GATED) -+ pm_runtime_put(dev); -+ } -+ list_del(&kppg->list); -+ mutex_unlock(&kppg->mutex); -+ -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_new_list, list) { -+ kcmd->pg_user = NULL; -+ mutex_unlock(&fh->mutex); -+ ipu_psys_kcmd_free(kcmd); -+ mutex_lock(&fh->mutex); -+ } -+ -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_processing_list, -+ list) { -+ kcmd->pg_user = NULL; -+ mutex_unlock(&fh->mutex); -+ ipu_psys_kcmd_free(kcmd); -+ mutex_lock(&fh->mutex); -+ } -+ -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_finished_list, -+ list) { -+ kcmd->pg_user = NULL; -+ mutex_unlock(&fh->mutex); -+ ipu_psys_kcmd_free(kcmd); -+ mutex_lock(&fh->mutex); -+ } -+ -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ kppg->kpg->pg_size = 0; -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ -+ mutex_destroy(&kppg->mutex); -+ kfree(kppg->manifest); -+ kfree(kppg); -+ } -+ } -+ mutex_unlock(&fh->mutex); -+ -+ mutex_lock(&sched->bs_mutex); -+ list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) { -+ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, -+ kbuf_set->dma_addr, 0); -+ list_del(&kbuf_set->list); -+ kfree(kbuf_set); -+ } -+ mutex_unlock(&sched->bs_mutex); -+ mutex_destroy(&sched->bs_mutex); -+ -+ return 0; -+} -+ -+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ struct device *dev = &fh->psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_psys_ppg *kppg; -+ -+ mutex_lock(&fh->mutex); -+ if 
(list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ return NULL; -+ } -+ -+ list_for_each_entry(kppg, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (list_empty(&kppg->kcmds_finished_list)) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ } -+ -+ kcmd = list_first_entry(&kppg->kcmds_finished_list, -+ struct ipu_psys_kcmd, list); -+ mutex_unlock(&fh->mutex); -+ mutex_unlock(&kppg->mutex); -+ dev_dbg(dev, "get completed kcmd 0x%p\n", kcmd); -+ return kcmd; -+ } -+ mutex_unlock(&fh->mutex); -+ -+ return NULL; -+} -+ -+long ipu_ioctl_dqevent(struct ipu_psys_event *event, -+ struct ipu_psys_fh *fh, unsigned int f_flags) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd = NULL; -+ int rval; -+ -+ dev_dbg(dev, "IOC_DQEVENT\n"); -+ -+ if (!(f_flags & O_NONBLOCK)) { -+ rval = wait_event_interruptible(fh->wait, -+ (kcmd = -+ ipu_get_completed_kcmd(fh))); -+ if (rval == -ERESTARTSYS) -+ return rval; -+ } -+ -+ if (!kcmd) { -+ kcmd = ipu_get_completed_kcmd(fh); -+ if (!kcmd) -+ return -ENODATA; -+ } -+ -+ *event = kcmd->ev; -+ ipu_psys_kcmd_free(kcmd); -+ -+ return 0; -+} -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c -new file mode 100644 -index 0000000..4a1a869 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c -@@ -0,0 +1,393 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6-platform-resources.h" -+#include "ipu6ep-platform-resources.h" -+ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+static const u8 ipu6ep_fw_psys_cell_types[IPU6EP_FW_PSYS_N_CELL_ID] = { -+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, -+ IPU6_FW_PSYS_VP_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ 
IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* AF */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ -+ IPU6_FW_PSYS_GDC_TYPE_ID, -+ IPU6_FW_PSYS_TNR_TYPE_ID, -+}; -+ -+static const u16 ipu6ep_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, -+}; -+ -+static const u16 ipu6ep_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { -+ IPU6_FW_PSYS_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, -+ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM2_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM3_MAX_SIZE, -+ IPU6_FW_PSYS_PMEM0_MAX_SIZE -+}; -+ -+static const u16 ipu6ep_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { -+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, -+}; -+ -+static 
const u8 -+ipu6ep_fw_psys_c_mem[IPU6EP_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { -+ { -+ /* IPU6_FW_PSYS_SP0_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM0_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_SP1_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_VP0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_DMEM3_ID, -+ IPU6_FW_PSYS_VMEM0_ID, -+ IPU6_FW_PSYS_BAMEM0_ID, -+ IPU6_FW_PSYS_PMEM0_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC1_ID BNLM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC2_ID DM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC3_ID ACM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC9_ID GLTM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC10_ID XNR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_ICA_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_LSC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_DPC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AWB_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AE_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_PAF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ } -+}; -+ -+static const struct ipu_fw_resource_definitions ipu6ep_defs = { -+ .cells = ipu6ep_fw_psys_cell_types, -+ .num_cells = IPU6EP_FW_PSYS_N_CELL_ID, -+ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, -+ -+ .dev_channels = ipu6ep_fw_num_dev_channels, -+ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, -+ -+ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, -+ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, -+ .ext_mem_ids = ipu6ep_fw_psys_mem_size, -+ -+ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, -+ -+ .dfms = ipu6ep_fw_psys_dfms, -+ -+ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, -+ .cell_mem = &ipu6ep_fw_psys_c_mem[0][0], -+}; -+ -+const struct ipu_fw_resource_definitions *ipu6ep_res_defs = &ipu6ep_defs; -diff --git 
a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h -new file mode 100644 -index 0000000..8420149 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h -@@ -0,0 +1,42 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2020 - 2024 Intel Corporation */ -+ -+#ifndef IPU6EP_PLATFORM_RESOURCES_H -+#define IPU6EP_PLATFORM_RESOURCES_H -+ -+#include -+#include -+ -+enum { -+ IPU6EP_FW_PSYS_SP0_ID = 0, -+ IPU6EP_FW_PSYS_VP0_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_BNLM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_DM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_ACM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_GTC_YUV1_ID, -+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, -+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, -+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_GAMMASTAR_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_GLTM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_XNR_ID, -+ IPU6EP_FW_PSYS_PSA_VCSC_ID, /* VCSC */ -+ IPU6EP_FW_PSYS_ISA_ICA_ID, -+ IPU6EP_FW_PSYS_ISA_LSC_ID, -+ IPU6EP_FW_PSYS_ISA_DPC_ID, -+ IPU6EP_FW_PSYS_ISA_SIS_A_ID, -+ IPU6EP_FW_PSYS_ISA_SIS_B_ID, -+ IPU6EP_FW_PSYS_ISA_B2B_ID, -+ IPU6EP_FW_PSYS_ISA_B2R_R2I_SIE_ID, -+ IPU6EP_FW_PSYS_ISA_R2I_DS_A_ID, -+ IPU6EP_FW_PSYS_ISA_AWB_ID, -+ IPU6EP_FW_PSYS_ISA_AE_ID, -+ IPU6EP_FW_PSYS_ISA_AF_ID, -+ IPU6EP_FW_PSYS_ISA_X2B_MD_ID, -+ IPU6EP_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, -+ IPU6EP_FW_PSYS_ISA_PAF_ID, -+ IPU6EP_FW_PSYS_BB_ACC_GDC0_ID, -+ IPU6EP_FW_PSYS_BB_ACC_TNR_ID, -+ IPU6EP_FW_PSYS_N_CELL_ID -+}; -+#endif /* IPU6EP_PLATFORM_RESOURCES_H */ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c -new file mode 100644 -index 0000000..41b6ba6 ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c -@@ -0,0 +1,194 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include 
"ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6se-platform-resources.h" -+ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+static const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = { -+ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID, -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID*/ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID /* PAF */ -+}; -+ -+static const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = { -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, -+}; -+ -+static const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = { -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, -+ IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE, -+ IPU6SE_FW_PSYS_DMEM0_MAX_SIZE, -+ IPU6SE_FW_PSYS_DMEM1_MAX_SIZE -+}; -+ -+static const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = { -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE -+}; -+ -+static const u8 -+ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = { -+ { /* 
IPU6SE_FW_PSYS_SP0_ID */ -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_DMEM0_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_ICA_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_LSC_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_DPC_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_B2B_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ -+ { /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_DM_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* 
IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_AWB_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_AE_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_AF_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_PAF_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ } -+}; -+ -+static const struct ipu_fw_resource_definitions ipu6se_defs = { -+ .cells = ipu6se_fw_psys_cell_types, -+ .num_cells = IPU6SE_FW_PSYS_N_CELL_ID, -+ .num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID, -+ -+ .dev_channels = ipu6se_fw_num_dev_channels, -+ .num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID, -+ -+ .num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID, -+ .num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID, -+ .ext_mem_ids = ipu6se_fw_psys_mem_size, -+ -+ .num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID, -+ -+ .dfms = ipu6se_fw_psys_dfms, -+ -+ .cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID, -+ .cell_mem = &ipu6se_fw_psys_c_mem[0][0], -+}; -+ -+const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs; -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h -new file mode 
100644 -index 0000000..81c618d ---- /dev/null -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h -@@ -0,0 +1,103 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2018 - 2024 Intel Corporation */ -+ -+#ifndef IPU6SE_PLATFORM_RESOURCES_H -+#define IPU6SE_PLATFORM_RESOURCES_H -+ -+#include -+#include -+#include "ipu-platform-resources.h" -+ -+#define IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 1 -+ -+enum { -+ IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, -+ IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, -+ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, -+ IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_DMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_VMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_BAMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_PMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_N_MEM_TYPE_ID -+}; -+ -+enum ipu6se_mem_id { -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0, /* TRANSFER VMEM 0 */ -+ IPU6SE_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ -+ IPU6SE_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ -+ IPU6SE_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ -+ IPU6SE_FW_PSYS_N_MEM_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID, -+ IPU6SE_FW_PSYS_N_DEV_CHN_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0, -+ IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID, -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6SE_FW_PSYS_N_CELL_TYPE_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_SP0_ID = 0, -+ IPU6SE_FW_PSYS_ISA_ICA_ID, -+ IPU6SE_FW_PSYS_ISA_LSC_ID, -+ IPU6SE_FW_PSYS_ISA_DPC_ID, -+ IPU6SE_FW_PSYS_ISA_B2B_ID, -+ IPU6SE_FW_PSYS_ISA_BNLM_ID, -+ IPU6SE_FW_PSYS_ISA_DM_ID, -+ IPU6SE_FW_PSYS_ISA_R2I_SIE_ID, 
-+ IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID, -+ IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID, -+ IPU6SE_FW_PSYS_ISA_AWB_ID, -+ IPU6SE_FW_PSYS_ISA_AE_ID, -+ IPU6SE_FW_PSYS_ISA_AF_ID, -+ IPU6SE_FW_PSYS_ISA_PAF_ID, -+ IPU6SE_FW_PSYS_N_CELL_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0, -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, -+}; -+ -+/* Excluding PMEM */ -+#define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1) -+#define IPU6SE_FW_PSYS_N_DEV_DFM_ID \ -+ (IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1) -+#define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE 0x0800 -+/* Transfer VMEM0 words, ref HAS Transfer*/ -+#define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 -+#define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ -+#define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE 0x4000 -+#define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE 0x1000 -+ -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 22 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 22 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 22 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 -+ -+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 -+#define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 -+ -+#endif /* IPU6SE_PLATFORM_RESOURCES_H */ --- -2.43.0 - diff --git a/patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch b/patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch deleted file mode 100644 index 4b628f0..0000000 --- a/patches/0022-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch +++ /dev/null @@ -1,134 +0,0 @@ -From 6521b164624d49c5e397ed3ec0759aab103c7570 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Mon, 12 Jan 2026 08:12:25 -0700 -Subject: [PATCH 22/28] media: intel-ipu6: support IPU6 PSYS FW trace dump for - upstream driver - -[media][pci]Support IPU6 
PSYS FW trace dump for upstream driver -Signed-off-by: hepengpx ---- - .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 6 +++ - .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 1 + - .../media/pci/intel/ipu6/psys/ipu-psys.c | 44 ++++++++++++++++++- - 3 files changed, 50 insertions(+), 1 deletion(-) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -index adcee20..077f140 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -@@ -891,6 +891,12 @@ int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle) - } - EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); - -+bool is_ipu_trace_enable(void) -+{ -+ return ipu_trace_enable; -+} -+EXPORT_SYMBOL_GPL(is_ipu_trace_enable); -+ - MODULE_AUTHOR("Samu Onkalo "); - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel ipu trace support"); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -index f66d889..fe89d1b 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -@@ -144,4 +144,5 @@ void ipu_trace_restore(struct device *dev); - void ipu_trace_uninit(struct device *dev); - void ipu_trace_stop(struct device *dev); - int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); -+bool is_ipu_trace_enable(void); - #endif -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -index 9f36749..e8638a9 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -@@ -31,6 +31,7 @@ - #include "ipu-psys.h" - #include "ipu6-platform-regs.h" - #include "ipu6-fw-com.h" -+#include "ipu6-trace.h" - - static bool async_fw_init; - module_param(async_fw_init, bool, 0664); -@@ -1201,6 +1202,8 @@ static int ipu_psys_sched_cmd(void 
*ptr) - return 0; - } - -+#include "../ipu6-trace.h" -+ - static void start_sp(struct ipu6_bus_device *adev) - { - struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -@@ -1211,7 +1214,7 @@ static void start_sp(struct ipu6_bus_device *adev) - val |= IPU6_PSYS_SPC_STATUS_START | - IPU6_PSYS_SPC_STATUS_RUN | - IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; -- val |= psys->icache_prefetch_sp ? -+ val |= (psys->icache_prefetch_sp || is_ipu_trace_enable()) ? - IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; - writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); - } -@@ -1304,6 +1307,40 @@ static void run_fw_init_work(struct work_struct *work) - } - } - -+struct ipu_trace_block psys_trace_blocks[] = { -+ { -+ .offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE, -+ .type = IPU_TRACE_BLOCK_TUN, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE, -+ .type = IPU_TRACE_BLOCK_TM, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE, -+ .type = IPU_TRACE_BLOCK_TM, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPC_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_MMU_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, -+ .type = IPU_TRACE_TIMER_RST, -+ }, -+ { -+ .type = IPU_TRACE_BLOCK_END, -+ } -+}; -+ - static int ipu6_psys_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *auxdev_id) - { -@@ -1442,6 +1479,9 @@ static int ipu6_psys_probe(struct auxiliary_device *auxdev, - strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, - sizeof(psys->caps.dev_model)); - -+ ipu_trace_init(adev->isp, psys->pdata->base, &auxdev->dev, -+ psys_trace_blocks); -+ - mutex_unlock(&ipu_psys_mutex); - - dev_info(dev, "psys probe minor: %d\n", minor); -@@ -1503,6 +1543,8 @@ static void ipu6_psys_remove(struct auxiliary_device *auxdev) - - clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); - -+ 
ipu_trace_uninit(&auxdev->dev); -+ - mutex_unlock(&ipu_psys_mutex); - - mutex_destroy(&psys->mutex); --- -2.43.0 - diff --git a/patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch b/patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch deleted file mode 100644 index b4c8be3..0000000 --- a/patches/0024-drivers-media-set-v4l2_subdev_enable_streams_api-tru.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 2e8f579bd4a1984f0ba6b0e7a259558ba0de5d09 Mon Sep 17 00:00:00 2001 -From: florent pirou -Date: Mon, 17 Nov 2025 07:45:23 -0700 -Subject: [PATCH 24/28] drivers: media: set v4l2_subdev_enable_streams_api=true - for WA 6.12 - -Signed-off-by: hepengpx -Signed-off-by: florent pirou ---- - 6.12.0/drivers/media/v4l2-core/v4l2-subdev.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c -index 3a4ba08..f535929 100644 ---- a/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c -+++ b/6.12.0/drivers/media/v4l2-core/v4l2-subdev.c -@@ -32,7 +32,7 @@ - * 'v4l2_subdev_enable_streams_api' to 1 below. 
- */ - --static bool v4l2_subdev_enable_streams_api; -+static bool v4l2_subdev_enable_streams_api = true; - #endif - - /* --- -2.43.0 - diff --git a/patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch b/patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch deleted file mode 100644 index 7bf0c85..0000000 --- a/patches/0025-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 07afeb848cc7d9a2549b996924297fb09ca43ce3 Mon Sep 17 00:00:00 2001 -From: Florent Pirou -Date: Mon, 12 Jan 2026 03:45:03 -0700 -Subject: [PATCH 25/29] ipu6-dkms: add isys makefile adaptation 6.12 - -*ipu6-isys: compile ISYS RESET and ACPI PDATA support ---- - 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 7 +++++++ - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 1 + - 6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c | 1 + - 4 files changed, 10 insertions(+) - -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -index 67c13c9..9a0a130 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -@@ -1,5 +1,10 @@ - # SPDX-License-Identifier: GPL-2.0-only - -+CC := ${CC} -I ${M}/../../../../../include-overrides -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" -+ -+export CONFIG_VIDEO_INTEL_IPU6 = m -+ - intel-ipu6-y := ipu6.o \ - ipu6-bus.o \ - ipu6-dma.o \ -@@ -22,4 +27,6 @@ intel-ipu6-isys-y := ipu6-isys.o \ - ipu6-isys-dwc-phy.o - - obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o -+ -+subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" - obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index caf29f7..e9fbb9a 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ 
b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1759,3 +1759,4 @@ MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 input system driver"); - MODULE_IMPORT_NS(INTEL_IPU6); - MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index c9513c5..3fa8980 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -958,3 +958,4 @@ MODULE_AUTHOR("Yunliang Ding "); - MODULE_AUTHOR("Hongju Wang "); - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 PCI driver"); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -index e8638a9..4925c4d 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -@@ -1639,3 +1639,4 @@ MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 processing system driver"); - MODULE_IMPORT_NS(DMA_BUF); - MODULE_IMPORT_NS(INTEL_IPU6); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); --- -2.43.0 - diff --git a/patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch b/patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch deleted file mode 100644 index 920d8b9..0000000 --- a/patches/0026-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 60f5dbc2e602ad4e56fc2000448be4e01d4fdadf Mon Sep 17 00:00:00 2001 -From: Florent Pirou -Date: Mon, 12 Jan 2026 11:15:12 -0700 -Subject: [PATCH 26/29] ipu6-dkms: align ipu7 acpi pdata device parser on 6.12 - -Signed-off-by: Florent Pirou ---- - 6.12.0/drivers/media/pci/intel/ipu6/Makefile | 5 ++++- - 6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 1 + - 6.12.0/drivers/media/pci/intel/ipu6/ipu6.c | 8 ++++---- - 3 files changed, 9 insertions(+), 5 deletions(-) - -diff --git 
a/6.12.0/drivers/media/pci/intel/ipu6/Makefile b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -index c3d8ffd..9355e3f 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.12.0/drivers/media/pci/intel/ipu6/Makefile -@@ -1,8 +1,11 @@ - # SPDX-License-Identifier: GPL-2.0-only - --CC := ${CC} -I ${M}/../../../../../include-overrides -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../include-overrides -DCONFIG_INTEL_IPU_ACPI -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET - ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" - -+# Path to ipu_acpi module build directory -+KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers -+ - export CONFIG_VIDEO_INTEL_IPU6 = m - - intel-ipu6-y := ipu6.o \ -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 57df01c..f935223 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -104,6 +104,7 @@ struct isys_iwake_watermark { - struct ipu6_isys_csi2_config { - u32 nlanes; - u32 port; -+ enum v4l2_mbus_type bus_type; - }; - - struct sensor_async_sd { -diff --git a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -index 3fa8980..0fb6b64 100644 ---- a/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.12.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -35,12 +35,12 @@ - #include "ipu6-trace.h" - - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#include -+#include - #endif - - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int isys_init_acpi_add_device(struct device *dev, void *priv, -- struct ipu6_isys_csi2_config *csi2, -+ void *csi2, - bool reprobe) - { - return 0; -@@ -437,14 +437,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!spdata) { - dev_dbg(&pdev->dev, "No subdevice info 
provided"); -- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, NULL, - isys_init_acpi_add_device); - if (acpi_pdata && (*acpi_pdata->subdevs)) { - pdata->spdata = acpi_pdata; - } - } else { - dev_dbg(&pdev->dev, "Subdevice info found"); -- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, (void **)&spdata, - isys_init_acpi_add_device); - } - if (ret) --- -2.43.0 - diff --git a/patches/0050-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch b/patches/0050-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch new file mode 100644 index 0000000..64572fe --- /dev/null +++ b/patches/0050-ipu6-dkms-add-isys-makefile-adaptation-6.12.patch @@ -0,0 +1,76 @@ +From 95a2edd7b1e67b100ada6dbbf7346145b2c13cb2 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Mon, 12 Jan 2026 03:45:03 -0700 +Subject: [PATCH 50/52] ipu6-dkms: add isys makefile adaptation 6.12 + +*ipu6-isys: compile ISYS RESET and ACPI PDATA support +--- + ipu6-drivers/drivers/media/pci/intel/ipu6/Makefile | 13 +++++++++++++ + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + + ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c | 1 + + .../drivers/media/pci/intel/ipu6/psys/ipu-psys.c | 2 ++ + 4 files changed, 17 insertions(+) + +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/Makefile b/ipu6-drivers/drivers/media/pci/intel/ipu6/Makefile +index 67c13c9..5ac7944 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/Makefile ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/Makefile +@@ -1,5 +1,16 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') ++ ++CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../../$(KERNEL_VERSION)/include-overrides -DCONFIG_DEBUG_FS 
-DCONFIG_INTEL_IPU_ACPI -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -DCONFIG_VIDEO_INTEL_IPU6_EXT_CTRLS ++ ++KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers ++ ++ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" ++ ++export EXTERNAL_BUILD = 1 ++export CONFIG_VIDEO_INTEL_IPU6 = m ++ + intel-ipu6-y := ipu6.o \ + ipu6-bus.o \ + ipu6-dma.o \ +@@ -22,4 +33,6 @@ intel-ipu6-isys-y := ipu6-isys.o \ + ipu6-isys-dwc-phy.o + + obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o ++ ++subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 415ff06..eb66918 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1774,3 +1774,4 @@ MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 input system driver"); + MODULE_IMPORT_NS(INTEL_IPU6); + MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c +index bd0d706..53a61ac 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c +@@ -963,3 +963,4 @@ MODULE_AUTHOR("Yunliang Ding "); + MODULE_AUTHOR("Hongju Wang "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 PCI driver"); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +index 6423238..5036ff8 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -2745,7 +2745,9 @@ MODULE_DESCRIPTION("Intel IPU6 processing system driver"); + #if LINUX_VERSION_CODE < 
KERNEL_VERSION(6, 13, 0) + MODULE_IMPORT_NS(DMA_BUF); + MODULE_IMPORT_NS(INTEL_IPU6); ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); + #else + MODULE_IMPORT_NS("DMA_BUF"); + MODULE_IMPORT_NS("INTEL_IPU6"); ++MODULE_VERSION("DRIVER_VERSION_SUFFIX"); + #endif +-- +2.43.0 + diff --git a/patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch b/patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch deleted file mode 100644 index c6db101..0000000 --- a/patches/0050-media-ipu6-isys-Use-v4l2_ctrl_subdev_subscribe_event.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 10b4d928dfd933ad2fac935a52b638cfe07835f6 Mon Sep 17 00:00:00 2001 -From: Laurent Pinchart -Date: Fri, 16 Jan 2026 00:07:52 -0700 -Subject: [PATCH 50/76] media: ipu6: isys: Use - v4l2_ctrl_subdev_subscribe_event() - -The ipu6-isys driver uses v4l2_ctrl_subscribe_event() to handle control -event subscription on a subdev. While this works, it is the wrong API. -Use the subdev-specific v4l2_ctrl_subdev_subscribe_event() helper -instead. 
- -Signed-off-by: Laurent Pinchart -Signed-off-by: Sakari Ailus -Signed-off-by: Hans Verkuil ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -index 6030bd2..d1fece6 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -@@ -109,7 +109,7 @@ static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, - case V4L2_EVENT_FRAME_SYNC: - return v4l2_event_subscribe(fh, sub, 10, NULL); - case V4L2_EVENT_CTRL: -- return v4l2_ctrl_subscribe_event(fh, sub); -+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub); - default: - return -EINVAL; - } --- -2.43.0 - diff --git a/patches/0051-Revert-ipu6-isys-lt6911uxc-2-pads-linked-to-ipu-2-po.patch b/patches/0051-Revert-ipu6-isys-lt6911uxc-2-pads-linked-to-ipu-2-po.patch new file mode 100644 index 0000000..dd94daf --- /dev/null +++ b/patches/0051-Revert-ipu6-isys-lt6911uxc-2-pads-linked-to-ipu-2-po.patch @@ -0,0 +1,74 @@ +From d97b31e1923660f05d39db40028c4eb260f333e6 Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Mon, 19 Jan 2026 09:02:23 -0700 +Subject: [PATCH 51/52] Revert "ipu6-isys: lt6911uxc 2 pads linked to ipu 2 + ports for split mode" + +This reverts commit b1c889b6240723d849da32f615ed8945d08d4e21. + +Change Description: +single camera in bios setting, pprval for dummy port. +create software node and link connection in ipu-brige. +match the connection and port num in v4l2-async. +finally create the media links. 
+ +Signed-off-by: Chen Meng J +Signed-off-by: Florent Pirou +--- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 19 ++----------------- + 1 file changed, 2 insertions(+), 17 deletions(-) + +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c +index eb66918..412afd0 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -158,7 +158,6 @@ static int isys_isr_one(struct ipu6_bus_device *adev); + static int + isys_complete_ext_device_registration(struct ipu6_isys *isys, + struct v4l2_subdev *sd, +- s16 src_pad, + struct ipu6_isys_csi2_config *csi2) + { + struct device *dev = &isys->adev->auxdev.dev; +@@ -176,17 +175,6 @@ isys_complete_ext_device_registration(struct ipu6_isys *isys, + goto unregister_subdev; + } + +- if (src_pad >= 0) +- i = (unsigned int)src_pad; +- +- if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) { +- dev_info(dev, "src pad %d\n", src_pad); +- } else { +- dev_warn(dev, "src pad %d not for src\n", src_pad); +- ret = -ENOENT; +- goto unregister_subdev; +- } +- + ret = media_create_pad_link(&sd->entity, i, + &isys->csi2[csi2->port].asd.sd.entity, + 0, MEDIA_LNK_FL_ENABLED | +@@ -273,8 +261,7 @@ static int isys_register_ext_subdev(struct ipu6_isys *isys, + if (!sd_info->csi2) + return 0; + +- /* src_pad is useless for non MIPI split case. 
Set it to '-1'.*/ +- return isys_complete_ext_device_registration(isys, sd, -1, sd_info->csi2); ++ return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); + + skip_put_adapter: + i2c_put_adapter(adapter); +@@ -1008,9 +995,7 @@ static int isys_notifier_bound(struct v4l2_async_notifier *notifier, + + dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n", + sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); +- ret = isys_complete_ext_device_registration(isys, sd, +- asc->match.src_pad, +- &s_asd->csi2); ++ ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); + if (ret) + return ret; + +-- +2.43.0 + diff --git a/patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch b/patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch deleted file mode 100644 index f9d7fb3..0000000 --- a/patches/0051-media-ipu6-isys-Don-t-set-V4L2_FL_USES_V4L2_FH-manua.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 9bbab9bf77365736c8040d70eceb1453bd24b1fc Mon Sep 17 00:00:00 2001 -From: Laurent Pinchart -Date: Fri, 16 Jan 2026 00:05:37 -0700 -Subject: [PATCH 51/76] media: ipu6: isys: Don't set V4L2_FL_USES_V4L2_FH - manually - -The V4L2_FL_USES_V4L2_FH flag is set by v4l2_fh_init(). It is not meant -to be set manually by drivers. Drop it from the ipu6-isys driver. 
- -Signed-off-by: Laurent Pinchart -Reviewed-by: Bingbu Cao -Reviewed-by: Sakari Ailus -Signed-off-by: Hans Verkuil ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index 24a2ef9..f3f3bc0 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -1306,7 +1306,6 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) - __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); - av->meta_fmt = format_meta.fmt.meta; - -- set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); - video_set_drvdata(&av->vdev, av); - - ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); --- -2.43.0 - diff --git a/patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch b/patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch deleted file mode 100644 index 5c77e96..0000000 --- a/patches/0052-media-ipu6-isys-Set-embedded-data-type-correctly-for.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 284432c3b2dcc1e637446b41c5cafb49c49ccf99 Mon Sep 17 00:00:00 2001 -From: Sakari Ailus -Date: Fri, 16 Jan 2026 00:09:51 -0700 -Subject: [PATCH 52/76] media: ipu6: isys: Set embedded data type correctly for - metadata formats - -The IPU6 ISYS driver supported metadata formats but was missing correct -embedded data type in the receiver configuration. Add it now. 
- -Signed-off-by: Sakari Ailus -Signed-off-by: Hans Verkuil ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c -index 0a06de5..463a0ad 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c -@@ -81,6 +81,12 @@ unsigned int ipu6_isys_mbus_code_to_mipi(u32 code) - case MEDIA_BUS_FMT_SGRBG8_1X8: - case MEDIA_BUS_FMT_SRGGB8_1X8: - return MIPI_CSI2_DT_RAW8; -+ case MEDIA_BUS_FMT_META_8: -+ case MEDIA_BUS_FMT_META_10: -+ case MEDIA_BUS_FMT_META_12: -+ case MEDIA_BUS_FMT_META_16: -+ case MEDIA_BUS_FMT_META_24: -+ return MIPI_CSI2_DT_EMBEDDED_8B; - default: - /* return unavailable MIPI data type - 0x3f */ - WARN_ON(1); --- -2.43.0 - diff --git a/patches/0052-module-Convert-symbol-namespace-to-string-literal.patch b/patches/0052-module-Convert-symbol-namespace-to-string-literal.patch new file mode 100644 index 0000000..bc4d3f0 --- /dev/null +++ b/patches/0052-module-Convert-symbol-namespace-to-string-literal.patch @@ -0,0 +1,389 @@ +From bfbf961cd84252f1d34350c7ba6096f8800711cd Mon Sep 17 00:00:00 2001 +From: Peter Zijlstra +Date: Mon, 19 Jan 2026 11:41:13 -0700 +Subject: [PATCH 52/52] module: Convert symbol namespace to string literal + +Clean up the existing export namespace code along the same lines of +commit 33def8498fdd ("treewide: Convert macro and uses of __section(foo) +to __section("foo")") and for the same reason, it is not desired for the +namespace argument to be a macro expansion itself. 
+ +Scripted using + + git grep -l -e MODULE_IMPORT_NS -e EXPORT_SYMBOL_NS | while read file; + do + awk -i inplace ' + /^#define EXPORT_SYMBOL_NS/ { + gsub(/__stringify\(ns\)/, "ns"); + print; + next; + } + /^#define MODULE_IMPORT_NS/ { + gsub(/__stringify\(ns\)/, "ns"); + print; + next; + } + /MODULE_IMPORT_NS/ { + $0 = gensub(/MODULE_IMPORT_NS\(([^)]*)\)/, "MODULE_IMPORT_NS(\"\\1\")", "g"); + } + /EXPORT_SYMBOL_NS/ { + if ($0 ~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+),/) { + if ($0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/ && + $0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(\)/ && + $0 !~ /^my/) { + getline line; + gsub(/[[:space:]]*\\$/, ""); + gsub(/[[:space:]]/, "", line); + $0 = $0 " " line; + } + + $0 = gensub(/(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/, + "\\1(\\2, \"\\3\")", "g"); + } + } + { print }' $file; + done + +Requested-by: Masahiro Yamada +Signed-off-by: Peter Zijlstra (Intel) +Link: https://mail.google.com/mail/u/2/#inbox/FMfcgzQXKWgMmjdFwwdsfgxzKpVHWPlc +Acked-by: Greg KH +Signed-off-by: Linus Torvalds +--- + .../media/pci/intel/ipu6/ipu6-buttress.c | 14 +++++++------- + .../drivers/media/pci/intel/ipu6/ipu6-cpd.c | 4 ++-- + .../drivers/media/pci/intel/ipu6/ipu6-dma.c | 18 +++++++++--------- + .../drivers/media/pci/intel/ipu6/ipu6-fw-com.c | 18 +++++++++--------- + .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 4 ++-- + .../drivers/media/pci/intel/ipu6/ipu6-mmu.c | 4 ++-- + .../drivers/media/pci/intel/ipu6/ipu6.c | 4 ++-- + .../media/pci/intel/ipu6/psys/ipu-psys.c | 3 +-- + 8 files changed, 34 insertions(+), 35 deletions(-) + +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-buttress.c +index 782deeb..53763fd 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-buttress.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-buttress.c +@@ -508,7 +508,7 @@ bool ipu6_buttress_auth_done(struct ipu6_device *isp) + + return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; + } 
+-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, "INTEL_IPU6"); + + int ipu6_buttress_reset_authentication(struct ipu6_device *isp) + { +@@ -600,7 +600,7 @@ out: + + return ret; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, "INTEL_IPU6"); + + void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt) +@@ -611,7 +611,7 @@ void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); + sg_free_table(sgt); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, "INTEL_IPU6"); + + int ipu6_buttress_authenticate(struct ipu6_device *isp) + { +@@ -776,7 +776,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) + + return -ETIMEDOUT; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); + #if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) + + /* +@@ -818,7 +818,7 @@ int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) + WARN_ON_ONCE(1); + return -1; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, "INTEL_IPU6"); + #endif + void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) + { +@@ -840,7 +840,7 @@ void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) + } + local_irq_restore(flags); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, "INTEL_IPU6"); + + u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) + { +@@ -855,7 +855,7 @@ u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) + */ + return div_u64(ns, isp->buttress.ref_clk); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6); 
++EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, "INTEL_IPU6"); + + void ipu6_buttress_restore(struct ipu6_device *isp) + { +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-cpd.c +index 21c1c12..fd81dc2 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-cpd.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-cpd.c +@@ -216,14 +216,14 @@ int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src) + + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, "INTEL_IPU6"); + + void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev) + { + ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir, + adev->pkg_dir_dma_addr, 0); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, "INTEL_IPU6"); + + static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd, + unsigned long cpd_size, +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-dma.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-dma.c +index b71f66b..aa3e1f7 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-dma.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-dma.c +@@ -130,7 +130,7 @@ void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle, + vaddr = info->vaddr + offset; + clflush_cache_range(vaddr, size); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, "INTEL_IPU6"); + + void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents) +@@ -141,13 +141,13 @@ void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + for_each_sg(sglist, sg, nents, i) + clflush_cache_range(page_to_virt(sg_page(sg)), sg->length); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, "INTEL_IPU6"); + + 
void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt) + { + ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, "INTEL_IPU6"); + + void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, +@@ -239,7 +239,7 @@ out_kfree: + + return NULL; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, "INTEL_IPU6"); + + void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +@@ -292,7 +292,7 @@ void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr, + + kfree(info); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, "INTEL_IPU6"); + + int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, +@@ -369,7 +369,7 @@ void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + mmu->tlb_invalidate(mmu); + __free_iova(&mmu->dmap->iovad, iova); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, "INTEL_IPU6"); + + int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +@@ -434,7 +434,7 @@ out_fail: + + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, "INTEL_IPU6"); + + int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +@@ -449,14 +449,14 @@ int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt, + + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, "INTEL_IPU6"); + + void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table 
*sgt, + enum dma_data_direction dir, unsigned long attrs) + { + ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6"); + + /* + * Create scatter-list for the already allocated DMA buffer +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +index 45738f3..7a9fcd9 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-fw-com.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-fw-com.c +@@ -262,7 +262,7 @@ void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, + + return ctx; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, "INTEL_IPU6"); + + int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) + { +@@ -297,7 +297,7 @@ int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) + + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, "INTEL_IPU6"); + + int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx) + { +@@ -315,7 +315,7 @@ int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx) + + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, "INTEL_IPU6"); + + int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force) + { +@@ -328,7 +328,7 @@ int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force) + kfree(ctx); + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, "INTEL_IPU6"); + + bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx) + { +@@ -340,7 +340,7 @@ bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx) + + return state == SYSCOM_STATE_READY; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, "INTEL_IPU6"); + + void 
*ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) + { +@@ -368,7 +368,7 @@ void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) + + return (void *)(q->host_address + index * q->token_size); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, "INTEL_IPU6"); + + void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) + { +@@ -381,7 +381,7 @@ void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) + + writel(wr, q_dmem + FW_COM_WR_REG); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, "INTEL_IPU6"); + + void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) + { +@@ -405,7 +405,7 @@ void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr) + + return (void *)(q->host_address + rd * q->token_size); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, "INTEL_IPU6"); + + void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) + { +@@ -418,4 +418,4 @@ void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr) + + writel(rd, q_dmem + FW_COM_RD_REG); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, "INTEL_IPU6"); +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c +index 412afd0..79780d3 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-isys.c +@@ -1757,6 +1757,6 @@ MODULE_AUTHOR("Yunliang Ding "); + MODULE_AUTHOR("Hongju Wang "); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Intel IPU6 input system driver"); +-MODULE_IMPORT_NS(INTEL_IPU6); +-MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); ++MODULE_IMPORT_NS("INTEL_IPU6"); ++MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); + MODULE_VERSION(DRIVER_VERSION_SUFFIX); +diff --git 
a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-mmu.c +index 57298ac..dc4b4e0 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-mmu.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6-mmu.c +@@ -518,7 +518,7 @@ int ipu6_mmu_hw_init(struct ipu6_mmu *mmu) + + return 0; + } +-EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, "INTEL_IPU6"); + + static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp) + { +@@ -583,7 +583,7 @@ void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu) + mmu->ready = false; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, "INTEL_IPU6"); + + static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp) + { +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c +index 53a61ac..ad5fd61 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c ++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/ipu6.c +@@ -302,7 +302,7 @@ void ipu6_configure_spc(struct ipu6_device *isp, + ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base, + pkg_dir, pkg_dir_dma_addr); + } +-EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6); ++EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, "INTEL_IPU6"); + + #define IPU6_ISYS_CSI2_NPORTS 4 + #define IPU6SE_ISYS_CSI2_NPORTS 4 +@@ -954,7 +954,7 @@ static struct pci_driver ipu6_pci_driver = { + + module_pci_driver(ipu6_pci_driver); + +-MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); ++MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); + MODULE_AUTHOR("Sakari Ailus "); + MODULE_AUTHOR("Tianshu Qiu "); + MODULE_AUTHOR("Bingbu Cao "); +diff --git a/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +index 5036ff8..66c1c28 100644 +--- a/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c 
++++ b/ipu6-drivers/drivers/media/pci/intel/ipu6/psys/ipu-psys.c +@@ -2745,9 +2745,8 @@ MODULE_DESCRIPTION("Intel IPU6 processing system driver"); + #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 13, 0) + MODULE_IMPORT_NS(DMA_BUF); + MODULE_IMPORT_NS(INTEL_IPU6); +-MODULE_VERSION(DRIVER_VERSION_SUFFIX); + #else + MODULE_IMPORT_NS("DMA_BUF"); + MODULE_IMPORT_NS("INTEL_IPU6"); +-MODULE_VERSION("DRIVER_VERSION_SUFFIX"); + #endif ++MODULE_VERSION(DRIVER_VERSION_SUFFIX); +-- +2.43.0 + diff --git a/patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch b/patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch deleted file mode 100644 index ac53fe6..0000000 --- a/patches/0053-upstream-Use-module-parameter-to-set-isys-freq.patch +++ /dev/null @@ -1,45 +0,0 @@ -From ed2570ea754b2a12c4d771be5bc352d0d17cf21e Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Fri, 16 Jan 2026 00:36:33 -0700 -Subject: [PATCH 53/76] upstream: Use module parameter to set isys freq - -Signed-off-by: Hongju Wang -Signed-off-by: Dongcheng Yan -Signed-off-by: zouxiaoh ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 1f4f20b..0311fe7 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -33,6 +33,10 @@ - #include "ipu6-platform-isys-csi2-reg.h" - #include "ipu6-platform-regs.h" - -+static unsigned int isys_freq_override; -+module_param(isys_freq_override, uint, 0660); -+MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); -+ - #define IPU6_PCI_BAR 0 - - struct ipu6_cell_program { -@@ -388,6 +392,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - pdata->base = base; - pdata->ipdata = ipdata; - -+ /* Override the isys freq */ -+ if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && -+ isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { 
-+ ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; -+ dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", -+ isys_freq_override); -+ } -+ - isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_ISYS_NAME); - if (IS_ERR(isys_adev)) { --- -2.43.0 - diff --git a/patches/0054-media-pci-Enable-ISYS-reset.patch b/patches/0054-media-pci-Enable-ISYS-reset.patch deleted file mode 100644 index 57cb8f4..0000000 --- a/patches/0054-media-pci-Enable-ISYS-reset.patch +++ /dev/null @@ -1,626 +0,0 @@ -From 3dff0732eab9d5fb91567c0f58b1807d4c9ba3b5 Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Fri, 16 Jan 2026 00:37:54 -0700 -Subject: [PATCH 54/76] media: pci: Enable ISYS reset - ---- - 6.18.0/drivers/media/pci/intel/ipu6/Kconfig | 9 + - .../media/pci/intel/ipu6/ipu6-fw-isys.h | 3 + - .../media/pci/intel/ipu6/ipu6-isys-queue.c | 251 ++++++++++++++++++ - .../media/pci/intel/ipu6/ipu6-isys-queue.h | 3 + - .../media/pci/intel/ipu6/ipu6-isys-video.c | 82 +++++- - .../media/pci/intel/ipu6/ipu6-isys-video.h | 8 + - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 10 + - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 5 + - 8 files changed, 370 insertions(+), 1 deletion(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Kconfig b/6.18.0/drivers/media/pci/intel/ipu6/Kconfig -index 1129e2b..fca2d78 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/Kconfig -+++ b/6.18.0/drivers/media/pci/intel/ipu6/Kconfig -@@ -16,3 +16,12 @@ config VIDEO_INTEL_IPU6 - - To compile this driver, say Y here! It contains 2 modules - - intel_ipu6 and intel_ipu6_isys. -+ -+config VIDEO_INTEL_IPU6_ISYS_RESET -+ depends on VIDEO_INTEL_IPU6 -+ bool "Enable isys reset" -+ default n -+ help -+ Support ISYS reset in IPU6. -+ -+ To compile this driver, say Y here! 
-diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -index b60f020..e6ee161 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -@@ -66,6 +66,9 @@ struct ipu6_isys; - #define IPU6_ISYS_OPEN_RETRY 2000 - #define IPU6_ISYS_CLOSE_RETRY 2000 - #define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000) -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#define IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET msecs_to_jiffies(200) -+#endif - - enum ipu6_fw_isys_resp_type { - IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -index aa2cf72..cfeea15 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -@@ -10,6 +10,9 @@ - #include - #include - #include -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#include -+#endif - - #include - #include -@@ -21,6 +24,9 @@ - #include "ipu6-fw-isys.h" - #include "ipu6-isys.h" - #include "ipu6-isys-video.h" -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#include "ipu6-cpd.h" -+#endif - - static int ipu6_isys_buf_init(struct vb2_buffer *vb) - { -@@ -218,6 +224,16 @@ static int buffer_list_get(struct ipu6_isys_stream *stream, - ib = list_last_entry(&aq->incoming, - struct ipu6_isys_buffer, head); - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); -+ -+ if (av->skipframe) { -+ atomic_set(&ib->skipframe_flag, 1); -+ av->skipframe--; -+ } else { -+ atomic_set(&ib->skipframe_flag, 0); -+ } -+#endif - dev_dbg(dev, "buffer: %s: buffer %u\n", - ipu6_isys_queue_to_video(aq)->vdev.name, - ipu6_isys_buffer_to_vb2_buffer(ib)->index); -@@ -372,6 +388,19 @@ static void buf_queue(struct vb2_buffer *vb) - list_add(&ib->head, &aq->incoming); - spin_unlock_irqrestore(&aq->lock, flags); - 
-+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_lock(&av->isys->reset_mutex); -+ while (av->isys->in_reset) { -+ mutex_unlock(&av->isys->reset_mutex); -+ dev_dbg(dev, "buffer: %s: wait for reset\n", av->vdev.name); -+ usleep_range(10000, 11000); -+ mutex_lock(&av->isys->reset_mutex); -+ } -+ mutex_unlock(&av->isys->reset_mutex); -+ /* ip may be cleared in ipu reset */ -+ stream = av->stream; -+ -+#endif - if (!media_pipe || !vb->vb2_queue->start_streaming_called) { - dev_dbg(dev, "media pipeline is not ready for %s\n", - av->vdev.name); -@@ -603,6 +632,9 @@ static int start_streaming(struct vb2_queue *q, unsigned int count) - - out: - mutex_unlock(&stream->mutex); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ av->start_streaming = 1; -+#endif - - return 0; - -@@ -624,12 +656,210 @@ out_return_buffers: - return ret; - } - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+static void reset_stop_streaming(struct ipu6_isys_video *av) -+{ -+ struct ipu6_isys_queue *aq = &av->aq; -+ struct ipu6_isys_stream *stream = av->stream; -+ -+ mutex_lock(&stream->mutex); -+ -+ ipu6_isys_update_stream_watermark(av, false); -+ -+ mutex_lock(&av->isys->stream_mutex); -+ if (stream->nr_streaming == stream->nr_queues && stream->streaming) -+ ipu6_isys_video_set_streaming(av, 0, NULL); -+ mutex_unlock(&av->isys->stream_mutex); -+ -+ stream->nr_streaming--; -+ list_del(&aq->node); -+ stream->streaming = 0; -+ mutex_unlock(&stream->mutex); -+ -+ ipu6_isys_stream_cleanup(av); -+ -+ return_buffers(aq, VB2_BUF_STATE_ERROR); -+ -+ ipu6_isys_fw_close(av->isys); -+} -+ -+static int reset_start_streaming(struct ipu6_isys_video *av) -+{ -+ struct ipu6_isys_queue *aq = &av->aq; -+ struct device *dev = &av->isys->adev->auxdev.dev; -+ unsigned long flags; -+ int ret; -+ -+ dev_dbg(dev, "%s: start streaming\n", av->vdev.name); -+ -+ spin_lock_irqsave(&aq->lock, flags); -+ while (!list_empty(&aq->active)) { -+ struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, -+ struct ipu6_isys_buffer, 
head); -+ -+ list_del(&ib->head); -+ list_add_tail(&ib->head, &aq->incoming); -+ } -+ spin_unlock_irqrestore(&aq->lock, flags); -+ -+ av->skipframe = 1; -+ ret = start_streaming(&aq->vbq, 0); -+ if (ret) { -+ dev_dbg(dev, -+ "%s: start streaming failed in reset ! set av->start_streaming = 0.\n", -+ av->vdev.name); -+ av->start_streaming = 0; -+ } else -+ av->start_streaming = 1; -+ -+ return ret; -+} -+ -+static int ipu_isys_reset(struct ipu6_isys_video *self_av, -+ struct ipu6_isys_stream *self_stream) -+{ -+ struct ipu6_isys *isys = self_av->isys; -+ struct ipu6_bus_device *adev = isys->adev; -+ struct ipu6_device *isp = adev->isp; -+ struct ipu6_isys_video *av = NULL; -+ struct ipu6_isys_stream *stream = NULL; -+ struct device *dev = &adev->auxdev.dev; -+ int ret, i, j; -+ int has_streaming = 0; -+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = -+ &isys->pdata->ipdata->csi2; -+ -+ -+ mutex_lock(&isys->reset_mutex); -+ if (isys->in_reset) { -+ mutex_unlock(&isys->reset_mutex); -+ return 0; -+ } -+ isys->in_reset = true; -+ -+ while (isys->in_stop_streaming) { -+ dev_dbg(dev, "isys reset: %s: wait for stop\n", -+ self_av->vdev.name); -+ mutex_unlock(&isys->reset_mutex); -+ usleep_range(10000, 11000); -+ mutex_lock(&isys->reset_mutex); -+ } -+ -+ mutex_unlock(&isys->reset_mutex); -+ -+ for (i = 0; i < csi2_pdata->nports; i++) { -+ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { -+ av = &isys->csi2[i].av[j]; -+ if (av == self_av) -+ continue; -+ -+ stream = av->stream; -+ if (!stream || stream == self_stream) -+ continue; -+ -+ if (!stream->streaming && !stream->nr_streaming) -+ continue; -+ -+ av->reset = true; -+ has_streaming = true; -+ reset_stop_streaming(av); -+ } -+ } -+ -+ if (!has_streaming) -+ goto end_of_reset; -+ -+ dev_dbg(dev, "ipu reset, power cycle\n"); -+ /* bus_pm_runtime_suspend() */ -+ /* isys_runtime_pm_suspend() */ -+ dev->bus->pm->runtime_suspend(dev); -+ -+ /* ipu_suspend */ -+ 
isp->pdev->driver->driver.pm->runtime_suspend(&isp->pdev->dev); -+ -+ /* ipu_runtime_resume */ -+ isp->pdev->driver->driver.pm->runtime_resume(&isp->pdev->dev); -+ -+ /* bus_pm_runtime_resume() */ -+ /* isys_runtime_pm_resume() */ -+ dev->bus->pm->runtime_resume(dev); -+ -+ ipu6_cleanup_fw_msg_bufs(isys); -+ -+ if (isys->fwcom) { -+ dev_err(dev, "Clearing old context\n"); -+ ipu6_fw_isys_cleanup(isys); -+ } -+ -+ ret = ipu6_fw_isys_init(av->isys, -+ isys->pdata->ipdata->num_parallel_streams); -+ if (ret < 0) -+ dev_err(dev, "ipu fw isys init failed\n"); -+ -+ dev_dbg(dev, "restart streams\n"); -+ -+ for (j = 0; j < csi2_pdata->nports; j++) { -+ for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { -+ av = &isys->csi2[j].av[i]; -+ if (!av->reset) -+ continue; -+ -+ av->reset = false; -+ reset_start_streaming(av); -+ } -+ } -+ -+end_of_reset: -+ mutex_lock(&isys->reset_mutex); -+ isys->in_reset = false; -+ mutex_unlock(&isys->reset_mutex); -+ dev_dbg(dev, "reset done\n"); -+ -+ return 0; -+} -+ -+#endif - static void stop_streaming(struct vb2_queue *q) - { - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); - struct ipu6_isys_stream *stream = av->stream; - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ struct device *dev = &av->isys->adev->auxdev.dev; -+ -+ dev_dbg(dev, "stop: %s: enter\n", av->vdev.name); -+ -+ mutex_lock(&av->isys->reset_mutex); -+ while (av->isys->in_reset || av->isys->in_stop_streaming) { -+ mutex_unlock(&av->isys->reset_mutex); -+ dev_dbg(dev, "stop: %s: wait for in_reset = %d\n", -+ av->vdev.name, av->isys->in_reset); -+ dev_dbg(dev, "stop: %s: wait for in_stop = %d\n", -+ av->vdev.name, av->isys->in_stop_streaming); -+ usleep_range(10000, 11000); -+ mutex_lock(&av->isys->reset_mutex); -+ } -+ -+ if (!av->start_streaming) { -+ mutex_unlock(&av->isys->reset_mutex); -+ return; -+ } -+ -+ av->isys->in_stop_streaming = true; -+ mutex_unlock(&av->isys->reset_mutex); -+ -+ stream = av->stream; 
-+ if (!stream) { -+ dev_err(dev, "stop: %s: ip cleard!\n", av->vdev.name); -+ return_buffers(aq, VB2_BUF_STATE_ERROR); -+ mutex_lock(&av->isys->reset_mutex); -+ av->isys->in_stop_streaming = false; -+ mutex_unlock(&av->isys->reset_mutex); -+ return; -+ } -+#endif -+ - mutex_lock(&stream->mutex); - - ipu6_isys_update_stream_watermark(av, false); -@@ -649,6 +879,22 @@ static void stop_streaming(struct vb2_queue *q) - return_buffers(aq, VB2_BUF_STATE_ERROR); - - ipu6_isys_fw_close(av->isys); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ -+ av->start_streaming = 0; -+ mutex_lock(&av->isys->reset_mutex); -+ av->isys->in_stop_streaming = false; -+ mutex_unlock(&av->isys->reset_mutex); -+ -+ if (av->isys->need_reset) { -+ if (!stream->nr_streaming) -+ ipu_isys_reset(av, stream); -+ else -+ av->isys->need_reset = 0; -+ } -+ -+ dev_dbg(dev, "stop: %s: exit\n", av->vdev.name); -+#endif - } - - static unsigned int -@@ -729,6 +975,11 @@ static void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib) - * to the userspace when it is de-queued - */ - atomic_set(&ib->str2mmio_flag, 0); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ } else if (atomic_read(&ib->skipframe_flag)) { -+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); -+ atomic_set(&ib->skipframe_flag, 0); -+#endif - } else { - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - } -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h -index 844dfda..183bb51 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h -@@ -29,6 +29,9 @@ struct ipu6_isys_queue { - struct ipu6_isys_buffer { - struct list_head head; - atomic_t str2mmio_flag; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ atomic_t skipframe_flag; -+#endif - }; - - struct ipu6_isys_video_buffer { -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index 
f3f3bc0..a864614 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -17,6 +17,9 @@ - #include - #include - #include -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+#include -+#endif - - #include - #include -@@ -112,6 +115,26 @@ static int video_open(struct file *file) - return v4l2_fh_open(file); - } - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+static int video_release(struct file *file) -+{ -+ struct ipu6_isys_video *av = video_drvdata(file); -+ -+ dev_dbg(&av->isys->adev->auxdev.dev, -+ "release: %s: enter\n", av->vdev.name); -+ mutex_lock(&av->isys->reset_mutex); -+ while (av->isys->in_reset) { -+ mutex_unlock(&av->isys->reset_mutex); -+ dev_dbg(&av->isys->adev->auxdev.dev, -+ "release: %s: wait for reset\n", av->vdev.name); -+ usleep_range(10000, 11000); -+ mutex_lock(&av->isys->reset_mutex); -+ } -+ mutex_unlock(&av->isys->reset_mutex); -+ return vb2_fop_release(file); -+} -+ -+#endif - const struct ipu6_isys_pixelformat * - ipu6_isys_get_isys_format(u32 pixelformat, u32 type) - { -@@ -595,7 +618,11 @@ static int start_stream_firmware(struct ipu6_isys_video *av, - } - - reinit_completion(&stream->stream_start_completion); -- -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START; -+ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, -+ send_type); -+#else - if (bl) { - send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; - ipu6_fw_isys_dump_frame_buff_set(dev, buf, -@@ -608,6 +635,7 @@ static int start_stream_firmware(struct ipu6_isys_video *av, - ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle, - send_type); - } -+#endif - - if (ret < 0) { - dev_err(dev, "can't start streaming (%d)\n", ret); -@@ -626,7 +654,25 @@ static int start_stream_firmware(struct ipu6_isys_video *av, - ret = -EIO; - goto out_stream_close; - } -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ if (bl) { -+ dev_dbg(dev, "start stream: 
capture\n"); -+ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; -+ ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); -+ ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle, -+ buf, msg->dma_addr, -+ sizeof(*buf), -+ send_type); -+ -+ if (ret < 0) { -+ dev_err(dev, "can't queue buffers (%d)\n", ret); -+ goto out_stream_close; -+ } -+ } -+ -+#else - dev_dbg(dev, "start stream: complete\n"); -+#endif - - return 0; - -@@ -673,7 +719,11 @@ static void stop_streaming_firmware(struct ipu6_isys_video *av) - } - - tout = wait_for_completion_timeout(&stream->stream_stop_completion, -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); -+#else - IPU6_FW_CALL_TIMEOUT_JIFFIES); -+#endif - if (!tout) - dev_warn(dev, "stream stop time out\n"); - else if (stream->error) -@@ -698,7 +748,11 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) - } - - tout = wait_for_completion_timeout(&stream->stream_close_completion, -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ IPU6_LIB_CALL_TIMEOUT_JIFFIES_RESET); -+#else - IPU6_FW_CALL_TIMEOUT_JIFFIES); -+#endif - if (!tout) - dev_warn(dev, "stream close time out\n"); - else if (stream->error) -@@ -706,6 +760,12 @@ static void close_streaming_firmware(struct ipu6_isys_video *av) - else - dev_dbg(dev, "close stream: complete\n"); - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ stream->last_sequence = atomic_read(&stream->sequence); -+ dev_dbg(dev, "IPU_ISYS_RESET: ip->last_sequence = %d\n", -+ stream->last_sequence); -+ -+#endif - put_stream_opened(av); - } - -@@ -720,7 +780,18 @@ int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av, - return -EINVAL; - - stream->nr_queues = nr_queues; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ if (av->isys->in_reset) { -+ atomic_set(&stream->sequence, stream->last_sequence); -+ dev_dbg(&av->isys->adev->auxdev.dev, -+ "atomic_set : stream->last_sequence = %d\n", -+ stream->last_sequence); -+ } else { -+ 
atomic_set(&stream->sequence, 0); -+ } -+# else - atomic_set(&stream->sequence, 0); -+#endif - - stream->seq_index = 0; - memset(stream->seq, 0, sizeof(stream->seq)); -@@ -1088,7 +1159,11 @@ static const struct v4l2_file_operations isys_fops = { - .unlocked_ioctl = video_ioctl2, - .mmap = vb2_fop_mmap, - .open = video_open, -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ .release = video_release, -+#else - .release = vb2_fop_release, -+#endif - }; - - int ipu6_isys_fw_open(struct ipu6_isys *isys) -@@ -1305,6 +1380,11 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) - av->pix_fmt = format.fmt.pix; - __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta); - av->meta_fmt = format_meta.fmt.meta; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ av->reset = false; -+ av->skipframe = 0; -+ av->start_streaming = 0; -+#endif - - video_set_drvdata(&av->vdev, av); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h -index 1dd36f2..3d234ae 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.h -@@ -45,6 +45,9 @@ struct ipu6_isys_stream { - struct mutex mutex; - struct media_entity *source_entity; - atomic_t sequence; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ int last_sequence; -+#endif - unsigned int seq_index; - struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF]; - int stream_source; -@@ -95,6 +98,11 @@ struct ipu6_isys_video { - u32 source_stream; - u8 vc; - u8 dt; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ unsigned int reset; -+ unsigned int skipframe; -+ unsigned int start_streaming; -+#endif - }; - - #define ipu6_isys_queue_to_video(__aq) \ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index fc0ec0a..3a7ffca 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1084,6 +1084,10 @@ 
static int isys_probe(struct auxiliary_device *auxdev, - - mutex_init(&isys->mutex); - mutex_init(&isys->stream_mutex); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_init(&isys->reset_mutex); -+ isys->in_reset = false; -+#endif - - spin_lock_init(&isys->listlock); - INIT_LIST_HEAD(&isys->framebuflist); -@@ -1125,6 +1129,9 @@ static int isys_probe(struct auxiliary_device *auxdev, - if (ret) - goto free_fw_msg_bufs; - -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_destroy(&isys->reset_mutex); -+#endif - ipu6_mmu_hw_cleanup(adev->mmu); - - return 0; -@@ -1179,6 +1186,9 @@ static void isys_remove(struct auxiliary_device *auxdev) - isys_iwake_watermark_cleanup(isys); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ mutex_destroy(&isys->reset_mutex); -+#endif - } - - struct fwmsg { -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 0e2c8b7..d55d804 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -163,6 +163,11 @@ struct ipu6_isys { - struct list_head framebuflist_fw; - struct v4l2_async_notifier notifier; - struct isys_iwake_watermark iwake_watermark; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ struct mutex reset_mutex; -+ bool in_reset; -+ bool in_stop_streaming; -+#endif - }; - - struct isys_fw_msgs { --- -2.43.0 - diff --git a/patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch b/patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch deleted file mode 100644 index b9b0348..0000000 --- a/patches/0055-media-intel-ipu6-use-vc1-dma-for-MTL-and-ARL.patch +++ /dev/null @@ -1,41 +0,0 @@ -From e3f271c417e5b2dedf209f3f3c4f8e0329afc849 Mon Sep 17 00:00:00 2001 -From: Chen Meng J -Date: Fri, 16 Jan 2026 00:39:01 -0700 -Subject: [PATCH 55/76] media: intel-ipu6: use vc1 dma for MTL and ARL - ---- - 
6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h | 4 ++-- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -index e6ee161..545180c 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h -@@ -54,8 +54,8 @@ struct ipu6_isys; - /* Max number of planes for frame formats supported by the FW */ - #define IPU6_PIN_PLANES_MAX 4 - --#define IPU6_FW_ISYS_SENSOR_TYPE_START 14 --#define IPU6_FW_ISYS_SENSOR_TYPE_END 19 -+#define IPU6_FW_ISYS_SENSOR_TYPE_START 1 -+#define IPU6_FW_ISYS_SENSOR_TYPE_END 10 - #define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6 - #define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11 - /* -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index a864614..148c790 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -529,7 +529,7 @@ static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av, - output_pin->csi_be_soc_pixel_remapping = - CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; - -- output_pin->snoopable = true; -+ output_pin->snoopable = false; - output_pin->error_handling_enable = false; - output_pin->sensor_type = isys->sensor_type++; - if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end) --- -2.43.0 - diff --git a/patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch b/patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch deleted file mode 100644 index 8b66f0e..0000000 --- a/patches/0056-media-ipu-Dma-sync-at-buffer_prepare-callback-as-DMA.patch +++ /dev/null @@ -1,42 +0,0 @@ -From b7fd94a334f08f78630b3a0c2f809cd6eaf028b7 Mon Sep 17 00:00:00 2001 -From: linya14x -Date: Fri, 16 Jan 2026 00:40:06 -0700 -Subject: [PATCH 
56/76] media: ipu: Dma sync at buffer_prepare callback as DMA - is non-coherent - -Test Platform: -MTL ARL LT6911UXE - -Signed-off-by: linya14x -Signed-off-by: Bingbu Cao ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -index cfeea15..aaad622 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -@@ -84,7 +84,9 @@ static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, - static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) - { - struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); -+ struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue); - struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq); -+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); - struct device *dev = &av->isys->adev->auxdev.dev; - u32 bytesperline = ipu6_isys_get_bytes_per_line(av); - u32 height = ipu6_isys_get_frame_height(av); -@@ -98,6 +100,9 @@ static int ipu6_isys_buf_prepare(struct vb2_buffer *vb) - - vb2_set_plane_payload(vb, 0, bytesperline * height); - -+ /* assume IPU is not DMA coherent */ -+ ipu6_dma_sync_sgtable(isys->adev, sg); -+ - return 0; - } - --- -2.43.0 - diff --git a/patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch b/patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch deleted file mode 100644 index dc147b1..0000000 --- a/patches/0057-media-intel-ipu6-Support-IPU6-ISYS-FW-trace-dump-for.patch +++ /dev/null @@ -1,1432 +0,0 @@ -From 6f9b0a17e32276defa3e9099f9fee25da6532695 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Fri, 16 Jan 2026 00:42:06 -0700 -Subject: [PATCH 57/76] media: intel-ipu6: Support IPU6 ISYS FW trace dump for - upstream driver - -[media]pci]Support IPU6 ISYS FW trace dump for upstream drive -Signed-off-by: 
hepengpx ---- - 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 3 +- - .../drivers/media/pci/intel/ipu6/ipu6-bus.h | 2 + - .../media/pci/intel/ipu6/ipu6-fw-com.c | 12 +- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 58 +- - .../media/pci/intel/ipu6/ipu6-platform-regs.h | 40 + - .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 896 ++++++++++++++++++ - .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 147 +++ - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 48 + - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 5 + - 9 files changed, 1206 insertions(+), 5 deletions(-) - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -index a821b0a..e1e123b 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -@@ -6,7 +6,8 @@ intel-ipu6-y := ipu6.o \ - ipu6-mmu.o \ - ipu6-buttress.o \ - ipu6-cpd.o \ -- ipu6-fw-com.o -+ ipu6-fw-com.o \ -+ ipu6-trace.o - - obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -index a08c546..16be932 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -@@ -16,6 +16,7 @@ struct firmware; - struct pci_dev; - - struct ipu6_buttress_ctrl; -+struct ipu_subsystem_trace_config; - - struct ipu6_bus_device { - struct auxiliary_device auxdev; -@@ -25,6 +26,7 @@ struct ipu6_bus_device { - void *pdata; - struct ipu6_mmu *mmu; - struct ipu6_device *isp; -+ struct ipu_subsystem_trace_config *trace_cfg; - const struct ipu6_buttress_ctrl *ctrl; - const struct firmware *fw; - struct sg_table fw_sgt; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c -index 40d8ce1..9f530e0 100644 ---- 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-fw-com.c -@@ -14,6 +14,7 @@ - #include "ipu6-bus.h" - #include "ipu6-dma.h" - #include "ipu6-fw-com.h" -+#include "ipu6-trace.h" - - /* - * FWCOM layer is a shared resource between FW and driver. It consist -@@ -265,8 +266,15 @@ EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, "INTEL_IPU6"); - - int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx) - { -- /* write magic pattern to disable the tunit trace */ -- writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); -+ dma_addr_t trace_buff = TUNIT_MAGIC_PATTERN; -+ -+ /* -+ * Write trace buff start addr to tunit cfg reg. -+ * This feature is used to enable tunit trace in secure mode. -+ */ -+ ipu_trace_buffer_dma_handle(&ctx->adev->auxdev.dev, &trace_buff); -+ writel(trace_buff, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); -+ - /* Check if SP is in valid state */ - if (!ctx->cell_ready(ctx->adev)) - return -EIO; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 3a7ffca..1ab8dd8 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -41,6 +41,7 @@ - #include "ipu6-platform-buttress-regs.h" - #include "ipu6-platform-isys-csi2-reg.h" - #include "ipu6-platform-regs.h" -+#include "ipu6-trace.h" - - #define IPU6_BUTTRESS_FABIC_CONTROL 0x68 - #define GDA_ENABLE_IWAKE_INDEX 2 -@@ -348,9 +349,11 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) - u32 status_sw, status_csi; - u32 ctrl0_status, ctrl0_clear; - -+ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); - spin_lock(&isys->power_lock); - if (!isys->power) { - spin_unlock(&isys->power_lock); -+ dev_dbg(&adev->auxdev.dev, "%s exit, no power", __func__); - return IRQ_NONE; - } - -@@ -399,6 +402,7 @@ irqreturn_t isys_isr(struct ipu6_bus_device *adev) - - spin_unlock(&isys->power_lock); - -+ dev_dbg(&adev->auxdev.dev, "%s exit", 
__func__); - return IRQ_HANDLED; - } - -@@ -1040,6 +1044,46 @@ void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data) - spin_unlock_irqrestore(&isys->listlock, flags); - } - -+struct ipu_trace_block isys_trace_blocks[] = { -+ { -+ .offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE, -+ .type = IPU_TRACE_BLOCK_TUN, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_SP_EVQ_BASE, -+ .type = IPU_TRACE_BLOCK_TM, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_SP_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_ISL_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_MMU_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ /* Note! this covers all 8 blocks */ -+ .offset = IPU_TRACE_REG_CSI2_TM_BASE(0), -+ .type = IPU_TRACE_CSI2, -+ }, -+ { -+ /* Note! this covers all 11 blocks */ -+ .offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0), -+ .type = IPU_TRACE_SIG2CIOS, -+ }, -+ { -+ .offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, -+ .type = IPU_TRACE_TIMER_RST, -+ }, -+ { -+ .type = IPU_TRACE_BLOCK_END, -+ } -+}; -+ - static int isys_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *auxdev_id) - { -@@ -1110,6 +1154,8 @@ static int isys_probe(struct auxiliary_device *auxdev, - goto remove_shared_buffer; - } - -+ ipu_trace_init(adev->isp, isys->pdata->base, &auxdev->dev, -+ isys_trace_blocks); - cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); - - ret = alloc_fw_msg_bufs(isys, 20); -@@ -1139,6 +1185,7 @@ static int isys_probe(struct auxiliary_device *auxdev, - free_fw_msg_bufs: - free_fw_msg_bufs(isys); - out_remove_pkg_dir_shared_buffer: -+ ipu_trace_uninit(&auxdev->dev); - cpu_latency_qos_remove_request(&isys->pm_qos); - if (!isp->secure_mode) - ipu6_cpd_free_pkg_dir(adev); -@@ -1184,6 +1231,7 @@ static void isys_remove(struct auxiliary_device *auxdev) - mutex_destroy(&isys->streams[i].mutex); - - isys_iwake_watermark_cleanup(isys); -+ ipu_trace_uninit(&auxdev->dev); - 
mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); - #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -@@ -1236,12 +1284,17 @@ static int isys_isr_one(struct ipu6_bus_device *adev) - u32 index; - u64 ts; - -- if (!isys->fwcom) -+ dev_dbg(&adev->auxdev.dev, "%s enter", __func__); -+ if (!isys->fwcom) { -+ dev_dbg(&adev->auxdev.dev, "%s exit, fwcom is null", __func__); - return 1; -+ } - - resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); -- if (!resp) -+ if (!resp) { -+ dev_dbg(&adev->auxdev.dev, "%s exit, resp is null", __func__); - return 1; -+ } - - ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; - -@@ -1349,6 +1402,7 @@ static int isys_isr_one(struct ipu6_bus_device *adev) - ipu6_isys_put_stream(stream); - leave: - ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES); -+ dev_dbg(&adev->auxdev.dev, "%s exit", __func__); - return 0; - } - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h -index b883385..a1744d2 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h -@@ -176,4 +176,44 @@ enum nci_ab_access_mode { - #define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n) - #define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) - -+/* Trace unit related register definitions */ -+#define TRACE_REG_MAX_ISYS_OFFSET 0xfffff -+#define TRACE_REG_MAX_PSYS_OFFSET 0xfffff -+#define IPU_ISYS_OFFSET IPU6_ISYS_DMEM_OFFSET -+#define IPU_PSYS_OFFSET IPU6_PSYS_DMEM_OFFSET -+/* ISYS trace unit registers */ -+/* Trace unit base offset */ -+#define IPU_TRACE_REG_IS_TRACE_UNIT_BASE 0x27d000 -+/* Trace monitors */ -+#define IPU_TRACE_REG_IS_SP_EVQ_BASE 0x211000 -+/* GPC blocks */ -+#define IPU_TRACE_REG_IS_SP_GPC_BASE 0x210800 -+#define IPU_TRACE_REG_IS_ISL_GPC_BASE 0x2b0a00 -+#define IPU_TRACE_REG_IS_MMU_GPC_BASE 0x2e0f00 -+/* each CSI2 port has a dedicated trace monitor, index 0..7 */ 
-+#define IPU_TRACE_REG_CSI2_TM_BASE(port) (0x220400 + 0x1000 * (port)) -+ -+/* Trace timers */ -+#define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x27c410 -+#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) -+ -+/* SIG2CIO */ -+#define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port) \ -+ (0x220e00 + (port) * 0x1000) -+ -+/* PSYS trace unit registers */ -+/* Trace unit base offset */ -+#define IPU_TRACE_REG_PS_TRACE_UNIT_BASE 0x1b4000 -+/* Trace monitors */ -+#define IPU_TRACE_REG_PS_SPC_EVQ_BASE 0x119000 -+#define IPU_TRACE_REG_PS_SPP0_EVQ_BASE 0x139000 -+ -+/* GPC blocks */ -+#define IPU_TRACE_REG_PS_SPC_GPC_BASE 0x118800 -+#define IPU_TRACE_REG_PS_SPP0_GPC_BASE 0x138800 -+#define IPU_TRACE_REG_PS_MMU_GPC_BASE 0x1b1b00 -+ -+/* Trace timers */ -+#define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x1aa714 -+ - #endif /* IPU6_PLATFORM_REGS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -new file mode 100644 -index 0000000..adcee20 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -@@ -0,0 +1,896 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2014 - 2025 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ipu6.h" -+#include "ipu6-bus.h" -+#include "ipu6-platform-regs.h" -+#include "ipu6-dma.h" -+#include "ipu6-trace.h" -+ -+/* -+ * enabling ipu trace need a 96 MB buffer. -+ */ -+static bool ipu_trace_enable; -+module_param(ipu_trace_enable, bool, 0660); -+MODULE_PARM_DESC(ipu_trace_enable, "IPU trace enable"); -+ -+struct trace_register_range { -+ u32 start; -+ u32 end; -+}; -+ -+#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 96) -+#define TRACE_MESSAGE_SIZE 16 -+/* -+ * It looks that the trace unit sometimes writes outside the given buffer. -+ * To avoid memory corruption one extra page is reserved at the end -+ * of the buffer. 
Read also the extra area since it may contain valid data. -+ */ -+#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE -+#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD -+#define MAX_TRACE_REGISTERS 200 -+#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) -+#define TRACE_CONF_DATA_MAX_LEN (1024 * 4) -+#define WPT_TRACE_CONF_DATA_MAX_LEN (1024 * 64) -+ -+struct config_value { -+ u32 reg; -+ u32 value; -+}; -+ -+struct ipu_trace_buffer { -+ dma_addr_t dma_handle; -+ void *memory_buffer; -+}; -+ -+struct ipu_subsystem_wptrace_config { -+ bool open; -+ char *conf_dump_buffer; -+ int size_conf_dump; -+ unsigned int fill_level; -+ struct config_value config[MAX_TRACE_REGISTERS]; -+}; -+ -+struct ipu_subsystem_trace_config { -+ u32 offset; -+ void __iomem *base; -+ struct ipu_trace_buffer memory; /* ring buffer */ -+ struct device *dev; -+ struct ipu_trace_block *blocks; -+ unsigned int fill_level; /* Nbr of regs in config table below */ -+ bool running; -+ /* Cached register values */ -+ struct config_value config[MAX_TRACE_REGISTERS]; -+ /* watchpoint trace info */ -+ struct ipu_subsystem_wptrace_config wpt; -+}; -+ -+struct ipu_trace { -+ struct mutex lock; /* Protect ipu trace operations */ -+ bool open; -+ char *conf_dump_buffer; -+ int size_conf_dump; -+ -+ struct ipu_subsystem_trace_config isys; -+ struct ipu_subsystem_trace_config psys; -+}; -+ -+static void __ipu_trace_restore(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu6_device *isp = adev->isp; -+ struct ipu_trace *trace = isp->trace; -+ struct config_value *config; -+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; -+ struct ipu_trace_block *blocks; -+ u32 mapped_trace_buffer; -+ void __iomem *addr = NULL; -+ int i; -+ -+ if (trace->open) { -+ dev_info(dev, "Trace control file open. 
Skipping update\n"); -+ return; -+ } -+ -+ if (!sys) -+ return; -+ -+ /* leave if no trace configuration for this subsystem */ -+ if (sys->fill_level == 0) -+ return; -+ -+ /* Find trace unit base address */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_TUN) { -+ addr = sys->base + blocks->offset; -+ break; -+ } -+ blocks++; -+ } -+ if (!addr) -+ return; -+ -+ if (!sys->memory.memory_buffer) { -+ sys->memory.memory_buffer = -+ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, -+ &sys->memory.dma_handle, -+ GFP_KERNEL, 0); -+ } -+ -+ if (!sys->memory.memory_buffer) { -+ dev_err(dev, "No memory for tracing. Trace unit disabled\n"); -+ return; -+ } -+ -+ config = sys->config; -+ mapped_trace_buffer = sys->memory.dma_handle; -+ -+ /* ring buffer base */ -+ writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); -+ -+ /* ring buffer end */ -+ writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - -+ TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); -+ -+ /* Infobits for ddr trace */ -+ writel(IPU6_INFO_REQUEST_DESTINATION_PRIMARY, -+ addr + TRACE_REG_TUN_DDR_INFO_VAL); -+ -+ /* Find trace timer reset address */ -+ addr = NULL; -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_TIMER_RST) { -+ addr = sys->base + blocks->offset; -+ break; -+ } -+ blocks++; -+ } -+ if (!addr) { -+ dev_err(dev, "No trace reset addr\n"); -+ return; -+ } -+ -+ /* Remove reset from trace timers */ -+ writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); -+ -+ /* Register config received from userspace */ -+ for (i = 0; i < sys->fill_level; i++) { -+ dev_dbg(dev, -+ "Trace restore: reg 0x%08x, value 0x%08x\n", -+ config[i].reg, config[i].value); -+ writel(config[i].value, isp->base + config[i].reg); -+ } -+ -+ /* Register wpt config received from userspace, and only psys has wpt */ -+ config = sys->wpt.config; -+ for (i = 0; i < 
sys->wpt.fill_level; i++) { -+ dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n", -+ config[i].reg, config[i].value); -+ writel(config[i].value, isp->base + config[i].reg); -+ } -+ sys->running = true; -+} -+ -+void ipu_trace_restore(struct device *dev) -+{ -+ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; -+ -+ if (!trace) -+ return; -+ -+ mutex_lock(&trace->lock); -+ __ipu_trace_restore(dev); -+ mutex_unlock(&trace->lock); -+} -+EXPORT_SYMBOL_GPL(ipu_trace_restore); -+ -+static void __ipu_trace_stop(struct device *dev) -+{ -+ struct ipu_subsystem_trace_config *sys = -+ to_ipu6_bus_device(dev)->trace_cfg; -+ struct ipu_trace_block *blocks; -+ -+ if (!sys) -+ return; -+ -+ if (!sys->running) -+ return; -+ sys->running = false; -+ -+ /* Turn off all the gpc blocks */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_GPC) { -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_GPC_OVERALL_ENABLE); -+ } -+ blocks++; -+ } -+ -+ /* Turn off all the trace monitors */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_TM) { -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TM_TRACE_ENABLE_NPK); -+ -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TM_TRACE_ENABLE_DDR); -+ } -+ blocks++; -+ } -+ -+ /* Turn off trace units */ -+ blocks = sys->blocks; -+ while (blocks->type != IPU_TRACE_BLOCK_END) { -+ if (blocks->type == IPU_TRACE_BLOCK_TUN) { -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TUN_DDR_ENABLE); -+ writel(0, sys->base + blocks->offset + -+ TRACE_REG_TUN_NPK_ENABLE); -+ } -+ blocks++; -+ } -+} -+ -+void ipu_trace_stop(struct device *dev) -+{ -+ struct ipu_trace *trace = to_ipu6_bus_device(dev)->isp->trace; -+ -+ if (!trace) -+ return; -+ -+ mutex_lock(&trace->lock); -+ __ipu_trace_stop(dev); -+ mutex_unlock(&trace->lock); -+} -+EXPORT_SYMBOL_GPL(ipu_trace_stop); -+ -+static int 
update_register_cache(struct ipu6_device *isp, u32 reg, u32 value) -+{ -+ struct ipu_trace *dctrl = isp->trace; -+ struct ipu_subsystem_trace_config *sys; -+ int rval = -EINVAL; -+ -+ if (dctrl->isys.offset == dctrl->psys.offset) { -+ /* For the IPU with uniform address space */ -+ if (reg >= IPU_ISYS_OFFSET && -+ reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) -+ sys = &dctrl->isys; -+ else if (reg >= IPU_PSYS_OFFSET && -+ reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) -+ sys = &dctrl->psys; -+ else -+ goto error; -+ } else { -+ if (dctrl->isys.offset && -+ reg >= dctrl->isys.offset && -+ reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) -+ sys = &dctrl->isys; -+ else if (dctrl->psys.offset && -+ reg >= dctrl->psys.offset && -+ reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) -+ sys = &dctrl->psys; -+ else -+ goto error; -+ } -+ -+ if (sys->fill_level < MAX_TRACE_REGISTERS) { -+ dev_dbg(sys->dev, -+ "Trace reg addr 0x%08x value 0x%08x\n", reg, value); -+ sys->config[sys->fill_level].reg = reg; -+ sys->config[sys->fill_level].value = value; -+ sys->fill_level++; -+ } else { -+ rval = -ENOMEM; -+ goto error; -+ } -+ return 0; -+error: -+ dev_info(&isp->pdev->dev, -+ "Trace register address 0x%08x ignored as invalid register\n", -+ reg); -+ return rval; -+} -+ -+static void traceconf_dump(struct ipu6_device *isp) -+{ -+ struct ipu_subsystem_trace_config *sys[2] = { -+ &isp->trace->isys, -+ &isp->trace->psys -+ }; -+ int i, j, rem_size; -+ char *out; -+ -+ isp->trace->size_conf_dump = 0; -+ out = isp->trace->conf_dump_buffer; -+ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; -+ -+ for (j = 0; j < ARRAY_SIZE(sys); j++) { -+ for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { -+ int bytes_print; -+ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", -+ sys[j]->config[i].reg, -+ sys[j]->config[i].value); -+ -+ bytes_print = min(n, rem_size - 1); -+ rem_size -= bytes_print; -+ out += bytes_print; -+ } -+ } -+ isp->trace->size_conf_dump = out - 
isp->trace->conf_dump_buffer; -+} -+ -+static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) -+{ -+ if (!sys || !sys->memory.memory_buffer) -+ return; -+ -+ memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_OVERREAD); -+ -+ dma_sync_single_for_device(sys->dev, -+ sys->memory.dma_handle, -+ MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); -+} -+ -+static int traceconf_open(struct inode *inode, struct file *file) -+{ -+ int ret; -+ struct ipu6_device *isp; -+ -+ if (!inode->i_private) -+ return -EACCES; -+ -+ isp = inode->i_private; -+ -+ ret = mutex_trylock(&isp->trace->lock); -+ if (!ret) -+ return -EBUSY; -+ -+ if (isp->trace->open) { -+ mutex_unlock(&isp->trace->lock); -+ return -EBUSY; -+ } -+ -+ file->private_data = isp; -+ isp->trace->open = 1; -+ if (file->f_mode & FMODE_WRITE) { -+ /* TBD: Allocate temp buffer for processing. -+ * Push validated buffer to active config -+ */ -+ -+ /* Forget old config if opened for write */ -+ isp->trace->isys.fill_level = 0; -+ isp->trace->psys.fill_level = 0; -+ isp->trace->psys.wpt.fill_level = 0; -+ } -+ -+ if (file->f_mode & FMODE_READ) { -+ isp->trace->conf_dump_buffer = -+ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); -+ if (!isp->trace->conf_dump_buffer) { -+ isp->trace->open = 0; -+ mutex_unlock(&isp->trace->lock); -+ return -ENOMEM; -+ } -+ traceconf_dump(isp); -+ } -+ mutex_unlock(&isp->trace->lock); -+ return 0; -+} -+ -+static ssize_t traceconf_read(struct file *file, char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu6_device *isp = file->private_data; -+ -+ return simple_read_from_buffer(buf, len, ppos, -+ isp->trace->conf_dump_buffer, -+ isp->trace->size_conf_dump); -+} -+ -+static ssize_t traceconf_write(struct file *file, const char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ int i; -+ struct ipu6_device *isp = file->private_data; -+ ssize_t bytes = 0; -+ char *ipu_trace_buffer = NULL; -+ size_t buffer_size = 0; -+ u32 
ipu_trace_number = 0; -+ struct config_value *cfg_buffer = NULL; -+ -+ if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) || -+ (len < sizeof(ipu_trace_number))) { -+ dev_info(&isp->pdev->dev, -+ "length is error, len:%ld, loff:%lld\n", -+ len, *ppos); -+ return -EINVAL; -+ } -+ -+ ipu_trace_buffer = vzalloc(len); -+ if (!ipu_trace_buffer) -+ return -ENOMEM; -+ -+ bytes = copy_from_user(ipu_trace_buffer, buf, len); -+ if (bytes != 0) { -+ vfree(ipu_trace_buffer); -+ return -EFAULT; -+ } -+ -+ memcpy(&ipu_trace_number, ipu_trace_buffer, sizeof(u32)); -+ buffer_size = ipu_trace_number * sizeof(struct config_value); -+ if ((buffer_size + sizeof(ipu_trace_number)) != len) { -+ dev_info(&isp->pdev->dev, -+ "File size is not right, len:%ld, buffer_size:%zu\n", -+ len, buffer_size); -+ vfree(ipu_trace_buffer); -+ return -EFAULT; -+ } -+ -+ mutex_lock(&isp->trace->lock); -+ cfg_buffer = (struct config_value *)(ipu_trace_buffer + sizeof(u32)); -+ for (i = 0; i < ipu_trace_number; i++) { -+ update_register_cache(isp, cfg_buffer[i].reg, -+ cfg_buffer[i].value); -+ } -+ mutex_unlock(&isp->trace->lock); -+ vfree(ipu_trace_buffer); -+ -+ return len; -+} -+ -+static int traceconf_release(struct inode *inode, struct file *file) -+{ -+ struct ipu6_device *isp = file->private_data; -+ struct device *psys_dev = isp->psys ? &isp->psys->auxdev.dev : NULL; -+ struct device *isys_dev = isp->isys ? &isp->isys->auxdev.dev : NULL; -+ int isys_pm_rval = -EINVAL; -+ int psys_pm_rval = -EINVAL; -+ -+ /* -+ * Turn devices on outside trace->lock mutex. PM transition may -+ * cause call to function which tries to take the same lock. -+ * Also do this before trace->open is set back to 0 to avoid -+ * double restore (one here and one in pm transition). We can't -+ * rely purely on the restore done by pm call backs since trace -+ * configuration can occur in any phase compared to other activity. 
-+ */ -+ -+ if (file->f_mode & FMODE_WRITE) { -+ if (isys_dev) -+ isys_pm_rval = pm_runtime_resume_and_get(isys_dev); -+ if (isys_pm_rval >= 0 && psys_dev) -+ psys_pm_rval = pm_runtime_resume_and_get(psys_dev); -+ } -+ -+ mutex_lock(&isp->trace->lock); -+ isp->trace->open = 0; -+ vfree(isp->trace->conf_dump_buffer); -+ isp->trace->conf_dump_buffer = NULL; -+ -+ /* Update new cfg to HW */ -+ if (isys_pm_rval >= 0) { -+ __ipu_trace_stop(isys_dev); -+ clear_trace_buffer(isp->isys->trace_cfg); -+ __ipu_trace_restore(isys_dev); -+ } -+ -+ if (psys_pm_rval >= 0) { -+ __ipu_trace_stop(psys_dev); -+ clear_trace_buffer(isp->psys->trace_cfg); -+ __ipu_trace_restore(psys_dev); -+ } -+ mutex_unlock(&isp->trace->lock); -+ -+ if (psys_pm_rval >= 0) -+ pm_runtime_put(psys_dev); -+ if (isys_pm_rval >= 0) -+ pm_runtime_put(isys_dev); -+ -+ return 0; -+} -+ -+static const struct file_operations ipu_traceconf_fops = { -+ .owner = THIS_MODULE, -+ .open = traceconf_open, -+ .release = traceconf_release, -+ .read = traceconf_read, -+ .write = traceconf_write, -+ .llseek = noop_llseek, -+}; -+ -+static void wptraceconf_dump(struct ipu6_device *isp) -+{ -+ struct ipu_subsystem_wptrace_config *sys = &isp->trace->psys.wpt; -+ int i, rem_size; -+ char *out; -+ -+ sys->size_conf_dump = 0; -+ out = sys->conf_dump_buffer; -+ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; -+ -+ for (i = 0; i < sys->fill_level && rem_size > 0; i++) { -+ int bytes_print; -+ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", -+ sys->config[i].reg, -+ sys->config[i].value); -+ -+ bytes_print = min(n, rem_size - 1); -+ rem_size -= bytes_print; -+ out += bytes_print; -+ } -+ sys->size_conf_dump = out - sys->conf_dump_buffer; -+} -+ -+static int wptraceconf_open(struct inode *inode, struct file *file) -+{ -+ int ret; -+ struct ipu6_device *isp; -+ -+ if (!inode->i_private) -+ return -EACCES; -+ -+ isp = inode->i_private; -+ ret = mutex_trylock(&isp->trace->lock); -+ if (!ret) -+ return -EBUSY; -+ -+ if 
(isp->trace->psys.wpt.open) { -+ mutex_unlock(&isp->trace->lock); -+ return -EBUSY; -+ } -+ -+ file->private_data = isp; -+ if (file->f_mode & FMODE_WRITE) { -+ /* TBD: Allocate temp buffer for processing. -+ * Push validated buffer to active config -+ */ -+ /* Forget old config if opened for write */ -+ isp->trace->psys.wpt.fill_level = 0; -+ } -+ -+ if (file->f_mode & FMODE_READ) { -+ isp->trace->psys.wpt.conf_dump_buffer = -+ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); -+ if (!isp->trace->psys.wpt.conf_dump_buffer) { -+ mutex_unlock(&isp->trace->lock); -+ return -ENOMEM; -+ } -+ wptraceconf_dump(isp); -+ } -+ mutex_unlock(&isp->trace->lock); -+ return 0; -+} -+ -+static ssize_t wptraceconf_read(struct file *file, char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu6_device *isp = file->private_data; -+ -+ return simple_read_from_buffer(buf, len, ppos, -+ isp->trace->psys.wpt.conf_dump_buffer, -+ isp->trace->psys.wpt.size_conf_dump); -+} -+ -+static ssize_t wptraceconf_write(struct file *file, const char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ int i; -+ struct ipu6_device *isp = file->private_data; -+ ssize_t bytes = 0; -+ char *wpt_info_buffer = NULL; -+ size_t buffer_size = 0; -+ u32 wp_node_number = 0; -+ struct config_value *wpt_buffer = NULL; -+ struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt; -+ -+ if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) || -+ (len < sizeof(wp_node_number))) { -+ dev_info(&isp->pdev->dev, -+ "length is error, len:%ld, loff:%lld\n", -+ len, *ppos); -+ return -EINVAL; -+ } -+ -+ wpt_info_buffer = vzalloc(len); -+ if (!wpt_info_buffer) -+ return -ENOMEM; -+ -+ bytes = copy_from_user(wpt_info_buffer, buf, len); -+ if (bytes != 0) { -+ vfree(wpt_info_buffer); -+ return -EFAULT; -+ } -+ -+ memcpy(&wp_node_number, wpt_info_buffer, sizeof(u32)); -+ buffer_size = wp_node_number * sizeof(struct config_value); -+ if ((buffer_size + sizeof(wp_node_number)) != len) { -+ dev_info(&isp->pdev->dev, -+ "File 
size is not right, len:%ld, buffer_size:%zu\n", -+ len, buffer_size); -+ vfree(wpt_info_buffer); -+ return -EFAULT; -+ } -+ -+ mutex_lock(&isp->trace->lock); -+ wpt_buffer = (struct config_value *)(wpt_info_buffer + sizeof(u32)); -+ for (i = 0; i < wp_node_number; i++) { -+ if (wpt->fill_level < MAX_TRACE_REGISTERS) { -+ wpt->config[wpt->fill_level].reg = wpt_buffer[i].reg; -+ wpt->config[wpt->fill_level].value = -+ wpt_buffer[i].value; -+ wpt->fill_level++; -+ } else { -+ dev_info(&isp->pdev->dev, -+ "Address 0x%08x ignored as invalid register\n", -+ wpt_buffer[i].reg); -+ break; -+ } -+ } -+ mutex_unlock(&isp->trace->lock); -+ vfree(wpt_info_buffer); -+ -+ return len; -+} -+ -+static int wptraceconf_release(struct inode *inode, struct file *file) -+{ -+ struct ipu6_device *isp = file->private_data; -+ -+ mutex_lock(&isp->trace->lock); -+ isp->trace->open = 0; -+ vfree(isp->trace->psys.wpt.conf_dump_buffer); -+ isp->trace->psys.wpt.conf_dump_buffer = NULL; -+ mutex_unlock(&isp->trace->lock); -+ -+ return 0; -+} -+ -+static const struct file_operations ipu_wptraceconf_fops = { -+ .owner = THIS_MODULE, -+ .open = wptraceconf_open, -+ .release = wptraceconf_release, -+ .read = wptraceconf_read, -+ .write = wptraceconf_write, -+ .llseek = noop_llseek, -+}; -+ -+static int gettrace_open(struct inode *inode, struct file *file) -+{ -+ struct ipu_subsystem_trace_config *sys = inode->i_private; -+ -+ if (!sys) -+ return -EACCES; -+ -+ if (!sys->memory.memory_buffer) -+ return -EACCES; -+ -+ dma_sync_single_for_cpu(sys->dev, -+ sys->memory.dma_handle, -+ MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); -+ -+ file->private_data = sys; -+ return 0; -+}; -+ -+static ssize_t gettrace_read(struct file *file, char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu_subsystem_trace_config *sys = file->private_data; -+ -+ return simple_read_from_buffer(buf, len, ppos, -+ sys->memory.memory_buffer, -+ MEMORY_RING_BUFFER_SIZE + -+ 
MEMORY_RING_BUFFER_OVERREAD); -+} -+ -+static ssize_t gettrace_write(struct file *file, const char __user *buf, -+ size_t len, loff_t *ppos) -+{ -+ struct ipu_subsystem_trace_config *sys = file->private_data; -+ static const char str[] = "clear"; -+ char buffer[sizeof(str)] = { 0 }; -+ ssize_t ret; -+ -+ ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); -+ if (ret < 0) -+ return ret; -+ -+ if (ret < sizeof(str) - 1) -+ return -EINVAL; -+ -+ if (!strncmp(str, buffer, sizeof(str) - 1)) { -+ clear_trace_buffer(sys); -+ return len; -+ } -+ -+ return -EINVAL; -+} -+ -+static int gettrace_release(struct inode *inode, struct file *file) -+{ -+ return 0; -+} -+ -+static const struct file_operations ipu_gettrace_fops = { -+ .owner = THIS_MODULE, -+ .open = gettrace_open, -+ .release = gettrace_release, -+ .read = gettrace_read, -+ .write = gettrace_write, -+ .llseek = noop_llseek, -+}; -+ -+int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, -+ struct device *dev, struct ipu_trace_block *blocks) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_trace *trace = isp->trace; -+ struct ipu_subsystem_trace_config *sys; -+ int ret = 0; -+ -+ if (!isp->trace) -+ return 0; -+ -+ mutex_lock(&isp->trace->lock); -+ -+ if (dev == &isp->isys->auxdev.dev) { -+ sys = &trace->isys; -+ } else if (dev == &isp->psys->auxdev.dev) { -+ sys = &trace->psys; -+ } else { -+ ret = -EINVAL; -+ goto leave; -+ } -+ -+ adev->trace_cfg = sys; -+ sys->dev = dev; -+ sys->offset = base - isp->base; /* sub system offset */ -+ sys->base = base; -+ sys->blocks = blocks; -+ -+ sys->memory.memory_buffer = -+ ipu6_dma_alloc(adev, MEMORY_RING_BUFFER_SIZE + -+ MEMORY_RING_BUFFER_GUARD, -+ &sys->memory.dma_handle, -+ GFP_KERNEL, 0); -+ -+ if (!sys->memory.memory_buffer) -+ dev_err(dev, "failed alloc memory for tracing.\n"); -+ -+leave: -+ mutex_unlock(&isp->trace->lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(ipu_trace_init); -+ -+void 
ipu_trace_uninit(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu6_device *isp = adev->isp; -+ struct ipu_trace *trace = isp->trace; -+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; -+ -+ if (!trace || !sys) -+ return; -+ -+ mutex_lock(&trace->lock); -+ -+ if (sys->memory.memory_buffer) -+ ipu6_dma_free( -+ adev, -+ MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, -+ sys->memory.memory_buffer, sys->memory.dma_handle, 0); -+ -+ sys->dev = NULL; -+ sys->memory.memory_buffer = NULL; -+ -+ mutex_unlock(&trace->lock); -+} -+EXPORT_SYMBOL_GPL(ipu_trace_uninit); -+ -+int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir) -+{ -+ struct dentry *files[4]; -+ int i = 0; -+ -+ if (!ipu_trace_enable) -+ return 0; -+ -+ files[i] = debugfs_create_file("traceconf", 0644, -+ dir, isp, &ipu_traceconf_fops); -+ if (!files[i]) -+ return -ENOMEM; -+ i++; -+ -+ files[i] = debugfs_create_file("wptraceconf", 0644, -+ dir, isp, &ipu_wptraceconf_fops); -+ if (!files[i]) -+ goto error; -+ i++; -+ -+ files[i] = debugfs_create_file("getisystrace", 0444, -+ dir, -+ &isp->trace->isys, &ipu_gettrace_fops); -+ -+ if (!files[i]) -+ goto error; -+ i++; -+ -+ files[i] = debugfs_create_file("getpsystrace", 0444, -+ dir, -+ &isp->trace->psys, &ipu_gettrace_fops); -+ if (!files[i]) -+ goto error; -+ -+ return 0; -+ -+error: -+ for (; i > 0; i--) -+ debugfs_remove(files[i - 1]); -+ return -ENOMEM; -+} -+ -+int ipu_trace_add(struct ipu6_device *isp) -+{ -+ if (!ipu_trace_enable) -+ return 0; -+ -+ isp->trace = devm_kzalloc(&isp->pdev->dev, -+ sizeof(struct ipu_trace), GFP_KERNEL); -+ if (!isp->trace) -+ return -ENOMEM; -+ -+ mutex_init(&isp->trace->lock); -+ -+ dev_dbg(&isp->pdev->dev, "ipu trace enabled!"); -+ -+ return 0; -+} -+ -+void ipu_trace_release(struct ipu6_device *isp) -+{ -+ if (!isp->trace) -+ return; -+ mutex_destroy(&isp->trace->lock); -+} -+ -+int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t 
*dma_handle) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg; -+ -+ if (!ipu_trace_enable) -+ return -EACCES; -+ -+ if (!sys || !sys->memory.memory_buffer) -+ return -EACCES; -+ -+ *dma_handle = sys->memory.dma_handle; -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); -+ -+MODULE_AUTHOR("Samu Onkalo "); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Intel ipu trace support"); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -new file mode 100644 -index 0000000..f66d889 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -@@ -0,0 +1,147 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2014 - 2025 Intel Corporation */ -+ -+#ifndef IPU_TRACE_H -+#define IPU_TRACE_H -+#include -+ -+/* Trace unit register offsets */ -+#define TRACE_REG_TUN_DDR_ENABLE 0x000 -+#define TRACE_REG_TUN_NPK_ENABLE 0x004 -+#define TRACE_REG_TUN_DDR_INFO_VAL 0x008 -+#define TRACE_REG_TUN_NPK_ADDR 0x00C -+#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 -+#define TRACE_REG_TUN_DRAM_END_ADDR 0x014 -+#define TRACE_REG_TUN_LOCAL_TIMER0 0x018 -+#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C -+#define TRACE_REG_TUN_WR_PTR 0x020 -+#define TRACE_REG_TUN_RD_PTR 0x024 -+ -+/* -+ * Following registers are left out on purpose: -+ * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR -+ * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR -+ */ -+ -+/* Trace monitor register offsets */ -+#define TRACE_REG_TM_TRACE_ADDR_A 0x0900 -+#define TRACE_REG_TM_TRACE_ADDR_B 0x0904 -+#define TRACE_REG_TM_TRACE_ADDR_C 0x0908 -+#define TRACE_REG_TM_TRACE_ADDR_D 0x090c -+#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 -+#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 -+#define TRACE_REG_TM_TRACE_PER_PC 0x0918 -+#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c -+#define TRACE_REG_TM_TRACE_HEADER 0x0920 -+#define TRACE_REG_TM_TRACE_CFG 0x0924 -+#define 
TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 -+#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c -+#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 -+#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 -+#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 -+#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c -+#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 -+#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 -+#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 -+#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c -+#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 -+#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 -+#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 -+#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c -+#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 -+#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 -+#define TRACE_REG_TM_FWTRACE_LAST 0x0A08 -+ -+/* -+ * Following exists only in (I)SP address space: -+ * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST -+ */ -+ -+#define TRACE_REG_GPC_RESET 0x000 -+#define TRACE_REG_GPC_OVERALL_ENABLE 0x004 -+#define TRACE_REG_GPC_TRACE_HEADER 0x008 -+#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C -+#define TRACE_REG_GPC_TRACE_NPK_EN 0x010 -+#define TRACE_REG_GPC_TRACE_DDR_EN 0x014 -+#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 -+#define TRACE_REG_GPC_TRACE_LPKT 0x01C -+ -+#define TRACE_REG_GPC_ENABLE_ID0 0x020 -+#define TRACE_REG_GPC_ENABLE_ID1 0x024 -+#define TRACE_REG_GPC_ENABLE_ID2 0x028 -+#define TRACE_REG_GPC_ENABLE_ID3 0x02c -+ -+#define TRACE_REG_GPC_VALUE_ID0 0x030 -+#define TRACE_REG_GPC_VALUE_ID1 0x034 -+#define TRACE_REG_GPC_VALUE_ID2 0x038 -+#define TRACE_REG_GPC_VALUE_ID3 0x03c -+ -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 -+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c -+ -+#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 -+#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 -+#define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 -+#define 
TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c -+ -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 -+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c -+ -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 -+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c -+ -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 -+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c -+ -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 -+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c -+ -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 -+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac -+ -+#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 -+#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 -+#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 -+#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc -+ -+struct ipu_trace; -+struct ipu_subsystem_trace_config; -+ -+enum ipu_trace_block_type { -+ IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ -+ IPU_TRACE_BLOCK_TM, /* Trace monitor */ -+ IPU_TRACE_BLOCK_GPC, /* General purpose control */ -+ IPU_TRACE_CSI2, /* CSI2 legacy receiver */ -+ IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ -+ IPU_TRACE_SIG2CIOS, -+ IPU_TRACE_TIMER_RST, /* Trace reset control timer */ -+ IPU_TRACE_BLOCK_END /* End of list */ -+}; -+ -+struct ipu_trace_block { -+ u32 offset; /* Offset to block inside subsystem */ -+ enum ipu_trace_block_type type; -+}; -+ -+int ipu_trace_add(struct ipu6_device *isp); -+int ipu_trace_debugfs_add(struct ipu6_device *isp, struct dentry *dir); -+void 
ipu_trace_release(struct ipu6_device *isp); -+int ipu_trace_init(struct ipu6_device *isp, void __iomem *base, -+ struct device *dev, struct ipu_trace_block *blocks); -+void ipu_trace_restore(struct device *dev); -+void ipu_trace_uninit(struct device *dev); -+void ipu_trace_stop(struct device *dev); -+int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); -+#endif -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 0311fe7..3a00c9b 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -32,6 +32,7 @@ - #include "ipu6-platform-buttress-regs.h" - #include "ipu6-platform-isys-csi2-reg.h" - #include "ipu6-platform-regs.h" -+#include "ipu6-trace.h" - - static unsigned int isys_freq_override; - module_param(isys_freq_override, uint, 0660); -@@ -490,6 +491,37 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) - return 0; - } - -+#ifdef CONFIG_DEBUG_FS -+static int ipu_init_debugfs(struct ipu6_device *isp) -+{ -+ struct dentry *dir; -+ -+ dir = debugfs_create_dir(IPU6_NAME, NULL); -+ if (!dir) -+ return -ENOMEM; -+ -+ if (ipu_trace_debugfs_add(isp, dir)) -+ goto err; -+ -+ isp->ipu_dir = dir; -+ -+ return 0; -+err: -+ debugfs_remove_recursive(dir); -+ return -ENOMEM; -+} -+ -+static void ipu_remove_debugfs(struct ipu6_device *isp) -+{ -+ /* -+ * Since isys and psys debugfs dir will be created under ipu root dir, -+ * mark its dentry to NULL to avoid duplicate removal. 
-+ */ -+ debugfs_remove_recursive(isp->ipu_dir); -+ isp->ipu_dir = NULL; -+} -+#endif /* CONFIG_DEBUG_FS */ -+ - static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - { - u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); -@@ -598,6 +630,10 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto buttress_exit; - } - -+ ret = ipu_trace_add(isp); -+ if (ret) -+ dev_err(&isp->pdev->dev, "Trace support not available\n"); -+ - ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data, - isp->cpd_fw->size); - if (ret) { -@@ -677,6 +713,13 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - ipu6_mmu_hw_cleanup(isp->psys->mmu); - pm_runtime_put(&isp->psys->auxdev.dev); - -+#ifdef CONFIG_DEBUG_FS -+ ret = ipu_init_debugfs(isp); -+ if (ret) -+ dev_err(&isp->pdev->dev, "Failed to initialize debugfs"); -+ -+#endif -+ - /* Configure the arbitration mechanisms for VC requests */ - ipu6_configure_vc_mechanism(isp); - -@@ -718,6 +761,11 @@ static void ipu6_pci_remove(struct pci_dev *pdev) - struct ipu6_mmu *isys_mmu = isp->isys->mmu; - struct ipu6_mmu *psys_mmu = isp->psys->mmu; - -+#ifdef CONFIG_DEBUG_FS -+ ipu_remove_debugfs(isp); -+#endif -+ ipu_trace_release(isp); -+ - devm_free_irq(&pdev->dev, pdev->irq, isp); - ipu6_cpd_free_pkg_dir(isp->psys); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -index 92e3c34..d91a78e 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -82,10 +82,15 @@ struct ipu6_device { - u32 cpd_metadata_cmpnt_size; - - void __iomem *base; -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *ipu_dir; -+#endif - bool need_ipc_reset; - bool secure_mode; - u8 hw_ver; - bool bus_ready_to_probe; -+ -+ struct ipu_trace *trace; - }; - - #define IPU6_ISYS_NAME "isys" --- -2.43.0 - diff --git a/patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch 
b/patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch deleted file mode 100644 index 332535e..0000000 --- a/patches/0058-media-pci-The-order-of-return-buffers-should-be-FIFO.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 24fc44aeb136b4f57622c613efec534ec67e3013 Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Fri, 16 Jan 2026 00:44:19 -0700 -Subject: [PATCH 58/76] media: pci: The order of return buffers should be FIFO. -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Change Description: -return_buffers : use "list_first_entry", will get these logs in hal: -“CamHAL[ERR] DeviceBase: dequeueBuffer, CamBuf index isn't same with -index used by kernel -CamHAL[ERR] CaptureUnit: Device:Generic grab frame failed:-22” - -The index order is changed from sequential to reverse after return_buffers. -The reason why the normal start&stop does not expose the problem is that -every Hal start will start the buffer index from 0 instead of continuing -to use the buffer index returned last stop. - -So need return_buffers from the list of tail, -and need use the "list_last_entry". - -Signed-off-by: linya14x -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-queue.c | 31 ++++++++++--------- - 1 file changed, 16 insertions(+), 15 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -index aaad622..4839277 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c -@@ -519,39 +519,40 @@ static void return_buffers(struct ipu6_isys_queue *aq, - unsigned long flags; - - spin_lock_irqsave(&aq->lock, flags); -- while (!list_empty(&aq->incoming)) { -+ -+ /* -+ * Something went wrong (FW crash / HW hang / not all buffers -+ * returned from isys) if there are still buffers queued in active -+ * queue. We have to clean up places a bit. 
-+ */ -+ while (!list_empty(&aq->active)) { - struct vb2_buffer *vb; - -- ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer, -- head); -+ ib = list_last_entry(&aq->active, struct ipu6_isys_buffer, -+ head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); -+ - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); -+ need_reset = true; - } - -- /* -- * Something went wrong (FW crash / HW hang / not all buffers -- * returned from isys) if there are still buffers queued in active -- * queue. We have to clean up places a bit. -- */ -- while (!list_empty(&aq->active)) { -+ while (!list_empty(&aq->incoming)) { - struct vb2_buffer *vb; - -- ib = list_first_entry(&aq->active, struct ipu6_isys_buffer, -- head); -+ ib = list_last_entry(&aq->incoming, struct ipu6_isys_buffer, -+ head); - vb = ipu6_isys_buffer_to_vb2_buffer(ib); -- - list_del(&ib->head); - spin_unlock_irqrestore(&aq->lock, flags); - - vb2_buffer_done(vb, state); - - spin_lock_irqsave(&aq->lock, flags); -- need_reset = true; - } - - spin_unlock_irqrestore(&aq->lock, flags); -@@ -699,8 +700,8 @@ static int reset_start_streaming(struct ipu6_isys_video *av) - - spin_lock_irqsave(&aq->lock, flags); - while (!list_empty(&aq->active)) { -- struct ipu6_isys_buffer *ib = list_first_entry(&aq->active, -- struct ipu6_isys_buffer, head); -+ struct ipu6_isys_buffer *ib = list_last_entry(&aq->active, -+ struct ipu6_isys_buffer, head); - - list_del(&ib->head); - list_add_tail(&ib->head, &aq->incoming); --- -2.43.0 - diff --git a/patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch b/patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch deleted file mode 100644 index aae618c..0000000 --- a/patches/0059-media-pci-Modify-enble-disable-stream-in-CSI2.patch +++ /dev/null @@ -1,338 +0,0 @@ -From 9da221ff3a32069ba04dd0df4785ce8a0a4002bb Mon Sep 17 00:00:00 2001 -From: Hongju Wang -Date: Fri, 16 Jan 2026 00:47:04 -0700 
-Subject: [PATCH 59/76] media: pci: Modify enble/disable stream in CSI2 - -Signed-off-by: Hongju Wang -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-csi2.c | 105 ++++++++++++------ - .../media/pci/intel/ipu6/ipu6-isys-csi2.h | 7 +- - .../media/pci/intel/ipu6/ipu6-isys-video.c | 78 +++++++------ - 3 files changed, 118 insertions(+), 72 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -index d1fece6..1e4e70b 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -@@ -339,6 +339,12 @@ static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd, - return ret; - } - -+/* -+ * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set -+ * of streams. -+ */ -+#define CSI2_SUBDEV_MAX_STREAM_ID 63 -+ - static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) -@@ -346,54 +352,81 @@ static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd, - struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); - struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); - struct ipu6_isys_csi2_timing timing = { }; -- struct v4l2_subdev *remote_sd; -- struct media_pad *remote_pad; -- u64 sink_streams; -- int ret; -- -- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); -+ u32 sink_pad, sink_stream; -+ struct v4l2_subdev *r_sd; -+ struct media_pad *r_pad; -+ int ret, i; -+ -+ if (!csi2->stream_count) { -+ ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); -+ if (ret) -+ return ret; -+ -+ ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, -+ true); -+ if (ret) -+ return ret; -+ } - -- sink_streams = -- v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, -- &streams_mask); -+ for (i = 0; i <= 
CSI2_SUBDEV_MAX_STREAM_ID; i++) { -+ if (streams_mask & BIT_ULL(i)) -+ break; -+ } - -- ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); -+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, -+ &sink_pad, &sink_stream); - if (ret) - return ret; - -- ret = ipu6_isys_csi2_set_stream(sd, &timing, csi2->nlanes, true); -- if (ret) -- return ret; -+ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -+ r_sd = media_entity_to_v4l2_subdev(r_pad->entity); - -- ret = v4l2_subdev_enable_streams(remote_sd, remote_pad->index, -- sink_streams); -- if (ret) { -- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); -- return ret; -+ ret = v4l2_subdev_enable_streams(r_sd, r_pad->index, -+ BIT_ULL(sink_stream)); -+ if (!ret) { -+ csi2->stream_count++; -+ return 0; - } - -- return 0; -+ if (!csi2->stream_count) -+ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); -+ -+ return ret; - } - - static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - u32 pad, u64 streams_mask) - { -- struct v4l2_subdev *remote_sd; -- struct media_pad *remote_pad; -- u64 sink_streams; -+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd); -+ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd); -+ u32 sink_pad, sink_stream; -+ struct v4l2_subdev *r_sd; -+ struct media_pad *r_pad; -+ int ret, i; - -- sink_streams = -- v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK, -- &streams_mask); -+ for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) { -+ if (streams_mask & BIT_ULL(i)) -+ break; -+ } - -- remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); -+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, -+ &sink_pad, &sink_stream); -+ if (ret) -+ return ret; - -- ipu6_isys_csi2_set_stream(sd, NULL, 0, false); -+ r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); -+ r_sd = 
media_entity_to_v4l2_subdev(r_pad->entity); - -- v4l2_subdev_disable_streams(remote_sd, remote_pad->index, sink_streams); -+ v4l2_subdev_disable_streams(r_sd, r_pad->index, BIT_ULL(sink_stream)); -+ -+ if (--csi2->stream_count) -+ return 0; -+ -+ dev_dbg(&csi2->isys->adev->auxdev.dev, -+ "stream off CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes); -+ -+ ipu6_isys_csi2_set_stream(sd, NULL, 0, false); - - return 0; - } -@@ -592,7 +625,8 @@ void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream) - int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, -- struct v4l2_mbus_frame_desc_entry *entry) -+ struct v4l2_mbus_frame_desc_entry *entry, -+ int *nr_queues) - { - struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; - struct device *dev = &csi2->isys->adev->auxdev.dev; -@@ -639,5 +673,14 @@ int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - - *entry = *desc_entry; - -+ for (i = 0; i < desc.num_entries; i++) { -+ if (desc_entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc) -+ (*nr_queues)++; -+ } -+ -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ csi2->is_multiple = true; -+ dev_dbg(dev, "set csi2->is_multiple is true.\n"); -+#endif - return 0; - } -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h -index ce8eed9..7d751e5 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h -@@ -43,6 +43,10 @@ struct ipu6_isys_csi2 { - u32 receiver_errors; - unsigned int nlanes; - unsigned int port; -+ unsigned int stream_count; -+#ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET -+ bool is_multiple; -+#endif - }; - - struct ipu6_isys_csi2_timing { -@@ -73,6 +77,7 @@ void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2); - int ipu6_isys_csi2_get_remote_desc(u32 source_stream, - struct ipu6_isys_csi2 *csi2, - struct media_entity *source_entity, -- struct 
v4l2_mbus_frame_desc_entry *entry); -+ struct v4l2_mbus_frame_desc_entry *entry, -+ int *nr_queues); - - #endif /* IPU6_ISYS_CSI2_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -index 148c790..b8cbfe2 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-video.c -@@ -1035,35 +1035,40 @@ ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) - return stream; - } - --static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av) -+static u32 get_remote_pad_stream(struct media_pad *r_pad) - { -- struct media_pipeline *pipeline = -- media_entity_pipeline(&__av->vdev.entity); -+ struct v4l2_subdev_state *state; -+ struct v4l2_subdev *sd; -+ u32 stream_id = 0; - unsigned int i; -- u64 stream_mask = 0; - -- for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { -- struct ipu6_isys_video *av = &__av->csi2->av[i]; -+ sd = media_entity_to_v4l2_subdev(r_pad->entity); -+ state = v4l2_subdev_lock_and_get_active_state(sd); -+ if (!state) -+ return 0; - -- if (pipeline == media_entity_pipeline(&av->vdev.entity)) -- stream_mask |= BIT_ULL(av->source_stream); -+ for (i = 0; i < state->stream_configs.num_configs; i++) { -+ struct v4l2_subdev_stream_config *cfg = -+ &state->stream_configs.configs[i]; -+ if (cfg->pad == r_pad->index) { -+ stream_id = cfg->stream; -+ break; -+ } - } - -- return stream_mask; -+ v4l2_subdev_unlock_state(state); -+ -+ return stream_id; - } - - int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - struct ipu6_isys_buffer_list *bl) - { -- struct v4l2_subdev_krouting *routing; - struct ipu6_isys_stream *stream = av->stream; -- struct v4l2_subdev_state *subdev_state; - struct device *dev = &av->isys->adev->auxdev.dev; - struct v4l2_subdev *sd; - struct media_pad *r_pad; -- u32 sink_pad, sink_stream; -- u64 r_stream; -- u64 stream_mask = 0; -+ u32 r_stream = 0; - int ret = 0; - 
- dev_dbg(dev, "set stream: %d\n", state); -@@ -1073,31 +1078,22 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - - sd = &stream->asd->sd; - r_pad = media_pad_remote_pad_first(&av->pad); -- r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index); -- -- subdev_state = v4l2_subdev_lock_and_get_active_state(sd); -- routing = &subdev_state->routing; -- ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index, -- r_stream, &sink_pad, -- &sink_stream); -- v4l2_subdev_unlock_state(subdev_state); -- if (ret) -- return ret; -- -- stream_mask = get_stream_mask_by_pipeline(av); -+ r_stream = get_remote_pad_stream(r_pad); - if (!state) { - stop_streaming_firmware(av); - - /* stop sub-device which connects with video */ -- dev_dbg(dev, "stream off entity %s pad:%d mask:0x%llx\n", -- sd->name, r_pad->index, stream_mask); -+ dev_dbg(dev, "disable streams %s pad:%d mask:0x%llx for %s\n", -+ sd->name, r_pad->index, BIT_ULL(r_stream), -+ stream->source_entity->name); - ret = v4l2_subdev_disable_streams(sd, r_pad->index, -- stream_mask); -+ BIT_ULL(r_stream)); - if (ret) { -- dev_err(dev, "stream off %s failed with %d\n", sd->name, -- ret); -+ dev_err(dev, "disable streams %s failed with %d\n", -+ sd->name, ret); - return ret; - } -+ - close_streaming_firmware(av); - } else { - ret = start_stream_firmware(av, bl); -@@ -1107,12 +1103,14 @@ int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state, - } - - /* start sub-device which connects with video */ -- dev_dbg(dev, "stream on %s pad %d mask 0x%llx\n", sd->name, -- r_pad->index, stream_mask); -- ret = v4l2_subdev_enable_streams(sd, r_pad->index, stream_mask); -+ dev_dbg(dev, "enable streams %s pad: %d mask:0x%llx for %s\n", -+ sd->name, r_pad->index, BIT_ULL(r_stream), -+ stream->source_entity->name); -+ ret = v4l2_subdev_enable_streams(sd, r_pad->index, -+ BIT_ULL(r_stream)); - if (ret) { -- dev_err(dev, "stream on %s failed with %d\n", sd->name, -- ret); -+ dev_err(dev, 
"enable streams %s failed with %d\n", -+ sd->name, ret); - goto out_media_entity_stop_streaming_firmware; - } - } -@@ -1276,8 +1274,6 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, - /* Find the root */ - state = v4l2_subdev_lock_and_get_active_state(remote_sd); - for_each_active_route(&state->routing, r) { -- (*nr_queues)++; -- - if (r->source_pad == remote_pad->index) - route = r; - } -@@ -1292,11 +1288,13 @@ int ipu6_isys_setup_video(struct ipu6_isys_video *av, - - ret = ipu6_isys_csi2_get_remote_desc(av->source_stream, - to_ipu6_isys_csi2(asd), -- *source_entity, &entry); -+ *source_entity, &entry, -+ nr_queues); - if (ret == -ENOIOCTLCMD) { - av->vc = 0; - av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code); -- } else if (!ret) { -+ *nr_queues = 1; -+ } else if (*nr_queues && !ret) { - dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", - entry.stream, entry.length, entry.bus.csi2.vc, - entry.bus.csi2.dt); --- -2.43.0 - diff --git a/patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch b/patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch deleted file mode 100644 index efcf7ab..0000000 --- a/patches/0060-media-pci-Set-the-correct-SOF-for-different-stream.patch +++ /dev/null @@ -1,29 +0,0 @@ -From d60e72405ffdf29cf617e988700735ee9f5c5a24 Mon Sep 17 00:00:00 2001 -From: Hongju Wang -Date: Fri, 16 Jan 2026 00:49:26 -0700 -Subject: [PATCH 60/76] media: pci: Set the correct SOF for different stream - -[Changes] -Distinguish SOF according to VC for streams. 
- -Signed-off-by: Hongju Wang -Signed-off-by: zouxiaoh ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -index 1e4e70b..70a144e 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c -@@ -605,6 +605,7 @@ void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream) - .type = V4L2_EVENT_FRAME_SYNC, - }; - -+ ev.id = stream->vc; - ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); - v4l2_event_queue(vdev, &ev); - --- -2.43.0 - diff --git a/patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch b/patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch deleted file mode 100644 index e3dff66..0000000 --- a/patches/0061-media-pci-support-imx390-for-6.11.0-rc3.patch +++ /dev/null @@ -1,981 +0,0 @@ -From 970cdb3c90921cec75d25a9e8433cb511e3cab1f Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Fri, 16 Jan 2026 00:56:23 -0700 -Subject: [PATCH 61/76] media: pci: support imx390 for 6.11.0-rc3. 
- -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-buttress.c | 42 +++ - .../media/pci/intel/ipu6/ipu6-buttress.h | 1 + - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 77 +++++ - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 324 +++++++++++++++++- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 85 +++++ - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 123 ++++++- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 15 +- - 7 files changed, 656 insertions(+), 11 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index 103386c..9523cc6 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - - #include "ipu6.h" - #include "ipu6-bus.h" -@@ -774,6 +775,47 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) - } - EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); - -+/* -+ * The dev_id was hard code in platform data, as i2c bus number -+ * may change dynamiclly, we need to update this bus id -+ * accordingly. 
-+ * -+ * @adapter_id: hardware i2c adapter id, this was fixed in platform data -+ * return: i2c bus id registered in system -+ */ -+int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) -+{ -+ struct i2c_adapter *adapter; -+ char name[32]; -+ int i = 0; -+ -+ if (adapter_bdf) { -+ while ((adapter = i2c_get_adapter(i)) != NULL) { -+ struct device *parent = adapter->dev.parent; -+ struct device *pp = parent->parent; -+ -+ if (pp && !strncmp(adapter_bdf, dev_name(pp), bdf_len)) -+ return i; -+ i++; -+ } -+ } -+ -+ i = 0; -+ snprintf(name, sizeof(name), "i2c_designware.%d", adapter_id); -+ while ((adapter = i2c_get_adapter(i)) != NULL) { -+ struct device *parent = adapter->dev.parent; -+ -+ if (parent && !strncmp(name, dev_name(parent), sizeof(name))) -+ return i; -+ i++; -+ } -+ -+ /* Not found, should never happen! */ -+ WARN_ON_ONCE(1); -+ return -1; -+} -+EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, "INTEL_IPU6"); -+ - void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) - { - u32 tsc_hi_1, tsc_hi_2, tsc_lo; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 51e5ad4..6fed522 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -82,4 +82,5 @@ void ipu6_buttress_exit(struct ipu6_device *isp); - void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); - void ipu6_buttress_restore(struct ipu6_device *isp); -+int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); - #endif /* IPU6_BUTTRESS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index 71aa500..f18e6fa 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -567,6 +567,27 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys 
*isys, u8 id) - return ret; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+{ -+ unsigned int phy_id; -+ void __iomem *phy_base; -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct ipu6_device *isp = adev->isp; -+ void __iomem *isp_base = isp->base; -+ unsigned int i; -+ -+ phy_id = cfg->port / 4; -+ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -+ -+ for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { -+ writel(common_init_regs[i].val, -+ phy_base + common_init_regs[i].reg); -+ } -+ -+ return 0; -+} -+#else - static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) - { - struct ipu6_bus_device *adev = isys->adev; -@@ -588,6 +609,7 @@ static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) - phy_base + common_init_regs[i].reg); - } - } -+#endif - - static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - { -@@ -619,6 +641,55 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - return ret; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+{ -+ unsigned int phy_port, phy_id; -+ void __iomem *phy_base; -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct ipu6_device *isp = adev->isp; -+ void __iomem *isp_base = isp->base; -+ const struct phy_reg **phy_config_regs; -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ int i; -+ -+ if (!spdata) { -+ dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -+ return -EINVAL; -+ } -+ -+ phy_id = cfg->port / 4; -+ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -+ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -+ sd_info = *subdevs; -+ if 
(!sd_info->csi2) -+ continue; -+ -+ phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); -+ if (phy_port < 0) { -+ dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", -+ cfg->port, cfg->nlanes); -+ return -ENXIO; -+ } -+ -+ if ((sd_info->csi2->port / 4) != phy_id) -+ continue; -+ -+ dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", -+ phy_port, phy_id, cfg->nlanes); -+ -+ phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; -+ -+ for (i = 0; phy_config_regs[phy_port][i].reg; i++) { -+ writel(phy_config_regs[phy_port][i].val, -+ phy_base + phy_config_regs[phy_port][i].reg); -+ } -+ } -+ -+ return 0; -+} -+#else - static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) - { - struct device *dev = &isys->adev->auxdev.dev; -@@ -658,6 +729,7 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) - - return 0; - } -+#endif - - #define CSI_MCD_PHY_NUM 2 - static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; -@@ -698,9 +770,14 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 0); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ ipu6_isys_mcd_phy_common_init(isys, cfg); -+ ret = ipu6_isys_mcd_phy_config(isys, cfg); -+#else - ipu6_isys_mcd_phy_common_init(isys); - - ret = ipu6_isys_mcd_phy_config(isys); -+#endif - if (ret) - return ret; - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 1ab8dd8..b3ae1f1 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -28,9 +28,14 @@ - #include - #include - #include --#include --#include -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #include -+#include -+#include -+#include -+#include -+#include -+#endif - - #include "ipu6-bus.h" - #include "ipu6-cpd.h" -@@ -100,6 +105,57 @@ enum ltr_did_type { - }; - - #define ISYS_PM_QOS_VALUE 300 -+#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+/* -+ * The param was passed from module to indicate if port -+ * could be optimized. -+ */ -+static bool csi2_port_optimized = true; -+module_param(csi2_port_optimized, bool, 0660); -+MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); -+#endif -+ -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+struct isys_i2c_test { -+ u8 bus_nr; -+ u16 addr; -+ struct i2c_client *client; -+}; -+ -+static int isys_i2c_test(struct device *dev, void *priv) -+{ -+ struct i2c_client *client = i2c_verify_client(dev); -+ struct isys_i2c_test *test = priv; -+ -+ if (!client) -+ return 0; -+ -+ if (i2c_adapter_id(client->adapter) != test->bus_nr || -+ client->addr != test->addr) -+ return 0; -+ -+ test->client = client; -+ -+ return 0; -+} -+ -+static struct -+i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, -+ struct ipu6_isys_subdev_info *sd_info) -+{ -+ struct i2c_board_info *info = &sd_info->i2c.board_info; -+ struct isys_i2c_test test = { -+ .bus_nr = i2c_adapter_id(adapter), -+ .addr = info->addr, -+ }; -+ int rval; -+ -+ rval = i2c_for_each_dev(&test, isys_i2c_test); -+ if (rval || !test.client) -+ return NULL; -+ return test.client; -+} -+#endif - - static int isys_isr_one(struct ipu6_bus_device *adev); - -@@ -142,6 +198,153 @@ unregister_subdev: - return ret; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+static int isys_register_ext_subdev(struct ipu6_isys *isys, -+ struct ipu6_isys_subdev_info *sd_info) -+{ -+ struct i2c_adapter *adapter; -+ struct v4l2_subdev *sd; -+ struct i2c_client *client; -+ int rval; -+ int bus; -+ -+ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf, -+ sizeof(sd_info->i2c.i2c_adapter_bdf)); -+ if (bus < 0) { -+ dev_err(&isys->adev->auxdev.dev, -+ "getting i2c bus id for adapter %d (bdf %s) failed", -+ sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf); -+ return -ENOENT; -+ } -+ 
dev_info(&isys->adev->auxdev.dev, -+ "got i2c bus id %d for adapter %d (bdf %s)", bus, -+ sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf); -+ adapter = i2c_get_adapter(bus); -+ if (!adapter) { -+ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); -+ return -ENOENT; -+ } -+ -+ dev_info(&isys->adev->auxdev.dev, -+ "creating new i2c subdev for %s (address %2.2x, bus %d)", -+ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, -+ bus); -+ -+ if (sd_info->csi2) { -+ dev_info(&isys->adev->auxdev.dev, "sensor device on CSI port: %d\n", -+ sd_info->csi2->port); -+ if (sd_info->csi2->port >= isys->pdata->ipdata->csi2.nports || -+ !isys->csi2[sd_info->csi2->port].isys) { -+ dev_warn(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", -+ sd_info->csi2->port); -+ rval = -EINVAL; -+ goto skip_put_adapter; -+ } -+ } else { -+ dev_info(&isys->adev->auxdev.dev, "non camera subdevice\n"); -+ } -+ -+ client = isys_find_i2c_subdev(adapter, sd_info); -+ if (client) { -+ dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); -+ rval = 0; -+ goto skip_put_adapter; -+ } -+ -+ sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, -+ &sd_info->i2c.board_info, NULL); -+ if (!sd) { -+ dev_warn(&isys->adev->auxdev.dev, "can't create new i2c subdev\n"); -+ rval = -EINVAL; -+ goto skip_put_adapter; -+ } -+ -+ if (!sd_info->csi2) -+ return 0; -+ -+ return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); -+ -+skip_put_adapter: -+ i2c_put_adapter(adapter); -+ -+ return rval; -+} -+ -+static int isys_unregister_ext_subdev(struct ipu6_isys *isys, -+ struct ipu6_isys_subdev_info *sd_info) -+{ -+ struct i2c_adapter *adapter; -+ struct i2c_client *client; -+ int bus; -+ -+ bus = ipu6_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf, -+ sizeof(sd_info->i2c.i2c_adapter_bdf)); -+ if (bus < 0) { -+ dev_err(&isys->adev->auxdev.dev, -+ "getting i2c bus id for adapter %d (bdf %s) failed\n", -+ sd_info->i2c.i2c_adapter_id, -+ 
sd_info->i2c.i2c_adapter_bdf); -+ return -ENOENT; -+ } -+ dev_dbg(&isys->adev->auxdev.dev, -+ "got i2c bus id %d for adapter %d (bdf %s)\n", bus, -+ sd_info->i2c.i2c_adapter_id, -+ sd_info->i2c.i2c_adapter_bdf); -+ adapter = i2c_get_adapter(bus); -+ if (!adapter) { -+ dev_warn(&isys->adev->auxdev.dev, "can't find adapter\n"); -+ return -ENOENT; -+ } -+ -+ dev_dbg(&isys->adev->auxdev.dev, -+ "unregister i2c subdev for %s (address %2.2x, bus %d)\n", -+ sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, -+ bus); -+ -+ client = isys_find_i2c_subdev(adapter, sd_info); -+ if (!client) { -+ dev_dbg(&isys->adev->auxdev.dev, "Device not exists\n"); -+ goto skip_put_adapter; -+ } -+ -+ i2c_unregister_device(client); -+ -+skip_put_adapter: -+ i2c_put_adapter(adapter); -+ -+ return 0; -+} -+ -+static void isys_register_ext_subdevs(struct ipu6_isys *isys) -+{ -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ -+ if (!spdata) { -+ dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -+ return; -+ } -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) -+ isys_register_ext_subdev(isys, *sd_info); -+} -+ -+static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) -+{ -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ -+ if (!spdata) -+ return; -+ -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) -+ isys_unregister_ext_subdev(isys, *sd_info); -+} -+#endif -+ - static void isys_stream_init(struct ipu6_isys *isys) - { - u32 i; -@@ -173,10 +376,43 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ DECLARE_BITMAP(csi2_enable, 32); -+#endif - unsigned int i; - 
int ret; - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ /* -+ * Here is somewhat a workaround, let each platform decide -+ * if csi2 port can be optimized, which means only registered -+ * port from pdata would be enabled. -+ */ -+ if (csi2_port_optimized && spdata) { -+ bitmap_zero(csi2_enable, 32); -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { -+ if ((*sd_info)->csi2) { -+ i = (*sd_info)->csi2->port; -+ if (i >= csi2_pdata->nports) { -+ dev_warn(&isys->adev->auxdev.dev, -+ "invalid csi2 port %u\n", i); -+ continue; -+ } -+ bitmap_set(csi2_enable, i, 1); -+ } -+ } -+ } else { -+ bitmap_fill(csi2_enable, 32); -+ } -+#endif -+ - for (i = 0; i < csi2_pdata->nports; i++) { -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ if (!test_bit(i, csi2_enable)) -+ continue; -+#endif - ret = ipu6_isys_csi2_init(&isys->csi2[i], isys, - isys->pdata->base + - CSI_REG_PORT_BASE(i), i); -@@ -200,10 +436,43 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ DECLARE_BITMAP(csi2_enable, 32); -+#endif - unsigned int i, j; - int ret; - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ /* -+ * Here is somewhat a workaround, let each platform decide -+ * if csi2 port can be optimized, which means only registered -+ * port from pdata would be enabled. 
-+ */ -+ if (csi2_port_optimized && spdata) { -+ bitmap_zero(csi2_enable, 32); -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { -+ if ((*sd_info)->csi2) { -+ i = (*sd_info)->csi2->port; -+ if (i >= csi2_pdata->nports) { -+ dev_warn(&isys->adev->auxdev.dev, -+ "invalid csi2 port %u\n", i); -+ continue; -+ } -+ bitmap_set(csi2_enable, i, 1); -+ } -+ } -+ } else { -+ bitmap_fill(csi2_enable, 32); -+ } -+#endif -+ - for (i = 0; i < csi2_pdata->nports; i++) { -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ if (!test_bit(i, csi2_enable)) -+ continue; -+#endif - struct media_entity *sd = &isys->csi2[i].asd.sd.entity; - - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { -@@ -238,10 +507,43 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu6_isys_subdev_info **sd_info; -+ DECLARE_BITMAP(csi2_enable, 32); -+#endif - unsigned int i, j; - int ret; - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ /* -+ * Here is somewhat a workaround, let each platform decide -+ * if csi2 port can be optimized, which means only registered -+ * port from pdata would be enabled. 
-+ */ -+ if (csi2_port_optimized && spdata) { -+ bitmap_zero(csi2_enable, 32); -+ for (sd_info = spdata->subdevs; *sd_info; sd_info++) { -+ if ((*sd_info)->csi2) { -+ i = (*sd_info)->csi2->port; -+ if (i >= csi2_pdata->nports) { -+ dev_warn(&isys->adev->auxdev.dev, -+ "invalid csi2 port %u\n", i); -+ continue; -+ } -+ bitmap_set(csi2_enable, i, 1); -+ } -+ } -+ } else { -+ bitmap_fill(csi2_enable, 32); -+ } -+#endif -+ - for (i = 0; i < csi2_pdata->nports; i++) { -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ if (!test_bit(i, csi2_enable)) -+ continue; -+#endif - for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { - struct ipu6_isys_video *av = &isys->csi2[i].av[j]; - -@@ -672,6 +974,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) - mutex_destroy(&iwake_watermark->mutex); - } - -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, -@@ -783,6 +1086,7 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) - v4l2_async_nf_unregister(&isys->notifier); - v4l2_async_nf_cleanup(&isys->notifier); - } -+#endif - - static int isys_register_devices(struct ipu6_isys *isys) - { -@@ -820,12 +1124,23 @@ static int isys_register_devices(struct ipu6_isys *isys) - if (ret) - goto out_isys_unregister_subdevices; - -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - ret = isys_notifier_init(isys); - if (ret) - goto out_isys_unregister_subdevices; -+#else -+ isys_register_ext_subdevs(isys); -+#endif -+ -+ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -+ if (ret) -+ goto out_isys_unregister_ext_subdevs; - - return 0; - -+out_isys_unregister_ext_subdevs: -+ isys_unregister_ext_subdevs(isys); -+ - out_isys_unregister_subdevices: - isys_csi2_unregister_subdevices(isys); - -@@ -848,6 +1163,9 @@ static void isys_unregister_devices(struct ipu6_isys *isys) - { - 
isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ isys_unregister_ext_subdevs(isys); -+#endif - v4l2_device_unregister(&isys->v4l2_dev); - media_device_unregister(&isys->media_dev); - media_device_cleanup(&isys->media_dev); -@@ -1217,7 +1535,9 @@ static void isys_remove(struct auxiliary_device *auxdev) - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - isys_notifier_cleanup(isys); -+#endif - - cpu_latency_qos_remove_request(&isys->pm_qos); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index d55d804..5e58d81 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -60,6 +61,15 @@ struct ipu6_bus_device; - #define IPU6EP_MTL_LTR_VALUE 1023 - #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc - -+#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -+ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+#define IPU6_SPDATA_NAME_LEN 20 -+#define IPU6_SPDATA_BDF_LEN 32 -+#define IPU6_SPDATA_GPIO_NUM 4 -+#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#endif -+ - struct ltr_did { - union { - u32 value; -@@ -161,7 +171,9 @@ struct ipu6_isys { - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; -+#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - struct v4l2_async_notifier notifier; -+#endif - struct isys_iwake_watermark iwake_watermark; - #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET - struct mutex reset_mutex; -@@ -180,6 +192,79 @@ struct isys_fw_msgs { - dma_addr_t dma_addr; - }; - -+struct ipu6_isys_subdev_i2c_info { -+ struct i2c_board_info board_info; -+ int i2c_adapter_id; -+ char 
i2c_adapter_bdf[32]; -+}; -+ -+#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -+ || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+#define IPU6_SPDATA_NAME_LEN 20 -+#define IPU6_SPDATA_BDF_LEN 32 -+#define IPU6_SPDATA_GPIO_NUM 4 -+#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#endif -+ -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+/** -+ * struct ipu6_spdata_rep - override subdev platform data -+ * -+ * @name: i2c_board_info.type -+ * @i2c_adapter_bdf_o: old i2c adapter bdf -+ * @slave_addr_o: old i2c slave address -+ * @i2c_adapter_bdf_n: new i2c adapter bdf -+ * @slave_addr_n: new i2c slave address -+ * -+ * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and -+ * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n -+ */ -+struct ipu6_spdata_rep { -+ /* i2c old information */ -+ char name[IPU6_SPDATA_NAME_LEN]; -+ unsigned int port_o; -+ char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; -+ uint32_t slave_addr_o; -+ -+ /* i2c new information */ -+ unsigned int port_n; -+ char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; -+ uint32_t slave_addr_n; -+ -+ /* sensor_platform */ -+ unsigned int lanes; -+ int gpios[IPU6_SPDATA_GPIO_NUM]; -+ int irq_pin; -+ unsigned int irq_pin_flags; -+ char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; -+ char suffix; -+}; -+#endif -+ -+struct ipu6_isys_subdev_info { -+ struct ipu6_isys_csi2_config *csi2; -+ struct ipu6_isys_subdev_i2c_info i2c; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ void (*fixup_spdata)(const void *spdata_rep, void *spdata); -+#endif -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ char *acpi_hid; -+#endif -+}; -+ -+struct ipu6_isys_clk_mapping { -+ struct clk_lookup clkdev_data; -+ char *platform_clock_name; -+}; -+ -+struct ipu6_isys_subdev_pdata { -+ struct 
ipu6_isys_subdev_info **subdevs; -+ struct ipu6_isys_clk_mapping *clk_map; -+}; -+ - struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); - void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, uintptr_t data); - void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 3a00c9b..c740c22 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -34,6 +34,19 @@ - #include "ipu6-platform-regs.h" - #include "ipu6-trace.h" - -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+#include -+#endif -+ -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+static int isys_init_acpi_add_device(struct device *dev, void *priv, -+ struct ipu6_isys_csi2_config *csi2, -+ bool reprobe) -+{ -+ return 0; -+} -+#endif -+ - static unsigned int isys_freq_override; - module_param(isys_freq_override, uint, 0660); - MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); -@@ -373,18 +386,25 @@ static void ipu6_internal_pdata_init(struct ipu6_device *isp) - static struct ipu6_bus_device * - ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, -- const struct ipu6_isys_internal_pdata *ipdata) -+ const struct ipu6_isys_internal_pdata *ipdata, -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata -+#endif -+ ) - { - struct device *dev = &pdev->dev; - struct ipu6_bus_device *isys_adev; - struct ipu6_isys_pdata *pdata; -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ struct ipu6_isys_subdev_pdata *acpi_pdata; -+#endif - int ret; - -- ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -- if (ret) { -- dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -- return ERR_PTR(ret); -- } -+ // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -+ // if (ret) { -+ // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -+ // return 
ERR_PTR(ret); -+ // } - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) -@@ -392,7 +412,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - - pdata->base = base; - pdata->ipdata = ipdata; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ pdata->spdata = spdata; -+#endif - /* Override the isys freq */ - if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && - isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { -@@ -409,6 +431,19 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - "ipu6_bus_initialize_device isys failed\n"); - } - -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ if (!spdata) { -+ dev_dbg(&pdev->dev, "No subdevice info provided"); -+ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, -+ isys_init_acpi_add_device); -+ pdata->spdata = acpi_pdata; -+ } else { -+ dev_dbg(&pdev->dev, "Subdevice info found"); -+ ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, -+ isys_init_acpi_add_device); -+ } -+#endif -+ - isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(isys_adev->mmu)) { -@@ -539,6 +574,59 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+static inline int match_spdata(struct ipu6_isys_subdev_info *sd, -+ const struct ipu6_spdata_rep *rep) -+{ -+ if (strcmp(sd->i2c.board_info.type, rep->name)) -+ return 0; -+ -+ if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) -+ return 0; -+ -+ if (sd->i2c.board_info.addr != rep->slave_addr_o) -+ return 0; -+ -+ if (sd->csi2->port != rep->port_o) -+ return 0; -+ -+ return 1; -+} -+ -+static void fixup_spdata(const void *spdata_rep, -+ struct ipu6_isys_subdev_pdata *spdata) -+{ -+ const struct ipu6_spdata_rep *rep = spdata_rep; -+ struct ipu6_isys_subdev_info **subdevs, 
*sd_info; -+ -+ if (!spdata) -+ return; -+ -+ for (; rep->name[0]; rep++) { -+ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -+ sd_info = *subdevs; -+ -+ if (!sd_info->csi2) -+ continue; -+ -+ if (match_spdata(sd_info, rep)) { -+ strcpy(sd_info->i2c.i2c_adapter_bdf, -+ rep->i2c_adapter_bdf_n); -+ sd_info->i2c.board_info.addr = -+ rep->slave_addr_n; -+ sd_info->csi2->port = rep->port_n; -+ -+ if (sd_info->fixup_spdata) -+ sd_info->fixup_spdata(rep, -+ sd_info->i2c.board_info.platform_data); -+ } -+ } -+ } -+} -+#endif -+#endif -+ - static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { - struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; -@@ -642,6 +730,16 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto out_ipu6_bus_del_devices; - } - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); -+ if (rval) -+ dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); -+ else -+ fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); -+#endif -+#endif -+ - isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, - sizeof(isys_buttress_ctrl), GFP_KERNEL); - if (!isys_ctrl) { -@@ -650,7 +748,11 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - } - - isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, -- &isys_ipdata); -+ &isys_ipdata, -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ pdev->dev.platform_data -+#endif -+ ); - if (IS_ERR(isp->isys)) { - ret = PTR_ERR(isp->isys); - goto out_ipu6_bus_del_devices; -@@ -749,6 +851,11 @@ out_ipu6_bus_del_devices: - ipu6_mmu_cleanup(isp->isys->mmu); - ipu6_bus_del_devices(pdev); - release_firmware(isp->cpd_fw); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ 
release_firmware(isp->spdata_fw); -+#endif -+#endif - buttress_exit: - ipu6_buttress_exit(isp); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -index d91a78e..71d8cdf 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -23,6 +23,12 @@ struct ipu6_bus_device; - #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" - #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -+ && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+/* array of struct ipu6_spdata_rep terminated by NULL */ -+#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" -+#endif -+ - enum ipu6_version { - IPU6_VER_INVALID = 0, - IPU6_VER_6 = 1, -@@ -80,7 +86,11 @@ struct ipu6_device { - const struct firmware *cpd_fw; - const char *cpd_fw_name; - u32 cpd_metadata_cmpnt_size; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -+ const struct firmware *spdata_fw; -+#endif -+#endif - void __iomem *base; - #ifdef CONFIG_DEBUG_FS - struct dentry *ipu_dir; -@@ -328,6 +338,9 @@ struct ipu6_isys_internal_pdata { - struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+ struct ipu6_isys_subdev_pdata *spdata; -+#endif - }; - - struct ipu6_psys_internal_pdata { --- -2.43.0 - diff --git a/patches/0062-i2c-media-fix-cov-issue.patch b/patches/0062-i2c-media-fix-cov-issue.patch deleted file mode 100644 index 340eea3..0000000 --- a/patches/0062-i2c-media-fix-cov-issue.patch +++ /dev/null @@ -1,31 +0,0 @@ -From d48e001d4952b4c75f29375d0d122925804579ba Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Fri, 16 Jan 2026 00:58:48 -0700 -Subject: [PATCH 62/76] i2c: media: fix cov issue - -check return value of v4l2_ctrl_handler_init; -change the type of phy_port to 
int; - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index f18e6fa..ff29130 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -644,7 +644,8 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) - { -- unsigned int phy_port, phy_id; -+ unsigned int phy_id; -+ int phy_port; - void __iomem *phy_base; - struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); - struct ipu6_device *isp = adev->isp; --- -2.43.0 - diff --git a/patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch b/patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch deleted file mode 100644 index be2cd47..0000000 --- a/patches/0063-media-add-ipu-acpi-module-to-linux-drivers.patch +++ /dev/null @@ -1,252 +0,0 @@ -From b1aaf42e3a0cf80f26a45502385469b674b17d66 Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Fri, 16 Jan 2026 01:01:26 -0700 -Subject: [PATCH 63/76] media: add ipu-acpi module to linux-drivers - -The ipu-acpi module is responsible for obtaining ACPI device platform -data (pdata) and is universally applicable to both IPU6/7 and -upstream/non-upstream. To better align with its purpose and usage, the -ipu-acpi module is being moved out of the linux_kernel and into the -linux-drivers[android/platform/main]. - -Specific changes include: - -- Struct ipu_isys_subdev_pdata and its internal structs - ipu_isys_subdev_info/ipu_isys_clk_mapping: - solely related to ACPI pdata, not tied to any specific IPU. 
-- IPU_SPDATA_NAME_LEN and related macros: - used exclusively for ACPI device definition and configuration. - -Signed-off-by: Dongcheng Yan -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 4 +-- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 26 +++++++++---------- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 26 +++++++++---------- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 10 +++---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 2 +- - 5 files changed, 34 insertions(+), 34 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index ff29130..7ab02b7 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -651,8 +651,8 @@ static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; - const struct phy_reg **phy_config_regs; -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **subdevs, *sd_info; - int i; - - if (!spdata) { -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index b3ae1f1..709c582 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -141,7 +141,7 @@ static int isys_i2c_test(struct device *dev, void *priv) - - static struct - i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, -- struct ipu6_isys_subdev_info *sd_info) -+ struct ipu_isys_subdev_info *sd_info) - { - struct i2c_board_info *info = &sd_info->i2c.board_info; - struct isys_i2c_test test = { -@@ -200,7 +200,7 @@ unregister_subdev: - - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - static 
int isys_register_ext_subdev(struct ipu6_isys *isys, -- struct ipu6_isys_subdev_info *sd_info) -+ struct ipu_isys_subdev_info *sd_info) - { - struct i2c_adapter *adapter; - struct v4l2_subdev *sd; -@@ -274,7 +274,7 @@ skip_put_adapter: - } - - static int isys_unregister_ext_subdev(struct ipu6_isys *isys, -- struct ipu6_isys_subdev_info *sd_info) -+ struct ipu_isys_subdev_info *sd_info) - { - struct i2c_adapter *adapter; - struct i2c_client *client; -@@ -321,8 +321,8 @@ skip_put_adapter: - - static void isys_register_ext_subdevs(struct ipu6_isys *isys) - { -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - - if (!spdata) { - dev_info(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -@@ -334,8 +334,8 @@ static void isys_register_ext_subdevs(struct ipu6_isys *isys) - - static void isys_unregister_ext_subdevs(struct ipu6_isys *isys) - { -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - - if (!spdata) - return; -@@ -377,8 +377,8 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); - #endif - unsigned int i; -@@ -437,8 +437,8 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct 
ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); - #endif - unsigned int i, j; -@@ -508,8 +508,8 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu6_isys_subdev_info **sd_info; -+ struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -+ struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); - #endif - unsigned int i, j; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 5e58d81..7d091da 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -64,10 +64,10 @@ struct ipu6_bus_device; - #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ - && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ - || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU6_SPDATA_NAME_LEN 20 --#define IPU6_SPDATA_BDF_LEN 32 --#define IPU6_SPDATA_GPIO_NUM 4 --#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#define IPU_SPDATA_NAME_LEN 20 -+#define IPU_SPDATA_BDF_LEN 32 -+#define IPU_SPDATA_GPIO_NUM 4 -+#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif - - struct ltr_did { -@@ -201,10 +201,10 @@ struct ipu6_isys_subdev_i2c_info { - #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ - && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ - || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU6_SPDATA_NAME_LEN 20 --#define IPU6_SPDATA_BDF_LEN 32 --#define IPU6_SPDATA_GPIO_NUM 4 --#define IPU6_SPDATA_IRQ_PIN_NAME_LEN 16 -+#define IPU_SPDATA_NAME_LEN 20 -+#define IPU_SPDATA_BDF_LEN 32 -+#define 
IPU_SPDATA_GPIO_NUM 4 -+#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif - - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -@@ -243,7 +243,7 @@ struct ipu6_spdata_rep { - }; - #endif - --struct ipu6_isys_subdev_info { -+struct ipu_isys_subdev_info { - struct ipu6_isys_csi2_config *csi2; - struct ipu6_isys_subdev_i2c_info i2c; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -@@ -255,14 +255,14 @@ struct ipu6_isys_subdev_info { - #endif - }; - --struct ipu6_isys_clk_mapping { -+struct ipu_isys_clk_mapping { - struct clk_lookup clkdev_data; - char *platform_clock_name; - }; - --struct ipu6_isys_subdev_pdata { -- struct ipu6_isys_subdev_info **subdevs; -- struct ipu6_isys_clk_mapping *clk_map; -+struct ipu_isys_subdev_pdata { -+ struct ipu_isys_subdev_info **subdevs; -+ struct ipu_isys_clk_mapping *clk_map; - }; - - struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index c740c22..8ae3612 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -388,7 +388,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_isys_internal_pdata *ipdata, - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata -+ struct ipu_isys_subdev_pdata *spdata - #endif - ) - { -@@ -396,7 +396,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_bus_device *isys_adev; - struct ipu6_isys_pdata *pdata; - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- struct ipu6_isys_subdev_pdata *acpi_pdata; -+ struct ipu_isys_subdev_pdata *acpi_pdata; - #endif - int ret; - -@@ -576,7 +576,7 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --static inline int match_spdata(struct ipu6_isys_subdev_info *sd, -+static inline int match_spdata(struct ipu_isys_subdev_info *sd, - const struct ipu6_spdata_rep *rep) - { - if (strcmp(sd->i2c.board_info.type, rep->name)) -@@ -595,10 +595,10 @@ static inline int match_spdata(struct ipu6_isys_subdev_info *sd, - } - - static void fixup_spdata(const void *spdata_rep, -- struct ipu6_isys_subdev_pdata *spdata) -+ struct ipu_isys_subdev_pdata *spdata) - { - const struct ipu6_spdata_rep *rep = spdata_rep; -- struct ipu6_isys_subdev_info **subdevs, *sd_info; -+ struct ipu_isys_subdev_info **subdevs, *sd_info; - - if (!spdata) - return; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -index 71d8cdf..786494d 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -339,7 +339,7 @@ struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; - #if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -- struct ipu6_isys_subdev_pdata *spdata; -+ struct ipu_isys_subdev_pdata *spdata; - #endif - }; - --- -2.43.0 - diff --git a/patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch b/patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch deleted file mode 100644 index 4c40916..0000000 --- a/patches/0064-media-pci-unregister-i2c-device-to-complete-ext_subd.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 9e6c9f97ca9a758f980bbe551abce94be050ac04 Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Fri, 16 Jan 2026 01:03:24 -0700 -Subject: [PATCH 64/76] media: pci: unregister i2c device to complete - ext_subdev_register - -Signed-off-by: Dongcheng Yan -Signed-off-by: zouxiaoh ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 4 ++-- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 7 ++++--- - 2 files changed, 6 insertions(+), 5 deletions(-) - -diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 709c582..df5de9a 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -250,8 +250,8 @@ static int isys_register_ext_subdev(struct ipu6_isys *isys, - client = isys_find_i2c_subdev(adapter, sd_info); - if (client) { - dev_dbg(&isys->adev->auxdev.dev, "Device exists\n"); -- rval = 0; -- goto skip_put_adapter; -+ i2c_unregister_device(client); -+ dev_dbg(&isys->adev->auxdev.dev, "Unregister device"); - } - - sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 8ae3612..e183f9c 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -434,16 +434,17 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!spdata) { - dev_dbg(&pdev->dev, "No subdevice info provided"); -- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, - isys_init_acpi_add_device); - pdata->spdata = acpi_pdata; - } else { - dev_dbg(&pdev->dev, "Subdevice info found"); -- ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, - isys_init_acpi_add_device); - } -+ if (ret) -+ return ERR_PTR(ret); - #endif -- - isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, - &ipdata->hw_variant); - if (IS_ERR(isys_adev->mmu)) { --- -2.43.0 - diff --git a/patches/0065-media-pci-add-missing-if-for-PDATA.patch b/patches/0065-media-pci-add-missing-if-for-PDATA.patch deleted file mode 100644 index df7dd85..0000000 --- a/patches/0065-media-pci-add-missing-if-for-PDATA.patch +++ /dev/null @@ -1,114 +0,0 @@ -From 
0dc67e20a257bc43749e93d9533e963074f4e682 Mon Sep 17 00:00:00 2001 -From: "Signed-off-by: zouxiaoh" -Date: Fri, 16 Jan 2026 01:26:58 -0700 -Subject: [PATCH 65/76] media: pci: add missing #if for PDATA - ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c | 5 ++++- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 2 ++ - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 8 ++++---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 7 ++++--- - 4 files changed, 14 insertions(+), 8 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index 9523cc6..a3bd109 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -21,8 +21,9 @@ - #include - #include - #include -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #include -- -+#endif - #include "ipu6.h" - #include "ipu6-bus.h" - #include "ipu6-dma.h" -@@ -775,6 +776,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) - } - EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); - -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - /* - * The dev_id was hard code in platform data, as i2c bus number - * may change dynamiclly, we need to update this bus id -@@ -816,6 +818,7 @@ int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) - } - EXPORT_SYMBOL_NS_GPL(ipu6_get_i2c_bus_id, "INTEL_IPU6"); - -+#endif - void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) - { - u32 tsc_hi_1, tsc_hi_2, tsc_lo; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 6fed522..d6884c7 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -82,5 +82,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); - void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 
legacy, u32 combo); - void ipu6_buttress_restore(struct ipu6_device *isp); -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); -+#endif - #endif /* IPU6_BUTTRESS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index df5de9a..4f2c86f 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1131,16 +1131,16 @@ static int isys_register_devices(struct ipu6_isys *isys) - #else - isys_register_ext_subdevs(isys); - #endif -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); - if (ret) - goto out_isys_unregister_ext_subdevs; -- -+#endif - return 0; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - out_isys_unregister_ext_subdevs: - isys_unregister_ext_subdevs(isys); -- -+#endif - out_isys_unregister_subdevices: - isys_csi2_unregister_subdevices(isys); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 7d091da..76a452f 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -10,8 +10,9 @@ - #include - #include - #include -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - #include -- -+#endif - #include - #include - #include -@@ -191,13 +192,13 @@ struct isys_fw_msgs { - struct list_head head; - dma_addr_t dma_addr; - }; -- -+#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - struct ipu6_isys_subdev_i2c_info { - struct i2c_board_info board_info; - int i2c_adapter_id; - char i2c_adapter_bdf[32]; - }; -- -+#endif - #if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ - && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ - || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --- -2.43.0 - diff --git 
a/patches/0066-media-pci-refine-PDATA-related-config.patch b/patches/0066-media-pci-refine-PDATA-related-config.patch deleted file mode 100644 index 9161e80..0000000 --- a/patches/0066-media-pci-refine-PDATA-related-config.patch +++ /dev/null @@ -1,527 +0,0 @@ -From ea2ce6bd736dcb0311811b92d3c3ff30dbb07743 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Fri, 16 Jan 2026 01:28:11 -0700 -Subject: [PATCH 66/76] media: pci: refine PDATA related config - -remove dynamic pdata related code; -replace CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA -with CONFIG_INTEL_IPU6_ACPI; - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-buttress.c | 5 +- - .../media/pci/intel/ipu6/ipu6-buttress.h | 2 +- - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 6 +- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 38 +++++----- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 60 ++------------- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 74 +------------------ - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.h | 13 +--- - 7 files changed, 34 insertions(+), 164 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index a3bd109..c243142 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -21,7 +21,7 @@ - #include - #include - #include --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #endif - #include "ipu6.h" -@@ -775,8 +775,7 @@ int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) - return -ETIMEDOUT; - } - EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); -- --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * The dev_id was hard code in platform data, as i2c bus number - * may change dynamiclly, we need to update this bus id -diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index d6884c7..8bc5eeb 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -82,7 +82,7 @@ void ipu6_buttress_exit(struct ipu6_device *isp); - void ipu6_buttress_csi_port_config(struct ipu6_device *isp, - u32 legacy, u32 combo); - void ipu6_buttress_restore(struct ipu6_device *isp); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - int ipu6_get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len); - #endif - #endif /* IPU6_BUTTRESS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -index 7ab02b7..1cf33b5 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -567,7 +567,7 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) - return ret; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) - { - unsigned int phy_id; -@@ -641,7 +641,7 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - return ret; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) - { - unsigned int phy_id; -@@ -771,7 +771,7 @@ int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys, - return ret; - - ipu6_isys_mcd_phy_reset(isys, phy_id, 0); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - ipu6_isys_mcd_phy_common_init(isys, cfg); - ret = ipu6_isys_mcd_phy_config(isys, cfg); - #else -diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 4f2c86f..25d01ea 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -28,7 +28,7 @@ - #include - #include - #include --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #include - #include -@@ -105,7 +105,7 @@ enum ltr_did_type { - }; - - #define ISYS_PM_QOS_VALUE 300 --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * The param was passed from module to indicate if port - * could be optimized. -@@ -113,9 +113,7 @@ enum ltr_did_type { - static bool csi2_port_optimized = true; - module_param(csi2_port_optimized, bool, 0660); - MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); --#endif - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) - struct isys_i2c_test { - u8 bus_nr; - u16 addr; -@@ -198,7 +196,7 @@ unregister_subdev: - return ret; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int isys_register_ext_subdev(struct ipu6_isys *isys, - struct ipu_isys_subdev_info *sd_info) - { -@@ -376,7 +374,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; - struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); -@@ -384,7 +382,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - unsigned int i; - int ret; - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * Here is somewhat a workaround, let each platform decide - * if csi2 port can be 
optimized, which means only registered -@@ -409,7 +407,7 @@ static int isys_csi2_register_subdevices(struct ipu6_isys *isys) - #endif - - for (i = 0; i < csi2_pdata->nports; i++) { --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!test_bit(i, csi2_enable)) - continue; - #endif -@@ -436,7 +434,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; - struct device *dev = &isys->adev->auxdev.dev; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; - struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); -@@ -444,7 +442,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - unsigned int i, j; - int ret; - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * Here is somewhat a workaround, let each platform decide - * if csi2 port can be optimized, which means only registered -@@ -469,7 +467,7 @@ static int isys_csi2_create_media_links(struct ipu6_isys *isys) - #endif - - for (i = 0; i < csi2_pdata->nports; i++) { --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!test_bit(i, csi2_enable)) - continue; - #endif -@@ -507,7 +505,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - { - const struct ipu6_isys_internal_csi2_pdata *csi2_pdata = - &isys->pdata->ipdata->csi2; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; - struct ipu_isys_subdev_info **sd_info; - DECLARE_BITMAP(csi2_enable, 32); -@@ -515,7 +513,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - unsigned int i, j; - int ret; - --#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* - * Here is somewhat a workaround, let each platform decide - * if csi2 port can be optimized, which means only registered -@@ -540,7 +538,7 @@ static int isys_register_video_devices(struct ipu6_isys *isys) - #endif - - for (i = 0; i < csi2_pdata->nports; i++) { --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!test_bit(i, csi2_enable)) - continue; - #endif -@@ -974,7 +972,7 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) - mutex_destroy(&iwake_watermark->mutex); - } - --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, -@@ -1124,20 +1122,20 @@ static int isys_register_devices(struct ipu6_isys *isys) - if (ret) - goto out_isys_unregister_subdevices; - --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - ret = isys_notifier_init(isys); - if (ret) - goto out_isys_unregister_subdevices; - #else - isys_register_ext_subdevs(isys); - #endif --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); - if (ret) - goto out_isys_unregister_ext_subdevs; - #endif - return 0; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - out_isys_unregister_ext_subdevs: - isys_unregister_ext_subdevs(isys); - #endif -@@ -1163,7 +1161,7 @@ static void isys_unregister_devices(struct ipu6_isys *isys) - { - isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - isys_unregister_ext_subdevs(isys); - 
#endif - v4l2_device_unregister(&isys->v4l2_dev); -@@ -1535,7 +1533,7 @@ static void isys_remove(struct auxiliary_device *auxdev) - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - isys_notifier_cleanup(isys); - #endif - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 76a452f..5c96f5b 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -10,7 +10,7 @@ - #include - #include - #include --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #endif - #include -@@ -62,11 +62,7 @@ struct ipu6_bus_device; - #define IPU6EP_MTL_LTR_VALUE 1023 - #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc - --#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU_SPDATA_NAME_LEN 20 --#define IPU_SPDATA_BDF_LEN 32 -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #define IPU_SPDATA_GPIO_NUM 4 - #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif -@@ -172,7 +168,7 @@ struct ipu6_isys { - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; --#if !IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct v4l2_async_notifier notifier; - #endif - struct isys_iwake_watermark iwake_watermark; -@@ -192,66 +188,22 @@ struct isys_fw_msgs { - struct list_head head; - dma_addr_t dma_addr; - }; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu6_isys_subdev_i2c_info { - struct i2c_board_info board_info; - int i2c_adapter_id; - char i2c_adapter_bdf[32]; - }; - #endif --#if (IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- 
&& IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING)) \ -- || IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#define IPU_SPDATA_NAME_LEN 20 --#define IPU_SPDATA_BDF_LEN 32 -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #define IPU_SPDATA_GPIO_NUM 4 - #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --/** -- * struct ipu6_spdata_rep - override subdev platform data -- * -- * @name: i2c_board_info.type -- * @i2c_adapter_bdf_o: old i2c adapter bdf -- * @slave_addr_o: old i2c slave address -- * @i2c_adapter_bdf_n: new i2c adapter bdf -- * @slave_addr_n: new i2c slave address -- * -- * identify a subdev with @name, @i2c_adapter_bdf_o and @slave_addr_o and -- * configure it to use the new @i2c_adapter_bdf_n and @slave_addr_n -- */ --struct ipu6_spdata_rep { -- /* i2c old information */ -- char name[IPU6_SPDATA_NAME_LEN]; -- unsigned int port_o; -- char i2c_adapter_bdf_o[IPU6_SPDATA_BDF_LEN]; -- uint32_t slave_addr_o; -- -- /* i2c new information */ -- unsigned int port_n; -- char i2c_adapter_bdf_n[IPU6_SPDATA_BDF_LEN]; -- uint32_t slave_addr_n; -- -- /* sensor_platform */ -- unsigned int lanes; -- int gpios[IPU6_SPDATA_GPIO_NUM]; -- int irq_pin; -- unsigned int irq_pin_flags; -- char irq_pin_name[IPU6_SPDATA_IRQ_PIN_NAME_LEN]; -- char suffix; --}; --#endif -- - struct ipu_isys_subdev_info { - struct ipu6_isys_csi2_config *csi2; -- struct ipu6_isys_subdev_i2c_info i2c; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- void (*fixup_spdata)(const void *spdata_rep, void *spdata); --#endif - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ struct ipu6_isys_subdev_i2c_info i2c; - char *acpi_hid; - #endif - }; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index e183f9c..7a0443a 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -387,7 +387,7 @@ static struct ipu6_bus_device * - ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - struct ipu6_buttress_ctrl *ctrl, void __iomem *base, - const struct ipu6_isys_internal_pdata *ipdata, --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata - #endif - ) -@@ -412,7 +412,7 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - - pdata->base = base; - pdata->ipdata = ipdata; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - pdata->spdata = spdata; - #endif - /* Override the isys freq */ -@@ -575,59 +575,6 @@ static void ipu6_configure_vc_mechanism(struct ipu6_device *isp) - writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --static inline int match_spdata(struct ipu_isys_subdev_info *sd, -- const struct ipu6_spdata_rep *rep) --{ -- if (strcmp(sd->i2c.board_info.type, rep->name)) -- return 0; -- -- if (strcmp(sd->i2c.i2c_adapter_bdf, rep->i2c_adapter_bdf_o)) -- return 0; -- -- if (sd->i2c.board_info.addr != rep->slave_addr_o) -- return 0; -- -- if (sd->csi2->port != rep->port_o) -- return 0; -- -- return 1; --} -- --static void fixup_spdata(const void *spdata_rep, -- struct ipu_isys_subdev_pdata *spdata) --{ -- const struct ipu6_spdata_rep *rep = spdata_rep; -- struct ipu_isys_subdev_info **subdevs, *sd_info; -- -- if (!spdata) -- return; -- -- for (; rep->name[0]; rep++) { -- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -- sd_info = *subdevs; -- -- if (!sd_info->csi2) -- continue; -- -- if (match_spdata(sd_info, rep)) { -- strcpy(sd_info->i2c.i2c_adapter_bdf, -- rep->i2c_adapter_bdf_n); -- sd_info->i2c.board_info.addr = -- rep->slave_addr_n; -- sd_info->csi2->port = rep->port_n; -- -- if (sd_info->fixup_spdata) -- 
sd_info->fixup_spdata(rep, -- sd_info->i2c.board_info.platform_data); -- } -- } -- } --} --#endif --#endif -- - static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { - struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; -@@ -731,16 +678,6 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto out_ipu6_bus_del_devices; - } - --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- rval = request_firmware(&isp->spdata_fw, IPU6_SPDATA_NAME, &pdev->dev); -- if (rval) -- dev_warn(&isp->pdev->dev, "no spdata replace, using default\n"); -- else -- fixup_spdata(isp->spdata_fw->data, pdev->dev.platform_data); --#endif --#endif -- - isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, - sizeof(isys_buttress_ctrl), GFP_KERNEL); - if (!isys_ctrl) { -@@ -750,7 +687,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - - isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base, - &isys_ipdata, --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - pdev->dev.platform_data - #endif - ); -@@ -852,11 +789,6 @@ out_ipu6_bus_del_devices: - ipu6_mmu_cleanup(isp->isys->mmu); - ipu6_bus_del_devices(pdev); - release_firmware(isp->cpd_fw); --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- release_firmware(isp->spdata_fw); --#endif --#endif - buttress_exit: - ipu6_buttress_exit(isp); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -index 786494d..f464a7b 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.h -@@ -23,12 +23,6 @@ struct ipu6_bus_device; - #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin" - #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin" - --#if 
IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) \ -- && IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) --/* array of struct ipu6_spdata_rep terminated by NULL */ --#define IPU6_SPDATA_NAME "ipu6v1_spdata.bin" --#endif -- - enum ipu6_version { - IPU6_VER_INVALID = 0, - IPU6_VER_6 = 1, -@@ -86,11 +80,6 @@ struct ipu6_device { - const struct firmware *cpd_fw; - const char *cpd_fw_name; - u32 cpd_metadata_cmpnt_size; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_PDATA_DYNAMIC_LOADING) -- const struct firmware *spdata_fw; --#endif --#endif - void __iomem *base; - #ifdef CONFIG_DEBUG_FS - struct dentry *ipu_dir; -@@ -338,7 +327,7 @@ struct ipu6_isys_internal_pdata { - struct ipu6_isys_pdata { - void __iomem *base; - const struct ipu6_isys_internal_pdata *ipdata; --#if IS_ENABLED(CONFIG_VIDEO_INTEL_IPU_USE_PLATFORMDATA) -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct ipu_isys_subdev_pdata *spdata; - #endif - }; --- -2.43.0 - diff --git a/patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch b/patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch deleted file mode 100644 index e1d5209..0000000 --- a/patches/0067-kernel-align-ACPI-PDATA-and-ACPI-fwnode-build-for-EC.patch +++ /dev/null @@ -1,382 +0,0 @@ -From fde4535bc9282dde7886f0fe24375acc5b35bf09 Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Fri, 16 Jan 2026 01:31:22 -0700 -Subject: [PATCH 67/76] kernel: align ACPI PDATA and ACPI fwnode build for ECG - -Signed-off-by: hepengpx -Signed-off-by: zouxiaoh ---- - .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 170 ++++++++---------- - .../drivers/media/pci/intel/ipu6/ipu6-isys.c | 29 ++- - .../drivers/media/pci/intel/ipu6/ipu6-isys.h | 4 - - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 14 +- - 4 files changed, 96 insertions(+), 121 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c 
-index 1cf33b5..1be7d75 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c -@@ -567,49 +567,46 @@ static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id) - return ret; - } - --#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --static int ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+static void ipu6_isys_write_mcd_phy_common_init_regs(struct ipu6_isys *isys, -+ u32 port) - { -- unsigned int phy_id; -- void __iomem *phy_base; -- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct ipu6_bus_device *adev = isys->adev; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; -+ void __iomem *phy_base; - unsigned int i; -+ u8 phy_id; - -- phy_id = cfg->port / 4; -+ phy_id = port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); - -- for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { -+ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) - writel(common_init_regs[i].val, -- phy_base + common_init_regs[i].reg); -- } -- -- return 0; -+ phy_base + common_init_regs[i].reg); - } --#else --static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys) -+ -+static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ , struct ipu6_isys_csi2_config *csi2_cfg -+#endif -+) - { -- struct ipu6_bus_device *adev = isys->adev; -- struct ipu6_device *isp = adev->isp; -- void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct v4l2_async_connection *asc; -- void __iomem *phy_base; -- unsigned int i; -- u8 phy_id; - -- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -- s_asd = container_of(asc, struct sensor_async_sd, asc); -- phy_id = s_asd->csi2.port / 4; -- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -- -- for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) -- writel(common_init_regs[i].val, -- 
phy_base + common_init_regs[i].reg); -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ if (isys->pdata->spdata) { -+ ipu6_isys_write_mcd_phy_common_init_regs(isys, csi2_cfg->port); -+ } else { -+#endif -+ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -+ s_asd = container_of(asc, struct sensor_async_sd, asc); -+ ipu6_isys_write_mcd_phy_common_init_regs(isys, s_asd->csi2.port); -+ } -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - } --} - #endif -+} - - static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - { -@@ -641,96 +638,79 @@ static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg) - return ret; - } - --#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys, struct ipu6_isys_csi2_config *cfg) -+static int ipu6_isys_write_mcd_phy_config_regs(struct ipu6_isys *isys, -+ struct ipu6_isys_csi2_config *cfg) - { -- unsigned int phy_id; -- int phy_port; -- void __iomem *phy_base; -- struct ipu6_bus_device *adev = to_ipu6_bus_device(&isys->adev->auxdev.dev); -+ struct device *dev = &isys->adev->auxdev.dev; -+ struct ipu6_bus_device *adev = isys->adev; -+ const struct phy_reg **phy_config_regs; - struct ipu6_device *isp = adev->isp; - void __iomem *isp_base = isp->base; -- const struct phy_reg **phy_config_regs; -- struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; -- struct ipu_isys_subdev_info **subdevs, *sd_info; -- int i; -+ void __iomem *phy_base; -+ int phy_port, phy_id; -+ unsigned int i; - -- if (!spdata) { -- dev_err(&isys->adev->auxdev.dev, "no subdevice info provided\n"); -- return -EINVAL; -+ phy_port = ipu6_isys_driver_port_to_phy_port(cfg); -+ if (phy_port < 0) { -+ dev_err(dev, "invalid port %d for lane %d", cfg->port, -+ cfg->nlanes); -+ return -ENXIO; - } - - phy_id = cfg->port / 4; - phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -- for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -- sd_info = *subdevs; -- if (!sd_info->csi2) -- 
continue; -- -- phy_port = ipu6_isys_driver_port_to_phy_port(sd_info->csi2); -- if (phy_port < 0) { -- dev_err(&isys->adev->auxdev.dev, "invalid port %d for lane %d", -- cfg->port, cfg->nlanes); -- return -ENXIO; -- } -- -- if ((sd_info->csi2->port / 4) != phy_id) -- continue; -- -- dev_dbg(&isys->adev->auxdev.dev, "port%d PHY%u lanes %u\n", -- phy_port, phy_id, cfg->nlanes); -+ dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg->port, phy_id, cfg->nlanes); - -- phy_config_regs = config_regs[sd_info->csi2->nlanes/2]; -- -- for (i = 0; phy_config_regs[phy_port][i].reg; i++) { -- writel(phy_config_regs[phy_port][i].val, -- phy_base + phy_config_regs[phy_port][i].reg); -- } -- } -+ phy_config_regs = config_regs[cfg->nlanes / 2]; -+ for (i = 0; phy_config_regs[phy_port][i].reg; i++) -+ writel(phy_config_regs[phy_port][i].val, -+ phy_base + phy_config_regs[phy_port][i].reg); - - return 0; - } --#else --static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys) -+ -+static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ , struct ipu6_isys_csi2_config *csi2_cfg -+#endif -+) - { -- struct device *dev = &isys->adev->auxdev.dev; -- struct ipu6_bus_device *adev = isys->adev; -- const struct phy_reg **phy_config_regs; -- struct ipu6_device *isp = adev->isp; -- void __iomem *isp_base = isp->base; - struct sensor_async_sd *s_asd; - struct ipu6_isys_csi2_config cfg; - struct v4l2_async_connection *asc; -- int phy_port, phy_id; -- unsigned int i; -- void __iomem *phy_base; -+ int ret = 0; - -- list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -- s_asd = container_of(asc, struct sensor_async_sd, asc); -- cfg.port = s_asd->csi2.port; -- cfg.nlanes = s_asd->csi2.nlanes; -- phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); -- if (phy_port < 0) { -- dev_err(dev, "invalid port %d for lane %d", cfg.port, -- cfg.nlanes); -- return -ENXIO; -- } -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ struct ipu_isys_subdev_pdata *spdata = 
isys->pdata->spdata; -+ struct ipu_isys_subdev_info **subdevs, *sd_info; - -- phy_id = cfg.port / 4; -- phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id); -- dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, -- cfg.nlanes); -+ if (spdata) { -+ for (subdevs = spdata->subdevs; *subdevs; subdevs++) { -+ sd_info = *subdevs; -+ if (!sd_info->csi2 || -+ (sd_info->csi2->port / 4) != (csi2_cfg->port / 4)) -+ continue; - -- phy_config_regs = config_regs[cfg.nlanes / 2]; -- cfg.port = phy_port; -- for (i = 0; phy_config_regs[cfg.port][i].reg; i++) -- writel(phy_config_regs[cfg.port][i].val, -- phy_base + phy_config_regs[cfg.port][i].reg); -+ ret = ipu6_isys_write_mcd_phy_config_regs(isys, sd_info->csi2); -+ if (ret) -+ return ret; -+ } -+ } else { -+#endif -+ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { -+ s_asd = container_of(asc, struct sensor_async_sd, asc); -+ cfg.port = s_asd->csi2.port; -+ cfg.nlanes = s_asd->csi2.nlanes; -+ ret = ipu6_isys_write_mcd_phy_config_regs(isys, &cfg); -+ if (ret) -+ return ret; -+ } -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - } -+#endif - - return 0; - } --#endif - - #define CSI_MCD_PHY_NUM 2 - static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM]; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 25d01ea..7d10583 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -28,14 +28,12 @@ - #include - #include - #include --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #include - #include - #include - #include - #include - #include --#endif - - #include "ipu6-bus.h" - #include "ipu6-cpd.h" -@@ -972,7 +970,6 @@ static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys) - mutex_destroy(&iwake_watermark->mutex); - } - --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier 
*notifier, - struct v4l2_subdev *sd, -@@ -1084,7 +1081,6 @@ static void isys_notifier_cleanup(struct ipu6_isys *isys) - v4l2_async_nf_unregister(&isys->notifier); - v4l2_async_nf_cleanup(&isys->notifier); - } --#endif - - static int isys_register_devices(struct ipu6_isys *isys) - { -@@ -1122,17 +1118,19 @@ static int isys_register_devices(struct ipu6_isys *isys) - if (ret) - goto out_isys_unregister_subdevices; - --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- ret = isys_notifier_init(isys); -- if (ret) -- goto out_isys_unregister_subdevices; --#else -- isys_register_ext_subdevs(isys); -+#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -+ if (!isys->pdata->spdata) { - #endif -+ ret = isys_notifier_init(isys); -+ if (ret) -+ goto out_isys_unregister_subdevices; - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -- if (ret) -- goto out_isys_unregister_ext_subdevs; -+ } else { -+ isys_register_ext_subdevs(isys); -+ ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); -+ if (ret) -+ goto out_isys_unregister_ext_subdevs; -+ } - #endif - return 0; - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -@@ -1162,7 +1160,8 @@ static void isys_unregister_devices(struct ipu6_isys *isys) - isys_unregister_video_devices(isys); - isys_csi2_unregister_subdevices(isys); - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) -- isys_unregister_ext_subdevs(isys); -+ if (isys->pdata->spdata) -+ isys_unregister_ext_subdevs(isys); - #endif - v4l2_device_unregister(&isys->v4l2_dev); - media_device_unregister(&isys->media_dev); -@@ -1533,9 +1532,7 @@ static void isys_remove(struct auxiliary_device *auxdev) - free_fw_msg_bufs(isys); - - isys_unregister_devices(isys); --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - isys_notifier_cleanup(isys); --#endif - - cpu_latency_qos_remove_request(&isys->pm_qos); - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 5c96f5b..3768c48 100644 ---- 
a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -168,9 +168,7 @@ struct ipu6_isys { - spinlock_t listlock; /* Protect framebuflist */ - struct list_head framebuflist; - struct list_head framebuflist_fw; --#if !IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - struct v4l2_async_notifier notifier; --#endif - struct isys_iwake_watermark iwake_watermark; - #ifdef CONFIG_VIDEO_INTEL_IPU6_ISYS_RESET - struct mutex reset_mutex; -@@ -194,8 +192,6 @@ struct ipu6_isys_subdev_i2c_info { - int i2c_adapter_id; - char i2c_adapter_bdf[32]; - }; --#endif --#if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - #define IPU_SPDATA_GPIO_NUM 4 - #define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - #endif -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 7a0443a..e469f47 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -400,11 +400,11 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - #endif - int ret; - -- // ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -- // if (ret) { -- // dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -- // return ERR_PTR(ret); -- // } -+ ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); -+ if (ret) { -+ dev_err_probe(dev, ret, "IPU6 bridge init failed\n"); -+ return ERR_PTR(ret); -+ } - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) -@@ -436,7 +436,9 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - dev_dbg(&pdev->dev, "No subdevice info provided"); - ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, - isys_init_acpi_add_device); -- pdata->spdata = acpi_pdata; -+ if (acpi_pdata && (*acpi_pdata->subdevs)) { -+ pdata->spdata = acpi_pdata; -+ } - } else { - dev_dbg(&pdev->dev, "Subdevice info found"); - ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, --- -2.43.0 - diff --git 
a/patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch b/patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch deleted file mode 100644 index 95476db..0000000 --- a/patches/0068-upstream-Use-module-parameter-to-set-psys-freq.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 28c69edaca0d33d2617677592d8bc864fc607223 Mon Sep 17 00:00:00 2001 -From: Dongcheng Yan -Date: Fri, 16 Jan 2026 01:32:54 -0700 -Subject: [PATCH 68/76] upstream: Use module parameter to set psys freq - ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index e469f47..291b745 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -51,6 +51,10 @@ static unsigned int isys_freq_override; - module_param(isys_freq_override, uint, 0660); - MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); - -+static unsigned int psys_freq_override; -+module_param(psys_freq_override, uint, 0660); -+MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); -+ - #define IPU6_PCI_BAR 0 - - struct ipu6_cell_program { -@@ -483,6 +487,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, - pdata->base = base; - pdata->ipdata = ipdata; - -+ /* Override the isys freq */ -+ if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && -+ psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { -+ ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; -+ ctrl->qos_floor = psys_freq_override; -+ dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", -+ psys_freq_override); -+ } -+ - psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, - IPU6_PSYS_NAME); - if (IS_ERR(psys_adev)) { --- -2.43.0 - diff --git a/patches/0069-media-pci-add-IPU6-PSYS-driver.patch b/patches/0069-media-pci-add-IPU6-PSYS-driver.patch deleted file mode 100644 index 1adcde1..0000000 --- 
a/patches/0069-media-pci-add-IPU6-PSYS-driver.patch +++ /dev/null @@ -1,8335 +0,0 @@ -From e4e0e82bb08efc972738b2cadea52b74595ae9f5 Mon Sep 17 00:00:00 2001 -From: zouxiaoh -Date: Fri, 16 Jan 2026 01:40:47 -0700 -Subject: [PATCH 69/77] media: pci: add IPU6 PSYS driver - -* media: intel/ipu6: Remove ipu6_buttress_ctrl started - -Signed-off-by: zouxiaoh -Signed-off-by: Florent Pirou ---- - 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 1 + - .../media/pci/intel/ipu6/psys/Makefile | 26 + - .../media/pci/intel/ipu6/psys/ipu-fw-psys.c | 433 +++++ - .../media/pci/intel/ipu6/psys/ipu-fw-psys.h | 382 ++++ - .../pci/intel/ipu6/psys/ipu-fw-resources.c | 103 ++ - .../pci/intel/ipu6/psys/ipu-platform-psys.h | 78 + - .../intel/ipu6/psys/ipu-platform-resources.h | 103 ++ - .../pci/intel/ipu6/psys/ipu-psys-compat32.c | 222 +++ - .../media/pci/intel/ipu6/psys/ipu-psys.c | 1599 +++++++++++++++++ - .../media/pci/intel/ipu6/psys/ipu-psys.h | 324 ++++ - .../media/pci/intel/ipu6/psys/ipu-resources.c | 861 +++++++++ - .../pci/intel/ipu6/psys/ipu6-fw-resources.c | 607 +++++++ - .../pci/intel/ipu6/psys/ipu6-l-scheduler.c | 621 +++++++ - .../intel/ipu6/psys/ipu6-platform-resources.h | 194 ++ - .../media/pci/intel/ipu6/psys/ipu6-ppg.c | 556 ++++++ - .../media/pci/intel/ipu6/psys/ipu6-ppg.h | 38 + - .../media/pci/intel/ipu6/psys/ipu6-psys-gpc.c | 209 +++ - .../media/pci/intel/ipu6/psys/ipu6-psys.c | 1054 +++++++++++ - .../pci/intel/ipu6/psys/ipu6ep-fw-resources.c | 393 ++++ - .../ipu6/psys/ipu6ep-platform-resources.h | 42 + - .../pci/intel/ipu6/psys/ipu6se-fw-resources.c | 194 ++ - .../ipu6/psys/ipu6se-platform-resources.h | 103 ++ - 22 files changed, 8143 insertions(+) - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c - create mode 100644 
6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c - create mode 100644 6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -index e1e123b..67c13c9 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -@@ -22,3 +22,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ - ipu6-isys-dwc-phy.o - - obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o -+obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile -new file mode 100644 -index 0000000..299cb04 ---- /dev/null -+++ 
b/6.18.0/drivers/media/pci/intel/ipu6/psys/Makefile -@@ -0,0 +1,26 @@ -+# SPDX-License-Identifier: GPL-2.0-only -+ -+is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) -+ifeq ($(is_kernel_lt_6_10), 1) -+ifneq ($(EXTERNAL_BUILD), 1) -+src := $(srctree)/$(src) -+endif -+endif -+ -+intel-ipu6-psys-objs += ipu-psys.o \ -+ ipu-fw-psys.o \ -+ ipu-fw-resources.o \ -+ ipu-resources.o \ -+ ipu6-psys.o \ -+ ipu6-ppg.o \ -+ ipu6-l-scheduler.o \ -+ ipu6-fw-resources.o \ -+ ipu6se-fw-resources.o \ -+ ipu6ep-fw-resources.o -+ -+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-psys.o -+ -+ifeq ($(is_kernel_lt_6_10), 1) -+ccflags-y += -I$(src)/../ipu6/ -+endif -+ccflags-y += -I$(src)/../ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c -new file mode 100644 -index 0000000..c89cd0c ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c -@@ -0,0 +1,433 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2016 - 2024 Intel Corporation -+ -+#include -+ -+#include -+ -+#include -+#include "ipu6-fw-com.h" -+#include "ipu-fw-psys.h" -+#include "ipu-psys.h" -+ -+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) -+{ -+ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED; -+ return 0; -+} -+ -+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_fw_psys_cmd *psys_cmd; -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ int ret = 0; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); -+ -+out: -+ return ret; -+} -+ -+int 
ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ int ret = 0; -+ -+ /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 1); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 1); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ int ret = 0; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ int ret = 0; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ ret = -ENODATA; -+ goto out; -+ } -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); -+ -+out: -+ return ret; -+} -+ -+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) -+{ -+ kcmd->kpg->pg->state = 
IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; -+ return 0; -+} -+ -+int ipu_fw_psys_rcv_event(struct ipu_psys *psys, -+ struct ipu_fw_psys_event *event) -+{ -+ void *rcv; -+ -+ rcv = ipu6_recv_get_token(psys->fwcom, 0); -+ if (!rcv) -+ return 0; -+ -+ memcpy(event, rcv, sizeof(*event)); -+ ipu6_recv_put_token(psys->fwcom, 0); -+ return 1; -+} -+ -+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, -+ struct ipu_psys_kcmd *kcmd, -+ u32 buffer, unsigned int size) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ u32 type; -+ u32 buffer_state; -+ -+ type = terminal->terminal_type; -+ -+ switch (type) { -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: -+ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: -+ buffer_state = IPU_FW_PSYS_BUFFER_FULL; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: -+ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; -+ break; -+ default: -+ dev_err(dev, "unknown terminal type: 0x%x\n", type); -+ return -EAGAIN; -+ } -+ -+ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || -+ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { -+ struct ipu_fw_psys_data_terminal *dterminal = -+ (struct ipu_fw_psys_data_terminal *)terminal; -+ dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY; -+ dterminal->frame.data_bytes = size; -+ if (!ipu_fw_psys_pg_get_protocol(kcmd)) -+ dterminal->frame.data = buffer; -+ else -+ dterminal->frame.data_index = terminal_idx; -+ 
dterminal->frame.buffer_state = buffer_state; -+ } else { -+ struct ipu_fw_psys_param_terminal *pterminal = -+ (struct ipu_fw_psys_param_terminal *)terminal; -+ if (!ipu_fw_psys_pg_get_protocol(kcmd)) -+ pterminal->param_payload.buffer = buffer; -+ else -+ pterminal->param_payload.terminal_index = terminal_idx; -+ } -+ return 0; -+} -+ -+void ipu_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note) -+{ -+ ipu6_fw_psys_pg_dump(psys, kcmd, note); -+} -+ -+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->ID; -+} -+ -+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->terminal_count; -+} -+ -+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->size; -+} -+ -+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, -+ dma_addr_t vaddress) -+{ -+ kcmd->kpg->pg->ipu_virtual_address = vaddress; -+ return 0; -+} -+ -+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd -+ *kcmd, int index) -+{ -+ struct ipu_fw_psys_terminal *terminal; -+ u16 *terminal_offset_table; -+ -+ terminal_offset_table = -+ (uint16_t *)((char *)kcmd->kpg->pg + -+ kcmd->kpg->pg->terminals_offset); -+ terminal = (struct ipu_fw_psys_terminal *) -+ ((char *)kcmd->kpg->pg + terminal_offset_table[index]); -+ return terminal; -+} -+ -+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) -+{ -+ kcmd->kpg->pg->token = (u64)token; -+} -+ -+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->token; -+} -+ -+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->protocol_version; -+} -+ -+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, -+ struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, u32 buffer) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set; 
-+ u32 buffer_state; -+ u32 *buffer_ptr; -+ u32 type; -+ -+ type = terminal->terminal_type; -+ -+ switch (type) { -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: -+ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: -+ buffer_state = IPU_FW_PSYS_BUFFER_FULL; -+ break; -+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: -+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: -+ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; -+ break; -+ default: -+ dev_err(dev, "unknown terminal type: 0x%x\n", type); -+ return -EAGAIN; -+ } -+ -+ buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) + -+ terminal_idx * sizeof(*buffer_ptr)); -+ -+ *buffer_ptr = buffer; -+ -+ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || -+ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { -+ struct ipu_fw_psys_data_terminal *dterminal = -+ (struct ipu_fw_psys_data_terminal *)terminal; -+ dterminal->frame.buffer_state = buffer_state; -+ } -+ -+ return 0; -+} -+ -+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd) -+{ -+ return (sizeof(struct ipu_fw_psys_buffer_set) + -+ kcmd->kpg->pg->terminal_count * sizeof(u32)); -+} -+ -+int -+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 vaddress) -+{ -+ buf_set->ipu_virtual_address = vaddress; -+ return 0; -+} -+ -+int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 *kernel_enable_bitmap) -+{ -+ memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap, -+ sizeof(buf_set->kernel_enable_bitmap)); -+ 
return 0; -+} -+ -+struct ipu_fw_psys_buffer_set * -+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, -+ void *kaddr, u32 frame_counter) -+{ -+ struct ipu_fw_psys_buffer_set *buffer_set = NULL; -+ unsigned int i; -+ -+ buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr; -+ -+ /* -+ * Set base struct members -+ */ -+ buffer_set->ipu_virtual_address = 0; -+ buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address; -+ buffer_set->frame_counter = frame_counter; -+ buffer_set->terminal_count = kcmd->kpg->pg->terminal_count; -+ -+ /* -+ * Initialize adjacent buffer addresses -+ */ -+ for (i = 0; i < buffer_set->terminal_count; i++) { -+ u32 *buffer = -+ (u32 *)((char *)buffer_set + -+ sizeof(*buffer_set) + sizeof(u32) * i); -+ -+ *buffer = 0; -+ } -+ -+ return buffer_set; -+} -+ -+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; -+ struct ipu_fw_psys_cmd *psys_cmd; -+ unsigned int queue_id; -+ int ret = 0; -+ unsigned int size; -+ -+ if (ipu_ver == IPU6_VER_6SE) -+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ else -+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ queue_id = kcmd->kpg->pg->base_queue_id; -+ -+ if (queue_id >= size) -+ return -EINVAL; -+ -+ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, queue_id); -+ if (!psys_cmd) { -+ dev_err(dev, "%s failed to get token!\n", __func__); -+ kcmd->pg_user = NULL; -+ return -ENODATA; -+ } -+ -+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN; -+ psys_cmd->msg = 0; -+ psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address; -+ -+ ipu6_send_put_token(kcmd->fh->psys->fwcom, queue_id); -+ -+ return ret; -+} -+ -+u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd) -+{ -+ return kcmd->kpg->pg->base_queue_id; -+} -+ -+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id) -+{ -+ kcmd->kpg->pg->base_queue_id = queue_id; -+} -+ -+int ipu_fw_psys_open(struct ipu_psys 
*psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int retry = IPU_PSYS_OPEN_RETRY, retval; -+ -+ retval = ipu6_fw_com_open(psys->fwcom); -+ if (retval) { -+ dev_err(dev, "fw com open failed.\n"); -+ return retval; -+ } -+ -+ do { -+ usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, -+ IPU_PSYS_OPEN_TIMEOUT_US + 10); -+ retval = ipu6_fw_com_ready(psys->fwcom); -+ if (retval) { -+ dev_dbg(dev, "psys port open ready!\n"); -+ break; -+ } -+ retry--; -+ } while (retry > 0); -+ -+ if (!retry) { -+ dev_err(dev, "psys port open ready failed\n"); -+ ipu6_fw_com_close(psys->fwcom); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int ipu_fw_psys_close(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int retval; -+ -+ retval = ipu6_fw_com_close(psys->fwcom); -+ if (retval) { -+ dev_err(dev, "fw com close failed.\n"); -+ return retval; -+ } -+ return retval; -+} -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h -new file mode 100644 -index 0000000..492a9ea ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h -@@ -0,0 +1,382 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2016 - 2024 Intel Corporation */ -+ -+#ifndef IPU_FW_PSYS_H -+#define IPU_FW_PSYS_H -+ -+#include "ipu6-platform-resources.h" -+#include "ipu6se-platform-resources.h" -+#include "ipu6ep-platform-resources.h" -+ -+#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20 -+#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40 -+ -+#define IPU_FW_PSYS_CMD_BITS 64 -+#define IPU_FW_PSYS_EVENT_BITS 128 -+ -+enum { -+ IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0, -+ IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, -+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, -+ 
IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12, -+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13 -+}; -+ -+enum { -+ IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID, -+ IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID -+}; -+ -+enum { -+ IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0, -+ IPU_FW_PSYS_PROCESS_GROUP_CREATED, -+ IPU_FW_PSYS_PROCESS_GROUP_READY, -+ IPU_FW_PSYS_PROCESS_GROUP_BLOCKED, -+ IPU_FW_PSYS_PROCESS_GROUP_STARTED, -+ IPU_FW_PSYS_PROCESS_GROUP_RUNNING, -+ IPU_FW_PSYS_PROCESS_GROUP_STALLED, -+ IPU_FW_PSYS_PROCESS_GROUP_STOPPED, -+ IPU_FW_PSYS_N_PROCESS_GROUP_STATES -+}; -+ -+enum { -+ IPU_FW_PSYS_CONNECTION_MEMORY = 0, -+ IPU_FW_PSYS_CONNECTION_MEMORY_STREAM, -+ IPU_FW_PSYS_CONNECTION_STREAM, -+ IPU_FW_PSYS_N_CONNECTION_TYPES -+}; -+ -+enum { -+ IPU_FW_PSYS_BUFFER_NULL = 0, -+ IPU_FW_PSYS_BUFFER_UNDEFINED, -+ IPU_FW_PSYS_BUFFER_EMPTY, -+ IPU_FW_PSYS_BUFFER_NONEMPTY, -+ IPU_FW_PSYS_BUFFER_FULL, -+ IPU_FW_PSYS_N_BUFFER_STATES -+}; -+ -+enum { -+ IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0, -+ IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN, -+ IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT, -+ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM, -+ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, -+ IPU_FW_PSYS_N_TERMINAL_TYPES -+}; -+ -+enum { -+ IPU_FW_PSYS_COL_DIMENSION = 0, -+ IPU_FW_PSYS_ROW_DIMENSION = 1, -+ IPU_FW_PSYS_N_DATA_DIMENSION = 2 -+}; -+ -+enum { -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0, -+ 
IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_START, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, -+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, -+ IPU_FW_PSYS_N_PROCESS_GROUP_CMDS -+}; -+ -+enum { -+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, -+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG, -+ IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS -+}; -+ -+struct __packed ipu_fw_psys_process_group { -+ u64 token; -+ u64 private_token; -+ u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; -+ u32 kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS]; -+ u32 size; -+ u32 psys_server_init_cycles; -+ u32 pg_load_start_ts; -+ u32 pg_load_cycles; -+ u32 pg_init_cycles; -+ u32 pg_processing_cycles; -+ u32 pg_next_frame_init_cycles; -+ u32 pg_complete_cycles; -+ u32 ID; -+ u32 state; -+ u32 ipu_virtual_address; -+ u32 resource_bitmap; -+ u16 fragment_count; -+ u16 fragment_state; -+ u16 fragment_limit; -+ u16 processes_offset; -+ u16 terminals_offset; -+ u8 process_count; -+ u8 terminal_count; -+ u8 subgraph_count; -+ u8 protocol_version; -+ u8 base_queue_id; -+ u8 num_queues; -+ u8 mask_irq; -+ u8 error_handling_enable; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; -+}; -+ -+struct ipu_fw_psys_srv_init { -+ void *host_ddr_pkg_dir; -+ u32 ddr_pkg_dir_address; -+ u32 pkg_dir_size; -+ -+ u32 icache_prefetch_sp; -+ u32 icache_prefetch_isp; -+}; -+ -+struct __packed ipu_fw_psys_cmd { -+ u16 command; -+ u16 msg; -+ u32 context_handle; -+}; -+ -+struct __packed ipu_fw_psys_event { -+ u16 status; -+ u16 command; -+ u32 context_handle; -+ u64 token; -+}; -+ -+struct ipu_fw_psys_terminal { -+ u32 terminal_type; -+ s16 parent_offset; -+ u16 size; -+ u16 tm_index; -+ u8 ID; -+ u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; -+}; -+ -+struct ipu_fw_psys_param_payload { -+ u64 host_buffer; -+ u32 buffer; -+ u32 terminal_index; -+}; -+ -+struct ipu_fw_psys_param_terminal { -+ struct ipu_fw_psys_terminal base; -+ struct ipu_fw_psys_param_payload param_payload; -+ u16 param_section_desc_offset; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; -+}; -+ -+struct ipu_fw_psys_frame { -+ u32 buffer_state; -+ u32 access_type; -+ u32 pointer_state; -+ u32 access_scope; -+ u32 data; -+ u32 data_index; -+ u32 data_bytes; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; -+}; -+ -+struct ipu_fw_psys_frame_descriptor { -+ u32 frame_format_type; -+ u32 plane_count; -+ u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; -+ u32 stride[1]; -+ u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; -+ u16 dimension[2]; -+ u16 size; -+ u8 bpp; -+ u8 bpe; -+ u8 is_compressed; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; -+}; -+ -+struct ipu_fw_psys_stream { -+ u64 dummy; -+}; -+ -+struct ipu_fw_psys_data_terminal { -+ struct ipu_fw_psys_terminal base; -+ struct ipu_fw_psys_frame_descriptor frame_descriptor; -+ struct ipu_fw_psys_frame frame; -+ struct ipu_fw_psys_stream stream; -+ u32 reserved; -+ u32 connection_type; -+ u16 fragment_descriptor_offset; -+ u8 kernel_id; -+ u8 subgraph_id; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; -+}; -+ -+struct ipu_fw_psys_buffer_set { -+ u64 token; -+ u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS]; -+ u32 ipu_virtual_address; -+ u32 process_group_handle; -+ u16 terminal_count; -+ u8 frame_counter; -+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; -+}; -+ -+struct ipu_fw_psys_pgm { -+ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ u32 ID; -+ u16 
program_manifest_offset; -+ u16 terminal_manifest_offset; -+ u16 private_data_offset; -+ u16 rbm_manifest_offset; -+ u16 size; -+ u8 alignment; -+ u8 kernel_count; -+ u8 program_count; -+ u8 terminal_count; -+ u8 subgraph_count; -+ u8 reserved[5]; -+}; -+ -+struct ipu_fw_generic_program_manifest { -+ u16 *dev_chn_size; -+ u16 *dev_chn_offset; -+ u16 *ext_mem_size; -+ u16 *ext_mem_offset; -+ u8 cell_id; -+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; -+ u8 cell_type_id; -+ u8 *is_dfm_relocatable; -+ u32 *dfm_port_bitmap; -+ u32 *dfm_active_port_bitmap; -+}; -+ -+struct ipu_fw_resource_definitions { -+ u32 num_cells; -+ u32 num_cells_type; -+ const u8 *cells; -+ u32 num_dev_channels; -+ const u16 *dev_channels; -+ -+ u32 num_ext_mem_types; -+ u32 num_ext_mem_ids; -+ const u16 *ext_mem_ids; -+ -+ u32 num_dfm_ids; -+ const u16 *dfms; -+ -+ u32 cell_mem_row; -+ const u8 *cell_mem; -+}; -+ -+struct ipu6_psys_hw_res_variant { -+ unsigned int queue_num; -+ unsigned int cell_num; -+ int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value); -+ int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, u32 active_bitmap); -+ int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset); -+ int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process); -+}; -+ -+struct ipu_psys_kcmd; -+struct ipu_psys; -+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); -+int 
ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_rcv_event(struct ipu_psys *psys, -+ struct ipu_fw_psys_event *event); -+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, -+ struct ipu_psys_kcmd *kcmd, -+ u32 buffer, unsigned int size); -+void ipu_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note); -+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, -+ dma_addr_t vaddress); -+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd -+ *kcmd, int index); -+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); -+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, -+ struct ipu_fw_psys_terminal *terminal, -+ int terminal_idx, u32 buffer); -+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd); -+int -+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 vaddress); -+int ipu_fw_psys_ppg_buffer_set_set_keb(struct ipu_fw_psys_buffer_set *buf_set, -+ u32 *kernel_enable_bitmap); -+struct ipu_fw_psys_buffer_set * -+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, -+ void *kaddr, u32 frame_counter); -+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd); -+u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd); -+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id); -+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); -+int ipu_fw_psys_open(struct 
ipu_psys *psys); -+int ipu_fw_psys_close(struct ipu_psys *psys); -+ -+/* common resource interface for both abi and api mode */ -+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, -+ u8 value); -+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); -+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); -+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value); -+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset); -+int -+ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process); -+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value); -+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap); -+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset); -+int -+ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process); -+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note); -+void ipu6_psys_hw_res_variant_init(void); -+#endif /* IPU_FW_PSYS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c -new file mode 100644 -index 0000000..b6af0b7 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c -@@ -0,0 +1,103 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6-platform-resources.h" -+#include "ipu6se-platform-resources.h" -+ -+/********** Generic resource 
handling **********/ -+ -+/* -+ * Extension library gives byte offsets to its internal structures. -+ * use those offsets to update fields. Without extension lib access -+ * structures directly. -+ */ -+static const struct ipu6_psys_hw_res_variant *var = &hw_var; -+ -+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, -+ u8 value) -+{ -+ struct ipu_fw_psys_process_group *parent = -+ (struct ipu_fw_psys_process_group *)((char *)ptr + -+ ptr->parent_offset); -+ -+ ptr->cells[index] = value; -+ parent->resource_bitmap |= 1 << value; -+ -+ return 0; -+} -+ -+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) -+{ -+ return ptr->cells[index]; -+} -+ -+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) -+{ -+ struct ipu_fw_psys_process_group *parent; -+ u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); -+ int retval = -1; -+ u8 value; -+ -+ parent = (struct ipu_fw_psys_process_group *)((char *)ptr + -+ ptr->parent_offset); -+ -+ value = var->cell_num; -+ if ((1 << cell_id) != 0 && -+ ((1 << cell_id) & parent->resource_bitmap)) { -+ ipu_fw_psys_set_process_cell_id(ptr, 0, value); -+ parent->resource_bitmap &= ~(1 << cell_id); -+ retval = 0; -+ } -+ -+ return retval; -+} -+ -+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value) -+{ -+ if (var->set_proc_dev_chn) -+ return var->set_proc_dev_chn(ptr, offset, value); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap) -+{ -+ if (var->set_proc_dfm_bitmap) -+ return var->set_proc_dfm_bitmap(ptr, id, bitmap, -+ active_bitmap); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset) -+{ -+ if (var->set_proc_ext_mem) -+ return 
var->set_proc_ext_mem(ptr, type_id, mem_id, offset); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -+int -+ipu_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process) -+{ -+ if (var->get_pgm_by_proc) -+ return var->get_pgm_by_proc(gen_pm, pg_manifest, process); -+ -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ return 0; -+} -+ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h -new file mode 100644 -index 0000000..dc2cae3 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h -@@ -0,0 +1,78 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2020 - 2024 Intel Corporation */ -+ -+#ifndef IPU_PLATFORM_PSYS_H -+#define IPU_PLATFORM_PSYS_H -+ -+#include "ipu-psys.h" -+#include -+ -+#define IPU_PSYS_BUF_SET_POOL_SIZE 8 -+#define IPU_PSYS_BUF_SET_MAX_SIZE 1024 -+ -+struct ipu_fw_psys_buffer_set; -+ -+enum ipu_psys_cmd_state { -+ KCMD_STATE_PPG_NEW, -+ KCMD_STATE_PPG_START, -+ KCMD_STATE_PPG_ENQUEUE, -+ KCMD_STATE_PPG_STOP, -+ KCMD_STATE_PPG_COMPLETE -+}; -+ -+struct ipu_psys_scheduler { -+ struct list_head ppgs; -+ struct mutex bs_mutex; /* Protects buf_set field */ -+ struct list_head buf_sets; -+}; -+ -+enum ipu_psys_ppg_state { -+ PPG_STATE_START = (1 << 0), -+ PPG_STATE_STARTING = (1 << 1), -+ PPG_STATE_STARTED = (1 << 2), -+ PPG_STATE_RUNNING = (1 << 3), -+ PPG_STATE_SUSPEND = (1 << 4), -+ PPG_STATE_SUSPENDING = (1 << 5), -+ PPG_STATE_SUSPENDED = (1 << 6), -+ PPG_STATE_RESUME = (1 << 7), -+ PPG_STATE_RESUMING = (1 << 8), -+ PPG_STATE_RESUMED = (1 << 9), -+ PPG_STATE_STOP = (1 << 10), -+ PPG_STATE_STOPPING = (1 << 11), -+ PPG_STATE_STOPPED = (1 << 12), -+}; -+ -+struct ipu_psys_ppg { -+ struct ipu_psys_pg *kpg; -+ struct ipu_psys_fh *fh; -+ struct list_head list; -+ struct list_head 
sched_list; -+ u64 token; -+ void *manifest; -+ struct mutex mutex; /* Protects kcmd and ppg state field */ -+ struct list_head kcmds_new_list; -+ struct list_head kcmds_processing_list; -+ struct list_head kcmds_finished_list; -+ enum ipu_psys_ppg_state state; -+ u32 pri_base; -+ int pri_dynamic; -+}; -+ -+struct ipu_psys_buffer_set { -+ struct list_head list; -+ struct ipu_fw_psys_buffer_set *buf_set; -+ size_t size; -+ size_t buf_set_size; -+ dma_addr_t dma_addr; -+ void *kaddr; -+ struct ipu_psys_kcmd *kcmd; -+}; -+ -+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd); -+void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, -+ struct ipu_psys_kcmd *kcmd, -+ int error); -+int ipu_psys_fh_init(struct ipu_psys_fh *fh); -+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh); -+ -+#endif /* IPU_PLATFORM_PSYS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h -new file mode 100644 -index 0000000..1f064dc ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h -@@ -0,0 +1,103 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2018 - 2024 Intel Corporation */ -+ -+#ifndef IPU_PLATFORM_RESOURCES_COMMON_H -+#define IPU_PLATFORM_RESOURCES_COMMON_H -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 0 -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 2 -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT 2 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 -+#define IPU_FW_PSYS_N_FRAME_PLANES 6 -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 -+ -+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 1 -+ 
-+#define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES 4 -+#define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES 4 -+ -+#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 -+#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 4 -+#define IPU_FW_PSYS_RBM_NOF_ELEMS 5 -+#define IPU_FW_PSYS_KBM_NOF_ELEMS 4 -+ -+struct ipu_fw_psys_process { -+ s16 parent_offset; -+ u8 size; -+ u8 cell_dependencies_offset; -+ u8 terminal_dependencies_offset; -+ u8 process_extension_offset; -+ u8 ID; -+ u8 program_idx; -+ u8 state; -+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; -+ u8 cell_dependency_count; -+ u8 terminal_dependency_count; -+}; -+ -+struct ipu_fw_psys_program_manifest { -+ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; -+ s16 parent_offset; -+ u8 program_dependency_offset; -+ u8 terminal_dependency_offset; -+ u8 size; -+ u8 program_extension_offset; -+ u8 program_type; -+ u8 ID; -+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; -+ u8 cell_type_id; -+ u8 program_dependency_count; -+ u8 terminal_dependency_count; -+}; -+ -+/* platform specific resource interface */ -+struct ipu_psys_resource_pool; -+struct ipu_psys_resource_alloc; -+struct ipu_fw_psys_process_group; -+int ipu_psys_allocate_resources(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *pool); -+int ipu_psys_move_resources(const struct device *dev, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *source_pool, -+ struct ipu_psys_resource_pool *target_pool); -+ -+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, -+ struct ipu_psys_resource_pool *dest); -+ -+int ipu_psys_try_allocate_resources(struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_pool *pool); -+ -+void ipu_psys_reset_process_cell(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ int process_count); -+void ipu_psys_free_resources(struct 
ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *pool); -+ -+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap); -+ -+int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool); -+void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, -+ u8 queue_id); -+ -+extern const struct ipu_fw_resource_definitions *ipu6_res_defs; -+extern const struct ipu_fw_resource_definitions *ipu6se_res_defs; -+extern const struct ipu_fw_resource_definitions *ipu6ep_res_defs; -+extern struct ipu6_psys_hw_res_variant hw_var; -+#endif /* IPU_PLATFORM_RESOURCES_COMMON_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c -new file mode 100644 -index 0000000..32350ca ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c -@@ -0,0 +1,222 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2013 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "ipu-psys.h" -+ -+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long ret = -ENOTTY; -+ -+ if (file->f_op->unlocked_ioctl) -+ ret = file->f_op->unlocked_ioctl(file, cmd, arg); -+ -+ return ret; -+} -+ -+struct ipu_psys_buffer32 { -+ u64 len; -+ union { -+ int fd; -+ compat_uptr_t userptr; -+ u64 reserved; -+ } base; -+ u32 data_offset; -+ u32 bytes_used; -+ u32 flags; -+ u32 reserved[2]; -+} __packed; -+ -+struct ipu_psys_command32 { -+ u64 issue_id; -+ u64 user_token; -+ u32 priority; -+ compat_uptr_t pg_manifest; -+ compat_uptr_t buffers; -+ int pg; -+ u32 pg_manifest_size; -+ u32 bufcount; -+ u32 min_psys_freq; -+ u32 frame_counter; -+ u32 reserved[2]; -+} __packed; -+ -+struct ipu_psys_manifest32 { -+ u32 index; -+ u32 size; -+ compat_uptr_t manifest; -+ u32 reserved[5]; -+} __packed; -+ -+static int -+get_ipu_psys_command32(struct 
ipu_psys_command *kp, -+ struct ipu_psys_command32 __user *up) -+{ -+ compat_uptr_t pgm, bufs; -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_command32)); -+ if (!access_ok || get_user(kp->issue_id, &up->issue_id) || -+ get_user(kp->user_token, &up->user_token) || -+ get_user(kp->priority, &up->priority) || -+ get_user(pgm, &up->pg_manifest) || -+ get_user(bufs, &up->buffers) || -+ get_user(kp->pg, &up->pg) || -+ get_user(kp->pg_manifest_size, &up->pg_manifest_size) || -+ get_user(kp->bufcount, &up->bufcount) || -+ get_user(kp->min_psys_freq, &up->min_psys_freq) || -+ get_user(kp->frame_counter, &up->frame_counter) -+ ) -+ return -EFAULT; -+ -+ kp->pg_manifest = compat_ptr(pgm); -+ kp->buffers = compat_ptr(bufs); -+ -+ return 0; -+} -+ -+static int -+get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, -+ struct ipu_psys_buffer32 __user *up) -+{ -+ compat_uptr_t ptr; -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); -+ if (!access_ok || get_user(kp->len, &up->len) || -+ get_user(ptr, &up->base.userptr) || -+ get_user(kp->data_offset, &up->data_offset) || -+ get_user(kp->bytes_used, &up->bytes_used) || -+ get_user(kp->flags, &up->flags)) -+ return -EFAULT; -+ -+ kp->base.userptr = compat_ptr(ptr); -+ -+ return 0; -+} -+ -+static int -+put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, -+ struct ipu_psys_buffer32 __user *up) -+{ -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); -+ if (!access_ok || put_user(kp->len, &up->len) || -+ put_user(kp->base.fd, &up->base.fd) || -+ put_user(kp->data_offset, &up->data_offset) || -+ put_user(kp->bytes_used, &up->bytes_used) || -+ put_user(kp->flags, &up->flags)) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+static int -+get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, -+ struct ipu_psys_manifest32 __user *up) -+{ -+ compat_uptr_t ptr; -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); -+ if 
(!access_ok || get_user(kp->index, &up->index) || -+ get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) -+ return -EFAULT; -+ -+ kp->manifest = compat_ptr(ptr); -+ -+ return 0; -+} -+ -+static int -+put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, -+ struct ipu_psys_manifest32 __user *up) -+{ -+ compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); -+ bool access_ok; -+ -+ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); -+ if (!access_ok || put_user(kp->index, &up->index) || -+ put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) -+#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) -+#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) -+#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) -+#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) -+ -+long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ union { -+ struct ipu_psys_buffer buf; -+ struct ipu_psys_command cmd; -+ struct ipu_psys_event ev; -+ struct ipu_psys_manifest m; -+ } karg; -+ int compatible_arg = 1; -+ int err = 0; -+ void __user *up = compat_ptr(arg); -+ -+ switch (cmd) { -+ case IPU_IOC_GETBUF32: -+ cmd = IPU_IOC_GETBUF; -+ break; -+ case IPU_IOC_PUTBUF32: -+ cmd = IPU_IOC_PUTBUF; -+ break; -+ case IPU_IOC_QCMD32: -+ cmd = IPU_IOC_QCMD; -+ break; -+ case IPU_IOC_GET_MANIFEST32: -+ cmd = IPU_IOC_GET_MANIFEST; -+ break; -+ } -+ -+ switch (cmd) { -+ case IPU_IOC_GETBUF: -+ case IPU_IOC_PUTBUF: -+ err = get_ipu_psys_buffer32(&karg.buf, up); -+ compatible_arg = 0; -+ break; -+ case IPU_IOC_QCMD: -+ err = get_ipu_psys_command32(&karg.cmd, up); -+ compatible_arg = 0; -+ break; -+ case IPU_IOC_GET_MANIFEST: -+ err = get_ipu_psys_manifest32(&karg.m, up); -+ compatible_arg = 0; -+ break; -+ } -+ if (err) -+ return err; -+ -+ if (compatible_arg) { -+ err 
= native_ioctl(file, cmd, (unsigned long)up); -+ } else { -+ err = native_ioctl(file, cmd, (unsigned long)&karg); -+ } -+ -+ if (err) -+ return err; -+ -+ switch (cmd) { -+ case IPU_IOC_GETBUF: -+ err = put_ipu_psys_buffer32(&karg.buf, up); -+ break; -+ case IPU_IOC_GET_MANIFEST: -+ err = put_ipu_psys_manifest32(&karg.m, up); -+ break; -+ } -+ return err; -+} -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -new file mode 100644 -index 0000000..dcd0dce ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -@@ -0,0 +1,1599 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2013 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ipu6.h" -+#include "ipu6-mmu.h" -+#include "ipu6-bus.h" -+#include "ipu6-buttress.h" -+#include "ipu6-cpd.h" -+#include "ipu-fw-psys.h" -+#include "ipu-psys.h" -+#include "ipu6-platform-regs.h" -+#include "ipu6-fw-com.h" -+ -+static bool async_fw_init; -+module_param(async_fw_init, bool, 0664); -+MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); -+ -+#define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7 -+ -+#define IPU_PSYS_NUM_DEVICES 4 -+ -+#define IPU_PSYS_MAX_NUM_DESCS 1024 -+#define IPU_PSYS_MAX_NUM_BUFS 1024 -+#define IPU_PSYS_MAX_NUM_BUFS_LRU 12 -+ -+static int psys_runtime_pm_resume(struct device *dev); -+static int psys_runtime_pm_suspend(struct device *dev); -+ -+static dev_t ipu_psys_dev_t; -+static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); -+static DEFINE_MUTEX(ipu_psys_mutex); -+ -+static struct fw_init_task { -+ struct delayed_work work; -+ struct ipu_psys *psys; -+} fw_init_task; -+ -+static void ipu6_psys_remove(struct auxiliary_device *auxdev); -+ -+static const struct bus_type 
ipu_psys_bus = { -+ .name = "intel-ipu6-psys", -+}; -+#define PKG_DIR_ENT_LEN_FOR_PSYS 2 -+#define PKG_DIR_SIZE_MASK_FOR_PSYS GENMASK(23, 0) -+ -+enum ipu6_version ipu_ver; -+ -+static u32 ipu6_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) -+{ -+ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS]; -+} -+ -+static u32 ipu6_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) -+{ -+ return pkg_dir[1]; -+} -+ -+static u32 ipu6_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) -+{ -+ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] & -+ PKG_DIR_SIZE_MASK_FOR_PSYS; -+} -+ -+#define PKG_DIR_ID_SHIFT 48 -+#define PKG_DIR_ID_MASK 0x7f -+ -+static u32 ipu6_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) -+{ -+ return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] >> -+ PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; -+} -+ -+/* -+ * These are some trivial wrappers that save us from open-coding some -+ * common patterns and also that's were we have some checking (for the -+ * time being) -+ */ -+static void ipu_desc_add(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) -+{ -+ fh->num_descs++; -+ -+ WARN_ON_ONCE(fh->num_descs >= IPU_PSYS_MAX_NUM_DESCS); -+ list_add(&desc->list, &fh->descs_list); -+} -+ -+static void ipu_desc_del(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) -+{ -+ fh->num_descs--; -+ list_del_init(&desc->list); -+} -+ -+static void ipu_buffer_add(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs++; -+ -+ WARN_ON_ONCE(fh->num_bufs >= IPU_PSYS_MAX_NUM_BUFS); -+ list_add(&kbuf->list, &fh->bufs_list); -+} -+ -+static void ipu_buffer_del(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs--; -+ list_del_init(&kbuf->list); -+} -+ -+static void ipu_buffer_lru_add(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs_lru++; -+ list_add_tail(&kbuf->list, &fh->bufs_lru); -+} -+ -+static void ipu_buffer_lru_del(struct ipu_psys_fh *fh, -+ struct 
ipu_psys_kbuffer *kbuf) -+{ -+ fh->num_bufs_lru--; -+ list_del_init(&kbuf->list); -+} -+ -+static struct ipu_psys_kbuffer *ipu_psys_kbuffer_alloc(void) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); -+ if (!kbuf) -+ return NULL; -+ -+ atomic_set(&kbuf->map_count, 0); -+ INIT_LIST_HEAD(&kbuf->list); -+ return kbuf; -+} -+ -+static struct ipu_psys_desc *ipu_psys_desc_alloc(int fd) -+{ -+ struct ipu_psys_desc *desc; -+ -+ desc = kzalloc(sizeof(*desc), GFP_KERNEL); -+ if (!desc) -+ return NULL; -+ -+ desc->fd = fd; -+ INIT_LIST_HEAD(&desc->list); -+ return desc; -+} -+ -+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) -+{ -+ struct ipu_psys_pg *kpg; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ list_for_each_entry(kpg, &psys->pgs, list) { -+ if (!kpg->pg_size && kpg->size >= pg_size) { -+ kpg->pg_size = pg_size; -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ return kpg; -+ } -+ } -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ /* no big enough buffer available, allocate new one */ -+ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); -+ if (!kpg) -+ return NULL; -+ -+ kpg->pg = ipu6_dma_alloc(psys->adev, pg_size, &kpg->pg_dma_addr, -+ GFP_KERNEL, 0); -+ if (!kpg->pg) { -+ kfree(kpg); -+ return NULL; -+ } -+ -+ kpg->pg_size = pg_size; -+ kpg->size = pg_size; -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ list_add(&kpg->list, &psys->pgs); -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ -+ return kpg; -+} -+ -+static struct ipu_psys_desc *psys_desc_lookup(struct ipu_psys_fh *fh, int fd) -+{ -+ struct ipu_psys_desc *desc; -+ -+ list_for_each_entry(desc, &fh->descs_list, list) { -+ if (desc->fd == fd) -+ return desc; -+ } -+ -+ return NULL; -+} -+ -+static bool dmabuf_cmp(struct dma_buf *lb, struct dma_buf *rb) -+{ -+ return lb == rb && lb->size == rb->size; -+} -+ -+static struct ipu_psys_kbuffer *psys_buf_lookup(struct ipu_psys_fh *fh, int fd) -+{ -+ struct 
ipu_psys_kbuffer *kbuf; -+ struct dma_buf *dma_buf; -+ -+ dma_buf = dma_buf_get(fd); -+ if (IS_ERR(dma_buf)) -+ return NULL; -+ -+ /* -+ * First lookup so-called `active` list, that is the list of -+ * referenced buffers -+ */ -+ list_for_each_entry(kbuf, &fh->bufs_list, list) { -+ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { -+ dma_buf_put(dma_buf); -+ return kbuf; -+ } -+ } -+ -+ /* -+ * We didn't find anything on the `active` list, try the LRU list -+ * (list of unreferenced buffers) and possibly resurrect a buffer -+ */ -+ list_for_each_entry(kbuf, &fh->bufs_lru, list) { -+ if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { -+ dma_buf_put(dma_buf); -+ ipu_buffer_lru_del(fh, kbuf); -+ ipu_buffer_add(fh, kbuf); -+ return kbuf; -+ } -+ } -+ -+ dma_buf_put(dma_buf); -+ return NULL; -+} -+ -+struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) -+{ -+ struct ipu_psys_desc *desc; -+ -+ desc = psys_desc_lookup(fh, fd); -+ if (!desc) -+ return NULL; -+ -+ return desc->kbuf; -+} -+ -+struct ipu_psys_kbuffer * -+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) -+{ -+ struct ipu_psys_kbuffer *kbuffer; -+ -+ list_for_each_entry(kbuffer, &fh->bufs_list, list) { -+ if (kbuffer->kaddr == kaddr) -+ return kbuffer; -+ } -+ -+ return NULL; -+} -+ -+static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) -+{ -+ struct vm_area_struct *vma; -+ unsigned long start, end; -+ int npages, array_size; -+ struct page **pages; -+ struct sg_table *sgt; -+ int ret = -ENOMEM; -+ int nr = 0; -+ u32 flags; -+ -+ start = attach->userptr; -+ end = PAGE_ALIGN(start + attach->len); -+ npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; -+ array_size = npages * sizeof(struct page *); -+ -+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); -+ if (!sgt) -+ return -ENOMEM; -+ -+ WARN_ON_ONCE(attach->npages); -+ -+ pages = kvzalloc(array_size, GFP_KERNEL); -+ if (!pages) -+ goto free_sgt; -+ -+ mmap_read_lock(current->mm); -+ vma = vma_lookup(current->mm, start); -+ if 
(unlikely(!vma)) { -+ ret = -EFAULT; -+ goto error_up_read; -+ } -+ mmap_read_unlock(current->mm); -+ -+ flags = FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM; -+ nr = pin_user_pages_fast(start & PAGE_MASK, npages, -+ flags, pages); -+ if (nr < npages) -+ goto error; -+ -+ attach->pages = pages; -+ attach->npages = npages; -+ -+ ret = sg_alloc_table_from_pages(sgt, pages, npages, -+ start & ~PAGE_MASK, attach->len, -+ GFP_KERNEL); -+ if (ret < 0) -+ goto error; -+ -+ attach->sgt = sgt; -+ -+ return 0; -+ -+error_up_read: -+ mmap_read_unlock(current->mm); -+error: -+ if (nr) -+ unpin_user_pages(pages, nr); -+ kvfree(pages); -+free_sgt: -+ kfree(sgt); -+ -+ pr_err("failed to get userpages:%d\n", ret); -+ -+ return ret; -+} -+ -+static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) -+{ -+ if (!attach || !attach->userptr || !attach->sgt) -+ return; -+ -+ unpin_user_pages(attach->pages, attach->npages); -+ kvfree(attach->pages); -+ -+ sg_free_table(attach->sgt); -+ kfree(attach->sgt); -+ attach->sgt = NULL; -+} -+ -+static int ipu_dma_buf_attach(struct dma_buf *dbuf, -+ struct dma_buf_attachment *attach) -+{ -+ struct ipu_psys_kbuffer *kbuf = dbuf->priv; -+ struct ipu_dma_buf_attach *ipu_attach; -+ int ret; -+ -+ ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); -+ if (!ipu_attach) -+ return -ENOMEM; -+ -+ ipu_attach->len = kbuf->len; -+ ipu_attach->userptr = kbuf->userptr; -+ -+ ret = ipu_psys_get_userpages(ipu_attach); -+ if (ret) { -+ kfree(ipu_attach); -+ return ret; -+ } -+ -+ attach->priv = ipu_attach; -+ return 0; -+} -+ -+static void ipu_dma_buf_detach(struct dma_buf *dbuf, -+ struct dma_buf_attachment *attach) -+{ -+ struct ipu_dma_buf_attach *ipu_attach = attach->priv; -+ -+ ipu_psys_put_userpages(ipu_attach); -+ kfree(ipu_attach); -+ attach->priv = NULL; -+} -+ -+static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, -+ enum dma_data_direction dir) -+{ -+ struct ipu_dma_buf_attach *ipu_attach = attach->priv; -+ struct 
pci_dev *pdev = to_pci_dev(attach->dev); -+ struct ipu6_device *isp = pci_get_drvdata(pdev); -+ struct ipu6_bus_device *adev = isp->psys; -+ unsigned long attrs; -+ int ret; -+ -+ attrs = DMA_ATTR_SKIP_CPU_SYNC; -+ ret = dma_map_sgtable(&pdev->dev, ipu_attach->sgt, dir, attrs); -+ if (ret) { -+ dev_err(attach->dev, "pci buf map failed\n"); -+ return ERR_PTR(-EIO); -+ } -+ -+ dma_sync_sgtable_for_device(&pdev->dev, ipu_attach->sgt, dir); -+ -+ ret = ipu6_dma_map_sgtable(adev, ipu_attach->sgt, dir, 0); -+ if (ret) { -+ dev_err(attach->dev, "ipu6 buf map failed\n"); -+ return ERR_PTR(-EIO); -+ } -+ -+ ipu6_dma_sync_sgtable(adev, ipu_attach->sgt); -+ -+ return ipu_attach->sgt; -+} -+ -+static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, -+ struct sg_table *sgt, enum dma_data_direction dir) -+{ -+ struct pci_dev *pdev = to_pci_dev(attach->dev); -+ struct ipu6_device *isp = pci_get_drvdata(pdev); -+ struct ipu6_bus_device *adev = isp->psys; -+ -+ ipu6_dma_unmap_sgtable(adev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); -+ dma_unmap_sgtable(&pdev->dev, sgt, dir, 0); -+} -+ -+static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) -+{ -+ return -ENOTTY; -+} -+ -+static void ipu_dma_buf_release(struct dma_buf *buf) -+{ -+ struct ipu_psys_kbuffer *kbuf = buf->priv; -+ -+ if (!kbuf) -+ return; -+ -+ if (kbuf->db_attach) -+ ipu_psys_put_userpages(kbuf->db_attach->priv); -+ -+ kfree(kbuf); -+} -+ -+static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, -+ enum dma_data_direction dir) -+{ -+ return -ENOTTY; -+} -+ -+static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) -+{ -+ struct dma_buf_attachment *attach; -+ struct ipu_dma_buf_attach *ipu_attach; -+ -+ if (list_empty(&dmabuf->attachments)) -+ return -EINVAL; -+ -+ attach = list_last_entry(&dmabuf->attachments, -+ struct dma_buf_attachment, node); -+ ipu_attach = attach->priv; -+ -+ if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) -+ return -EINVAL; -+ -+ 
map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); -+ map->is_iomem = false; -+ if (!map->vaddr) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) -+{ -+ struct dma_buf_attachment *attach; -+ struct ipu_dma_buf_attach *ipu_attach; -+ -+ if (WARN_ON(list_empty(&dmabuf->attachments))) -+ return; -+ -+ attach = list_last_entry(&dmabuf->attachments, -+ struct dma_buf_attachment, node); -+ ipu_attach = attach->priv; -+ -+ if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) -+ return; -+ -+ vm_unmap_ram(map->vaddr, ipu_attach->npages); -+} -+ -+static const struct dma_buf_ops ipu_dma_buf_ops = { -+ .attach = ipu_dma_buf_attach, -+ .detach = ipu_dma_buf_detach, -+ .map_dma_buf = ipu_dma_buf_map, -+ .unmap_dma_buf = ipu_dma_buf_unmap, -+ .release = ipu_dma_buf_release, -+ .begin_cpu_access = ipu_dma_buf_begin_cpu_access, -+ .mmap = ipu_dma_buf_mmap, -+ .vmap = ipu_dma_buf_vmap, -+ .vunmap = ipu_dma_buf_vunmap, -+}; -+ -+static int ipu_psys_open(struct inode *inode, struct file *file) -+{ -+ struct ipu_psys *psys = inode_to_ipu_psys(inode); -+ struct ipu_psys_fh *fh; -+ int rval; -+ -+ fh = kzalloc(sizeof(*fh), GFP_KERNEL); -+ if (!fh) -+ return -ENOMEM; -+ -+ fh->psys = psys; -+ -+ file->private_data = fh; -+ -+ mutex_init(&fh->mutex); -+ INIT_LIST_HEAD(&fh->bufs_list); -+ INIT_LIST_HEAD(&fh->descs_list); -+ INIT_LIST_HEAD(&fh->bufs_lru); -+ init_waitqueue_head(&fh->wait); -+ -+ rval = ipu_psys_fh_init(fh); -+ if (rval) -+ goto open_failed; -+ -+ mutex_lock(&psys->mutex); -+ list_add_tail(&fh->list, &psys->fhs); -+ mutex_unlock(&psys->mutex); -+ -+ return 0; -+ -+open_failed: -+ mutex_destroy(&fh->mutex); -+ kfree(fh); -+ return rval; -+} -+ -+static inline void ipu_psys_kbuf_unmap(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ if (!kbuf) -+ return; -+ -+ kbuf->valid = false; -+ if (kbuf->kaddr) { -+ struct iosys_map dmap; -+ -+ 
iosys_map_set_vaddr(&dmap, kbuf->kaddr); -+ dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); -+ } -+ -+ if (!kbuf->userptr) -+ ipu6_dma_unmap_sgtable(fh->psys->adev, kbuf->sgt, -+ DMA_BIDIRECTIONAL, 0); -+ -+ if (!IS_ERR_OR_NULL(kbuf->sgt)) -+ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, -+ kbuf->sgt, -+ DMA_BIDIRECTIONAL); -+ if (!IS_ERR_OR_NULL(kbuf->db_attach)) -+ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); -+ dma_buf_put(kbuf->dbuf); -+ -+ kbuf->db_attach = NULL; -+ kbuf->dbuf = NULL; -+ kbuf->sgt = NULL; -+} -+ -+static void __ipu_psys_unmapbuf(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ /* From now on it is not safe to use this kbuffer */ -+ ipu_psys_kbuf_unmap(fh, kbuf); -+ ipu_buffer_del(fh, kbuf); -+ if (!kbuf->userptr) -+ kfree(kbuf); -+} -+ -+static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kbuffer *kbuf; -+ struct ipu_psys_desc *desc; -+ -+ desc = psys_desc_lookup(fh, fd); -+ if (WARN_ON_ONCE(!desc)) { -+ dev_err(dev, "descriptor not found: %d\n", fd); -+ return -EINVAL; -+ } -+ -+ kbuf = desc->kbuf; -+ /* descriptor is gone now */ -+ ipu_desc_del(fh, desc); -+ kfree(desc); -+ -+ if (WARN_ON_ONCE(!kbuf || !kbuf->dbuf)) { -+ dev_err(dev, -+ "descriptor with no buffer: %d\n", fd); -+ return -EINVAL; -+ } -+ -+ /* Wait for final UNMAP */ -+ if (!atomic_dec_and_test(&kbuf->map_count)) -+ return 0; -+ -+ __ipu_psys_unmapbuf(fh, kbuf); -+ -+ return 0; -+} -+ -+static int ipu_psys_release(struct inode *inode, struct file *file) -+{ -+ struct ipu_psys *psys = inode_to_ipu_psys(inode); -+ struct ipu_psys_fh *fh = file->private_data; -+ -+ mutex_lock(&fh->mutex); -+ while (!list_empty(&fh->descs_list)) { -+ struct ipu_psys_desc *desc; -+ -+ desc = list_first_entry(&fh->descs_list, -+ struct ipu_psys_desc, -+ list); -+ -+ ipu_desc_del(fh, desc); -+ kfree(desc); -+ } -+ -+ while (!list_empty(&fh->bufs_lru)) { -+ 
struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = list_first_entry(&fh->bufs_lru, -+ struct ipu_psys_kbuffer, -+ list); -+ -+ ipu_buffer_lru_del(fh, kbuf); -+ __ipu_psys_unmapbuf(fh, kbuf); -+ } -+ -+ while (!list_empty(&fh->bufs_list)) { -+ struct dma_buf_attachment *db_attach; -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = list_first_entry(&fh->bufs_list, -+ struct ipu_psys_kbuffer, -+ list); -+ -+ ipu_buffer_del(fh, kbuf); -+ db_attach = kbuf->db_attach; -+ -+ /* Unmap and release buffers */ -+ if (kbuf->dbuf && db_attach) { -+ ipu_psys_kbuf_unmap(fh, kbuf); -+ } else { -+ if (db_attach) -+ ipu_psys_put_userpages(db_attach->priv); -+ kfree(kbuf); -+ } -+ } -+ mutex_unlock(&fh->mutex); -+ -+ mutex_lock(&psys->mutex); -+ list_del(&fh->list); -+ -+ mutex_unlock(&psys->mutex); -+ ipu_psys_fh_deinit(fh); -+ -+ mutex_lock(&psys->mutex); -+ if (list_empty(&psys->fhs)) -+ psys->power_gating = 0; -+ mutex_unlock(&psys->mutex); -+ mutex_destroy(&fh->mutex); -+ kfree(fh); -+ -+ return 0; -+} -+ -+static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_desc *desc; -+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info); -+ struct dma_buf *dbuf; -+ int ret; -+ -+ if (!buf->base.userptr) { -+ dev_err(dev, "Buffer allocation not supported\n"); -+ return -EINVAL; -+ } -+ -+ kbuf = ipu_psys_kbuffer_alloc(); -+ if (!kbuf) -+ return -ENOMEM; -+ -+ kbuf->len = buf->len; -+ kbuf->userptr = (unsigned long)buf->base.userptr; -+ kbuf->flags = buf->flags; -+ -+ exp_info.ops = &ipu_dma_buf_ops; -+ exp_info.size = kbuf->len; -+ exp_info.flags = O_RDWR; -+ exp_info.priv = kbuf; -+ -+ dbuf = dma_buf_export(&exp_info); -+ if (IS_ERR(dbuf)) { -+ kfree(kbuf); -+ return PTR_ERR(dbuf); -+ } -+ -+ ret = dma_buf_fd(dbuf, 0); -+ if (ret < 0) { -+ dma_buf_put(dbuf); -+ return ret; -+ } -+ -+ buf->base.fd = ret; -+ buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; -+ 
buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; -+ kbuf->flags = buf->flags; -+ -+ desc = ipu_psys_desc_alloc(ret); -+ if (!desc) { -+ dma_buf_put(dbuf); -+ return -ENOMEM; -+ } -+ -+ kbuf->dbuf = dbuf; -+ -+ mutex_lock(&fh->mutex); -+ ipu_desc_add(fh, desc); -+ ipu_buffer_add(fh, kbuf); -+ mutex_unlock(&fh->mutex); -+ -+ dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", -+ buf->base.userptr, buf->len, buf->base.fd); -+ -+ return 0; -+} -+ -+static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) -+{ -+ return 0; -+} -+ -+static void ipu_psys_kbuffer_lru(struct ipu_psys_fh *fh, -+ struct ipu_psys_kbuffer *kbuf) -+{ -+ ipu_buffer_del(fh, kbuf); -+ ipu_buffer_lru_add(fh, kbuf); -+ -+ while (fh->num_bufs_lru > IPU_PSYS_MAX_NUM_BUFS_LRU) { -+ kbuf = list_first_entry(&fh->bufs_lru, -+ struct ipu_psys_kbuffer, -+ list); -+ -+ ipu_buffer_lru_del(fh, kbuf); -+ __ipu_psys_unmapbuf(fh, kbuf); -+ } -+} -+ -+struct ipu_psys_kbuffer *ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->isp->pdev->dev; -+ int ret; -+ struct ipu_psys_kbuffer *kbuf; -+ struct ipu_psys_desc *desc; -+ struct dma_buf *dbuf; -+ struct iosys_map dmap; -+ -+ dbuf = dma_buf_get(fd); -+ if (IS_ERR(dbuf)) -+ return NULL; -+ -+ desc = psys_desc_lookup(fh, fd); -+ if (!desc) { -+ desc = ipu_psys_desc_alloc(fd); -+ if (!desc) -+ goto desc_alloc_fail; -+ ipu_desc_add(fh, desc); -+ } -+ -+ kbuf = psys_buf_lookup(fh, fd); -+ if (!kbuf) { -+ kbuf = ipu_psys_kbuffer_alloc(); -+ if (!kbuf) -+ goto buf_alloc_fail; -+ ipu_buffer_add(fh, kbuf); -+ } -+ -+ /* If this descriptor references no buffer or new buffer */ -+ if (desc->kbuf != kbuf) { -+ if (desc->kbuf) { -+ /* -+ * Un-reference old buffer and possibly put it on -+ * the LRU list -+ */ -+ if (atomic_dec_and_test(&desc->kbuf->map_count)) -+ ipu_psys_kbuffer_lru(fh, desc->kbuf); -+ } -+ -+ /* Grab reference of the new buffer */ -+ 
atomic_inc(&kbuf->map_count); -+ } -+ -+ desc->kbuf = kbuf; -+ -+ if (kbuf->sgt) { -+ dev_dbg(dev, "fd %d has been mapped!\n", fd); -+ dma_buf_put(dbuf); -+ goto mapbuf_end; -+ } -+ -+ kbuf->dbuf = dbuf; -+ -+ if (kbuf->len == 0) -+ kbuf->len = kbuf->dbuf->size; -+ -+ kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); -+ if (IS_ERR(kbuf->db_attach)) { -+ dev_dbg(dev, "dma buf attach failed\n"); -+ goto attach_fail; -+ } -+ -+ kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, -+ DMA_BIDIRECTIONAL); -+ if (IS_ERR_OR_NULL(kbuf->sgt)) { -+ kbuf->sgt = NULL; -+ dev_dbg(dev, "dma buf map attachment failed\n"); -+ goto kbuf_map_fail; -+ } -+ -+ if (!kbuf->userptr) { -+ ret = ipu6_dma_map_sgtable(psys->adev, kbuf->sgt, -+ DMA_BIDIRECTIONAL, 0); -+ if (ret) { -+ dev_dbg(dev, "ipu6 buf map failed\n"); -+ goto kbuf_map_fail; -+ } -+ } -+ -+ kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); -+ -+ dmap.is_iomem = false; -+ if (dma_buf_vmap_unlocked(kbuf->dbuf, &dmap)) { -+ dev_dbg(dev, "dma buf vmap failed\n"); -+ goto kbuf_map_fail; -+ } -+ kbuf->kaddr = dmap.vaddr; -+ -+mapbuf_end: -+ dev_dbg(dev, "%s %s kbuf %p fd %d with len %llu mapped\n", -+ __func__, kbuf->userptr ? 
"private" : "imported", kbuf, fd, -+ kbuf->len); -+ -+ kbuf->valid = true; -+ return kbuf; -+ -+kbuf_map_fail: -+ ipu_buffer_del(fh, kbuf); -+ if (!IS_ERR_OR_NULL(kbuf->sgt)) { -+ if (!kbuf->userptr) -+ ipu6_dma_unmap_sgtable(psys->adev, kbuf->sgt, -+ DMA_BIDIRECTIONAL, 0); -+ dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, -+ DMA_BIDIRECTIONAL); -+ } -+ dma_buf_detach(kbuf->dbuf, kbuf->db_attach); -+attach_fail: -+ ipu_buffer_del(fh, kbuf); -+ dbuf = ERR_PTR(-EINVAL); -+ if (!kbuf->userptr) -+ kfree(kbuf); -+ -+buf_alloc_fail: -+ ipu_desc_del(fh, desc); -+ kfree(desc); -+ -+desc_alloc_fail: -+ if (!IS_ERR(dbuf)) -+ dma_buf_put(dbuf); -+ return NULL; -+} -+ -+static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys_kbuffer *kbuf; -+ -+ dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF\n"); -+ -+ mutex_lock(&fh->mutex); -+ kbuf = ipu_psys_mapbuf_locked(fd, fh); -+ mutex_unlock(&fh->mutex); -+ -+ return kbuf ? 0 : -EINVAL; -+} -+ -+static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) -+{ -+ struct device *dev = &fh->psys->adev->auxdev.dev; -+ long ret; -+ -+ dev_dbg(dev, "IOC_UNMAPBUF\n"); -+ -+ mutex_lock(&fh->mutex); -+ ret = ipu_psys_unmapbuf_locked(fd, fh); -+ mutex_unlock(&fh->mutex); -+ -+ return ret; -+} -+ -+static __poll_t ipu_psys_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ struct ipu_psys_fh *fh = file->private_data; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ __poll_t ret = 0; -+ -+ dev_dbg(dev, "ipu psys poll\n"); -+ -+ poll_wait(file, &fh->wait, wait); -+ -+ if (ipu_get_completed_kcmd(fh)) -+ ret = POLLIN; -+ -+ dev_dbg(dev, "ipu psys poll ret %u\n", ret); -+ -+ return ret; -+} -+ -+static long ipu_get_manifest(struct ipu_psys_manifest *manifest, -+ struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu6_bus_device *adev = psys->adev; -+ struct ipu6_device *isp = 
adev->isp; -+ struct ipu6_cpd_client_pkg_hdr *client_pkg; -+ u32 entries; -+ void *host_fw_data; -+ dma_addr_t dma_fw_data; -+ u32 client_pkg_offset; -+ -+ host_fw_data = (void *)isp->cpd_fw->data; -+ dma_fw_data = sg_dma_address(adev->fw_sgt.sgl); -+ entries = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); -+ if (!manifest || manifest->index > entries - 1) { -+ dev_err(dev, "invalid argument\n"); -+ return -EINVAL; -+ } -+ -+ if (!ipu6_cpd_pkg_dir_get_size(adev->pkg_dir, manifest->index) || -+ ipu6_cpd_pkg_dir_get_type(adev->pkg_dir, manifest->index) < -+ IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE) { -+ dev_dbg(dev, "invalid pkg dir entry\n"); -+ return -ENOENT; -+ } -+ -+ client_pkg_offset = ipu6_cpd_pkg_dir_get_address(adev->pkg_dir, -+ manifest->index); -+ client_pkg_offset -= dma_fw_data; -+ -+ client_pkg = host_fw_data + client_pkg_offset; -+ manifest->size = client_pkg->pg_manifest_size; -+ -+ if (!manifest->manifest) -+ return 0; -+ -+ if (copy_to_user(manifest->manifest, -+ (uint8_t *)client_pkg + client_pkg->pg_manifest_offs, -+ manifest->size)) { -+ return -EFAULT; -+ } -+ -+ return 0; -+} -+ -+static long ipu_psys_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ union { -+ struct ipu_psys_buffer buf; -+ struct ipu_psys_command cmd; -+ struct ipu_psys_event ev; -+ struct ipu_psys_capability caps; -+ struct ipu_psys_manifest m; -+ } karg; -+ struct ipu_psys_fh *fh = file->private_data; -+ long err = 0; -+ void __user *up = (void __user *)arg; -+ bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); -+ -+ if (copy) { -+ if (_IOC_SIZE(cmd) > sizeof(karg)) -+ return -ENOTTY; -+ -+ if (_IOC_DIR(cmd) & _IOC_WRITE) { -+ err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); -+ if (err) -+ return -EFAULT; -+ } -+ } -+ -+ switch (cmd) { -+ case IPU_IOC_MAPBUF: -+ err = ipu_psys_mapbuf(arg, fh); -+ break; -+ case IPU_IOC_UNMAPBUF: -+ err = ipu_psys_unmapbuf(arg, fh); -+ break; -+ case IPU_IOC_QUERYCAP: -+ karg.caps = fh->psys->caps; -+ break; -+ 
case IPU_IOC_GETBUF: -+ err = ipu_psys_getbuf(&karg.buf, fh); -+ break; -+ case IPU_IOC_PUTBUF: -+ err = ipu_psys_putbuf(&karg.buf, fh); -+ break; -+ case IPU_IOC_QCMD: -+ err = ipu_psys_kcmd_new(&karg.cmd, fh); -+ break; -+ case IPU_IOC_DQEVENT: -+ err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); -+ break; -+ case IPU_IOC_GET_MANIFEST: -+ err = ipu_get_manifest(&karg.m, fh); -+ break; -+ default: -+ err = -ENOTTY; -+ break; -+ } -+ -+ if (err) -+ return err; -+ -+ if (copy && _IOC_DIR(cmd) & _IOC_READ) -+ if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+static const struct file_operations ipu_psys_fops = { -+ .open = ipu_psys_open, -+ .release = ipu_psys_release, -+ .unlocked_ioctl = ipu_psys_ioctl, -+ .poll = ipu_psys_poll, -+ .owner = THIS_MODULE, -+}; -+ -+static void ipu_psys_dev_release(struct device *dev) -+{ -+} -+ -+static int psys_runtime_pm_resume(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ unsigned long flags; -+ int retval; -+ -+ if (!psys) -+ return 0; -+ -+ spin_lock_irqsave(&psys->ready_lock, flags); -+ if (psys->ready) { -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ return 0; -+ } -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ -+ retval = ipu6_mmu_hw_init(adev->mmu); -+ if (retval) -+ return retval; -+ -+ if (async_fw_init && !psys->fwcom) { -+ dev_err(dev, "async firmware init not finished, skipping\n"); -+ return 0; -+ } -+ -+ if (!ipu6_buttress_auth_done(adev->isp)) { -+ dev_dbg(dev, "fw not yet authenticated, skipping\n"); -+ return 0; -+ } -+ -+ ipu_psys_setup_hw(psys); -+ -+ ipu_psys_subdomains_power(psys, 1); -+ ipu6_configure_spc(adev->isp, -+ &psys->pdata->ipdata->hw_variant, -+ IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX, -+ psys->pdata->base, adev->pkg_dir, -+ adev->pkg_dir_dma_addr); -+ -+ retval = ipu_fw_psys_open(psys); -+ if (retval) { -+ dev_err(dev, "Failed to open abi.\n"); -+ return 
retval; -+ } -+ -+ spin_lock_irqsave(&psys->ready_lock, flags); -+ psys->ready = 1; -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ -+ return 0; -+} -+ -+static int psys_runtime_pm_suspend(struct device *dev) -+{ -+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ unsigned long flags; -+ int rval; -+ -+ if (!psys) -+ return 0; -+ -+ if (!psys->ready) -+ return 0; -+ -+ spin_lock_irqsave(&psys->ready_lock, flags); -+ psys->ready = 0; -+ spin_unlock_irqrestore(&psys->ready_lock, flags); -+ -+ /* -+ * We can trace failure but better to not return an error. -+ * At suspend we are progressing towards psys power gated state. -+ * Any hang / failure inside psys will be forgotten soon. -+ */ -+ rval = ipu_fw_psys_close(psys); -+ if (rval) -+ dev_err(dev, "Device close failure: %d\n", rval); -+ -+ ipu_psys_subdomains_power(psys, 0); -+ -+ ipu6_mmu_hw_cleanup(adev->mmu); -+ -+ return 0; -+} -+ -+/* The following PM callbacks are needed to enable runtime PM in IPU PCI -+ * device resume, otherwise, runtime PM can't work in PCI resume from -+ * S3 state. 
-+ */ -+static int psys_resume(struct device *dev) -+{ -+ return 0; -+} -+ -+static int psys_suspend(struct device *dev) -+{ -+ return 0; -+} -+ -+static const struct dev_pm_ops psys_pm_ops = { -+ .runtime_suspend = psys_runtime_pm_suspend, -+ .runtime_resume = psys_runtime_pm_resume, -+ .suspend = psys_suspend, -+ .resume = psys_resume, -+}; -+ -+static int ipu_psys_sched_cmd(void *ptr) -+{ -+ struct ipu_psys *psys = ptr; -+ size_t pending = 0; -+ -+ while (1) { -+ wait_event_interruptible(psys->sched_cmd_wq, -+ (kthread_should_stop() || -+ (pending = -+ atomic_read(&psys->wakeup_count)))); -+ -+ if (kthread_should_stop()) -+ break; -+ -+ if (pending == 0) -+ continue; -+ -+ mutex_lock(&psys->mutex); -+ atomic_set(&psys->wakeup_count, 0); -+ ipu_psys_run_next(psys); -+ mutex_unlock(&psys->mutex); -+ } -+ -+ return 0; -+} -+ -+static void start_sp(struct ipu6_bus_device *adev) -+{ -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ void __iomem *spc_regs_base = psys->pdata->base + -+ psys->pdata->ipdata->hw_variant.spc_offset; -+ u32 val = 0; -+ -+ val |= IPU6_PSYS_SPC_STATUS_START | -+ IPU6_PSYS_SPC_STATUS_RUN | -+ IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; -+ val |= psys->icache_prefetch_sp ? 
-+ IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; -+ writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); -+} -+ -+static int query_sp(struct ipu6_bus_device *adev) -+{ -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ void __iomem *spc_regs_base = psys->pdata->base + -+ psys->pdata->ipdata->hw_variant.spc_offset; -+ u32 val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); -+ -+ /* return true when READY == 1, START == 0 */ -+ val &= IPU6_PSYS_SPC_STATUS_READY | IPU6_PSYS_SPC_STATUS_START; -+ -+ return val == IPU6_PSYS_SPC_STATUS_READY; -+} -+ -+static int ipu_psys_fw_init(struct ipu_psys *psys) -+{ -+ struct ipu6_fw_syscom_queue_config *queue_cfg; -+ struct device *dev = &psys->adev->auxdev.dev; -+ unsigned int size; -+ struct ipu6_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { -+ { -+ IPU_FW_PSYS_EVENT_QUEUE_SIZE, -+ sizeof(struct ipu_fw_psys_event) -+ } -+ }; -+ struct ipu_fw_psys_srv_init server_init = { -+ .ddr_pkg_dir_address = 0, -+ .host_ddr_pkg_dir = NULL, -+ .pkg_dir_size = 0, -+ .icache_prefetch_sp = psys->icache_prefetch_sp, -+ .icache_prefetch_isp = psys->icache_prefetch_isp, -+ }; -+ struct ipu6_fw_com_cfg fwcom = { -+ .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, -+ .output = fw_psys_event_queue_cfg, -+ .specific_addr = &server_init, -+ .specific_size = sizeof(server_init), -+ .cell_start = start_sp, -+ .cell_ready = query_sp, -+ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET, -+ }; -+ int i; -+ -+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || -+ ipu_ver == IPU6_VER_6EP_MTL) -+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ -+ queue_cfg = devm_kzalloc(dev, sizeof(*queue_cfg) * size, -+ GFP_KERNEL); -+ if (!queue_cfg) -+ return -ENOMEM; -+ -+ for (i = 0; i < size; i++) { -+ queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE; -+ queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd); -+ } -+ -+ fwcom.input = queue_cfg; -+ fwcom.num_input_queues = 
size; -+ fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; -+ -+ psys->fwcom = ipu6_fw_com_prepare(&fwcom, psys->adev, -+ psys->pdata->base); -+ if (!psys->fwcom) { -+ dev_err(dev, "psys fw com prepare failed\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static void run_fw_init_work(struct work_struct *work) -+{ -+ struct fw_init_task *task = (struct fw_init_task *)work; -+ struct ipu_psys *psys = task->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ int rval; -+ -+ rval = ipu_psys_fw_init(psys); -+ -+ if (rval) { -+ dev_err(dev, "FW init failed(%d)\n", rval); -+ ipu6_psys_remove(&psys->adev->auxdev); -+ } else { -+ dev_info(dev, "FW init done\n"); -+ } -+} -+ -+static int ipu6_psys_probe(struct auxiliary_device *auxdev, -+ const struct auxiliary_device_id *auxdev_id) -+{ -+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); -+ struct device *dev = &auxdev->dev; -+ struct ipu_psys_pg *kpg, *kpg0; -+ struct ipu_psys *psys; -+ unsigned int minor; -+ int i, rval = -E2BIG; -+ -+ if (!adev->isp->bus_ready_to_probe) -+ return -EPROBE_DEFER; -+ -+ if (!adev->pkg_dir) -+ return -EPROBE_DEFER; -+ -+ ipu_ver = adev->isp->hw_ver; -+ -+ rval = ipu6_mmu_hw_init(adev->mmu); -+ if (rval) -+ return rval; -+ -+ mutex_lock(&ipu_psys_mutex); -+ -+ minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); -+ if (minor == IPU_PSYS_NUM_DEVICES) { -+ dev_err(dev, "too many devices\n"); -+ goto out_unlock; -+ } -+ -+ psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); -+ if (!psys) { -+ rval = -ENOMEM; -+ goto out_unlock; -+ } -+ -+ adev->auxdrv_data = -+ (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; -+ adev->auxdrv = to_auxiliary_drv(dev->driver); -+ -+ psys->adev = adev; -+ psys->pdata = adev->pdata; -+ psys->icache_prefetch_sp = 0; -+ -+ psys->power_gating = 0; -+ -+ spin_lock_init(&psys->ready_lock); -+ spin_lock_init(&psys->pgs_lock); -+ psys->ready = 0; -+ psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; -+ -+ 
mutex_init(&psys->mutex); -+ INIT_LIST_HEAD(&psys->fhs); -+ INIT_LIST_HEAD(&psys->pgs); -+ INIT_LIST_HEAD(&psys->started_kcmds_list); -+ -+ init_waitqueue_head(&psys->sched_cmd_wq); -+ atomic_set(&psys->wakeup_count, 0); -+ /* -+ * Create a thread to schedule commands sent to IPU firmware. -+ * The thread reduces the coupling between the command scheduler -+ * and queueing commands from the user to driver. -+ */ -+ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, -+ "psys_sched_cmd"); -+ -+ if (IS_ERR(psys->sched_cmd_thread)) { -+ psys->sched_cmd_thread = NULL; -+ mutex_destroy(&psys->mutex); -+ goto out_unlock; -+ } -+ -+ dev_set_drvdata(dev, psys); -+ -+ rval = ipu_psys_res_pool_init(&psys->res_pool_running); -+ if (rval < 0) { -+ dev_err(&psys->dev, -+ "unable to alloc process group resources\n"); -+ goto out_mutex_destroy; -+ } -+ -+ ipu6_psys_hw_res_variant_init(); -+ -+ /* allocate and map memory for process groups */ -+ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { -+ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); -+ if (!kpg) -+ goto out_free_pgs; -+ kpg->pg = ipu6_dma_alloc(adev, IPU_PSYS_PG_MAX_SIZE, -+ &kpg->pg_dma_addr, -+ GFP_KERNEL, 0); -+ if (!kpg->pg) { -+ kfree(kpg); -+ goto out_free_pgs; -+ } -+ kpg->size = IPU_PSYS_PG_MAX_SIZE; -+ list_add(&kpg->list, &psys->pgs); -+ } -+ -+ psys->caps.pg_count = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); -+ -+ dev_info(dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); -+ if (async_fw_init) { -+ INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, -+ run_fw_init_work); -+ fw_init_task.psys = psys; -+ schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); -+ } else { -+ rval = ipu_psys_fw_init(psys); -+ } -+ -+ if (rval) { -+ dev_err(dev, "FW init failed(%d)\n", rval); -+ goto out_free_pgs; -+ } -+ -+ psys->dev.bus = &ipu_psys_bus; -+ psys->dev.parent = dev; -+ psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); -+ psys->dev.release = ipu_psys_dev_release; -+ 
dev_set_name(&psys->dev, "ipu-psys%d", minor); -+ device_initialize(&psys->dev); -+ -+ cdev_init(&psys->cdev, &ipu_psys_fops); -+ psys->cdev.owner = ipu_psys_fops.owner; -+ -+ rval = cdev_device_add(&psys->cdev, &psys->dev); -+ if (rval < 0) { -+ dev_err(dev, "psys device_register failed\n"); -+ goto out_release_fw_com; -+ } -+ -+ set_bit(minor, ipu_psys_devices); -+ -+ /* Add the hw stepping information to caps */ -+ strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, -+ sizeof(psys->caps.dev_model)); -+ -+ mutex_unlock(&ipu_psys_mutex); -+ -+ dev_info(dev, "psys probe minor: %d\n", minor); -+ -+ ipu6_mmu_hw_cleanup(adev->mmu); -+ -+ return 0; -+ -+out_release_fw_com: -+ ipu6_fw_com_release(psys->fwcom, 1); -+out_free_pgs: -+ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { -+ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); -+ kfree(kpg); -+ } -+ -+ ipu_psys_res_pool_cleanup(&psys->res_pool_running); -+out_mutex_destroy: -+ mutex_destroy(&psys->mutex); -+ if (psys->sched_cmd_thread) { -+ kthread_stop(psys->sched_cmd_thread); -+ psys->sched_cmd_thread = NULL; -+ } -+out_unlock: -+ /* Safe to call even if the init is not called */ -+ mutex_unlock(&ipu_psys_mutex); -+ ipu6_mmu_hw_cleanup(adev->mmu); -+ return rval; -+} -+ -+static void ipu6_psys_remove(struct auxiliary_device *auxdev) -+{ -+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); -+ struct device *dev = &auxdev->dev; -+ struct ipu_psys *psys = dev_get_drvdata(&auxdev->dev); -+ struct ipu_psys_pg *kpg, *kpg0; -+ -+ if (psys->sched_cmd_thread) { -+ kthread_stop(psys->sched_cmd_thread); -+ psys->sched_cmd_thread = NULL; -+ } -+ -+ mutex_lock(&ipu_psys_mutex); -+ -+ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { -+ ipu6_dma_free(adev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); -+ kfree(kpg); -+ } -+ -+ if (psys->fwcom && ipu6_fw_com_release(psys->fwcom, 1)) -+ dev_err(dev, "fw com release failed.\n"); -+ -+ kfree(psys->server_init); -+ kfree(psys->syscom_config); -+ -+ 
ipu_psys_res_pool_cleanup(&psys->res_pool_running); -+ -+ cdev_device_del(&psys->cdev, &psys->dev); -+ -+ clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); -+ -+ mutex_unlock(&ipu_psys_mutex); -+ -+ mutex_destroy(&psys->mutex); -+ -+ dev_info(dev, "removed\n"); -+} -+ -+static irqreturn_t psys_isr_threaded(struct ipu6_bus_device *adev) -+{ -+ struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -+ struct device *dev = &psys->adev->auxdev.dev; -+ void __iomem *base = psys->pdata->base; -+ u32 status; -+ int r; -+ -+ mutex_lock(&psys->mutex); -+ r = pm_runtime_get_if_in_use(dev); -+ if (!r || WARN_ON_ONCE(r < 0)) { -+ mutex_unlock(&psys->mutex); -+ return IRQ_NONE; -+ } -+ -+ status = readl(base + IPU6_REG_PSYS_GPDEV_IRQ_STATUS); -+ writel(status, base + IPU6_REG_PSYS_GPDEV_IRQ_CLEAR); -+ -+ if (status & IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU6_PSYS_GPDEV_FWIRQ0)) { -+ writel(0, base + IPU6_REG_PSYS_GPDEV_FWIRQ(0)); -+ ipu_psys_handle_events(psys); -+ } -+ -+ pm_runtime_put(dev); -+ mutex_unlock(&psys->mutex); -+ -+ return status ? 
IRQ_HANDLED : IRQ_NONE; -+} -+ -+static const struct ipu6_auxdrv_data ipu6_psys_auxdrv_data = { -+ .isr_threaded = psys_isr_threaded, -+ .wake_isr_thread = true, -+}; -+ -+static const struct auxiliary_device_id ipu6_psys_id_table[] = { -+ { -+ .name = "intel_ipu6.psys", -+ .driver_data = (kernel_ulong_t)&ipu6_psys_auxdrv_data, -+ }, -+ { } -+}; -+MODULE_DEVICE_TABLE(auxiliary, ipu6_psys_id_table); -+ -+static struct auxiliary_driver ipu6_psys_aux_driver = { -+ .name = IPU6_PSYS_NAME, -+ .probe = ipu6_psys_probe, -+ .remove = ipu6_psys_remove, -+ .id_table = ipu6_psys_id_table, -+ .driver = { -+ .pm = &psys_pm_ops, -+ }, -+}; -+ -+static int __init ipu_psys_init(void) -+{ -+ int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, -+ IPU_PSYS_NUM_DEVICES, ipu_psys_bus.name); -+ if (rval) { -+ pr_err("can't alloc psys chrdev region (%d)\n", rval); -+ return rval; -+ } -+ -+ rval = bus_register(&ipu_psys_bus); -+ if (rval) { -+ pr_err("can't register psys bus (%d)\n", rval); -+ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); -+ return rval; -+ } -+ -+ auxiliary_driver_register(&ipu6_psys_aux_driver); -+ return 0; -+} -+module_init(ipu_psys_init); -+ -+static void __exit ipu_psys_exit(void) -+{ -+ auxiliary_driver_unregister(&ipu6_psys_aux_driver); -+ bus_unregister(&ipu_psys_bus); -+ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); -+} -+module_exit(ipu_psys_exit); -+ -+MODULE_AUTHOR("Bingbu Cao "); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Intel IPU6 processing system driver"); -+MODULE_IMPORT_NS("DMA_BUF"); -+MODULE_IMPORT_NS("INTEL_IPU6"); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h -new file mode 100644 -index 0000000..8af6551 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.h -@@ -0,0 +1,324 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2013 - 2024 Intel Corporation */ -+ -+#ifndef IPU_PSYS_H -+#define IPU_PSYS_H -+ 
-+#include -+#include -+ -+#include -+#include "ipu6.h" -+#include "ipu6-bus.h" -+#include "ipu6-dma.h" -+#include "ipu-fw-psys.h" -+#include "ipu-platform-psys.h" -+ -+/* PSYS Info bits*/ -+#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2c + ((a) * 12)) -+#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5c + ((a) * 12)) -+ -+#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 -+#define IPU_PSYS_REG_SPC_START_PC 0x4 -+#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 -+#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 -+ -+#define IPU_PSYS_SPC_STATUS_START BIT(1) -+#define IPU_PSYS_SPC_STATUS_RUN BIT(3) -+#define IPU_PSYS_SPC_STATUS_READY BIT(5) -+#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) -+#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) -+ -+#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 -+ -+#define IPU_INFO_ENABLE_SNOOP BIT(0) -+#define IPU_INFO_DEC_FORCE_FLUSH BIT(1) -+#define IPU_INFO_DEC_PASS_THRU BIT(2) -+#define IPU_INFO_ZLW BIT(3) -+#define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1f) << 4) -+#define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9) -+#define IPU_INFO_IMR_BASE BIT(10) -+#define IPU_INFO_IMR_DESTINED BIT(11) -+ -+#define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF -+ -+#define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 -+#define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 -+#define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) -+#define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) -+#define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) -+ -+enum ipu_device_ab_group1_target_id { -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, -+ 
IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, -+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, -+}; -+ -+/* IRQ-related registers in PSYS */ -+#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 -+#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 -+#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 -+#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c -+#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 -+#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 -+/* There are 8 FW interrupts, n = 0..7 */ -+#define IPU_PSYS_GPDEV_FWIRQ0 5 -+#define IPU_PSYS_GPDEV_FWIRQ1 6 -+#define IPU_PSYS_GPDEV_FWIRQ2 7 -+#define IPU_PSYS_GPDEV_FWIRQ3 8 -+#define IPU_PSYS_GPDEV_FWIRQ4 9 -+#define IPU_PSYS_GPDEV_FWIRQ5 10 -+#define IPU_PSYS_GPDEV_FWIRQ6 11 -+#define IPU_PSYS_GPDEV_FWIRQ7 12 -+#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n)) -+#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) -+ -+/* -+ * psys subdomains power request regs -+ */ -+enum ipu_device_buttress_psys_domain_pos { -+ IPU_PSYS_SUBDOMAIN_ISA = 0, -+ IPU_PSYS_SUBDOMAIN_PSA = 1, -+ IPU_PSYS_SUBDOMAIN_BB = 2, -+ IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */ -+ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */ -+}; -+ -+#define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \ -+ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \ -+ BIT(IPU_PSYS_SUBDOMAIN_BB)) -+ -+#define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0 -+#define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4 -+ -+#define IPU_PSYS_CMD_TIMEOUT_MS 2000 -+#define IPU_PSYS_OPEN_TIMEOUT_US 50 -+#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) -+ -+#define IPU_PSYS_PG_POOL_SIZE 16 
-+#define IPU_PSYS_PG_MAX_SIZE 8192 -+#define IPU_MAX_PSYS_CMD_BUFFERS 32 -+#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS -+#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS -+#define IPU_PSYS_CLOSE_TIMEOUT_US 50 -+#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US) -+#define IPU_MAX_RESOURCES 128 -+ -+extern enum ipu6_version ipu_ver; -+ -+/* Opaque structure. Do not access fields. */ -+struct ipu_resource { -+ u32 id; -+ int elements; /* Number of elements available to allocation */ -+ unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ -+}; -+ -+enum ipu_resource_type { -+ IPU_RESOURCE_DEV_CHN = 0, -+ IPU_RESOURCE_EXT_MEM, -+ IPU_RESOURCE_DFM -+}; -+ -+/* Allocation of resource(s) */ -+/* Opaque structure. Do not access fields. */ -+struct ipu_resource_alloc { -+ enum ipu_resource_type type; -+ struct ipu_resource *resource; -+ int elements; -+ int pos; -+}; -+ -+/* -+ * This struct represents all of the currently allocated -+ * resources from IPU model. It is used also for allocating -+ * resources for the next set of PGs to be run on IPU -+ * (ie. those PGs which are not yet being run and which don't -+ * yet reserve real IPU resources). -+ * Use larger array to cover existing resource quantity -+ */ -+ -+/* resource size may need expand for new resource model */ -+struct ipu_psys_resource_pool { -+ u32 cells; /* Bitmask of cells allocated */ -+ struct ipu_resource dev_channels[16]; -+ struct ipu_resource ext_memory[32]; -+ struct ipu_resource dfms[16]; -+ DECLARE_BITMAP(cmd_queues, 32); -+ /* Protects cmd_queues bitmap */ -+ spinlock_t queues_lock; -+}; -+ -+/* -+ * This struct keeps book of the resources allocated for a specific PG. -+ * It is used for freeing up resources from struct ipu_psys_resources -+ * when the PG is released from IPU (or model of IPU). 
-+ */ -+struct ipu_psys_resource_alloc { -+ u32 cells; /* Bitmask of cells needed */ -+ struct ipu_resource_alloc -+ resource_alloc[IPU_MAX_RESOURCES]; -+ int resources; -+}; -+ -+struct task_struct; -+struct ipu_psys { -+ struct ipu_psys_capability caps; -+ struct cdev cdev; -+ struct device dev; -+ -+ struct mutex mutex; /* Psys various */ -+ int ready; /* psys fw status */ -+ bool icache_prefetch_sp; -+ bool icache_prefetch_isp; -+ spinlock_t ready_lock; /* protect psys firmware state */ -+ spinlock_t pgs_lock; /* Protect pgs list access */ -+ struct list_head fhs; -+ struct list_head pgs; -+ struct list_head started_kcmds_list; -+ struct ipu6_psys_pdata *pdata; -+ struct ipu6_bus_device *adev; -+ struct ia_css_syscom_context *dev_ctx; -+ struct ia_css_syscom_config *syscom_config; -+ struct ia_css_psys_server_init *server_init; -+ struct task_struct *sched_cmd_thread; -+ wait_queue_head_t sched_cmd_wq; -+ atomic_t wakeup_count; /* Psys schedule thread wakeup count */ -+ -+ /* Resources needed to be managed for process groups */ -+ struct ipu_psys_resource_pool res_pool_running; -+ -+ const struct firmware *fw; -+ struct sg_table fw_sgt; -+ u64 *pkg_dir; -+ dma_addr_t pkg_dir_dma_addr; -+ unsigned int pkg_dir_size; -+ unsigned long timeout; -+ -+ int active_kcmds, started_kcmds; -+ void *fwcom; -+ -+ int power_gating; -+}; -+ -+struct ipu_psys_fh { -+ struct ipu_psys *psys; -+ struct mutex mutex;/* Protects bufs_list & kcmds fields */ -+ struct list_head list; -+ /* Holds all buffers that this fh owns */ -+ struct list_head bufs_list; -+ /* Holds all descriptors (fd:kbuffer associations) */ -+ struct list_head descs_list; -+ struct list_head bufs_lru; -+ wait_queue_head_t wait; -+ struct ipu_psys_scheduler sched; -+ -+ u32 num_bufs; -+ u32 num_descs; -+ u32 num_bufs_lru; -+}; -+ -+struct ipu_psys_pg { -+ struct ipu_fw_psys_process_group *pg; -+ size_t size; -+ size_t pg_size; -+ dma_addr_t pg_dma_addr; -+ struct list_head list; -+ struct ipu_psys_resource_alloc 
resource_alloc; -+}; -+ -+struct ipu6_psys_constraint { -+ struct list_head list; -+ unsigned int min_freq; -+}; -+ -+struct ipu_psys_kcmd { -+ struct ipu_psys_fh *fh; -+ struct list_head list; -+ struct ipu_psys_buffer_set *kbuf_set; -+ enum ipu_psys_cmd_state state; -+ void *pg_manifest; -+ size_t pg_manifest_size; -+ struct ipu_psys_kbuffer **kbufs; -+ struct ipu_psys_buffer *buffers; -+ size_t nbuffers; -+ struct ipu_fw_psys_process_group *pg_user; -+ struct ipu_psys_pg *kpg; -+ u64 user_token; -+ u64 issue_id; -+ u32 priority; -+ u32 kernel_enable_bitmap[4]; -+ u32 terminal_enable_bitmap[4]; -+ u32 routing_enable_bitmap[4]; -+ u32 rbm[5]; -+ struct ipu6_psys_constraint constraint; -+ struct ipu_psys_event ev; -+ struct timer_list watchdog; -+}; -+ -+struct ipu_dma_buf_attach { -+ struct device *dev; -+ u64 len; -+ unsigned long userptr; -+ struct sg_table *sgt; -+ struct page **pages; -+ size_t npages; -+}; -+ -+struct ipu_psys_kbuffer { -+ u64 len; -+ unsigned long userptr; -+ void *kaddr; -+ struct list_head list; -+ dma_addr_t dma_addr; -+ struct sg_table *sgt; -+ struct dma_buf_attachment *db_attach; -+ struct dma_buf *dbuf; -+ u32 flags; -+ atomic_t map_count; /* The number of times this buffer is mapped */ -+ bool valid; /* True when buffer is usable */ -+}; -+ -+struct ipu_psys_desc { -+ struct ipu_psys_kbuffer *kbuf; -+ struct list_head list; -+ u32 fd; -+}; -+ -+#define inode_to_ipu_psys(inode) \ -+ container_of((inode)->i_cdev, struct ipu_psys, cdev) -+ -+void ipu_psys_setup_hw(struct ipu_psys *psys); -+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on); -+void ipu_psys_handle_events(struct ipu_psys *psys); -+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); -+void ipu_psys_run_next(struct ipu_psys *psys); -+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); -+struct ipu_psys_kbuffer * -+ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); -+struct ipu_psys_kbuffer * 
-+ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh); -+struct ipu_psys_kbuffer * -+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); -+int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool); -+void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool); -+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh); -+long ipu_ioctl_dqevent(struct ipu_psys_event *event, -+ struct ipu_psys_fh *fh, unsigned int f_flags); -+ -+#endif /* IPU_PSYS_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c -new file mode 100644 -index 0000000..4068e34 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-resources.c -@@ -0,0 +1,861 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ipu-fw-psys.h" -+#include "ipu-psys.h" -+ -+struct ipu6_psys_hw_res_variant hw_var; -+void ipu6_psys_hw_res_variant_init(void) -+{ -+ if (ipu_ver == IPU6_VER_6SE) { -+ hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID; -+ } else if (ipu_ver == IPU6_VER_6) { -+ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID; -+ } else if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) { -+ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID; -+ } else { -+ WARN(1, "ipu6 psys res var is not initialised correctly."); -+ } -+ -+ hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn; -+ hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap; -+ hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem; -+ hw_var.get_pgm_by_proc = ipu6_fw_psys_get_pgm_by_process; -+} -+ -+static const struct ipu_fw_resource_definitions *get_res(void) -+{ -+ if (ipu_ver == IPU6_VER_6SE) -+ return 
ipu6se_res_defs; -+ -+ if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) -+ return ipu6ep_res_defs; -+ -+ return ipu6_res_defs; -+} -+ -+static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) -+{ -+ if (elements <= 0) { -+ res->bitmap = NULL; -+ return 0; -+ } -+ -+ res->bitmap = bitmap_zalloc(elements, GFP_KERNEL); -+ if (!res->bitmap) -+ return -ENOMEM; -+ res->elements = elements; -+ res->id = id; -+ return 0; -+} -+ -+static unsigned long -+ipu_resource_alloc_with_pos(struct ipu_resource *res, int n, -+ int pos, -+ struct ipu_resource_alloc *alloc, -+ enum ipu_resource_type type) -+{ -+ unsigned long p; -+ -+ if (n <= 0) { -+ alloc->elements = 0; -+ return 0; -+ } -+ -+ if (!res->bitmap || pos >= res->elements) -+ return (unsigned long)(-ENOSPC); -+ -+ p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0); -+ alloc->resource = NULL; -+ -+ if (p != pos) -+ return (unsigned long)(-ENOSPC); -+ bitmap_set(res->bitmap, p, n); -+ alloc->resource = res; -+ alloc->elements = n; -+ alloc->pos = p; -+ alloc->type = type; -+ -+ return pos; -+} -+ -+static unsigned long -+ipu_resource_alloc(struct ipu_resource *res, int n, -+ struct ipu_resource_alloc *alloc, -+ enum ipu_resource_type type) -+{ -+ unsigned long p; -+ -+ if (n <= 0) { -+ alloc->elements = 0; -+ return 0; -+ } -+ -+ if (!res->bitmap) -+ return (unsigned long)(-ENOSPC); -+ -+ p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); -+ alloc->resource = NULL; -+ -+ if (p >= res->elements) -+ return (unsigned long)(-ENOSPC); -+ bitmap_set(res->bitmap, p, n); -+ alloc->resource = res; -+ alloc->elements = n; -+ alloc->pos = p; -+ alloc->type = type; -+ -+ return p; -+} -+ -+static void ipu_resource_free(struct ipu_resource_alloc *alloc) -+{ -+ if (alloc->elements <= 0) -+ return; -+ -+ if (alloc->type == IPU_RESOURCE_DFM) -+ *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); -+ else -+ bitmap_clear(alloc->resource->bitmap, alloc->pos, -+ 
alloc->elements); -+ alloc->resource = NULL; -+} -+ -+static void ipu_resource_cleanup(struct ipu_resource *res) -+{ -+ bitmap_free(res->bitmap); -+ res->bitmap = NULL; -+} -+ -+/********** IPU PSYS-specific resource handling **********/ -+int ipu_psys_res_pool_init(struct ipu_psys_resource_pool *pool) -+{ -+ int i, j, k, ret; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ res_defs = get_res(); -+ -+ spin_lock_init(&pool->queues_lock); -+ pool->cells = 0; -+ -+ for (i = 0; i < res_defs->num_dev_channels; i++) { -+ ret = ipu_resource_init(&pool->dev_channels[i], i, -+ res_defs->dev_channels[i]); -+ if (ret) -+ goto error; -+ } -+ -+ for (j = 0; j < res_defs->num_ext_mem_ids; j++) { -+ ret = ipu_resource_init(&pool->ext_memory[j], j, -+ res_defs->ext_mem_ids[j]); -+ if (ret) -+ goto memory_error; -+ } -+ -+ for (k = 0; k < res_defs->num_dfm_ids; k++) { -+ ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); -+ if (ret) -+ goto dfm_error; -+ } -+ -+ spin_lock(&pool->queues_lock); -+ if (ipu_ver == IPU6_VER_6SE) -+ bitmap_zero(pool->cmd_queues, -+ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID); -+ else -+ bitmap_zero(pool->cmd_queues, -+ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID); -+ spin_unlock(&pool->queues_lock); -+ -+ return 0; -+ -+dfm_error: -+ for (k--; k >= 0; k--) -+ ipu_resource_cleanup(&pool->dfms[k]); -+ -+memory_error: -+ for (j--; j >= 0; j--) -+ ipu_resource_cleanup(&pool->ext_memory[j]); -+ -+error: -+ for (i--; i >= 0; i--) -+ ipu_resource_cleanup(&pool->dev_channels[i]); -+ return ret; -+} -+ -+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, -+ struct ipu_psys_resource_pool *dest) -+{ -+ int i; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ res_defs = get_res(); -+ -+ dest->cells = src->cells; -+ for (i = 0; i < res_defs->num_dev_channels; i++) -+ *dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap; -+ -+ for (i = 0; i < res_defs->num_ext_mem_ids; i++) -+ *dest->ext_memory[i].bitmap = 
*src->ext_memory[i].bitmap; -+ -+ for (i = 0; i < res_defs->num_dfm_ids; i++) -+ *dest->dfms[i].bitmap = *src->dfms[i].bitmap; -+} -+ -+void ipu_psys_res_pool_cleanup(struct ipu_psys_resource_pool *pool) -+{ -+ u32 i; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ res_defs = get_res(); -+ for (i = 0; i < res_defs->num_dev_channels; i++) -+ ipu_resource_cleanup(&pool->dev_channels[i]); -+ -+ for (i = 0; i < res_defs->num_ext_mem_ids; i++) -+ ipu_resource_cleanup(&pool->ext_memory[i]); -+ -+ for (i = 0; i < res_defs->num_dfm_ids; i++) -+ ipu_resource_cleanup(&pool->dfms[i]); -+} -+ -+static int __alloc_one_resrc(const struct device *dev, -+ struct ipu_fw_psys_process *process, -+ struct ipu_resource *resource, -+ struct ipu_fw_generic_program_manifest *pm, -+ u32 resource_id, -+ struct ipu_psys_resource_alloc *alloc) -+{ -+ const u16 resource_req = pm->dev_chn_size[resource_id]; -+ const u16 resource_offset_req = pm->dev_chn_offset[resource_id]; -+ unsigned long retl; -+ -+ if (!resource_req) -+ return -ENXIO; -+ -+ if (alloc->resources >= IPU_MAX_RESOURCES) { -+ dev_err(dev, "out of resource handles\n"); -+ return -ENOSPC; -+ } -+ if (resource_offset_req != (u16)(-1)) -+ retl = ipu_resource_alloc_with_pos -+ (resource, -+ resource_req, -+ resource_offset_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_DEV_CHN); -+ else -+ retl = ipu_resource_alloc -+ (resource, resource_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_DEV_CHN); -+ if (IS_ERR_VALUE(retl)) { -+ dev_dbg(dev, "out of device channel resources\n"); -+ return (int)retl; -+ } -+ alloc->resources++; -+ -+ return 0; -+} -+ -+static int ipu_psys_allocate_one_dfm(const struct device *dev, -+ struct ipu_fw_psys_process *process, -+ struct ipu_resource *resource, -+ struct ipu_fw_generic_program_manifest *pm, -+ u32 resource_id, -+ struct ipu_psys_resource_alloc *alloc) -+{ -+ u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id]; -+ u32 active_dfm_bitmap_req = 
pm->dfm_active_port_bitmap[resource_id]; -+ const u8 is_relocatable = pm->is_dfm_relocatable[resource_id]; -+ struct ipu_resource_alloc *alloc_resource; -+ unsigned long p = 0; -+ -+ if (!dfm_bitmap_req) -+ return -ENXIO; -+ -+ if (alloc->resources >= IPU_MAX_RESOURCES) { -+ dev_err(dev, "out of resource handles\n"); -+ return -ENOSPC; -+ } -+ -+ if (!resource->bitmap) -+ return -ENOSPC; -+ -+ if (!is_relocatable) { -+ if (*resource->bitmap & dfm_bitmap_req) { -+ dev_warn(dev, -+ "out of dfm resources, req 0x%x, get 0x%lx\n", -+ dfm_bitmap_req, *resource->bitmap); -+ return -ENOSPC; -+ } -+ *resource->bitmap |= dfm_bitmap_req; -+ } else { -+ unsigned int n = hweight32(dfm_bitmap_req); -+ -+ p = bitmap_find_next_zero_area(resource->bitmap, -+ resource->elements, 0, n, 0); -+ -+ if (p >= resource->elements) -+ return -ENOSPC; -+ -+ bitmap_set(resource->bitmap, p, n); -+ dfm_bitmap_req = dfm_bitmap_req << p; -+ active_dfm_bitmap_req = active_dfm_bitmap_req << p; -+ } -+ -+ alloc_resource = &alloc->resource_alloc[alloc->resources]; -+ alloc_resource->resource = resource; -+ /* Using elements to indicate the bitmap */ -+ alloc_resource->elements = dfm_bitmap_req; -+ alloc_resource->pos = p; -+ alloc_resource->type = IPU_RESOURCE_DFM; -+ -+ alloc->resources++; -+ -+ return 0; -+} -+ -+/* -+ * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM) -+ * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.) 
-+ */ -+static int __alloc_mem_resrc(const struct device *dev, -+ struct ipu_fw_psys_process *process, -+ struct ipu_resource *resource, -+ struct ipu_fw_generic_program_manifest *pm, -+ u32 ext_mem_type_id, u32 ext_mem_bank_id, -+ struct ipu_psys_resource_alloc *alloc) -+{ -+ const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id]; -+ const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id]; -+ -+ unsigned long retl; -+ -+ if (!memory_resource_req) -+ return -ENXIO; -+ -+ if (alloc->resources >= IPU_MAX_RESOURCES) { -+ dev_err(dev, "out of resource handles\n"); -+ return -ENOSPC; -+ } -+ if (memory_offset_req != (u16)(-1)) -+ retl = ipu_resource_alloc_with_pos -+ (resource, -+ memory_resource_req, memory_offset_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_EXT_MEM); -+ else -+ retl = ipu_resource_alloc -+ (resource, memory_resource_req, -+ &alloc->resource_alloc[alloc->resources], -+ IPU_RESOURCE_EXT_MEM); -+ if (IS_ERR_VALUE(retl)) { -+ dev_dbg(dev, "out of memory resources\n"); -+ return (int)retl; -+ } -+ -+ alloc->resources++; -+ -+ return 0; -+} -+ -+int ipu_psys_allocate_cmd_queue_res(struct ipu_psys_resource_pool *pool) -+{ -+ unsigned long p; -+ int size, start; -+ -+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; -+ -+ if (ipu_ver == IPU6_VER_6SE) { -+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; -+ start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; -+ } -+ -+ spin_lock(&pool->queues_lock); -+ /* find available cmd queue from ppg0_cmd_id */ -+ p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0); -+ -+ if (p >= size) { -+ spin_unlock(&pool->queues_lock); -+ return -ENOSPC; -+ } -+ -+ bitmap_set(pool->cmd_queues, p, 1); -+ spin_unlock(&pool->queues_lock); -+ -+ return p; -+} -+ -+void ipu_psys_free_cmd_queue_res(struct ipu_psys_resource_pool *pool, -+ u8 queue_id) -+{ -+ spin_lock(&pool->queues_lock); -+ bitmap_clear(pool->cmd_queues, queue_id, 1); -+ 
spin_unlock(&pool->queues_lock); -+} -+ -+int ipu_psys_try_allocate_resources(struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_pool *pool) -+{ -+ u32 id, idx; -+ u32 mem_type_id; -+ int ret, i; -+ u16 *process_offset_table; -+ u8 processes; -+ u32 cells = 0; -+ struct ipu_psys_resource_alloc *alloc; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ if (!pg) -+ return -EINVAL; -+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); -+ processes = pg->process_count; -+ -+ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); -+ if (!alloc) -+ return -ENOMEM; -+ -+ res_defs = get_res(); -+ for (i = 0; i < processes; i++) { -+ u32 cell; -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[i]); -+ struct ipu_fw_generic_program_manifest pm; -+ -+ memset(&pm, 0, sizeof(pm)); -+ -+ if (!process) { -+ dev_err(dev, "can not get process\n"); -+ ret = -ENOENT; -+ goto free_out; -+ } -+ -+ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); -+ if (ret < 0) { -+ dev_err(dev, "can not get manifest\n"); -+ goto free_out; -+ } -+ -+ if (pm.cell_id == res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type) { -+ cell = res_defs->num_cells; -+ } else if ((pm.cell_id != res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type)) { -+ cell = pm.cell_id; -+ } else { -+ /* Find a free cell of desired type */ -+ u32 type = pm.cell_type_id; -+ -+ for (cell = 0; cell < res_defs->num_cells; cell++) -+ if (res_defs->cells[cell] == type && -+ ((pool->cells | cells) & (1 << cell)) == 0) -+ break; -+ if (cell >= res_defs->num_cells) { -+ dev_dbg(dev, "no free cells of right type\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ } -+ if (cell < res_defs->num_cells) -+ cells |= 1 << cell; -+ if (pool->cells & cells) { -+ dev_dbg(dev, "out of cell resources\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ -+ if 
(pm.dev_chn_size) { -+ for (id = 0; id < res_defs->num_dev_channels; id++) { -+ ret = __alloc_one_resrc(dev, process, -+ &pool->dev_channels[id], -+ &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.dfm_port_bitmap) { -+ for (id = 0; id < res_defs->num_dfm_ids; id++) { -+ ret = ipu_psys_allocate_one_dfm -+ (dev, process, -+ &pool->dfms[id], &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.ext_mem_size) { -+ for (mem_type_id = 0; -+ mem_type_id < res_defs->num_ext_mem_types; -+ mem_type_id++) { -+ u32 bank = res_defs->num_ext_mem_ids; -+ -+ if (cell != res_defs->num_cells) { -+ idx = res_defs->cell_mem_row * cell + -+ mem_type_id; -+ bank = res_defs->cell_mem[idx]; -+ } -+ -+ if (bank == res_defs->num_ext_mem_ids) -+ continue; -+ -+ ret = __alloc_mem_resrc(dev, process, -+ &pool->ext_memory[bank], -+ &pm, mem_type_id, bank, -+ alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ } -+ } -+ } -+ -+ pool->cells |= cells; -+ -+ kfree(alloc); -+ return 0; -+ -+free_out: -+ dev_dbg(dev, "failed to try_allocate resource\n"); -+ kfree(alloc); -+ return ret; -+} -+ -+/* -+ * Allocate resources for pg from `pool'. Mark the allocated -+ * resources into `alloc'. Returns 0 on success, -ENOSPC -+ * if there are no enough resources, in which cases resources -+ * are not allocated at all, or some other error on other conditions. 
-+ */ -+int ipu_psys_allocate_resources(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool *pool) -+{ -+ u32 id; -+ u32 mem_type_id; -+ int ret, i; -+ u16 *process_offset_table; -+ u8 processes; -+ u32 cells = 0; -+ int p, idx; -+ u32 bmp, a_bmp; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ if (!pg) -+ return -EINVAL; -+ -+ res_defs = get_res(); -+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); -+ processes = pg->process_count; -+ -+ for (i = 0; i < processes; i++) { -+ u32 cell; -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[i]); -+ struct ipu_fw_generic_program_manifest pm; -+ -+ memset(&pm, 0, sizeof(pm)); -+ if (!process) { -+ dev_err(dev, "can not get process\n"); -+ ret = -ENOENT; -+ goto free_out; -+ } -+ -+ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); -+ if (ret < 0) { -+ dev_err(dev, "can not get manifest\n"); -+ goto free_out; -+ } -+ -+ if (pm.cell_id == res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type) { -+ cell = res_defs->num_cells; -+ } else if ((pm.cell_id != res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type)) { -+ cell = pm.cell_id; -+ } else { -+ /* Find a free cell of desired type */ -+ u32 type = pm.cell_type_id; -+ -+ for (cell = 0; cell < res_defs->num_cells; cell++) -+ if (res_defs->cells[cell] == type && -+ ((pool->cells | cells) & (1 << cell)) == 0) -+ break; -+ if (cell >= res_defs->num_cells) { -+ dev_dbg(dev, "no free cells of right type\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); -+ if (ret) -+ goto free_out; -+ } -+ if (cell < res_defs->num_cells) -+ cells |= 1 << cell; -+ if (pool->cells & cells) { -+ dev_dbg(dev, "out of cell resources\n"); -+ ret = -ENOSPC; -+ goto free_out; -+ } -+ -+ if 
(pm.dev_chn_size) { -+ for (id = 0; id < res_defs->num_dev_channels; id++) { -+ ret = __alloc_one_resrc(dev, process, -+ &pool->dev_channels[id], -+ &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ -+ idx = alloc->resources - 1; -+ p = alloc->resource_alloc[idx].pos; -+ ret = ipu_fw_psys_set_proc_dev_chn(process, id, -+ p); -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.dfm_port_bitmap) { -+ for (id = 0; id < res_defs->num_dfm_ids; id++) { -+ ret = ipu_psys_allocate_one_dfm(dev, process, -+ &pool->dfms[id], -+ &pm, id, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ -+ idx = alloc->resources - 1; -+ p = alloc->resource_alloc[idx].pos; -+ bmp = pm.dfm_port_bitmap[id]; -+ bmp = bmp << p; -+ a_bmp = pm.dfm_active_port_bitmap[id]; -+ a_bmp = a_bmp << p; -+ ret = ipu_fw_psys_set_proc_dfm_bitmap(process, -+ id, bmp, -+ a_bmp); -+ if (ret) -+ goto free_out; -+ } -+ } -+ -+ if (pm.ext_mem_size) { -+ for (mem_type_id = 0; -+ mem_type_id < res_defs->num_ext_mem_types; -+ mem_type_id++) { -+ u32 bank = res_defs->num_ext_mem_ids; -+ -+ if (cell != res_defs->num_cells) { -+ idx = res_defs->cell_mem_row * cell + -+ mem_type_id; -+ bank = res_defs->cell_mem[idx]; -+ } -+ if (bank == res_defs->num_ext_mem_ids) -+ continue; -+ -+ ret = __alloc_mem_resrc(dev, process, -+ &pool->ext_memory[bank], -+ &pm, mem_type_id, -+ bank, alloc); -+ if (ret == -ENXIO) -+ continue; -+ -+ if (ret) -+ goto free_out; -+ -+ /* no return value check here because fw api -+ * will do some checks, and would return -+ * non-zero except mem_type_id == 0. -+ * This maybe caused by that above flow of -+ * allocating mem_bank_id is improper. 
-+ */ -+ idx = alloc->resources - 1; -+ p = alloc->resource_alloc[idx].pos; -+ ipu_fw_psys_set_process_ext_mem(process, -+ mem_type_id, -+ bank, p); -+ } -+ } -+ } -+ alloc->cells |= cells; -+ pool->cells |= cells; -+ return 0; -+ -+free_out: -+ dev_err(dev, "failed to allocate resources, ret %d\n", ret); -+ ipu_psys_reset_process_cell(dev, pg, pg_manifest, i + 1); -+ ipu_psys_free_resources(alloc, pool); -+ return ret; -+} -+ -+int ipu_psys_move_resources(const struct device *dev, -+ struct ipu_psys_resource_alloc *alloc, -+ struct ipu_psys_resource_pool -+ *source_pool, struct ipu_psys_resource_pool -+ *target_pool) -+{ -+ int i; -+ -+ if (target_pool->cells & alloc->cells) { -+ dev_dbg(dev, "out of cell resources\n"); -+ return -ENOSPC; -+ } -+ -+ for (i = 0; i < alloc->resources; i++) { -+ unsigned long bitmap = 0; -+ unsigned int id = alloc->resource_alloc[i].resource->id; -+ unsigned long fbit, end; -+ -+ switch (alloc->resource_alloc[i].type) { -+ case IPU_RESOURCE_DEV_CHN: -+ bitmap_set(&bitmap, alloc->resource_alloc[i].pos, -+ alloc->resource_alloc[i].elements); -+ if (*target_pool->dev_channels[id].bitmap & bitmap) -+ return -ENOSPC; -+ break; -+ case IPU_RESOURCE_EXT_MEM: -+ end = alloc->resource_alloc[i].elements + -+ alloc->resource_alloc[i].pos; -+ -+ fbit = find_next_bit(target_pool->ext_memory[id].bitmap, -+ end, alloc->resource_alloc[i].pos); -+ /* if find_next_bit returns "end" it didn't find 1bit */ -+ if (end != fbit) -+ return -ENOSPC; -+ break; -+ case IPU_RESOURCE_DFM: -+ bitmap = alloc->resource_alloc[i].elements; -+ if (*target_pool->dfms[id].bitmap & bitmap) -+ return -ENOSPC; -+ break; -+ default: -+ dev_err(dev, "Illegal resource type\n"); -+ return -EINVAL; -+ } -+ } -+ -+ for (i = 0; i < alloc->resources; i++) { -+ u32 id = alloc->resource_alloc[i].resource->id; -+ -+ switch (alloc->resource_alloc[i].type) { -+ case IPU_RESOURCE_DEV_CHN: -+ bitmap_set(target_pool->dev_channels[id].bitmap, -+ alloc->resource_alloc[i].pos, -+ 
alloc->resource_alloc[i].elements); -+ ipu_resource_free(&alloc->resource_alloc[i]); -+ alloc->resource_alloc[i].resource = -+ &target_pool->dev_channels[id]; -+ break; -+ case IPU_RESOURCE_EXT_MEM: -+ bitmap_set(target_pool->ext_memory[id].bitmap, -+ alloc->resource_alloc[i].pos, -+ alloc->resource_alloc[i].elements); -+ ipu_resource_free(&alloc->resource_alloc[i]); -+ alloc->resource_alloc[i].resource = -+ &target_pool->ext_memory[id]; -+ break; -+ case IPU_RESOURCE_DFM: -+ *target_pool->dfms[id].bitmap |= -+ alloc->resource_alloc[i].elements; -+ *alloc->resource_alloc[i].resource->bitmap &= -+ ~(alloc->resource_alloc[i].elements); -+ alloc->resource_alloc[i].resource = -+ &target_pool->dfms[id]; -+ break; -+ default: -+ /* -+ * Just keep compiler happy. This case failed already -+ * in above loop. -+ */ -+ break; -+ } -+ } -+ -+ target_pool->cells |= alloc->cells; -+ source_pool->cells &= ~alloc->cells; -+ -+ return 0; -+} -+ -+void ipu_psys_reset_process_cell(const struct device *dev, -+ struct ipu_fw_psys_process_group *pg, -+ void *pg_manifest, -+ int process_count) -+{ -+ int i; -+ u16 *process_offset_table; -+ const struct ipu_fw_resource_definitions *res_defs; -+ -+ if (!pg) -+ return; -+ -+ res_defs = get_res(); -+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); -+ for (i = 0; i < process_count; i++) { -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[i]); -+ struct ipu_fw_generic_program_manifest pm; -+ int ret; -+ -+ if (!process) -+ break; -+ -+ ret = ipu_fw_psys_get_pgm_by_process(&pm, pg_manifest, process); -+ if (ret < 0) { -+ dev_err(dev, "can not get manifest\n"); -+ break; -+ } -+ if ((pm.cell_id != res_defs->num_cells && -+ pm.cell_type_id == res_defs->num_cells_type)) -+ continue; -+ /* no return value check here because if finding free cell -+ * failed, process cell would not set then calling clear_cell -+ * will return non-zero. 
-+ */ -+ ipu_fw_psys_clear_process_cell(process); -+ } -+} -+ -+/* Free resources marked in `alloc' from `resources' */ -+void ipu_psys_free_resources(struct ipu_psys_resource_alloc -+ *alloc, struct ipu_psys_resource_pool *pool) -+{ -+ unsigned int i; -+ -+ pool->cells &= ~alloc->cells; -+ alloc->cells = 0; -+ for (i = 0; i < alloc->resources; i++) -+ ipu_resource_free(&alloc->resource_alloc[i]); -+ alloc->resources = 0; -+} -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c -new file mode 100644 -index 0000000..fe65a0a ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c -@@ -0,0 +1,607 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6-platform-resources.h" -+ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+static const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = { -+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, -+ IPU6_FW_PSYS_VP_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* 
ICA_MEDIUM */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ -+ IPU6_FW_PSYS_GDC_TYPE_ID, -+ IPU6_FW_PSYS_TNR_TYPE_ID, -+}; -+ -+static const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, -+}; -+ -+static const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { -+ IPU6_FW_PSYS_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, -+ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM2_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM3_MAX_SIZE, -+ IPU6_FW_PSYS_PMEM0_MAX_SIZE -+}; -+ -+static const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { -+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, -+}; -+ -+static const u8 -+ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { -+ { -+ /* IPU6_FW_PSYS_SP0_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM0_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_SP1_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_VP0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_DMEM3_ID, -+ 
IPU6_FW_PSYS_VMEM0_ID, -+ IPU6_FW_PSYS_BAMEM0_ID, -+ IPU6_FW_PSYS_PMEM0_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC1_ID BNLM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC2_ID DM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC3_ID ACM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC9_ID GLTM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC10_ID XNR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_ICA_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_LSC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_DPC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, 
-+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AWB_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AE_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_DOL_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_PAF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ } -+}; -+ -+static const struct ipu_fw_resource_definitions ipu6_defs = { -+ .cells = ipu6_fw_psys_cell_types, -+ .num_cells = IPU6_FW_PSYS_N_CELL_ID, -+ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, -+ -+ .dev_channels = ipu6_fw_num_dev_channels, -+ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, -+ -+ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, -+ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, -+ .ext_mem_ids = ipu6_fw_psys_mem_size, -+ -+ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, -+ -+ .dfms = ipu6_fw_psys_dfms, -+ -+ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, -+ .cell_mem = &ipu6_fw_psys_c_mem[0][0], -+}; -+ -+const struct ipu_fw_resource_definitions 
*ipu6_res_defs = &ipu6_defs; -+ -+/********** Generic resource handling **********/ -+ -+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, -+ u16 value) -+{ -+ struct ipu6_fw_psys_process_ext *pm_ext; -+ u8 ps_ext_offset; -+ -+ ps_ext_offset = ptr->process_extension_offset; -+ if (!ps_ext_offset) -+ return -EINVAL; -+ -+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); -+ -+ pm_ext->dev_chn_offset[offset] = value; -+ -+ return 0; -+} -+ -+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, -+ u16 id, u32 bitmap, -+ u32 active_bitmap) -+{ -+ struct ipu6_fw_psys_process_ext *pm_ext; -+ u8 ps_ext_offset; -+ -+ ps_ext_offset = ptr->process_extension_offset; -+ if (!ps_ext_offset) -+ return -EINVAL; -+ -+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); -+ -+ pm_ext->dfm_port_bitmap[id] = bitmap; -+ pm_ext->dfm_active_port_bitmap[id] = active_bitmap; -+ -+ return 0; -+} -+ -+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, -+ u16 type_id, u16 mem_id, u16 offset) -+{ -+ struct ipu6_fw_psys_process_ext *pm_ext; -+ u8 ps_ext_offset; -+ -+ ps_ext_offset = ptr->process_extension_offset; -+ if (!ps_ext_offset) -+ return -EINVAL; -+ -+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); -+ -+ pm_ext->ext_mem_offset[type_id] = offset; -+ pm_ext->ext_mem_id[type_id] = mem_id; -+ -+ return 0; -+} -+ -+static struct ipu_fw_psys_program_manifest * -+get_program_manifest(const struct ipu_fw_psys_pgm *manifest, -+ const unsigned int program_index) -+{ -+ struct ipu_fw_psys_program_manifest *prg_manifest_base; -+ u8 *program_manifest = NULL; -+ u8 program_count; -+ unsigned int i; -+ -+ program_count = manifest->program_count; -+ -+ prg_manifest_base = (struct ipu_fw_psys_program_manifest *) -+ ((char *)manifest + manifest->program_manifest_offset); -+ if (program_index < program_count) { -+ program_manifest = (u8 *)prg_manifest_base; -+ for (i = 0; 
i < program_index; i++) -+ program_manifest += -+ ((struct ipu_fw_psys_program_manifest *) -+ program_manifest)->size; -+ } -+ -+ return (struct ipu_fw_psys_program_manifest *)program_manifest; -+} -+ -+int -+ipu6_fw_psys_get_pgm_by_process(struct ipu_fw_generic_program_manifest *gen_pm, -+ const struct ipu_fw_psys_pgm *pg_manifest, -+ struct ipu_fw_psys_process *process) -+{ -+ u32 program_id = process->program_idx; -+ struct ipu_fw_psys_program_manifest *pm; -+ struct ipu6_fw_psys_program_manifest_ext *pm_ext; -+ -+ pm = get_program_manifest(pg_manifest, program_id); -+ -+ if (!pm) -+ return -ENOENT; -+ -+ if (pm->program_extension_offset) { -+ pm_ext = (struct ipu6_fw_psys_program_manifest_ext *) -+ ((u8 *)pm + pm->program_extension_offset); -+ -+ gen_pm->dev_chn_size = pm_ext->dev_chn_size; -+ gen_pm->dev_chn_offset = pm_ext->dev_chn_offset; -+ gen_pm->ext_mem_size = pm_ext->ext_mem_size; -+ gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset; -+ gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable; -+ gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap; -+ gen_pm->dfm_active_port_bitmap = -+ pm_ext->dfm_active_port_bitmap; -+ } -+ -+ memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells)); -+ gen_pm->cell_id = pm->cells[0]; -+ gen_pm->cell_type_id = pm->cell_type_id; -+ -+ return 0; -+} -+ -+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \ -+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) -+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note) -+{ -+ struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; -+ struct device *dev = &psys->adev->auxdev.dev; -+ u32 pgid = pg->ID; -+ u8 processes = pg->process_count; -+ u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); -+ unsigned int p, chn, mem, mem_id; -+ unsigned int mem_type, max_mem_id, dev_chn; -+ -+ if (ipu_ver == IPU6_VER_6SE) { -+ mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID; -+ max_mem_id = 
IPU6SE_FW_PSYS_N_MEM_ID; -+ dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID; -+ } else if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || -+ ipu_ver == IPU6_VER_6EP_MTL) { -+ mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID; -+ max_mem_id = IPU6_FW_PSYS_N_MEM_ID; -+ dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID; -+ } else { -+ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); -+ return; -+ } -+ -+ dev_dbg(dev, "%s %s pgid %i has %i processes:\n", -+ __func__, note, pgid, processes); -+ -+ for (p = 0; p < processes; p++) { -+ struct ipu_fw_psys_process *process = -+ (struct ipu_fw_psys_process *) -+ ((char *)pg + process_offset_table[p]); -+ struct ipu6_fw_psys_process_ext *pm_ext = -+ (struct ipu6_fw_psys_process_ext *)((u8 *)process -+ + process->process_extension_offset); -+ dev_dbg(dev, "\t process %i size=%u", p, process->size); -+ if (!process->process_extension_offset) -+ continue; -+ -+ for (mem = 0; mem < mem_type; mem++) { -+ mem_id = pm_ext->ext_mem_id[mem]; -+ if (mem_id != max_mem_id) -+ dev_dbg(dev, "\t mem type %u id %d offset=0x%x", -+ mem, mem_id, -+ pm_ext->ext_mem_offset[mem]); -+ } -+ for (chn = 0; chn < dev_chn; chn++) { -+ if (pm_ext->dev_chn_offset[chn] != (u16)(-1)) -+ dev_dbg(dev, "\t dev_chn[%u]=0x%x\n", -+ chn, pm_ext->dev_chn_offset[chn]); -+ } -+ } -+} -+#else -+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd, const char *note) -+{ -+ if (ipu_ver == IPU6_VER_6SE || ipu_ver == IPU6_VER_6 || -+ ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) -+ return; -+ -+ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); -+} -+#endif -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c -new file mode 100644 -index 0000000..5d95843 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c -@@ -0,0 +1,621 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ 
-+#include "ipu-psys.h" -+#include "ipu6-ppg.h" -+ -+extern bool enable_power_gating; -+ -+struct sched_list { -+ struct list_head list; -+ /* to protect the list */ -+ struct mutex lock; -+}; -+ -+static struct sched_list start_list = { -+ .list = LIST_HEAD_INIT(start_list.list), -+ .lock = __MUTEX_INITIALIZER(start_list.lock), -+}; -+ -+static struct sched_list stop_list = { -+ .list = LIST_HEAD_INIT(stop_list.list), -+ .lock = __MUTEX_INITIALIZER(stop_list.lock), -+}; -+ -+static struct sched_list *get_sc_list(enum SCHED_LIST type) -+{ -+ /* for debug purposes */ -+ WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST); -+ -+ if (type == SCHED_START_LIST) -+ return &start_list; -+ return &stop_list; -+} -+ -+static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head) -+{ -+ struct ipu_psys_ppg *tmp; -+ -+ list_for_each_entry(tmp, head, sched_list) { -+ if (kppg == tmp) -+ return true; -+ } -+ -+ return false; -+} -+ -+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type) -+{ -+ struct sched_list *sc_list = get_sc_list(type); -+ struct ipu_psys_ppg *tmp0, *tmp1; -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ -+ mutex_lock(&sc_list->lock); -+ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { -+ if (tmp0 == kppg) { -+ dev_dbg(dev, "remove from %s list, kppg(%d 0x%p) state %d\n", -+ type == SCHED_START_LIST ? 
"start" : "stop", -+ kppg->kpg->pg->ID, kppg, kppg->state); -+ list_del_init(&kppg->sched_list); -+ } -+ } -+ mutex_unlock(&sc_list->lock); -+} -+ -+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type) -+{ -+ int cur_pri = kppg->pri_base + kppg->pri_dynamic; -+ struct sched_list *sc_list = get_sc_list(type); -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *tmp0, *tmp1; -+ -+ dev_dbg(dev, -+ "add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n", -+ type == SCHED_START_LIST ? "start" : "stop", -+ kppg->kpg->pg->ID, kppg, kppg->state, -+ kppg->pri_base, kppg->pri_dynamic, kppg->fh); -+ -+ mutex_lock(&sc_list->lock); -+ if (list_empty(&sc_list->list)) { -+ list_add(&kppg->sched_list, &sc_list->list); -+ goto out; -+ } -+ -+ if (is_kppg_in_list(kppg, &sc_list->list)) { -+ dev_dbg(dev, "kppg already in list\n"); -+ goto out; -+ } -+ -+ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { -+ int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic; -+ -+ dev_dbg(dev, -+ "found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n", -+ tmp0->kpg->pg->ID, tmp0, tmp0->state, -+ tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh); -+ -+ if (type == SCHED_START_LIST && tmp_pri > cur_pri) { -+ list_add(&kppg->sched_list, tmp0->sched_list.prev); -+ goto out; -+ } else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) { -+ list_add(&kppg->sched_list, tmp0->sched_list.prev); -+ goto out; -+ } -+ } -+ -+ list_add_tail(&kppg->sched_list, &sc_list->list); -+out: -+ mutex_unlock(&sc_list->lock); -+} -+ -+static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_resource_pool *try_res_pool; -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ int ret = 0; -+ int state; -+ -+ try_res_pool = kzalloc(sizeof(*try_res_pool), GFP_KERNEL); -+ if (IS_ERR_OR_NULL(try_res_pool)) -+ return -ENOMEM; -+ -+ 
mutex_lock(&kppg->mutex); -+ state = kppg->state; -+ mutex_unlock(&kppg->mutex); -+ if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING || -+ state == PPG_STATE_RESUMED) -+ goto exit; -+ -+ ret = ipu_psys_res_pool_init(try_res_pool); -+ if (ret < 0) { -+ dev_err(dev, "unable to alloc pg resources\n"); -+ WARN_ON(1); -+ goto exit; -+ } -+ -+ ipu_psys_resource_copy(&psys->res_pool_running, try_res_pool); -+ ret = ipu_psys_try_allocate_resources(dev, kppg->kpg->pg, -+ kppg->manifest, -+ try_res_pool); -+ -+ ipu_psys_res_pool_cleanup(try_res_pool); -+exit: -+ kfree(try_res_pool); -+ -+ return ret; -+} -+ -+static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping) -+{ -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_START || -+ kppg->state == PPG_STATE_RESUME) { -+ ipu_psys_scheduler_add_kppg(kppg, -+ SCHED_START_LIST); -+ } else if (kppg->state == PPG_STATE_RUNNING) { -+ ipu_psys_scheduler_add_kppg(kppg, -+ SCHED_STOP_LIST); -+ } else if (kppg->state == PPG_STATE_SUSPENDING || -+ kppg->state == PPG_STATE_STOPPING) { -+ /* there are some suspending/stopping ppgs */ -+ *stopping = true; -+ } else if (kppg->state == PPG_STATE_RESUMING || -+ kppg->state == PPG_STATE_STARTING) { -+ /* how about kppg are resuming/starting? 
*/ -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -+ -+static void ipu_psys_scheduler_update_start_ppg_priority(void) -+{ -+ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); -+ struct ipu_psys_ppg *kppg, *tmp; -+ -+ mutex_lock(&sc_list->lock); -+ if (!list_empty(&sc_list->list)) -+ list_for_each_entry_safe(kppg, tmp, &sc_list->list, sched_list) -+ kppg->pri_dynamic--; -+ mutex_unlock(&sc_list->lock); -+} -+ -+static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys) -+{ -+ struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST); -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg; -+ bool resched = false; -+ -+ mutex_lock(&sc_list->lock); -+ if (list_empty(&sc_list->list)) { -+ /* some ppgs are RESUMING/STARTING */ -+ dev_dbg(dev, "no candidated stop ppg\n"); -+ mutex_unlock(&sc_list->lock); -+ return false; -+ } -+ kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg, -+ sched_list); -+ mutex_unlock(&sc_list->lock); -+ -+ mutex_lock(&kppg->mutex); -+ if (!(kppg->state & PPG_STATE_STOP)) { -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, kppg->state, PPG_STATE_SUSPEND); -+ kppg->state = PPG_STATE_SUSPEND; -+ resched = true; -+ } -+ mutex_unlock(&kppg->mutex); -+ -+ return resched; -+} -+ -+/* -+ * search all kppgs and sort them into start_list and stop_list, always start -+ * first kppg(high priority) in start_list; -+ * if there is resource contention, it would switch kppgs in stop_list -+ * to suspend state one by one -+ */ -+static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys) -+{ -+ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg, *kppg0; -+ bool stopping_existed = false; -+ int ret; -+ -+ ipu_psys_scheduler_ppg_sort(psys, &stopping_existed); -+ -+ mutex_lock(&sc_list->lock); -+ if (list_empty(&sc_list->list)) { -+ dev_dbg(dev, "no ppg to start\n"); -+ 
mutex_unlock(&sc_list->lock); -+ return false; -+ } -+ -+ list_for_each_entry_safe(kppg, kppg0, -+ &sc_list->list, sched_list) { -+ mutex_unlock(&sc_list->lock); -+ -+ ret = ipu_psys_detect_resource_contention(kppg); -+ if (ret < 0) { -+ dev_dbg(dev, "ppg %d resource detect failed(%d)\n", -+ kppg->kpg->pg->ID, ret); -+ /* -+ * switch out other ppg in 2 cases: -+ * 1. resource contention -+ * 2. no suspending/stopping ppg -+ */ -+ if (ret == -ENOSPC) { -+ if (!stopping_existed && -+ ipu_psys_scheduler_switch_ppg(psys)) { -+ return true; -+ } -+ dev_dbg(dev, "ppg is suspending/stopping\n"); -+ } else { -+ dev_err(dev, "detect resource error %d\n", ret); -+ } -+ } else { -+ kppg->pri_dynamic = 0; -+ -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_START) -+ ipu_psys_ppg_start(kppg); -+ else -+ ipu_psys_ppg_resume(kppg); -+ mutex_unlock(&kppg->mutex); -+ -+ ipu_psys_scheduler_remove_kppg(kppg, -+ SCHED_START_LIST); -+ ipu_psys_scheduler_update_start_ppg_priority(); -+ } -+ mutex_lock(&sc_list->lock); -+ } -+ mutex_unlock(&sc_list->lock); -+ -+ return false; -+} -+ -+static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_fh *fh; -+ bool resched = false; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry(kppg, &sched->ppgs, list) { -+ if (ipu_psys_ppg_enqueue_bufsets(kppg)) -+ resched = true; -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return resched; -+} -+ -+/* -+ * This function will check all kppgs within fhs, and if kppg state -+ * is STOP or SUSPEND, l-scheduler will call ppg function to stop -+ * or suspend it and update stop list -+ */ -+ -+static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct 
ipu_psys_fh *fh; -+ bool stopping_exit = false; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state & PPG_STATE_STOP) { -+ ipu_psys_ppg_stop(kppg); -+ ipu_psys_scheduler_remove_kppg(kppg, -+ SCHED_STOP_LIST); -+ } else if (kppg->state == PPG_STATE_SUSPEND && -+ list_empty(&kppg->kcmds_processing_list)) { -+ ipu_psys_ppg_suspend(kppg); -+ ipu_psys_scheduler_remove_kppg(kppg, -+ SCHED_STOP_LIST); -+ } else if (kppg->state == PPG_STATE_SUSPENDING || -+ kppg->state == PPG_STATE_STOPPING) { -+ stopping_exit = true; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ return stopping_exit; -+} -+ -+static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys, -+ struct ipu_psys_ppg *kppg, -+ struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int old_ppg_state = kppg->state; -+ -+ /* -+ * Respond kcmd when ppg is in stable state: -+ * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED -+ */ -+ if (kppg->state == PPG_STATE_STARTED || -+ kppg->state == PPG_STATE_RESUMED || -+ kppg->state == PPG_STATE_RUNNING) { -+ if (kcmd->state == KCMD_STATE_PPG_START) -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ else if (kcmd->state == KCMD_STATE_PPG_STOP) -+ kppg->state = PPG_STATE_STOP; -+ } else if (kppg->state == PPG_STATE_SUSPENDED) { -+ if (kcmd->state == KCMD_STATE_PPG_START) -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ else if (kcmd->state == KCMD_STATE_PPG_STOP) -+ /* -+ * Record the previous state -+ * because here need resume at first -+ */ -+ kppg->state |= PPG_STATE_STOP; -+ else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) -+ kppg->state = PPG_STATE_RESUME; -+ } else if (kppg->state == PPG_STATE_STOPPED) { -+ if (kcmd->state == KCMD_STATE_PPG_START) { -+ kppg->state = 
PPG_STATE_START; -+ } else if (kcmd->state == KCMD_STATE_PPG_STOP) { -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ } else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) { -+ dev_err(dev, "ppg %p stopped!\n", kppg); -+ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); -+ } -+ } -+ -+ if (old_ppg_state != kppg->state) -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, old_ppg_state, kppg->state); -+} -+ -+static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys) -+{ -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (list_empty(&kppg->kcmds_new_list)) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ }; -+ -+ kcmd = list_first_entry(&kppg->kcmds_new_list, -+ struct ipu_psys_kcmd, list); -+ ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd); -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -+ -+static bool is_ready_to_enter_power_gating(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (!list_empty(&kppg->kcmds_new_list) || -+ !list_empty(&kppg->kcmds_processing_list)) { -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return false; -+ } -+ if (!(kppg->state == PPG_STATE_RUNNING || -+ kppg->state == PPG_STATE_STOPPED || -+ kppg->state == PPG_STATE_SUSPENDED)) { -+ mutex_unlock(&kppg->mutex); -+ 
mutex_unlock(&fh->mutex); -+ return false; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return true; -+} -+ -+static bool has_pending_kcmd(struct ipu_psys *psys) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (!list_empty(&kppg->kcmds_new_list) || -+ !list_empty(&kppg->kcmds_processing_list)) { -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return true; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return false; -+} -+ -+static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ -+ /* Assume power gating process can be aborted directly during START */ -+ if (psys->power_gating == PSYS_POWER_GATED) { -+ dev_dbg(dev, "powergating: exit ---\n"); -+ ipu_psys_exit_power_gating(psys); -+ } -+ psys->power_gating = PSYS_POWER_NORMAL; -+ return false; -+} -+ -+static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ if (!enable_power_gating) -+ return false; -+ -+ if (psys->power_gating == PSYS_POWER_NORMAL && -+ is_ready_to_enter_power_gating(psys)) { -+ /* Enter power gating */ -+ dev_dbg(dev, "powergating: enter +++\n"); -+ psys->power_gating = PSYS_POWER_GATING; -+ } -+ -+ if (psys->power_gating != PSYS_POWER_GATING) -+ return false; -+ -+ /* Suspend ppgs one by one */ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ 
mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_RUNNING) { -+ kppg->state = PPG_STATE_SUSPEND; -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return true; -+ } -+ -+ if (kppg->state != PPG_STATE_SUSPENDED && -+ kppg->state != PPG_STATE_STOPPED) { -+ /* Can't enter power gating */ -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ /* Need re-run l-scheduler to suspend ppg? */ -+ return (kppg->state & PPG_STATE_STOP || -+ kppg->state == PPG_STATE_SUSPEND); -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ psys->power_gating = PSYS_POWER_GATED; -+ ipu_psys_enter_power_gating(psys); -+ -+ return false; -+} -+ -+void ipu_psys_run_next(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ /* Wake up scheduler due to unfinished work */ -+ bool need_trigger = false; -+ /* Wait FW callback if there are stopping/suspending/running ppg */ -+ bool wait_fw_finish = false; -+ /* -+ * Code below will crash if fhs is empty. Normally this -+ * shouldn't happen. 
-+ */ -+ if (list_empty(&psys->fhs)) { -+ WARN_ON(1); -+ return; -+ } -+ -+ /* Abort power gating process */ -+ if (psys->power_gating != PSYS_POWER_NORMAL && -+ has_pending_kcmd(psys)) -+ need_trigger = ipu_psys_scheduler_exit_power_gating(psys); -+ -+ /* Handle kcmd and related ppg switch */ -+ if (psys->power_gating == PSYS_POWER_NORMAL) { -+ ipu_psys_scheduler_kcmd_set(psys); -+ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); -+ need_trigger |= ipu_psys_scheduler_ppg_start(psys); -+ need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys); -+ } -+ if (!(need_trigger || wait_fw_finish)) { -+ /* Nothing to do, enter power gating */ -+ need_trigger = ipu_psys_scheduler_enter_power_gating(psys); -+ if (psys->power_gating == PSYS_POWER_GATING) -+ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); -+ } -+ -+ if (need_trigger && !wait_fw_finish) { -+ dev_dbg(dev, "scheduler: wake up\n"); -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ } -+} -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h -new file mode 100644 -index 0000000..1b67956 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h -@@ -0,0 +1,194 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2018 - 2024 Intel Corporation */ -+ -+#ifndef IPU6_PLATFORM_RESOURCES_H -+#define IPU6_PLATFORM_RESOURCES_H -+ -+#include "ipu-platform-resources.h" -+ -+#define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 0 -+ -+enum { -+ IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, -+ IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID, -+ 
IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID, -+ IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID, -+ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID, -+ IPU6_FW_PSYS_LB_VMEM_TYPE_ID, -+ IPU6_FW_PSYS_DMEM_TYPE_ID, -+ IPU6_FW_PSYS_VMEM_TYPE_ID, -+ IPU6_FW_PSYS_BAMEM_TYPE_ID, -+ IPU6_FW_PSYS_PMEM_TYPE_ID, -+ IPU6_FW_PSYS_N_MEM_TYPE_ID -+}; -+ -+enum ipu6_mem_id { -+ IPU6_FW_PSYS_VMEM0_ID = 0, /* ISP0 VMEM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, /* TRANSFER VMEM 0 */ -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, /* TRANSFER VMEM 1 */ -+ IPU6_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ -+ IPU6_FW_PSYS_BAMEM0_ID, /* ISP0 BAMEM */ -+ IPU6_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ -+ IPU6_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ -+ IPU6_FW_PSYS_DMEM2_ID, /* SPP1 Dmem */ -+ IPU6_FW_PSYS_DMEM3_ID, /* ISP0 Dmem */ -+ IPU6_FW_PSYS_PMEM0_ID, /* ISP0 PMEM */ -+ IPU6_FW_PSYS_N_MEM_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, -+ 
IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, -+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID, -+ IPU6_FW_PSYS_N_DEV_CHN_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0, -+ IPU6_FW_PSYS_SP_SERVER_TYPE_ID, -+ IPU6_FW_PSYS_VP_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_GDC_TYPE_ID, -+ IPU6_FW_PSYS_TNR_TYPE_ID, -+ IPU6_FW_PSYS_N_CELL_TYPE_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_SP0_ID = 0, -+ IPU6_FW_PSYS_VP0_ID, -+ IPU6_FW_PSYS_PSA_ACC_BNLM_ID, -+ IPU6_FW_PSYS_PSA_ACC_DM_ID, -+ IPU6_FW_PSYS_PSA_ACC_ACM_ID, -+ IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID, -+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, -+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, -+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, -+ IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID, -+ IPU6_FW_PSYS_PSA_ACC_GLTM_ID, -+ IPU6_FW_PSYS_PSA_ACC_XNR_ID, -+ IPU6_FW_PSYS_PSA_VCSC_ID, /* VCSC */ -+ IPU6_FW_PSYS_ISA_ICA_ID, -+ IPU6_FW_PSYS_ISA_LSC_ID, -+ IPU6_FW_PSYS_ISA_DPC_ID, -+ IPU6_FW_PSYS_ISA_SIS_A_ID, -+ IPU6_FW_PSYS_ISA_SIS_B_ID, -+ IPU6_FW_PSYS_ISA_B2B_ID, -+ IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID, -+ IPU6_FW_PSYS_ISA_R2I_DS_A_ID, -+ IPU6_FW_PSYS_ISA_R2I_DS_B_ID, -+ IPU6_FW_PSYS_ISA_AWB_ID, -+ IPU6_FW_PSYS_ISA_AE_ID, -+ IPU6_FW_PSYS_ISA_AF_ID, -+ IPU6_FW_PSYS_ISA_DOL_ID, -+ IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID, -+ IPU6_FW_PSYS_ISA_X2B_MD_ID, -+ IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, -+ IPU6_FW_PSYS_ISA_PAF_ID, -+ IPU6_FW_PSYS_BB_ACC_GDC0_ID, -+ IPU6_FW_PSYS_BB_ACC_TNR_ID, -+ IPU6_FW_PSYS_N_CELL_ID -+}; -+ -+enum { -+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0, -+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID, -+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID, -+}; -+ -+/* Excluding PMEM */ -+#define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6_FW_PSYS_N_MEM_TYPE_ID - 1) -+#define IPU6_FW_PSYS_N_DEV_DFM_ID \ -+ (IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1) -+ -+#define IPU6_FW_PSYS_VMEM0_MAX_SIZE 0x0800 -+/* 
Transfer VMEM0 words, ref HAS Transfer*/ -+#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 -+/* Transfer VMEM1 words, ref HAS Transfer*/ -+#define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE 0x0800 -+#define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ -+#define IPU6_FW_PSYS_BAMEM0_MAX_SIZE 0x0800 -+#define IPU6_FW_PSYS_DMEM0_MAX_SIZE 0x4000 -+#define IPU6_FW_PSYS_DMEM1_MAX_SIZE 0x1000 -+#define IPU6_FW_PSYS_DMEM2_MAX_SIZE 0x1000 -+#define IPU6_FW_PSYS_DMEM3_MAX_SIZE 0x1000 -+#define IPU6_FW_PSYS_PMEM0_MAX_SIZE 0x0500 -+ -+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 -+#define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 0 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 43 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 8 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 -+#define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 -+ -+#define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 -+#define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 -+ -+struct ipu6_fw_psys_program_manifest_ext { -+ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+ u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID]; -+ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; -+ u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; -+ u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; -+ u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; -+ u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; -+ u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT]; -+}; -+ -+struct ipu6_fw_psys_process_ext { -+ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; -+ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; -+ u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; -+}; -+ -+#endif /* IPU6_PLATFORM_RESOURCES_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c -new file mode 100644 -index 0000000..b6a90f8 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c -@@ -0,0 +1,556 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+ -+#include "ipu6-dma.h" -+#include "ipu6-ppg.h" -+ -+static bool enable_suspend_resume; -+module_param(enable_suspend_resume, bool, 0664); -+MODULE_PARM_DESC(enable_suspend_resume, "enable fw ppg suspend/resume api"); -+ -+static struct ipu_psys_kcmd * -+ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state) -+{ -+ struct ipu_psys_kcmd *kcmd; -+ -+ if (list_empty(&kppg->kcmds_new_list)) -+ return NULL; -+ -+ list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) { -+ if (kcmd->state == state) -+ return kcmd; -+ } -+ -+ return NULL; -+} -+ -+struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_kcmd *kcmd; -+ -+ WARN(!mutex_is_locked(&kppg->mutex), "ppg locking error"); -+ -+ if (list_empty(&kppg->kcmds_processing_list)) -+ return NULL; -+ -+ list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) { -+ if (kcmd->state == KCMD_STATE_PPG_STOP) -+ return kcmd; -+ } -+ -+ return NULL; -+} -+ -+static struct ipu_psys_buffer_set * -+__get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size) -+{ -+ struct ipu6_bus_device *adev = fh->psys->adev; -+ struct ipu_psys_buffer_set *kbuf_set; -+ struct 
ipu_psys_scheduler *sched = &fh->sched; -+ -+ mutex_lock(&sched->bs_mutex); -+ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { -+ if (!kbuf_set->buf_set_size && -+ kbuf_set->size >= buf_set_size) { -+ kbuf_set->buf_set_size = buf_set_size; -+ mutex_unlock(&sched->bs_mutex); -+ return kbuf_set; -+ } -+ } -+ -+ mutex_unlock(&sched->bs_mutex); -+ /* no suitable buffer available, allocate new one */ -+ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); -+ if (!kbuf_set) -+ return NULL; -+ -+ kbuf_set->kaddr = ipu6_dma_alloc(adev, buf_set_size, -+ &kbuf_set->dma_addr, GFP_KERNEL, 0); -+ if (!kbuf_set->kaddr) { -+ kfree(kbuf_set); -+ return NULL; -+ } -+ -+ kbuf_set->buf_set_size = buf_set_size; -+ kbuf_set->size = buf_set_size; -+ mutex_lock(&sched->bs_mutex); -+ list_add(&kbuf_set->list, &sched->buf_sets); -+ mutex_unlock(&sched->bs_mutex); -+ -+ return kbuf_set; -+} -+ -+static struct ipu_psys_buffer_set * -+ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd, -+ struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_buffer_set *kbuf_set; -+ size_t buf_set_size; -+ u32 *keb; -+ -+ buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd); -+ -+ kbuf_set = __get_buf_set(fh, buf_set_size); -+ if (!kbuf_set) { -+ dev_err(dev, "failed to create buffer set\n"); -+ return NULL; -+ } -+ -+ kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd, -+ kbuf_set->kaddr, -+ 0); -+ -+ ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set, -+ kbuf_set->dma_addr); -+ keb = kcmd->kernel_enable_bitmap; -+ ipu_fw_psys_ppg_buffer_set_set_keb(kbuf_set->buf_set, keb); -+ ipu6_dma_sync_single(psys->adev, kbuf_set->dma_addr, buf_set_size); -+ -+ return kbuf_set; -+} -+ -+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, -+ struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct 
ipu_psys_buffer_set *kbuf_set; -+ unsigned int i; -+ int ret; -+ -+ kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg); -+ if (!kbuf_set) { -+ ret = -EINVAL; -+ goto error; -+ } -+ kcmd->kbuf_set = kbuf_set; -+ kbuf_set->kcmd = kcmd; -+ -+ for (i = 0; i < kcmd->nbuffers; i++) { -+ struct ipu_fw_psys_terminal *terminal; -+ u32 buffer; -+ -+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); -+ if (!terminal) -+ continue; -+ -+ buffer = (u32)kcmd->kbufs[i]->dma_addr + -+ kcmd->buffers[i].data_offset; -+ -+ ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer); -+ if (ret) { -+ dev_err(dev, "Unable to set bufset\n"); -+ goto error; -+ } -+ } -+ -+ return 0; -+ -+error: -+ dev_err(dev, "failed to get buffer set\n"); -+ return ret; -+} -+ -+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg) -+{ -+ struct device *dev; -+ u8 queue_id; -+ int old_ppg_state; -+ -+ if (!psys || !kppg) -+ return; -+ -+ dev = &psys->adev->auxdev.dev; -+ -+ mutex_lock(&kppg->mutex); -+ old_ppg_state = kppg->state; -+ if (kppg->state == PPG_STATE_STOPPING) { -+ struct ipu_psys_kcmd tmp_kcmd = { -+ .kpg = kppg->kpg, -+ }; -+ -+ kppg->state = PPG_STATE_STOPPED; -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd); -+ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, queue_id); -+ pm_runtime_put(dev); -+ } else { -+ if (kppg->state == PPG_STATE_SUSPENDING) { -+ kppg->state = PPG_STATE_SUSPENDED; -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ } else if (kppg->state == PPG_STATE_STARTED || -+ kppg->state == PPG_STATE_RESUMED) { -+ kppg->state = PPG_STATE_RUNNING; -+ } -+ -+ /* Kick l-scheduler thread for FW callback, -+ * also for checking if need to enter power gating -+ */ -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ } -+ if (old_ppg_state != kppg->state) -+ dev_dbg(dev, "s_change:%s: %p %d 
-> %d\n", __func__, kppg, -+ old_ppg_state, kppg->state); -+ -+ mutex_unlock(&kppg->mutex); -+} -+ -+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, -+ KCMD_STATE_PPG_START); -+ unsigned int i; -+ int ret; -+ -+ if (!kcmd) { -+ dev_err(dev, "failed to find start kcmd!\n"); -+ return -EINVAL; -+ } -+ -+ dev_dbg(dev, "start ppg id %d, addr 0x%p\n", -+ ipu_fw_psys_pg_get_id(kcmd), kppg); -+ -+ kppg->state = PPG_STATE_STARTING; -+ for (i = 0; i < kcmd->nbuffers; i++) { -+ struct ipu_fw_psys_terminal *terminal; -+ -+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); -+ if (!terminal) -+ continue; -+ -+ ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0, -+ kcmd->buffers[i].len); -+ if (ret) { -+ dev_err(dev, "Unable to set terminal\n"); -+ return ret; -+ } -+ } -+ -+ ret = ipu_fw_psys_pg_submit(kcmd); -+ if (ret) { -+ dev_err(dev, "failed to submit kcmd!\n"); -+ return ret; -+ } -+ -+ ret = ipu_psys_allocate_resources(dev, kcmd->kpg->pg, kcmd->pg_manifest, -+ &kcmd->kpg->resource_alloc, -+ &psys->res_pool_running); -+ if (ret) { -+ dev_err(dev, "alloc resources failed!\n"); -+ return ret; -+ } -+ -+ ret = pm_runtime_get_sync(dev); -+ if (ret < 0) { -+ dev_err(dev, "failed to power on psys\n"); -+ goto error; -+ } -+ -+ ret = ipu_psys_kcmd_start(psys, kcmd); -+ if (ret) { -+ ipu_psys_kcmd_complete(kppg, kcmd, -EIO); -+ goto error; -+ } -+ -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, kppg->state, PPG_STATE_STARTED); -+ kppg->state = PPG_STATE_STARTED; -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ -+ return 0; -+ -+error: -+ pm_runtime_put_noidle(dev); -+ ipu_psys_reset_process_cell(dev, kcmd->kpg->pg, kcmd->pg_manifest, -+ kcmd->kpg->pg->process_count); -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ -+ dev_err(dev, "failed to start ppg\n"); -+ return ret; -+} 
-+ -+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd tmp_kcmd = { -+ .kpg = kppg->kpg, -+ .fh = kppg->fh, -+ }; -+ int ret; -+ -+ dev_dbg(dev, "resume ppg id %d, addr 0x%p\n", -+ ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg); -+ -+ kppg->state = PPG_STATE_RESUMING; -+ if (enable_suspend_resume) { -+ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, -+ kppg->manifest, -+ &kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ if (ret) { -+ dev_err(dev, "failed to allocate res\n"); -+ return -EIO; -+ } -+ -+ ret = ipu_fw_psys_ppg_resume(&tmp_kcmd); -+ if (ret) { -+ dev_err(dev, "failed to resume ppg\n"); -+ goto error; -+ } -+ } else { -+ kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY; -+ ret = ipu_fw_psys_pg_submit(&tmp_kcmd); -+ if (ret) { -+ dev_err(dev, "failed to submit kcmd!\n"); -+ return ret; -+ } -+ -+ ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, -+ kppg->manifest, -+ &kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ if (ret) { -+ dev_err(dev, "failed to allocate res\n"); -+ return ret; -+ } -+ -+ ret = ipu_psys_kcmd_start(psys, &tmp_kcmd); -+ if (ret) { -+ dev_err(dev, "failed to start kcmd!\n"); -+ goto error; -+ } -+ } -+ dev_dbg(dev, "s_change:%s: %p %d -> %d\n", -+ __func__, kppg, kppg->state, PPG_STATE_RESUMED); -+ kppg->state = PPG_STATE_RESUMED; -+ -+ return 0; -+ -+error: -+ ipu_psys_reset_process_cell(dev, kppg->kpg->pg, kppg->manifest, -+ kppg->kpg->pg->process_count); -+ ipu_psys_free_resources(&kppg->kpg->resource_alloc, -+ &psys->res_pool_running); -+ -+ return ret; -+} -+ -+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, -+ KCMD_STATE_PPG_STOP); -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd kcmd_temp; -+ int ppg_id, ret = 0; -+ -+ if (kcmd) { -+ 
list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); -+ } else { -+ dev_dbg(dev, "Exceptional stop happened!\n"); -+ kcmd_temp.kpg = kppg->kpg; -+ kcmd_temp.fh = kppg->fh; -+ kcmd = &kcmd_temp; -+ /* delete kppg in stop list to avoid this ppg resuming */ -+ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); -+ } -+ -+ ppg_id = ipu_fw_psys_pg_get_id(kcmd); -+ dev_dbg(dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg); -+ -+ if (kppg->state & PPG_STATE_SUSPENDED) { -+ if (enable_suspend_resume) { -+ dev_dbg(dev, "need resume before stop!\n"); -+ kcmd_temp.kpg = kppg->kpg; -+ kcmd_temp.fh = kppg->fh; -+ ret = ipu_fw_psys_ppg_resume(&kcmd_temp); -+ if (ret) -+ dev_err(dev, "ppg(%d) failed to resume\n", -+ ppg_id); -+ } else if (kcmd != &kcmd_temp) { -+ u8 queue_id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); -+ -+ ipu_psys_free_cmd_queue_res(&psys->res_pool_running, -+ queue_id); -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, -+ kppg, kppg->state, PPG_STATE_STOPPED); -+ pm_runtime_put(dev); -+ kppg->state = PPG_STATE_STOPPED; -+ -+ return 0; -+ } else { -+ return 0; -+ } -+ } -+ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, -+ PPG_STATE_STOPPING); -+ kppg->state = PPG_STATE_STOPPING; -+ ret = ipu_fw_psys_pg_abort(kcmd); -+ if (ret) -+ dev_err(dev, "ppg(%d) failed to abort\n", ppg_id); -+ -+ return ret; -+} -+ -+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd tmp_kcmd = { -+ .kpg = kppg->kpg, -+ .fh = kppg->fh, -+ }; -+ int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd); -+ int ret = 0; -+ -+ dev_dbg(dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg); -+ -+ dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, -+ PPG_STATE_SUSPENDING); -+ kppg->state = PPG_STATE_SUSPENDING; -+ if (enable_suspend_resume) -+ ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd); -+ else -+ 
ret = ipu_fw_psys_pg_abort(&tmp_kcmd); -+ if (ret) -+ dev_err(dev, "failed to %s ppg(%d)\n", -+ enable_suspend_resume ? "suspend" : "stop", ret); -+ -+ return ret; -+} -+ -+static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg) -+{ -+ return !list_empty(&kppg->kcmds_new_list); -+} -+ -+/* -+ * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware -+ * Sometimes, if the ppg is at suspended state, this function will return true -+ * to reschedule and let the resume command scheduled before the buffer sets -+ * enqueuing. -+ */ -+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg) -+{ -+ struct ipu_psys_kcmd *kcmd, *kcmd0; -+ struct ipu_psys *psys = kppg->fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ bool need_resume = false; -+ -+ mutex_lock(&kppg->mutex); -+ -+ if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED | -+ PPG_STATE_RUNNING)) { -+ if (ipu_psys_ppg_is_bufset_existing(kppg)) { -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_new_list, list) { -+ int ret; -+ -+ if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) { -+ need_resume = true; -+ break; -+ } -+ -+ ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd); -+ if (ret) { -+ dev_err(dev, -+ "kppg 0x%p fail to qbufset %d", -+ kppg, ret); -+ break; -+ } -+ list_move_tail(&kcmd->list, -+ &kppg->kcmds_processing_list); -+ dev_dbg(dev, -+ "kppg %d %p queue kcmd 0x%p fh 0x%p\n", -+ ipu_fw_psys_pg_get_id(kcmd), -+ kppg, kcmd, kcmd->fh); -+ } -+ } -+ } -+ -+ mutex_unlock(&kppg->mutex); -+ return need_resume; -+} -+ -+void ipu_psys_enter_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ 
mutex_lock(&kppg->mutex); -+ /* -+ * Only for SUSPENDED kppgs, STOPPED kppgs has already -+ * power down and new kppgs might come now. -+ */ -+ if (kppg->state != PPG_STATE_SUSPENDED) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ } -+ pm_runtime_put(dev); -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -+ -+void ipu_psys_exit_power_gating(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ int ret = 0; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ mutex_lock(&fh->mutex); -+ sched = &fh->sched; -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ /* Only for SUSPENDED kppgs */ -+ if (kppg->state != PPG_STATE_SUSPENDED) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ } -+ -+ ret = pm_runtime_get_sync(dev); -+ if (ret < 0) { -+ dev_err(dev, "failed to power gating\n"); -+ pm_runtime_put_noidle(dev); -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+} -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h -new file mode 100644 -index 0000000..676a4ea ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h -@@ -0,0 +1,38 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2020 - 2024 Intel Corporation -+ */ -+ -+#ifndef IPU6_PPG_H -+#define IPU6_PPG_H -+ -+#include "ipu-psys.h" -+/* starting from '2' in case of someone passes true or false */ -+enum SCHED_LIST { -+ SCHED_START_LIST = 2, -+ SCHED_STOP_LIST -+}; -+ -+enum ipu_psys_power_gating_state { -+ PSYS_POWER_NORMAL = 0, -+ PSYS_POWER_GATING, -+ PSYS_POWER_GATED -+}; -+ -+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, -+ struct ipu_psys_ppg *kppg); -+struct ipu_psys_kcmd 
*ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg); -+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type); -+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, -+ enum SCHED_LIST type); -+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg); -+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg); -+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg); -+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg); -+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg); -+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg); -+void ipu_psys_enter_power_gating(struct ipu_psys *psys); -+void ipu_psys_exit_power_gating(struct ipu_psys *psys); -+ -+#endif /* IPU6_PPG_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c -new file mode 100644 -index 0000000..ff19256 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c -@@ -0,0 +1,209 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#ifdef CONFIG_DEBUG_FS -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-platform-regs.h" -+ -+/* -+ * GPC (Gerneral Performance Counters) -+ */ -+#define IPU_PSYS_GPC_NUM 16 -+ -+#ifndef CONFIG_PM -+#define pm_runtime_get_sync(d) 0 -+#define pm_runtime_put(d) 0 -+#endif -+ -+struct ipu_psys_gpc { -+ bool enable; -+ unsigned int route; -+ unsigned int source; -+ unsigned int sense; -+ unsigned int gpcindex; -+ void *prit; -+}; -+ -+struct ipu_psys_gpcs { -+ bool gpc_enable; -+ struct ipu_psys_gpc gpc[IPU_PSYS_GPC_NUM]; -+ void *prit; -+}; -+ -+static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val) -+{ -+ struct ipu_psys_gpcs *psys_gpcs = data; -+ struct ipu_psys *psys = psys_gpcs->prit; -+ -+ mutex_lock(&psys->mutex); -+ -+ *val = psys_gpcs->gpc_enable; -+ -+ mutex_unlock(&psys->mutex); -+ return 0; -+} -+ -+static int ipu6_psys_gpc_global_enable_set(void 
*data, u64 val) -+{ -+ struct ipu_psys_gpcs *psys_gpcs = data; -+ struct ipu_psys *psys = psys_gpcs->prit; -+ void __iomem *base; -+ int idx, res; -+ -+ if (val != 0 && val != 1) -+ return -EINVAL; -+ -+ if (!psys || !psys->pdata || !psys->pdata->base) -+ return -EINVAL; -+ -+ mutex_lock(&psys->mutex); -+ -+ base = psys->pdata->base + IPU_GPC_BASE; -+ -+ res = pm_runtime_get_sync(&psys->adev->dev); -+ if (res < 0) { -+ pm_runtime_put(&psys->adev->dev); -+ mutex_unlock(&psys->mutex); -+ return res; -+ } -+ -+ if (val == 0) { -+ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); -+ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); -+ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); -+ psys_gpcs->gpc_enable = false; -+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { -+ psys_gpcs->gpc[idx].enable = 0; -+ psys_gpcs->gpc[idx].sense = 0; -+ psys_gpcs->gpc[idx].route = 0; -+ psys_gpcs->gpc[idx].source = 0; -+ } -+ pm_runtime_put(&psys->adev->dev); -+ } else { -+ /* Set gpc reg and start all gpc here. -+ * RST free running local timer. -+ */ -+ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); -+ writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST); -+ -+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { -+ /* Enable */ -+ writel(psys_gpcs->gpc[idx].enable, -+ base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx); -+ /* Setting (route/source/sense) */ -+ writel((psys_gpcs->gpc[idx].sense -+ << IPU_GPC_SENSE_OFFSET) -+ + (psys_gpcs->gpc[idx].route -+ << IPU_GPC_ROUTE_OFFSET) -+ + (psys_gpcs->gpc[idx].source -+ << IPU_GPC_SOURCE_OFFSET), -+ base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx); -+ } -+ -+ /* Soft reset and Overall Enable. 
*/ -+ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); -+ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); -+ writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); -+ -+ psys_gpcs->gpc_enable = true; -+ } -+ -+ mutex_unlock(&psys->mutex); -+ return 0; -+} -+ -+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_globe_enable_fops, -+ ipu6_psys_gpc_global_enable_get, -+ ipu6_psys_gpc_global_enable_set, "%llu\n"); -+ -+static int ipu6_psys_gpc_count_get(void *data, u64 *val) -+{ -+ struct ipu_psys_gpc *psys_gpc = data; -+ struct ipu_psys *psys = psys_gpc->prit; -+ void __iomem *base; -+ int res; -+ -+ if (!psys || !psys->pdata || !psys->pdata->base) -+ return -EINVAL; -+ -+ mutex_lock(&psys->mutex); -+ -+ base = psys->pdata->base + IPU_GPC_BASE; -+ -+ res = pm_runtime_get_sync(&psys->adev->dev); -+ if (res < 0) { -+ pm_runtime_put(&psys->adev->dev); -+ mutex_unlock(&psys->mutex); -+ return res; -+ } -+ -+ *val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex); -+ -+ mutex_unlock(&psys->mutex); -+ return 0; -+} -+ -+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops, -+ ipu6_psys_gpc_count_get, -+ NULL, "%llu\n"); -+ -+int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys) -+{ -+ struct dentry *gpcdir; -+ struct dentry *dir; -+ struct dentry *file; -+ int idx; -+ char gpcname[10]; -+ struct ipu_psys_gpcs *psys_gpcs; -+ -+ psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL); -+ if (!psys_gpcs) -+ return -ENOMEM; -+ -+ gpcdir = debugfs_create_dir("gpc", psys->debugfsdir); -+ if (IS_ERR(gpcdir)) -+ return -ENOMEM; -+ -+ psys_gpcs->prit = psys; -+ file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs, -+ &psys_gpc_globe_enable_fops); -+ if (IS_ERR(file)) -+ goto err; -+ -+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { -+ sprintf(gpcname, "gpc%d", idx); -+ dir = debugfs_create_dir(gpcname, gpcdir); -+ if (IS_ERR(dir)) -+ goto err; -+ -+ debugfs_create_bool("enable", 0600, dir, -+ &psys_gpcs->gpc[idx].enable); -+ -+ debugfs_create_u32("source", 0600, dir, -+ 
&psys_gpcs->gpc[idx].source); -+ -+ debugfs_create_u32("route", 0600, dir, -+ &psys_gpcs->gpc[idx].route); -+ -+ debugfs_create_u32("sense", 0600, dir, -+ &psys_gpcs->gpc[idx].sense); -+ -+ psys_gpcs->gpc[idx].gpcindex = idx; -+ psys_gpcs->gpc[idx].prit = psys; -+ file = debugfs_create_file("count", 0400, dir, -+ &psys_gpcs->gpc[idx], -+ &psys_gpc_count_fops); -+ if (IS_ERR(file)) -+ goto err; -+ } -+ -+ return 0; -+ -+err: -+ debugfs_remove_recursive(gpcdir); -+ return -ENOMEM; -+} -+#endif -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c -new file mode 100644 -index 0000000..281207a ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c -@@ -0,0 +1,1054 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ipu6.h" -+#include "ipu6-dma.h" -+#include "ipu-psys.h" -+#include "ipu6-ppg.h" -+#include "ipu6-platform-regs.h" -+#include "ipu6-platform-buttress-regs.h" -+ -+MODULE_IMPORT_NS("DMA_BUF"); -+ -+static bool early_pg_transfer; -+module_param(early_pg_transfer, bool, 0664); -+MODULE_PARM_DESC(early_pg_transfer, -+ "Copy PGs back to user after resource allocation"); -+ -+bool enable_power_gating = true; -+module_param(enable_power_gating, bool, 0664); -+MODULE_PARM_DESC(enable_power_gating, "enable power gating"); -+ -+static void ipu6_set_sp_info_bits(void __iomem *base) -+{ -+ int i; -+ -+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, -+ base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); -+ -+ for (i = 0; i < 4; i++) -+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, -+ base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); -+ for (i = 0; i < 4; i++) -+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, -+ base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); -+} -+ -+#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000 
-+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ unsigned int i; -+ u32 val; -+ -+ /* power domain req */ -+ dev_dbg(dev, "power %s psys sub-domains", on ? "UP" : "DOWN"); -+ if (on) -+ writel(IPU_PSYS_SUBDOMAINS_POWER_MASK, -+ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); -+ else -+ writel(0x0, -+ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); -+ -+ i = 0; -+ do { -+ usleep_range(10, 20); -+ val = readl(psys->adev->isp->base + -+ IPU_PSYS_SUBDOMAINS_POWER_STATUS); -+ if (!(val & BIT(31))) { -+ dev_dbg(dev, "PS sub-domains req done with status 0x%x", -+ val); -+ break; -+ } -+ i++; -+ } while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT); -+ -+ if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT) -+ dev_warn(dev, "Psys sub-domains %s req timeout!", -+ on ? "UP" : "DOWN"); -+} -+ -+void ipu_psys_setup_hw(struct ipu_psys *psys) -+{ -+ void __iomem *base = psys->pdata->base; -+ void __iomem *spc_regs_base = -+ base + psys->pdata->ipdata->hw_variant.spc_offset; -+ void __iomem *psys_iommu0_ctrl; -+ u32 irqs; -+ const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG; -+ const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR; -+ const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL; -+ -+ if (!psys->adev->isp->secure_mode) { -+ /* configure access blocker for non-secure mode */ -+ writel(NCI_AB_ACCESS_MODE_RW, -+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + -+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3)); -+ writel(NCI_AB_ACCESS_MODE_RW, -+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + -+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4)); -+ writel(NCI_AB_ACCESS_MODE_RW, -+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + -+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5)); -+ } -+ psys_iommu0_ctrl = base + -+ psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + -+ IPU6_MMU_INFO_OFFSET; -+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl); -+ -+ 
ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); -+ ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); -+ -+ /* Enable FW interrupt #0 */ -+ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); -+ irqs = IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); -+ writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); -+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); -+} -+ -+static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_scheduler *sched = &kcmd->fh->sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ -+ mutex_lock(&kcmd->fh->mutex); -+ if (list_empty(&sched->ppgs)) -+ goto not_found; -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ if (ipu_fw_psys_pg_get_token(kcmd) -+ != kppg->token) -+ continue; -+ mutex_unlock(&kcmd->fh->mutex); -+ return kppg; -+ } -+ -+not_found: -+ mutex_unlock(&kcmd->fh->mutex); -+ return NULL; -+} -+ -+/* -+ * Called to free up all resources associated with a kcmd. -+ * After this the kcmd doesn't anymore exist in the driver. 
-+ */ -+static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_scheduler *sched; -+ -+ if (!kcmd) -+ return; -+ -+ kppg = ipu_psys_identify_kppg(kcmd); -+ sched = &kcmd->fh->sched; -+ -+ if (kcmd->kbuf_set) { -+ mutex_lock(&sched->bs_mutex); -+ kcmd->kbuf_set->buf_set_size = 0; -+ mutex_unlock(&sched->bs_mutex); -+ kcmd->kbuf_set = NULL; -+ } -+ -+ if (kppg) { -+ mutex_lock(&kppg->mutex); -+ if (!list_empty(&kcmd->list)) -+ list_del(&kcmd->list); -+ mutex_unlock(&kppg->mutex); -+ } -+ -+ kfree(kcmd->pg_manifest); -+ kfree(kcmd->kbufs); -+ kfree(kcmd->buffers); -+ kfree(kcmd); -+} -+ -+static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, -+ struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_psys_kbuffer *kpgbuf; -+ unsigned int i; -+ int ret, prevfd, fd; -+ -+ fd = -1; -+ prevfd = -1; -+ -+ if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) -+ return NULL; -+ -+ if (!cmd->pg_manifest_size) -+ return NULL; -+ -+ kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); -+ if (!kcmd) -+ return NULL; -+ -+ kcmd->state = KCMD_STATE_PPG_NEW; -+ kcmd->fh = fh; -+ INIT_LIST_HEAD(&kcmd->list); -+ -+ mutex_lock(&fh->mutex); -+ fd = cmd->pg; -+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); -+ if (!kpgbuf || !kpgbuf->sgt) { -+ dev_err(dev, "%s kbuf %p with fd %d not found.\n", -+ __func__, kpgbuf, fd); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ -+ /* check and remap if possible */ -+ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); -+ if (!kpgbuf || !kpgbuf->sgt) { -+ dev_err(dev, "%s remap failed\n", __func__); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ mutex_unlock(&fh->mutex); -+ -+ kcmd->pg_user = kpgbuf->kaddr; -+ kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); -+ if (!kcmd->kpg) -+ goto error; -+ -+ memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); -+ -+ kcmd->pg_manifest = 
kzalloc(cmd->pg_manifest_size, GFP_KERNEL); -+ if (!kcmd->pg_manifest) -+ goto error; -+ -+ ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, -+ cmd->pg_manifest_size); -+ if (ret) -+ goto error; -+ -+ kcmd->pg_manifest_size = cmd->pg_manifest_size; -+ -+ kcmd->user_token = cmd->user_token; -+ kcmd->issue_id = cmd->issue_id; -+ kcmd->priority = cmd->priority; -+ if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) -+ goto error; -+ -+ /* -+ * Kernel enable bitmap be used only. -+ */ -+ memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap, -+ sizeof(cmd->kernel_enable_bitmap)); -+ -+ kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); -+ kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), -+ GFP_KERNEL); -+ if (!kcmd->buffers) -+ goto error; -+ -+ kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), -+ GFP_KERNEL); -+ if (!kcmd->kbufs) -+ goto error; -+ -+ /* should be stop cmd for ppg */ -+ if (!cmd->buffers) { -+ kcmd->state = KCMD_STATE_PPG_STOP; -+ return kcmd; -+ } -+ -+ if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) -+ goto error; -+ -+ ret = copy_from_user(kcmd->buffers, cmd->buffers, -+ kcmd->nbuffers * sizeof(*kcmd->buffers)); -+ if (ret) -+ goto error; -+ -+ for (i = 0; i < kcmd->nbuffers; i++) { -+ struct ipu_fw_psys_terminal *terminal; -+ -+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); -+ if (!terminal) -+ continue; -+ -+ if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) { -+ kcmd->state = KCMD_STATE_PPG_START; -+ continue; -+ } -+ if (kcmd->state == KCMD_STATE_PPG_START) { -+ dev_err(dev, "buffer.flags & DMA_HANDLE must be 0\n"); -+ goto error; -+ } -+ -+ mutex_lock(&fh->mutex); -+ fd = kcmd->buffers[i].base.fd; -+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); -+ if (!kpgbuf || !kpgbuf->sgt) { -+ dev_err(dev, -+ "%s kcmd->buffers[%d] %p fd %d not found.\n", -+ __func__, i, kpgbuf, fd); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ -+ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); -+ if (!kpgbuf || 
!kpgbuf->sgt) { -+ dev_err(dev, "%s remap failed\n", -+ __func__); -+ mutex_unlock(&fh->mutex); -+ goto error; -+ } -+ -+ mutex_unlock(&fh->mutex); -+ kcmd->kbufs[i] = kpgbuf; -+ if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || -+ kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) -+ goto error; -+ -+ if ((kcmd->kbufs[i]->flags & IPU_BUFFER_FLAG_NO_FLUSH) || -+ (kcmd->buffers[i].flags & IPU_BUFFER_FLAG_NO_FLUSH) || -+ prevfd == kcmd->buffers[i].base.fd) -+ continue; -+ -+ prevfd = kcmd->buffers[i].base.fd; -+ -+ /* -+ * TODO: remove exported buffer sync here as the cache -+ * coherency should be done by the exporter -+ */ -+ if (kcmd->kbufs[i]->kaddr) -+ clflush_cache_range(kcmd->kbufs[i]->kaddr, -+ kcmd->kbufs[i]->len); -+ } -+ -+ if (kcmd->state != KCMD_STATE_PPG_START) -+ kcmd->state = KCMD_STATE_PPG_ENQUEUE; -+ -+ return kcmd; -+error: -+ ipu_psys_kcmd_free(kcmd); -+ -+ dev_dbg(dev, "failed to copy cmd\n"); -+ -+ return NULL; -+} -+ -+static struct ipu_psys_buffer_set * -+ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr) -+{ -+ struct ipu_psys_fh *fh; -+ struct ipu_psys_buffer_set *kbuf_set; -+ struct ipu_psys_scheduler *sched; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ sched = &fh->sched; -+ mutex_lock(&sched->bs_mutex); -+ list_for_each_entry(kbuf_set, &sched->buf_sets, list) { -+ if (kbuf_set->buf_set && -+ kbuf_set->buf_set->ipu_virtual_address == addr) { -+ mutex_unlock(&sched->bs_mutex); -+ return kbuf_set; -+ } -+ } -+ mutex_unlock(&sched->bs_mutex); -+ } -+ -+ return NULL; -+} -+ -+static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys, -+ dma_addr_t pg_addr) -+{ -+ struct ipu_psys_scheduler *sched; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_fh *fh; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ sched = &fh->sched; -+ mutex_lock(&fh->mutex); -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ if (pg_addr != 
kppg->kpg->pg_dma_addr) -+ continue; -+ mutex_unlock(&fh->mutex); -+ return kppg; -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return NULL; -+} -+ -+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 -+static void ipu_buttress_set_psys_ratio(struct ipu6_device *isp, -+ unsigned int psys_divisor, -+ unsigned int psys_qos_floor) -+{ -+ struct ipu6_buttress_ctrl *ctrl = isp->psys->ctrl; -+ -+ mutex_lock(&isp->buttress.power_mutex); -+ -+ if (ctrl->ratio == psys_divisor && ctrl->qos_floor == psys_qos_floor) -+ goto out_mutex_unlock; -+ -+ ctrl->ratio = psys_divisor; -+ ctrl->qos_floor = psys_qos_floor; -+ -+ /* -+ * According to documentation driver initiates DVFS -+ * transition by writing wanted ratio, floor ratio and start -+ * bit. No need to stop PS first -+ */ -+ writel(BUTTRESS_FREQ_CTL_START | -+ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | -+ psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); -+ -+out_mutex_unlock: -+ mutex_unlock(&isp->buttress.power_mutex); -+} -+ -+static void ipu_buttress_set_psys_freq(struct ipu6_device *isp, -+ unsigned int freq) -+{ -+ unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; -+ -+ dev_dbg(&isp->psys->auxdev.dev, "freq:%u\n", freq); -+ -+ ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); -+} -+ -+static void -+ipu_buttress_add_psys_constraint(struct ipu6_device *isp, -+ struct ipu6_psys_constraint *constraint) -+{ -+ struct ipu6_buttress *b = &isp->buttress; -+ -+ mutex_lock(&b->cons_mutex); -+ list_add(&constraint->list, &b->constraints); -+ mutex_unlock(&b->cons_mutex); -+} -+ -+static void -+ipu_buttress_remove_psys_constraint(struct ipu6_device *isp, -+ struct ipu6_psys_constraint *constraint) -+{ -+ struct ipu6_buttress *b = &isp->buttress; -+ struct ipu6_psys_constraint *c; -+ unsigned int min_freq = 0; -+ -+ mutex_lock(&b->cons_mutex); -+ list_del(&constraint->list); -+ -+ list_for_each_entry(c, &b->constraints, list) -+ if (c->min_freq > min_freq) -+ min_freq = c->min_freq; -+ -+ 
ipu_buttress_set_psys_freq(isp, min_freq); -+ mutex_unlock(&b->cons_mutex); -+} -+ -+/* -+ * Move kcmd into completed state (due to running finished or failure). -+ * Fill up the event struct and notify waiters. -+ */ -+void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, -+ struct ipu_psys_kcmd *kcmd, int error) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ -+ kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; -+ kcmd->ev.user_token = kcmd->user_token; -+ kcmd->ev.issue_id = kcmd->issue_id; -+ kcmd->ev.error = error; -+ list_move_tail(&kcmd->list, &kppg->kcmds_finished_list); -+ -+ if (kcmd->constraint.min_freq) -+ ipu_buttress_remove_psys_constraint(psys->adev->isp, -+ &kcmd->constraint); -+ -+ if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { -+ struct ipu_psys_kbuffer *kbuf; -+ -+ kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, -+ kcmd->pg_user); -+ if (kbuf && kbuf->valid) -+ memcpy(kcmd->pg_user, -+ kcmd->kpg->pg, kcmd->kpg->pg_size); -+ else -+ dev_dbg(dev, "Skipping unmapped buffer\n"); -+ } -+ -+ kcmd->state = KCMD_STATE_PPG_COMPLETE; -+ wake_up_interruptible(&fh->wait); -+} -+ -+/* -+ * Submit kcmd into psys queue. If running fails, complete the kcmd -+ * with an error. -+ * -+ * Found a runnable PG. Move queue to the list tail for round-robin -+ * scheduling and run the PG. Start the watchdog timer if the PG was -+ * started successfully. Enable PSYS power if requested. 
-+ */ -+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ int ret; -+ -+ if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) -+ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); -+ -+ ret = ipu_fw_psys_pg_start(kcmd); -+ if (ret) { -+ dev_err(dev, "failed to start kcmd!\n"); -+ return ret; -+ } -+ -+ ipu_fw_psys_pg_dump(psys, kcmd, "run"); -+ -+ ret = ipu_fw_psys_pg_disown(kcmd); -+ if (ret) { -+ dev_err(dev, "failed to start kcmd!\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_resource_pool *rpr; -+ int queue_id; -+ int ret; -+ -+ rpr = &psys->res_pool_running; -+ -+ kppg = kzalloc(sizeof(*kppg), GFP_KERNEL); -+ if (!kppg) -+ return -ENOMEM; -+ -+ kppg->fh = fh; -+ kppg->kpg = kcmd->kpg; -+ kppg->state = PPG_STATE_START; -+ kppg->pri_base = kcmd->priority; -+ kppg->pri_dynamic = 0; -+ INIT_LIST_HEAD(&kppg->list); -+ -+ mutex_init(&kppg->mutex); -+ INIT_LIST_HEAD(&kppg->kcmds_new_list); -+ INIT_LIST_HEAD(&kppg->kcmds_processing_list); -+ INIT_LIST_HEAD(&kppg->kcmds_finished_list); -+ INIT_LIST_HEAD(&kppg->sched_list); -+ -+ kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL); -+ if (!kppg->manifest) { -+ kfree(kppg); -+ return -ENOMEM; -+ } -+ memcpy(kppg->manifest, kcmd->pg_manifest, -+ kcmd->pg_manifest_size); -+ -+ queue_id = ipu_psys_allocate_cmd_queue_res(rpr); -+ if (queue_id == -ENOSPC) { -+ dev_err(dev, "no available queue\n"); -+ kfree(kppg->manifest); -+ kfree(kppg); -+ mutex_unlock(&psys->mutex); -+ return -ENOMEM; -+ } -+ -+ /* -+ * set token as start cmd will immediately be followed by a -+ * enqueue cmd so that kppg could be retrieved. 
-+ */ -+ kppg->token = (u64)kcmd->kpg; -+ ipu_fw_psys_pg_set_token(kcmd, kppg->token); -+ ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id); -+ ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, -+ kcmd->kpg->pg_dma_addr); -+ if (ret) { -+ ipu_psys_free_cmd_queue_res(rpr, queue_id); -+ -+ kfree(kppg->manifest); -+ kfree(kppg); -+ return -EIO; -+ } -+ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); -+ -+ mutex_lock(&fh->mutex); -+ list_add_tail(&kppg->list, &sched->ppgs); -+ mutex_unlock(&fh->mutex); -+ -+ mutex_lock(&kppg->mutex); -+ list_add(&kcmd->list, &kppg->kcmds_new_list); -+ mutex_unlock(&kppg->mutex); -+ -+ dev_dbg(dev, "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n", -+ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id); -+ -+ /* Kick l-scheduler thread */ -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ -+ return 0; -+} -+ -+static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_fh *fh = kcmd->fh; -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg; -+ struct ipu_psys_resource_pool *rpr; -+ unsigned long flags; -+ u8 id; -+ bool resche = true; -+ -+ rpr = &psys->res_pool_running; -+ if (kcmd->state == KCMD_STATE_PPG_START) -+ return ipu_psys_kcmd_send_to_ppg_start(kcmd); -+ -+ kppg = ipu_psys_identify_kppg(kcmd); -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ kcmd->kpg->pg_size = 0; -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ if (!kppg) { -+ dev_err(dev, "token not match\n"); -+ return -EINVAL; -+ } -+ -+ kcmd->kpg = kppg->kpg; -+ -+ dev_dbg(dev, "%s ppg(%d, 0x%p) kcmd %p\n", -+ (kcmd->state == KCMD_STATE_PPG_STOP) ? 
"STOP" : "ENQUEUE", -+ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd); -+ -+ if (kcmd->state == KCMD_STATE_PPG_STOP) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_STOPPED) { -+ dev_dbg(dev, "kppg 0x%p stopped!\n", kppg); -+ id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); -+ ipu_psys_free_cmd_queue_res(rpr, id); -+ ipu_psys_kcmd_complete(kppg, kcmd, 0); -+ pm_runtime_put(dev); -+ resche = false; -+ } else { -+ list_add(&kcmd->list, &kppg->kcmds_new_list); -+ } -+ mutex_unlock(&kppg->mutex); -+ } else { -+ int ret; -+ -+ ret = ipu_psys_ppg_get_bufset(kcmd, kppg); -+ if (ret) -+ return ret; -+ -+ mutex_lock(&kppg->mutex); -+ list_add_tail(&kcmd->list, &kppg->kcmds_new_list); -+ mutex_unlock(&kppg->mutex); -+ } -+ -+ if (resche) { -+ /* Kick l-scheduler thread */ -+ atomic_set(&psys->wakeup_count, 1); -+ wake_up_interruptible(&psys->sched_cmd_wq); -+ } -+ return 0; -+} -+ -+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ size_t pg_size; -+ int ret; -+ -+ kcmd = ipu_psys_copy_cmd(cmd, fh); -+ if (!kcmd) -+ return -EINVAL; -+ -+ pg_size = ipu_fw_psys_pg_get_size(kcmd); -+ if (pg_size > kcmd->kpg->pg_size) { -+ dev_dbg(dev, "pg size mismatch %lu %lu\n", pg_size, -+ kcmd->kpg->pg_size); -+ ret = -EINVAL; -+ goto error; -+ } -+ -+ if (ipu_fw_psys_pg_get_protocol(kcmd) != -+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG) { -+ dev_err(dev, "No support legacy pg now\n"); -+ ret = -EINVAL; -+ goto error; -+ } -+ -+ if (cmd->min_psys_freq) { -+ kcmd->constraint.min_freq = cmd->min_psys_freq; -+ ipu_buttress_add_psys_constraint(psys->adev->isp, -+ &kcmd->constraint); -+ } -+ -+ ret = ipu_psys_kcmd_send_to_ppg(kcmd); -+ if (ret) -+ goto error; -+ -+ dev_dbg(dev, "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", -+ cmd->user_token, cmd->issue_id, cmd->priority); -+ -+ return 0; -+ -+error: -+ ipu_psys_kcmd_free(kcmd); -+ -+ 
return ret; -+} -+ -+static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, -+ struct ipu_psys_kcmd *kcmd) -+{ -+ struct ipu_psys_fh *fh; -+ struct ipu_psys_kcmd *kcmd0; -+ struct ipu_psys_ppg *kppg, *tmp; -+ struct ipu_psys_scheduler *sched; -+ -+ list_for_each_entry(fh, &psys->fhs, list) { -+ sched = &fh->sched; -+ mutex_lock(&fh->mutex); -+ if (list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ continue; -+ } -+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ list_for_each_entry(kcmd0, -+ &kppg->kcmds_processing_list, -+ list) { -+ if (kcmd0 == kcmd) { -+ mutex_unlock(&kppg->mutex); -+ mutex_unlock(&fh->mutex); -+ return true; -+ } -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ mutex_unlock(&fh->mutex); -+ } -+ -+ return false; -+} -+ -+void ipu_psys_handle_events(struct ipu_psys *psys) -+{ -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_fw_psys_event event; -+ struct ipu_psys_ppg *kppg; -+ bool error; -+ u32 hdl; -+ u16 cmd, status; -+ int res; -+ -+ do { -+ memset(&event, 0, sizeof(event)); -+ if (!ipu_fw_psys_rcv_event(psys, &event)) -+ break; -+ -+ if (!event.context_handle) -+ break; -+ -+ dev_dbg(dev, "ppg event: 0x%x, %d, status %d\n", -+ event.context_handle, event.command, event.status); -+ -+ error = false; -+ /* -+ * event.command == CMD_RUN shows this is fw processing frame -+ * done as pPG mode, and event.context_handle should be pointer -+ * of buffer set; so we make use of this pointer to lookup -+ * kbuffer_set and kcmd -+ */ -+ hdl = event.context_handle; -+ cmd = event.command; -+ status = event.status; -+ -+ kppg = NULL; -+ kcmd = NULL; -+ if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) { -+ struct ipu_psys_buffer_set *kbuf_set; -+ /* -+ * Need change ppg state when the 1st running is done -+ * (after PPG started/resumed) -+ */ -+ kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl); -+ if (kbuf_set) -+ kcmd = kbuf_set->kcmd; -+ if (!kbuf_set || !kcmd) 
-+ error = true; -+ else -+ kppg = ipu_psys_identify_kppg(kcmd); -+ } else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP || -+ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND || -+ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) { -+ /* -+ * STOP/SUSPEND/RESUME cmd event would run this branch; -+ * only stop cmd queued by user has stop_kcmd and need -+ * to notify user to dequeue. -+ */ -+ kppg = ipu_psys_lookup_ppg(psys, hdl); -+ if (kppg) { -+ mutex_lock(&kppg->mutex); -+ if (kppg->state == PPG_STATE_STOPPING) { -+ kcmd = ipu_psys_ppg_get_stop_kcmd(kppg); -+ if (!kcmd) -+ error = true; -+ } -+ mutex_unlock(&kppg->mutex); -+ } -+ } else { -+ dev_err(dev, "invalid event\n"); -+ continue; -+ } -+ -+ if (error || !kppg) { -+ dev_err(dev, "event error, command %d\n", cmd); -+ break; -+ } -+ -+ dev_dbg(dev, "event to kppg 0x%p, kcmd 0x%p\n", kppg, kcmd); -+ -+ ipu_psys_ppg_complete(psys, kppg); -+ -+ if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) { -+ res = (status == IPU_PSYS_EVENT_CMD_COMPLETE || -+ status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ? 
-+ 0 : -EIO; -+ mutex_lock(&kppg->mutex); -+ ipu_psys_kcmd_complete(kppg, kcmd, res); -+ mutex_unlock(&kppg->mutex); -+ } -+ } while (1); -+} -+ -+int ipu_psys_fh_init(struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp; -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ int i; -+ -+ mutex_init(&sched->bs_mutex); -+ INIT_LIST_HEAD(&sched->buf_sets); -+ INIT_LIST_HEAD(&sched->ppgs); -+ -+ /* allocate and map memory for buf_sets */ -+ for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) { -+ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); -+ if (!kbuf_set) -+ goto out_free_buf_sets; -+ kbuf_set->kaddr = ipu6_dma_alloc(psys->adev, -+ IPU_PSYS_BUF_SET_MAX_SIZE, -+ &kbuf_set->dma_addr, -+ GFP_KERNEL, 0); -+ if (!kbuf_set->kaddr) { -+ kfree(kbuf_set); -+ goto out_free_buf_sets; -+ } -+ kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE; -+ list_add(&kbuf_set->list, &sched->buf_sets); -+ } -+ -+ return 0; -+ -+out_free_buf_sets: -+ list_for_each_entry_safe(kbuf_set, kbuf_set_tmp, -+ &sched->buf_sets, list) { -+ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, -+ kbuf_set->dma_addr, 0); -+ list_del(&kbuf_set->list); -+ kfree(kbuf_set); -+ } -+ mutex_destroy(&sched->bs_mutex); -+ -+ return -ENOMEM; -+} -+ -+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_ppg *kppg, *kppg0; -+ struct ipu_psys_kcmd *kcmd, *kcmd0; -+ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0; -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ struct ipu_psys_resource_pool *rpr; -+ struct ipu_psys_resource_alloc *alloc; -+ u8 id; -+ -+ mutex_lock(&fh->mutex); -+ if (!list_empty(&sched->ppgs)) { -+ list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) { -+ unsigned long flags; -+ -+ mutex_lock(&kppg->mutex); -+ if (!(kppg->state & -+ (PPG_STATE_STOPPED | -+ PPG_STATE_STOPPING))) { -+ struct ipu_psys_kcmd tmp = { -+ .kpg = 
kppg->kpg, -+ }; -+ -+ rpr = &psys->res_pool_running; -+ alloc = &kppg->kpg->resource_alloc; -+ id = ipu_fw_psys_ppg_get_base_queue_id(&tmp); -+ ipu_psys_ppg_stop(kppg); -+ ipu_psys_free_resources(alloc, rpr); -+ ipu_psys_free_cmd_queue_res(rpr, id); -+ dev_dbg(dev, -+ "s_change:%s %p %d -> %d\n", -+ __func__, kppg, kppg->state, -+ PPG_STATE_STOPPED); -+ kppg->state = PPG_STATE_STOPPED; -+ if (psys->power_gating != PSYS_POWER_GATED) -+ pm_runtime_put(dev); -+ } -+ list_del(&kppg->list); -+ mutex_unlock(&kppg->mutex); -+ -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_new_list, list) { -+ kcmd->pg_user = NULL; -+ mutex_unlock(&fh->mutex); -+ ipu_psys_kcmd_free(kcmd); -+ mutex_lock(&fh->mutex); -+ } -+ -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_processing_list, -+ list) { -+ kcmd->pg_user = NULL; -+ mutex_unlock(&fh->mutex); -+ ipu_psys_kcmd_free(kcmd); -+ mutex_lock(&fh->mutex); -+ } -+ -+ list_for_each_entry_safe(kcmd, kcmd0, -+ &kppg->kcmds_finished_list, -+ list) { -+ kcmd->pg_user = NULL; -+ mutex_unlock(&fh->mutex); -+ ipu_psys_kcmd_free(kcmd); -+ mutex_lock(&fh->mutex); -+ } -+ -+ spin_lock_irqsave(&psys->pgs_lock, flags); -+ kppg->kpg->pg_size = 0; -+ spin_unlock_irqrestore(&psys->pgs_lock, flags); -+ -+ mutex_destroy(&kppg->mutex); -+ kfree(kppg->manifest); -+ kfree(kppg); -+ } -+ } -+ mutex_unlock(&fh->mutex); -+ -+ mutex_lock(&sched->bs_mutex); -+ list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) { -+ ipu6_dma_free(psys->adev, kbuf_set->size, kbuf_set->kaddr, -+ kbuf_set->dma_addr, 0); -+ list_del(&kbuf_set->list); -+ kfree(kbuf_set); -+ } -+ mutex_unlock(&sched->bs_mutex); -+ mutex_destroy(&sched->bs_mutex); -+ -+ return 0; -+} -+ -+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh) -+{ -+ struct ipu_psys_scheduler *sched = &fh->sched; -+ struct device *dev = &fh->psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd; -+ struct ipu_psys_ppg *kppg; -+ -+ mutex_lock(&fh->mutex); -+ if 
(list_empty(&sched->ppgs)) { -+ mutex_unlock(&fh->mutex); -+ return NULL; -+ } -+ -+ list_for_each_entry(kppg, &sched->ppgs, list) { -+ mutex_lock(&kppg->mutex); -+ if (list_empty(&kppg->kcmds_finished_list)) { -+ mutex_unlock(&kppg->mutex); -+ continue; -+ } -+ -+ kcmd = list_first_entry(&kppg->kcmds_finished_list, -+ struct ipu_psys_kcmd, list); -+ mutex_unlock(&fh->mutex); -+ mutex_unlock(&kppg->mutex); -+ dev_dbg(dev, "get completed kcmd 0x%p\n", kcmd); -+ return kcmd; -+ } -+ mutex_unlock(&fh->mutex); -+ -+ return NULL; -+} -+ -+long ipu_ioctl_dqevent(struct ipu_psys_event *event, -+ struct ipu_psys_fh *fh, unsigned int f_flags) -+{ -+ struct ipu_psys *psys = fh->psys; -+ struct device *dev = &psys->adev->auxdev.dev; -+ struct ipu_psys_kcmd *kcmd = NULL; -+ int rval; -+ -+ dev_dbg(dev, "IOC_DQEVENT\n"); -+ -+ if (!(f_flags & O_NONBLOCK)) { -+ rval = wait_event_interruptible(fh->wait, -+ (kcmd = -+ ipu_get_completed_kcmd(fh))); -+ if (rval == -ERESTARTSYS) -+ return rval; -+ } -+ -+ if (!kcmd) { -+ kcmd = ipu_get_completed_kcmd(fh); -+ if (!kcmd) -+ return -ENODATA; -+ } -+ -+ *event = kcmd->ev; -+ ipu_psys_kcmd_free(kcmd); -+ -+ return 0; -+} -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c -new file mode 100644 -index 0000000..4a1a869 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c -@@ -0,0 +1,393 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2020 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include "ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6-platform-resources.h" -+#include "ipu6ep-platform-resources.h" -+ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+static const u8 ipu6ep_fw_psys_cell_types[IPU6EP_FW_PSYS_N_CELL_ID] = { -+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID, -+ IPU6_FW_PSYS_VP_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ 
IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* AF */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ -+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ -+ IPU6_FW_PSYS_GDC_TYPE_ID, -+ IPU6_FW_PSYS_TNR_TYPE_ID, -+}; -+ -+static const u16 ipu6ep_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, -+}; -+ -+static const u16 ipu6ep_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { -+ IPU6_FW_PSYS_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, -+ IPU6_FW_PSYS_BAMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM0_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM1_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM2_MAX_SIZE, -+ IPU6_FW_PSYS_DMEM3_MAX_SIZE, -+ IPU6_FW_PSYS_PMEM0_MAX_SIZE -+}; -+ -+static const u16 ipu6ep_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { -+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, -+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, -+}; -+ -+static 
const u8 -+ipu6ep_fw_psys_c_mem[IPU6EP_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { -+ { -+ /* IPU6_FW_PSYS_SP0_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM0_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_SP1_ID */ -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_DMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_VP0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_DMEM3_ID, -+ IPU6_FW_PSYS_VMEM0_ID, -+ IPU6_FW_PSYS_BAMEM0_ID, -+ IPU6_FW_PSYS_PMEM0_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC1_ID BNLM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC2_ID DM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC3_ID ACM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC9_ID GLTM */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ACC10_ID XNR */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_ICA_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_LSC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_DPC_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_SIS_B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2B_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AWB_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AE_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_AF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ 
IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_ISA_PAF_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_LB_VMEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ }, -+ { -+ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ -+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ IPU6_FW_PSYS_N_MEM_ID, -+ } -+}; -+ -+static const struct ipu_fw_resource_definitions ipu6ep_defs = { -+ .cells = ipu6ep_fw_psys_cell_types, -+ .num_cells = IPU6EP_FW_PSYS_N_CELL_ID, -+ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, -+ -+ .dev_channels = ipu6ep_fw_num_dev_channels, -+ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, -+ -+ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, -+ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, -+ .ext_mem_ids = ipu6ep_fw_psys_mem_size, -+ -+ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, -+ -+ .dfms = ipu6ep_fw_psys_dfms, -+ -+ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, -+ .cell_mem = &ipu6ep_fw_psys_c_mem[0][0], -+}; -+ -+const struct ipu_fw_resource_definitions *ipu6ep_res_defs = &ipu6ep_defs; -diff --git 
a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h -new file mode 100644 -index 0000000..8420149 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6ep-platform-resources.h -@@ -0,0 +1,42 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2020 - 2024 Intel Corporation */ -+ -+#ifndef IPU6EP_PLATFORM_RESOURCES_H -+#define IPU6EP_PLATFORM_RESOURCES_H -+ -+#include -+#include -+ -+enum { -+ IPU6EP_FW_PSYS_SP0_ID = 0, -+ IPU6EP_FW_PSYS_VP0_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_BNLM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_DM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_ACM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_GTC_YUV1_ID, -+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, -+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, -+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_GAMMASTAR_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_GLTM_ID, -+ IPU6EP_FW_PSYS_PSA_ACC_XNR_ID, -+ IPU6EP_FW_PSYS_PSA_VCSC_ID, /* VCSC */ -+ IPU6EP_FW_PSYS_ISA_ICA_ID, -+ IPU6EP_FW_PSYS_ISA_LSC_ID, -+ IPU6EP_FW_PSYS_ISA_DPC_ID, -+ IPU6EP_FW_PSYS_ISA_SIS_A_ID, -+ IPU6EP_FW_PSYS_ISA_SIS_B_ID, -+ IPU6EP_FW_PSYS_ISA_B2B_ID, -+ IPU6EP_FW_PSYS_ISA_B2R_R2I_SIE_ID, -+ IPU6EP_FW_PSYS_ISA_R2I_DS_A_ID, -+ IPU6EP_FW_PSYS_ISA_AWB_ID, -+ IPU6EP_FW_PSYS_ISA_AE_ID, -+ IPU6EP_FW_PSYS_ISA_AF_ID, -+ IPU6EP_FW_PSYS_ISA_X2B_MD_ID, -+ IPU6EP_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, -+ IPU6EP_FW_PSYS_ISA_PAF_ID, -+ IPU6EP_FW_PSYS_BB_ACC_GDC0_ID, -+ IPU6EP_FW_PSYS_BB_ACC_TNR_ID, -+ IPU6EP_FW_PSYS_N_CELL_ID -+}; -+#endif /* IPU6EP_PLATFORM_RESOURCES_H */ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c -new file mode 100644 -index 0000000..41b6ba6 ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c -@@ -0,0 +1,194 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// Copyright (C) 2015 - 2024 Intel Corporation -+ -+#include -+#include -+ -+#include 
"ipu-psys.h" -+#include "ipu-fw-psys.h" -+#include "ipu6se-platform-resources.h" -+ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+/* resources table */ -+ -+/* -+ * Cell types by cell IDs -+ */ -+static const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = { -+ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID, -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID*/ -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID /* PAF */ -+}; -+ -+static const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = { -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, -+}; -+ -+static const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = { -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, -+ IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE, -+ IPU6SE_FW_PSYS_DMEM0_MAX_SIZE, -+ IPU6SE_FW_PSYS_DMEM1_MAX_SIZE -+}; -+ -+static const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = { -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE -+}; -+ -+static const u8 -+ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = { -+ { /* 
IPU6SE_FW_PSYS_SP0_ID */ -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_DMEM0_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_ICA_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_LSC_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_DPC_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_B2B_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ -+ { /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_DM_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* 
IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_AWB_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_AE_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_AF_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ }, -+ { /* IPU6SE_FW_PSYS_ISA_PAF_ID */ -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, -+ IPU6SE_FW_PSYS_LB_VMEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ IPU6SE_FW_PSYS_N_MEM_ID, -+ } -+}; -+ -+static const struct ipu_fw_resource_definitions ipu6se_defs = { -+ .cells = ipu6se_fw_psys_cell_types, -+ .num_cells = IPU6SE_FW_PSYS_N_CELL_ID, -+ .num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID, -+ -+ .dev_channels = ipu6se_fw_num_dev_channels, -+ .num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID, -+ -+ .num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID, -+ .num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID, -+ .ext_mem_ids = ipu6se_fw_psys_mem_size, -+ -+ .num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID, -+ -+ .dfms = ipu6se_fw_psys_dfms, -+ -+ .cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID, -+ .cell_mem = &ipu6se_fw_psys_c_mem[0][0], -+}; -+ -+const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h -new file mode 
100644 -index 0000000..81c618d ---- /dev/null -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu6se-platform-resources.h -@@ -0,0 +1,103 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* Copyright (C) 2018 - 2024 Intel Corporation */ -+ -+#ifndef IPU6SE_PLATFORM_RESOURCES_H -+#define IPU6SE_PLATFORM_RESOURCES_H -+ -+#include -+#include -+#include "ipu-platform-resources.h" -+ -+#define IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 1 -+ -+enum { -+ IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, -+ IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, -+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, -+ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, -+ IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_DMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_VMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_BAMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_PMEM_TYPE_ID, -+ IPU6SE_FW_PSYS_N_MEM_TYPE_ID -+}; -+ -+enum ipu6se_mem_id { -+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0, /* TRANSFER VMEM 0 */ -+ IPU6SE_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ -+ IPU6SE_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ -+ IPU6SE_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ -+ IPU6SE_FW_PSYS_N_MEM_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, -+ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID, -+ IPU6SE_FW_PSYS_N_DEV_CHN_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0, -+ IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID, -+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, -+ IPU6SE_FW_PSYS_N_CELL_TYPE_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_SP0_ID = 0, -+ IPU6SE_FW_PSYS_ISA_ICA_ID, -+ IPU6SE_FW_PSYS_ISA_LSC_ID, -+ IPU6SE_FW_PSYS_ISA_DPC_ID, -+ IPU6SE_FW_PSYS_ISA_B2B_ID, -+ IPU6SE_FW_PSYS_ISA_BNLM_ID, -+ IPU6SE_FW_PSYS_ISA_DM_ID, -+ IPU6SE_FW_PSYS_ISA_R2I_SIE_ID, 
-+ IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID, -+ IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID, -+ IPU6SE_FW_PSYS_ISA_AWB_ID, -+ IPU6SE_FW_PSYS_ISA_AE_ID, -+ IPU6SE_FW_PSYS_ISA_AF_ID, -+ IPU6SE_FW_PSYS_ISA_PAF_ID, -+ IPU6SE_FW_PSYS_N_CELL_ID -+}; -+ -+enum { -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0, -+ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, -+}; -+ -+/* Excluding PMEM */ -+#define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1) -+#define IPU6SE_FW_PSYS_N_DEV_DFM_ID \ -+ (IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1) -+#define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE 0x0800 -+/* Transfer VMEM0 words, ref HAS Transfer*/ -+#define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 -+#define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ -+#define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE 0x4000 -+#define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE 0x1000 -+ -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 22 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 22 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 22 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 -+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 -+ -+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 -+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 -+#define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 -+ -+#endif /* IPU6SE_PLATFORM_RESOURCES_H */ --- -2.43.0 - diff --git a/patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch b/patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch deleted file mode 100644 index 7ecfc93..0000000 --- a/patches/0070-media-intel-ipu6-support-IPU6-PSYS-FW-trace-dump-for.patch +++ /dev/null @@ -1,134 +0,0 @@ -From f365735ea4b498870c456759fc756a1a332faaab Mon Sep 17 00:00:00 2001 -From: hepengpx -Date: Fri, 16 Jan 2026 02:01:37 -0700 -Subject: [PATCH 70/77] media: intel-ipu6: support IPU6 PSYS FW trace dump for - upstream driver - -[media][pci]Support IPU6 
PSYS FW trace dump for upstream driver -Signed-off-by: hepengpx ---- - .../drivers/media/pci/intel/ipu6/ipu6-trace.c | 6 +++ - .../drivers/media/pci/intel/ipu6/ipu6-trace.h | 1 + - .../media/pci/intel/ipu6/psys/ipu-psys.c | 44 ++++++++++++++++++- - 3 files changed, 50 insertions(+), 1 deletion(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -index adcee20..077f140 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.c -@@ -891,6 +891,12 @@ int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle) - } - EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); - -+bool is_ipu_trace_enable(void) -+{ -+ return ipu_trace_enable; -+} -+EXPORT_SYMBOL_GPL(is_ipu_trace_enable); -+ - MODULE_AUTHOR("Samu Onkalo "); - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel ipu trace support"); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -index f66d889..fe89d1b 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-trace.h -@@ -144,4 +144,5 @@ void ipu_trace_restore(struct device *dev); - void ipu_trace_uninit(struct device *dev); - void ipu_trace_stop(struct device *dev); - int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); -+bool is_ipu_trace_enable(void); - #endif -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -index dcd0dce..6faa582 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/psys/ipu-psys.c -@@ -31,6 +31,7 @@ - #include "ipu-psys.h" - #include "ipu6-platform-regs.h" - #include "ipu6-fw-com.h" -+#include "ipu6-trace.h" - - static bool async_fw_init; - module_param(async_fw_init, bool, 0664); -@@ -1201,6 +1202,8 @@ static int ipu_psys_sched_cmd(void 
*ptr) - return 0; - } - -+#include "../ipu6-trace.h" -+ - static void start_sp(struct ipu6_bus_device *adev) - { - struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); -@@ -1211,7 +1214,7 @@ static void start_sp(struct ipu6_bus_device *adev) - val |= IPU6_PSYS_SPC_STATUS_START | - IPU6_PSYS_SPC_STATUS_RUN | - IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; -- val |= psys->icache_prefetch_sp ? -+ val |= (psys->icache_prefetch_sp || is_ipu_trace_enable()) ? - IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; - writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); - } -@@ -1304,6 +1307,40 @@ static void run_fw_init_work(struct work_struct *work) - } - } - -+struct ipu_trace_block psys_trace_blocks[] = { -+ { -+ .offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE, -+ .type = IPU_TRACE_BLOCK_TUN, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE, -+ .type = IPU_TRACE_BLOCK_TM, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE, -+ .type = IPU_TRACE_BLOCK_TM, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPC_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_MMU_GPC_BASE, -+ .type = IPU_TRACE_BLOCK_GPC, -+ }, -+ { -+ .offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, -+ .type = IPU_TRACE_TIMER_RST, -+ }, -+ { -+ .type = IPU_TRACE_BLOCK_END, -+ } -+}; -+ - static int ipu6_psys_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *auxdev_id) - { -@@ -1442,6 +1479,9 @@ static int ipu6_psys_probe(struct auxiliary_device *auxdev, - strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, - sizeof(psys->caps.dev_model)); - -+ ipu_trace_init(adev->isp, psys->pdata->base, &auxdev->dev, -+ psys_trace_blocks); -+ - mutex_unlock(&ipu_psys_mutex); - - dev_info(dev, "psys probe minor: %d\n", minor); -@@ -1503,6 +1543,8 @@ static void ipu6_psys_remove(struct auxiliary_device *auxdev) - - clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); - -+ 
ipu_trace_uninit(&auxdev->dev); -+ - mutex_unlock(&ipu_psys_mutex); - - mutex_destroy(&psys->mutex); --- -2.43.0 - diff --git a/patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch b/patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch deleted file mode 100644 index 0e273f9..0000000 --- a/patches/0071-Revert-media-intel-ipu6-Constify-ipu6_buttress_ctrl-.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 73f893117a273db351a6a2cf50b817b6ea3f1735 Mon Sep 17 00:00:00 2001 -From: Florent Pirou -Date: Fri, 16 Jan 2026 08:41:24 -0700 -Subject: [PATCH 71/77] Revert "media: intel/ipu6: Constify ipu6_buttress_ctrl - structure" - -This reverts commit 6ad57f8f86de024d10c764f829a50530e7121958. - -Signed-off-by: Florent Pirou ---- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c | 2 +- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h | 4 ++-- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c | 4 ++-- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h | 4 ++-- - 4 files changed, 7 insertions(+), 7 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c -index 5cee274..37d88dd 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.c -@@ -82,7 +82,7 @@ static void ipu6_bus_release(struct device *dev) - - struct ipu6_bus_device * - ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, -- void *pdata, const struct ipu6_buttress_ctrl *ctrl, -+ void *pdata, struct ipu6_buttress_ctrl *ctrl, - char *name) - { - struct auxiliary_device *auxdev; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -index 16be932..73dcf73 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-bus.h -@@ -27,7 +27,7 @@ struct ipu6_bus_device { - struct ipu6_mmu *mmu; - struct ipu6_device *isp; - struct 
ipu_subsystem_trace_config *trace_cfg; -- const struct ipu6_buttress_ctrl *ctrl; -+ struct ipu6_buttress_ctrl *ctrl; - const struct firmware *fw; - struct sg_table fw_sgt; - u64 *pkg_dir; -@@ -49,7 +49,7 @@ struct ipu6_auxdrv_data { - - struct ipu6_bus_device * - ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, -- void *pdata, const struct ipu6_buttress_ctrl *ctrl, -+ void *pdata, struct ipu6_buttress_ctrl *ctrl, - char *name); - int ipu6_bus_add_device(struct ipu6_bus_device *adev); - void ipu6_bus_del_devices(struct pci_dev *pdev); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -index c243142..0a7b019 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.c -@@ -445,8 +445,8 @@ irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) - return ret; - } - --int ipu6_buttress_power(struct device *dev, -- const struct ipu6_buttress_ctrl *ctrl, bool on) -+int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, -+ bool on) - { - struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; - u32 pwr_sts, val; -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -index 8bc5eeb..3b22dfb 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-buttress.h -@@ -65,8 +65,8 @@ int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, - struct sg_table *sgt); - void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, - struct sg_table *sgt); --int ipu6_buttress_power(struct device *dev, -- const struct ipu6_buttress_ctrl *ctrl, bool on); -+int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, -+ bool on); - bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp); - int ipu6_buttress_authenticate(struct ipu6_device *isp); - int 
ipu6_buttress_reset_authentication(struct ipu6_device *isp); --- -2.43.0 - diff --git a/patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch b/patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch deleted file mode 100644 index b04042c..0000000 --- a/patches/0077-ipu6-dkms-add-isys-makefile-adaptation-6.17.patch +++ /dev/null @@ -1,48 +0,0 @@ -From df2a50dc1637fc0a2db4093eefa9d8f22f4ff5df Mon Sep 17 00:00:00 2001 -From: florent pirou -Date: Thu, 4 Dec 2025 00:50:25 -0700 -Subject: [PATCH 77/78] ipu6-dkms: add isys makefile adaptation 6.17 - -Signed-off-by: florent pirou ---- - 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 5 +++++ - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c | 1 + - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 1 + - 3 files changed, 7 insertions(+) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -index 67c13c9..3e2ce53 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -@@ -1,5 +1,10 @@ - # SPDX-License-Identifier: GPL-2.0-only - -+CC := ${CC} -I ${M}/../../../include-overrides -+ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" -+ -+export CONFIG_VIDEO_INTEL_IPU6 = m -+ - intel-ipu6-y := ipu6.o \ - ipu6-bus.o \ - ipu6-dma.o \ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -index 7d10583..eaa7e68 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.c -@@ -1757,3 +1757,4 @@ MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 input system driver"); - MODULE_IMPORT_NS("INTEL_IPU6"); - MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 291b745..56be152 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ 
b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -959,3 +959,4 @@ MODULE_AUTHOR("Yunliang Ding "); - MODULE_AUTHOR("Hongju Wang "); - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Intel IPU6 PCI driver"); -+MODULE_VERSION(DRIVER_VERSION_SUFFIX); --- -2.43.0 - diff --git a/patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch b/patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch deleted file mode 100644 index ecc321e..0000000 --- a/patches/0078-ipu6-dkms-align-ipu7-acpi-pdata-device-parser-on-6.1.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 8bb193b2ad17562e020f2096b7e70c82e96a94aa Mon Sep 17 00:00:00 2001 -From: Florent Pirou -Date: Fri, 16 Jan 2026 03:34:49 -0700 -Subject: [PATCH 78/78] ipu6-dkms: align ipu7 acpi pdata device parser on 6.18 - -Signed-off-by: Florent Pirou ---- - 6.18.0/drivers/media/pci/intel/ipu6/Makefile | 9 ++++++++- - 6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h | 1 + - 6.18.0/drivers/media/pci/intel/ipu6/ipu6.c | 8 ++++---- - 3 files changed, 13 insertions(+), 5 deletions(-) - -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/Makefile b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -index 3e2ce53..d33bfa0 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/Makefile -+++ b/6.18.0/drivers/media/pci/intel/ipu6/Makefile -@@ -1,8 +1,13 @@ - # SPDX-License-Identifier: GPL-2.0-only - --CC := ${CC} -I ${M}/../../../include-overrides -+KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') -+ -+CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../../$(KERNEL_VERSION)/include-overrides -DCONFIG_INTEL_IPU_ACPI -DCONFIG_INTEL_IPU6_ACPI -DCONFIG_VIDEO_INTEL_IPU6_ISYS_RESET - ccflags-y := -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" - -+# Path to ipu_acpi module build directory -+KBUILD_EXTRA_SYMBOLS += ${M}/../../../../../../Module.symvers -+ - export CONFIG_VIDEO_INTEL_IPU6 = m - - intel-ipu6-y := ipu6.o \ -@@ -27,4 +32,6 @@ intel-ipu6-isys-y := ipu6-isys.o \ - 
ipu6-isys-dwc-phy.o - - obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o -+ -+subdir-ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" - obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -index 3768c48..d5435c8 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6-isys.h -@@ -102,6 +102,7 @@ struct isys_iwake_watermark { - struct ipu6_isys_csi2_config { - u32 nlanes; - u32 port; -+ enum v4l2_mbus_type bus_type; - }; - - struct sensor_async_sd { -diff --git a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -index 56be152..618bc49 100644 ---- a/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -+++ b/6.18.0/drivers/media/pci/intel/ipu6/ipu6.c -@@ -35,12 +35,12 @@ - #include "ipu6-trace.h" - - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) --#include -+#include - #endif - - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - static int isys_init_acpi_add_device(struct device *dev, void *priv, -- struct ipu6_isys_csi2_config *csi2, -+ void *csi2, - bool reprobe) - { - return 0; -@@ -438,14 +438,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, - #if IS_ENABLED(CONFIG_INTEL_IPU6_ACPI) - if (!spdata) { - dev_dbg(&pdev->dev, "No subdevice info provided"); -- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, NULL, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, NULL, - isys_init_acpi_add_device); - if (acpi_pdata && (*acpi_pdata->subdevs)) { - pdata->spdata = acpi_pdata; - } - } else { - dev_dbg(&pdev->dev, "Subdevice info found"); -- ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, &acpi_pdata, &spdata, -+ ret = ipu_get_acpi_devices(isys_adev, &isys_adev->auxdev.dev, (void **)&acpi_pdata, (void **)&spdata, - isys_init_acpi_add_device); - } - if (ret) --- -2.43.0 - diff --git 
a/patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch b/patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch deleted file mode 100644 index 52683d1..0000000 --- a/patches/0100-ipu-acpi-decouple-the-PLATFORM-ACPI-driver-and-IPU-d.patch +++ /dev/null @@ -1,649 +0,0 @@ -From 3fa91bddbefdfabb8c1510df9c3eb85f9688467f Mon Sep 17 00:00:00 2001 -From: Florent Pirou -Date: Sat, 17 Jan 2026 01:54:42 -0700 -Subject: [PATCH 100/104] ipu-acpi: decouple the PLATFORM ACPI driver and IPU - driver - -Signed-off-by: Florent Pirou ---- - ipu7-drivers/Makefile | 6 +- - .../drivers/media/pci/intel/ipu7/ipu7-isys.c | 8 +-- - .../drivers/media/pci/intel/ipu7/ipu7.c | 8 +-- - .../drivers/media/platform/intel/Kconfig | 2 +- - .../drivers/media/platform/intel/Makefile | 6 +- - .../media/platform/intel/ipu-acpi-common.c | 9 ++- - .../media/platform/intel/ipu-acpi-pdata.c | 57 ++++++++++----- - .../drivers/media/platform/intel/ipu-acpi.c | 61 +++++++++++++--- - ipu7-drivers/include/media/ipu-acpi-pdata.h | 7 +- - ipu7-drivers/include/media/ipu-acpi.h | 70 ++++++++++++++++--- - 10 files changed, 177 insertions(+), 57 deletions(-) - -diff --git a/ipu7-drivers/Makefile b/ipu7-drivers/Makefile -index deb1761..ee53758 100644 ---- a/ipu7-drivers/Makefile -+++ b/ipu7-drivers/Makefile -@@ -8,7 +8,7 @@ MODSRC := $(shell pwd) - export EXTERNAL_BUILD = 1 - export CONFIG_VIDEO_INTEL_IPU7 = m - export CONFIG_IPU_BRIDGE = y --export CONFIG_INTEL_IPU7_ACPI = m -+export CONFIG_INTEL_IPU_ACPI = m - - obj-y += drivers/media/pci/intel/ipu7/ - obj-y += drivers/media/platform/intel/ -@@ -16,8 +16,8 @@ subdir-ccflags-y += -I$(src)/include - - subdir-ccflags-$(CONFIG_IPU_BRIDGE) += \ - -DCONFIG_IPU_BRIDGE --subdir-ccflags-$(CONFIG_INTEL_IPU7_ACPI) += \ -- -DCONFIG_INTEL_IPU7_ACPI -+subdir-ccflags-$(CONFIG_INTEL_IPU_ACPI) += \ -+ -DCONFIG_INTEL_IPU_ACPI - subdir-ccflags-y += $(subdir-ccflags-m) - - all: -diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c 
b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c -index ed3e37b..767104d 100644 ---- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c -+++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c -@@ -178,7 +178,7 @@ static int isys_register_ext_subdev(struct ipu7_isys *isys, - client = isys_find_i2c_subdev(adapter, sd_info); - if (client) { - dev_warn(dev, "Device exists\n"); --#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) - /* TODO: remove i2c_unregister_device() */ - i2c_unregister_device(client); - #else -@@ -266,7 +266,7 @@ static int isys_fw_log_init(struct ipu7_isys *isys) - return 0; - } - --#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) - /* The .bound() notifier callback when a match is found */ - static int isys_notifier_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, -@@ -580,7 +580,7 @@ static int isys_tpg_create_media_links(struct ipu7_isys *isys) - - #endif - --#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) - static int isys_register_devices(struct ipu7_isys *isys) - { - struct device *dev = &isys->adev->auxdev.dev; -@@ -875,7 +875,7 @@ static const struct dev_pm_ops isys_pm_ops = { - .resume = isys_resume, - }; - --#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) - static void isys_remove(struct auxiliary_device *auxdev) - { - struct ipu7_isys *isys = dev_get_drvdata(&auxdev->dev); -diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c -index 20c88d5..96c3d2c 100644 ---- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c -+++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c -@@ -40,8 +40,8 @@ - #include "ipu7-mmu.h" - #include "ipu7-platform-regs.h" - --#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) --#include -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) -+#include - - #endif - #define IPU_PCI_BAR 0 -@@ -2626,8 +2626,8 @@ static int 
ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) - goto out_ipu_bus_del_devices; - } - --#if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) -- ipu_get_acpi_devices(&dev->platform_data); -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) -+ ipu_get_acpi_devices_new(&dev->platform_data); - #endif - isp->isys = ipu7_isys_init(pdev, dev, isys_ctrl, isys_base, - dev->platform_data, -diff --git a/ipu7-drivers/drivers/media/platform/intel/Kconfig b/ipu7-drivers/drivers/media/platform/intel/Kconfig -index f8affa6..b8a7a36 100644 ---- a/ipu7-drivers/drivers/media/platform/intel/Kconfig -+++ b/ipu7-drivers/drivers/media/platform/intel/Kconfig -@@ -8,7 +8,7 @@ config INTEL_IPU7_FPGA_PDATA - development and mainly used for pixter or sensor enablement - without ACPI support. - --config INTEL_IPU7_ACPI -+config INTEL_IPU_ACPI - tristate "Enable IPU ACPI driver" - default VIDEO_INTEL_IPU7 - depends on I2C -diff --git a/ipu7-drivers/drivers/media/platform/intel/Makefile b/ipu7-drivers/drivers/media/platform/intel/Makefile -index d0c41b7..409d55c 100644 ---- a/ipu7-drivers/drivers/media/platform/intel/Makefile -+++ b/ipu7-drivers/drivers/media/platform/intel/Makefile -@@ -1,5 +1,5 @@ - # SPDX-License-Identifier: GPL-2.0 --# Copyright (c) 2010 - 2022 Intel Corporation. -+# Copyright (c) 2010 - 2025 Intel Corporation. 
- - is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) - ifeq ($(is_kernel_lt_6_10), 1) -@@ -10,8 +10,8 @@ endif - - ccflags-y += -I$(src)/../../pci/intel/ipu7/ - --ifneq ($(filter y m, $(CONFIG_INTEL_IPU7_ACPI)),) --obj-$(CONFIG_INTEL_IPU7_ACPI) += ipu-acpi.o \ -+ifneq ($(filter y m, $(CONFIG_INTEL_IPU_ACPI)),) -+obj-$(CONFIG_INTEL_IPU_ACPI) += ipu-acpi.o \ - ipu-acpi-pdata.o \ - ipu-acpi-common.o - else -diff --git a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c -index 936c158..1804932 100644 ---- a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c -+++ b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-common.c -@@ -1,6 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0 - /* -- * Copyright (c) 2016-2024 Intel Corporation. -+ * Copyright (c) 2016-2025 Intel Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version -@@ -13,7 +13,10 @@ - * - */ - #include -+#include - #include -+#include -+ - #include - #include - -@@ -365,8 +368,12 @@ int ipu_acpi_get_dep_data(struct device *dev, - continue; - - /* Process device IN3472 created by acpi */ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) -+ if (acpi_bus_get_device(dep_devices.handles[i], &device)) { -+#else - device = acpi_fetch_acpi_dev(dep_devices.handles[i]); - if (!device) { -+#endif - pr_err("IPU ACPI: Failed to get ACPI device"); - return -ENODEV; - } -diff --git a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c -index 0a84dcc..be75bb4 100644 ---- a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c -+++ b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi-pdata.c -@@ -12,18 +12,31 @@ - * GNU General Public License for more details. 
- * - */ -- - #include - #include - - #define MIN_SENSOR_I2C 1 - #define MIN_SERDES_I2C 3 - #define SUFFIX_BASE 97 -+#define MSG_LEN 128 - --struct ipu7_isys_subdev_pdata acpi_subdev_pdata = { -- .subdevs = (struct ipu7_isys_subdev_info *[]) { -- NULL, -- } -+static struct ipu_isys_subdev_pdata *ptr_built_in_pdata; -+ -+void set_built_in_pdata(struct ipu_isys_subdev_pdata *pdata) -+{ -+ ptr_built_in_pdata = pdata; -+}; -+EXPORT_SYMBOL(set_built_in_pdata); -+ -+static struct ipu_isys_clk_mapping clk_mapping[] = { -+ { CLKDEV_INIT(NULL, NULL, NULL), NULL } -+}; -+ -+struct ipu_isys_subdev_pdata acpi_subdev_pdata = { -+ .subdevs = (struct ipu_isys_subdev_info *[]) { -+ NULL, NULL, NULL, NULL, NULL, -+ }, -+ .clk_map = clk_mapping, - }; - - struct serdes_local serdes_info; -@@ -74,7 +87,8 @@ static int get_i2c_bus_id(int adapter_id, char *adapter_bdf, int bdf_len) - */ - static void update_i2c_bus_id(void) - { -- struct ipu7_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; -+ struct ipu_isys_subdev_info **subdevs = acpi_subdev_pdata.subdevs; -+ - for (int i = 0; subdevs[i] != NULL; i++) { - subdevs[i]->i2c.i2c_adapter_id = - get_i2c_bus_id(subdevs[i]->i2c.i2c_adapter_id, -@@ -83,9 +97,9 @@ static void update_i2c_bus_id(void) - } - } - --struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void) -+struct ipu_isys_subdev_pdata *get_acpi_subdev_pdata(void) - { -- struct ipu7_isys_subdev_pdata *ptr; -+ struct ipu_isys_subdev_pdata *ptr; - - update_i2c_bus_id(); - ptr = &acpi_subdev_pdata; -@@ -122,7 +136,7 @@ static void print_serdes_sdinfo(struct serdes_subdev_info *sdinfo) - (int)sd_mpdata->gpio_powerup_seq[i]); - } - --static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) -+static void print_serdes_subdev(struct ipu_isys_subdev_info *sd) - { - struct serdes_platform_data *sd_pdata = sd->i2c.board_info.platform_data; - int i; -@@ -153,6 +167,7 @@ static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) - pr_debug("\t\tlink_freq_mbps \t\t= %d", 
sd_pdata->link_freq_mbps); - pr_debug("\t\tdeser_nlanes \t\t= %d", sd_pdata->deser_nlanes); - pr_debug("\t\tser_nlanes \t\t= %d", sd_pdata->ser_nlanes); -+ pr_debug("\t\tser_name \t\t= %s", sd_pdata->ser_name); - - for (i = 0; i < serdes_info.rx_port; i++) { - sd_sdinfo = &sd_pdata->subdev_info[i]; -@@ -167,7 +182,7 @@ static void print_serdes_subdev(struct ipu7_isys_subdev_info *sd) - - } - --static void print_subdev(struct ipu7_isys_subdev_info *sd) -+static void print_subdev(struct ipu_isys_subdev_info *sd) - { - struct sensor_platform_data *spdata = sd->i2c.board_info.platform_data; - int i; -@@ -198,7 +213,7 @@ static void print_subdev(struct ipu7_isys_subdev_info *sd) - pr_debug("\t\treset_pin \t\t= %d", spdata->reset_pin); - pr_debug("\t\tdetect_pin \t\t= %d", spdata->detect_pin); - -- for (i = 0; i < IPU7_SPDATA_GPIO_NUM; i++) -+ for (i = 0; i < IPU_SPDATA_GPIO_NUM; i++) - pr_debug("\t\tgpios[%d] \t\t= %d", i, spdata->gpios[i]); - } - -@@ -226,11 +241,11 @@ static void set_common_gpio(struct control_logic_data *ctl_data, - ctl_data->gpio[i].func); - } - --static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, -+static int set_csi2(struct ipu_isys_subdev_info **sensor_sd, - unsigned int lanes, unsigned int port, - unsigned int bus_type) - { -- struct ipu7_isys_csi2_config *csi2_config; -+ struct ipu_isys_csi2_config *csi2_config; - - csi2_config = kzalloc(sizeof(*csi2_config), GFP_KERNEL); - if (!csi2_config) -@@ -250,7 +265,7 @@ static int set_csi2(struct ipu7_isys_subdev_info **sensor_sd, - return 0; - } - --static void set_i2c(struct ipu7_isys_subdev_info **sensor_sd, -+static void set_i2c(struct ipu_isys_subdev_info **sensor_sd, - struct device *dev, - const char *sensor_name, - unsigned int addr, -@@ -274,7 +289,7 @@ static void set_serdes_sd_pdata(struct serdes_module_pdata **module_pdata, - - #define PORT_NR 8 - --static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, -+static int set_serdes_subdev(struct ipu_isys_subdev_info 
**serdes_sd, - struct device *dev, - struct serdes_platform_data **pdata, - const char *sensor_name, -@@ -305,6 +320,7 @@ static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, - /* board info */ - strscpy(serdes_sdinfo[i].board_info.type, sensor_name, I2C_NAME_SIZE); - serdes_sdinfo[i].board_info.addr = serdes_info.sensor_map_addr + i; -+ - serdes_sdinfo[i].board_info.platform_data = module_pdata[i]; - - /* serdes_subdev_info */ -@@ -326,7 +342,7 @@ static int set_serdes_subdev(struct ipu7_isys_subdev_info **serdes_sd, - return 0; - } - --static int set_pdata(struct ipu7_isys_subdev_info **sensor_sd, -+static int set_pdata(struct ipu_isys_subdev_info **sensor_sd, - struct device *dev, - const char *sensor_name, - const char *hid_name, -@@ -423,7 +439,7 @@ static void set_serdes_info(struct device *dev, const char *sensor_name, - } - - static int populate_sensor_pdata(struct device *dev, -- struct ipu7_isys_subdev_info **sensor_sd, -+ struct ipu_isys_subdev_info **sensor_sd, - struct sensor_bios_data *cam_data, - struct control_logic_data *ctl_data, - enum connection_type connect, -@@ -433,7 +449,7 @@ static int populate_sensor_pdata(struct device *dev, - int sensor_physical_addr, - int link_freq) - { -- struct ipu7_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; -+ struct ipu_isys_subdev_pdata *ptr_acpi_subdev_pdata = &acpi_subdev_pdata; - int i = 0; - int ret; - -@@ -453,6 +469,7 @@ static int populate_sensor_pdata(struct device *dev, - cam_data->i2c_num); - return -1; - } -+ - /* Others use DISCRETE Control Logic */ - if (ctl_data->type != CL_DISCRETE) { - dev_err(dev, "IPU ACPI: Control Logic Type\n"); -@@ -530,12 +547,13 @@ int get_sensor_pdata(struct device *dev, - { - struct sensor_bios_data *cam_data; - struct control_logic_data *ctl_data; -- struct ipu7_isys_subdev_info *sensor_sd; -+ struct ipu_isys_subdev_info *sensor_sd; - int rval; - - cam_data = kzalloc(sizeof(*cam_data), GFP_KERNEL); - if (!cam_data) - return -ENOMEM; 
-+ - cam_data->dev = dev; - - ctl_data = kzalloc(sizeof(*ctl_data), GFP_KERNEL); -@@ -543,6 +561,7 @@ int get_sensor_pdata(struct device *dev, - kfree(cam_data); - return -ENOMEM; - } -+ - ctl_data->dev = dev; - - sensor_sd = kzalloc(sizeof(*sensor_sd), GFP_KERNEL); -diff --git a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c -index deed3bb..a5a54b4 100644 ---- a/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c -+++ b/ipu7-drivers/drivers/media/platform/intel/ipu-acpi.c -@@ -32,6 +32,8 @@ - #include - #include - #include -+#include -+ - #include - #include - -@@ -73,7 +75,7 @@ static const struct ipu_acpi_devices supported_devices[] = { - - static int get_table_index(const char *acpi_name) - { -- unsigned int i; -+ int i; - - for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { - if (!strncmp(supported_devices[i].hid_name, acpi_name, -@@ -143,8 +145,8 @@ static int ipu_acpi_test(struct device *dev, void *priv) - - if (acpi_idx < 0) - return 0; -- else -- dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); -+ -+ dev_info(dev, "IPU6 ACPI: ACPI device %s\n", dev_name(dev)); - - const char *target_hid = supported_devices[acpi_idx].hid_name; - -@@ -161,10 +163,10 @@ static int ipu_acpi_test(struct device *dev, void *priv) - if (!adev) { - dev_dbg(dev, "No ACPI device found for %s\n", target_hid); - return 0; -- } else { -- set_primary_fwnode(dev, &adev->fwnode); -- dev_dbg(dev, "Assigned fwnode to %s\n", dev_name(dev)); - } -+ -+ set_primary_fwnode(dev, &adev->fwnode); -+ dev_dbg(dev, "Assigned fwnode to %s\n", dev_name(dev)); - } - - if (ACPI_COMPANION(dev) != adev) { -@@ -185,15 +187,53 @@ static int ipu_acpi_test(struct device *dev, void *priv) - return 0; /* Continue iteration */ - } - -+/* Scan all i2c devices and pick ones which we can handle */ -+ - /* Try to get all IPU related devices mentioned in BIOS and all related information -- * return a new generated existing pdata -+ * If there 
is existing ipu_isys_subdev_pdata, update the existing pdata -+ * If not, return a new generated existing pdata - */ - --int ipu_get_acpi_devices(void **spdata) -+int ipu_get_acpi_devices(void *driver_data, -+ struct device *dev, -+ void **spdata, -+ void **built_in_pdata, -+ int (*fn) -+ (struct device *, void *, -+ void *csi2, -+ bool reprobe)) - { - struct ipu_i2c_helper helper = {0}; - int rval; -- struct ipu7_isys_subdev_pdata *ptr = NULL; -+ -+ if (!built_in_pdata) -+ dev_dbg(dev, "Built-in pdata not found"); -+ else { -+ dev_dbg(dev, "Built-in pdata found"); -+ set_built_in_pdata(*built_in_pdata); -+ } -+ -+ if ((!fn) || (!driver_data)) -+ return -ENODEV; -+ -+ rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); -+ if (rval < 0) -+ return rval; -+ -+ if (!built_in_pdata) { -+ dev_dbg(dev, "Return ACPI generated pdata"); -+ *spdata = get_acpi_subdev_pdata(); -+ } else -+ dev_dbg(dev, "Return updated built-in pdata"); -+ -+ return 0; -+} -+EXPORT_SYMBOL(ipu_get_acpi_devices); -+ -+int ipu_get_acpi_devices_new(void **spdata) -+{ -+ int rval; -+ struct ipu_isys_subdev_pdata *ptr = NULL; - - rval = acpi_bus_for_each_dev(ipu_acpi_test, NULL); - if (rval < 0) -@@ -205,10 +245,11 @@ int ipu_get_acpi_devices(void **spdata) - - return 0; - } --EXPORT_SYMBOL(ipu_get_acpi_devices); -+EXPORT_SYMBOL(ipu_get_acpi_devices_new); - - static int __init ipu_acpi_init(void) - { -+ set_built_in_pdata(NULL); - return 0; - } - -diff --git a/ipu7-drivers/include/media/ipu-acpi-pdata.h b/ipu7-drivers/include/media/ipu-acpi-pdata.h -index e207441..c6ec18e 100644 ---- a/ipu7-drivers/include/media/ipu-acpi-pdata.h -+++ b/ipu7-drivers/include/media/ipu-acpi-pdata.h -@@ -9,6 +9,7 @@ - - #define CL_EMPTY 0 - #define CL_DISCRETE 1 -+#define SERDES_MAX_PORT 4 - #define SERDES_MAX_GPIO_POWERUP_SEQ 4 - #define LOOP_SIZE 10 - -@@ -26,7 +27,7 @@ int get_sensor_pdata(struct device *dev, - int sensor_physical_addr, - int link_freq); - --struct ipu7_isys_subdev_pdata *get_acpi_subdev_pdata(void); 
-+struct ipu_isys_subdev_pdata *get_acpi_subdev_pdata(void); - - struct sensor_platform_data { - unsigned int port; -@@ -34,11 +35,11 @@ struct sensor_platform_data { - uint32_t i2c_slave_address; - int irq_pin; - unsigned int irq_pin_flags; -- char irq_pin_name[IPU7_SPDATA_IRQ_PIN_NAME_LEN]; -+ char irq_pin_name[IPU_SPDATA_IRQ_PIN_NAME_LEN]; - int reset_pin; - int detect_pin; - char suffix; -- int gpios[IPU7_SPDATA_GPIO_NUM]; -+ int gpios[IPU_SPDATA_GPIO_NUM]; - }; - - struct serdes_platform_data { -diff --git a/ipu7-drivers/include/media/ipu-acpi.h b/ipu7-drivers/include/media/ipu-acpi.h -index bc63240..1f8e09e 100644 ---- a/ipu7-drivers/include/media/ipu-acpi.h -+++ b/ipu7-drivers/include/media/ipu-acpi.h -@@ -16,16 +16,53 @@ - #ifndef MEDIA_INTEL_IPU_ACPI_H - #define MEDIA_INTEL_IPU_ACPI_H - --#include "ipu7-isys.h" -+#include -+#include -+ -+#include -+ -+struct ipu_isys_csi2_config { -+ unsigned int nlanes; -+ unsigned int port; -+ enum v4l2_mbus_type bus_type; -+}; -+ -+struct ipu_isys_subdev_i2c_info { -+ struct i2c_board_info board_info; -+ int i2c_adapter_id; -+ char i2c_adapter_bdf[32]; -+}; -+ -+struct ipu_isys_subdev_info { -+ struct ipu_isys_csi2_config *csi2; -+ struct ipu_isys_subdev_i2c_info i2c; -+#if IS_ENABLED(CONFIG_INTEL_IPU_ACPI) -+ char *acpi_hid; -+#endif -+}; -+ -+struct ipu_isys_clk_mapping { -+ struct clk_lookup clkdev_data; -+ char *platform_clock_name; -+}; -+ -+struct ipu_isys_subdev_pdata { -+ struct ipu_isys_subdev_info **subdevs; -+ struct ipu_isys_clk_mapping *clk_map; -+}; - - #define MAX_ACPI_SENSOR_NUM 4 - #define MAX_ACPI_I2C_NUM 12 - #define MAX_ACPI_GPIO_NUM 12 - - #define GPIO_RESET 0x0 -+#define GPIO_POWER_EN 0xb -+#define GPIO_READY_STAT 0x13 -+ -+#define IPU_SPDATA_GPIO_NUM 4 -+#define IPU_SPDATA_IRQ_PIN_NAME_LEN 16 - --#define IPU7_SPDATA_GPIO_NUM 4 --#define IPU7_SPDATA_IRQ_PIN_NAME_LEN 16 -+void set_built_in_pdata(struct ipu_isys_subdev_pdata *pdata); - - enum connection_type { - TYPE_DIRECT, -@@ -122,6 +159,11 @@ 
struct ipu_gpio_info { - bool valid; - }; - -+struct ipu_irq_info { -+ int irq_pin; -+ char irq_pin_name[IPU_SPDATA_IRQ_PIN_NAME_LEN]; -+}; -+ - /* Each I2C client linked to 1 set of CTL Logic */ - struct control_logic_data { - struct device *dev; -@@ -133,9 +175,7 @@ struct control_logic_data { - bool completed; - }; - --int ipu_get_acpi_devices(void **spdata); -- --struct ipu7_isys_subdev_pdata *get_built_in_pdata(void); -+struct ipu_isys_subdev_pdata *get_built_in_pdata(void); - - int ipu_acpi_get_cam_data(struct device *dev, - struct sensor_bios_data *sensor); -@@ -146,17 +186,29 @@ int ipu_acpi_get_dep_data(struct device *dev, - int ipu_acpi_get_control_logic_data(struct device *dev, - struct control_logic_data **ctl_data); - -+struct intel_ipu6_regulator { -+ char *src_dev_name; -+ char *src_rail; -+ char *dest_rail; -+}; -+ - struct ipu_i2c_helper { - int (*fn)(struct device *dev, void *priv, -- struct ipu7_isys_csi2_config *csi2, -+ struct ipu_isys_csi2_config *csi2, - bool reprobe); - void *driver_data; - }; - -+struct ipu_i2c_new_dev { -+ struct list_head list; -+ struct i2c_board_info info; -+ unsigned short int bus; -+}; -+ - struct ipu_camera_module_data { - struct list_head list; -- struct ipu7_isys_subdev_info sd; -- struct ipu7_isys_csi2_config csi2; -+ struct ipu_isys_subdev_info sd; -+ struct ipu_isys_csi2_config csi2; - unsigned int ext_clk; - void *pdata; /* Ptr to generated platform data*/ - void *priv; /* Private for specific subdevice */ --- -2.43.0 - diff --git a/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch b/patches/0100-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch similarity index 86% rename from patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch rename to patches/0100-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch index aaaf24b..53e2926 100644 --- a/patches/0104-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch +++ 
b/patches/0100-ipu7-isys-make-modules-suffix-versioning-same-as-dkm.patch @@ -1,7 +1,7 @@ -From a2d5fa26b49e6f515cd4122a6874ca5b4f98030e Mon Sep 17 00:00:00 2001 +From 70b32b4340d7beb67ef59951ccac07c555de6903 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sat, 17 Jan 2026 02:10:39 -0700 -Subject: [PATCH 104/104] ipu7-isys: make modules suffix versioning same as +Subject: [PATCH 100/104] ipu7-isys: make modules suffix versioning same as dkms Signed-off-by: Florent Pirou @@ -13,13 +13,13 @@ Signed-off-by: Florent Pirou 4 files changed, 15 insertions(+) diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile -index 5041701..7497dc7 100644 +index 5041701..754d4e8 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile @@ -1,5 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2017 - 2025 Intel Corporation. -+KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') ++KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') + +CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../../$(KERNEL_VERSION)/include-overrides -DCONFIG_DEBUG_FS -DCONFIG_INTEL_IPU_ACPI -DCONFIG_VIDEO_INTEL_IPU7_ISYS_RESET -DCONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS + @@ -42,28 +42,28 @@ index 5041701..7497dc7 100644 ccflags-y += -I$(src)/ diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c -index 767104d..ba4f8db 100644 +index 00edeeb..301c101 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.c -@@ -1627,3 +1627,4 @@ MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); +@@ -1501,3 +1501,4 @@ MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); MODULE_IMPORT_NS(INTEL_IPU7); MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); #endif +MODULE_VERSION(DRIVER_VERSION_SUFFIX); diff --git 
a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c -index 96c3d2c..095ea9a 100644 +index 80d5fda..e1fafe3 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7.c -@@ -2891,3 +2891,4 @@ MODULE_AUTHOR("Qingwu Zhang "); +@@ -2878,3 +2878,4 @@ MODULE_AUTHOR("Qingwu Zhang "); MODULE_AUTHOR("Intel"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 pci driver"); +MODULE_VERSION(DRIVER_VERSION_SUFFIX); diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c -index b20e238..cc0de0f 100644 +index 838f0b2..b5ff06a 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/psys/ipu-psys.c -@@ -1548,3 +1548,4 @@ MODULE_IMPORT_NS("DMA_BUF"); +@@ -1554,3 +1554,4 @@ MODULE_IMPORT_NS("DMA_BUF"); MODULE_IMPORT_NS(INTEL_IPU7); MODULE_IMPORT_NS(DMA_BUF); #endif diff --git a/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch b/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch index 5e7e075..bbc39d1 100644 --- a/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch +++ b/patches/0101-ipu7-isys-allow-virtual-channels-on-16-csi2-input-vi.patch @@ -1,4 +1,4 @@ -From dbfe2a70deae4b31e089e52d9ad40a1be37970da Mon Sep 17 00:00:00 2001 +From 0017ba6de82f1f3ea9490b026b8b7af41066c019 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sat, 17 Jan 2026 02:05:34 -0700 Subject: [PATCH 101/104] ipu7-isys : allow virtual-channels on 16 csi2 input diff --git a/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch b/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch index 04c3b04..8dd5027 100644 --- a/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch +++ b/patches/0102-ipu7-isys-add-d4xx-pixel-format-support.patch @@ -1,4 +1,4 @@ -From 
3849f87c48328b61f1ed81817b8c3f29473c6624 Mon Sep 17 00:00:00 2001 +From 1fcc1dad44343b80e60841e03cff411ee41d9a5b Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sat, 17 Jan 2026 02:08:28 -0700 Subject: [PATCH 102/104] ipu7-isys : add d4xx pixel format support @@ -13,10 +13,10 @@ Signed-off-by: Florent Pirou 5 files changed, 18 insertions(+), 2 deletions(-) diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h -index 45db85e..dcb9c49 100644 +index dc63449..9abf8d1 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h -@@ -126,6 +126,7 @@ enum ipu7_insys_frame_format_type { +@@ -125,6 +125,7 @@ enum ipu7_insys_frame_format_type { IPU_INSYS_FRAME_FORMAT_ARGB888 = 31, IPU_INSYS_FRAME_FORMAT_BGRA888 = 32, IPU_INSYS_FRAME_FORMAT_ABGR888 = 33, @@ -79,7 +79,7 @@ index 98b6ef6..b99b5e9 100644 case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c -index 12fdf55..2993d62 100644 +index de3ebf9..44aa17c 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c @@ -83,12 +83,21 @@ const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = { @@ -105,10 +105,10 @@ index 12fdf55..2993d62 100644 static int video_open(struct file *file) diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h -index c9ca2fb..8a505b8 100644 +index 2e45258..19eea94 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys.h -@@ -46,8 +46,8 @@ struct dentry; +@@ -43,8 +43,8 @@ struct dentry; #define IPU_ISYS_SIZE_SEND_QUEUE 40U #define IPU_ISYS_NUM_RECV_QUEUE 1U diff 
--git a/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch b/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch index 907b329..3507775 100644 --- a/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch +++ b/patches/0103-ipu7-isys-add-video-VIDIOC_S_EXT_CTRLS-callbacks-by-.patch @@ -1,4 +1,4 @@ -From f25d8a31605419e73bd9a0a30d23feee864e5a8f Mon Sep 17 00:00:00 2001 +From 64a32d86d6a48dadabfb6d767be28b6180f01845 Mon Sep 17 00:00:00 2001 From: Florent Pirou Date: Sat, 17 Jan 2026 02:09:32 -0700 Subject: [PATCH 103/104] ipu7-isys: add video VIDIOC_S_EXT_CTRLS callbacks by @@ -60,7 +60,7 @@ index 0000000..91b2eca + void *data), + void *data); diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c -index 2993d62..cc48706 100644 +index 44aa17c..2b1ee36 100644 --- a/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/ipu7-isys-video.c @@ -38,6 +38,9 @@ @@ -458,7 +458,7 @@ index 2993d62..cc48706 100644 return 0; unlock: v4l2_subdev_unlock_state(s_state); -@@ -1009,6 +1315,266 @@ out_media_entity_stop_streaming_firmware: +@@ -957,6 +1263,266 @@ out_media_entity_stop_streaming_firmware: return ret; } @@ -725,7 +725,7 @@ index 2993d62..cc48706 100644 static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { .vidioc_querycap = ipu7_isys_vidioc_querycap, .vidioc_enum_fmt_vid_cap = ipu7_isys_vidioc_enum_fmt, -@@ -1025,6 +1591,11 @@ static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { +@@ -973,6 +1539,11 @@ static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_expbuf = vb2_ioctl_expbuf, @@ -737,7 +737,7 @@ index 2993d62..cc48706 100644 }; static const struct media_entity_operations entity_ops = { -@@ -1297,8 +1868,28 @@ int ipu7_isys_video_init(struct ipu7_isys_video *av) +@@ -1217,8 
+1788,28 @@ int ipu7_isys_video_init(struct ipu7_isys_video *av) if (ret) goto out_media_entity_cleanup; @@ -766,7 +766,7 @@ index 2993d62..cc48706 100644 out_media_entity_cleanup: vb2_video_unregister_device(&av->vdev); media_entity_cleanup(&av->vdev.entity); -@@ -1314,6 +1905,9 @@ out_mutex_destroy: +@@ -1234,6 +1825,9 @@ out_mutex_destroy: void ipu7_isys_video_cleanup(struct ipu7_isys_video *av) { diff --git a/patches/0104-ipu7-dkms-dummy-patch-to-void-last-match-miss.patch b/patches/0104-ipu7-dkms-dummy-patch-to-void-last-match-miss.patch new file mode 100644 index 0000000..a6cf993 --- /dev/null +++ b/patches/0104-ipu7-dkms-dummy-patch-to-void-last-match-miss.patch @@ -0,0 +1,39 @@ +From 6c5b8023d44b41a832c02c685438f58ce15a58cd Mon Sep 17 00:00:00 2001 +From: Florent Pirou +Date: Mon, 19 Jan 2026 10:38:25 -0700 +Subject: [PATCH 104/104] ipu7-dkms: dummy patch to void last match miss + +Signed-off-by: Florent Pirou +--- + ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile | 2 +- + ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile~ | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile +index 754d4e8..30f73db 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile ++++ b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile +@@ -10,7 +10,7 @@ ccflags-y += -DDRIVER_VERSION_SUFFIX=\"${DRIVER_VERSION_SUFFIX}\" + + export EXTERNAL_BUILD = 1 + export CONFIG_VIDEO_INTEL_IPU7=m +- ++#add a dummy change + is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) + ifeq ($(is_kernel_lt_6_10), 1) + ifneq ($(EXTERNAL_BUILD), 1) +diff --git a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile~ b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile~ +index 7497dc7..754d4e8 100644 +--- a/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile~ ++++ 
b/ipu7-drivers/drivers/media/pci/intel/ipu7/Makefile~ +@@ -1,6 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + # Copyright (c) 2017 - 2025 Intel Corporation. +-KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') ++KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9]\.[0-9]*\)\..*/\1.0/') + + CC := ${CC} -I ${M}/../../../../../../include -I ${M}/../../../../../../$(KERNEL_VERSION)/include-overrides -DCONFIG_DEBUG_FS -DCONFIG_INTEL_IPU_ACPI -DCONFIG_VIDEO_INTEL_IPU7_ISYS_RESET -DCONFIG_VIDEO_INTEL_IPU7_EXT_CTRLS + +-- +2.43.0 + From cc389449065661471e2963e6913ddaf58981b158 Mon Sep 17 00:00:00 2001 From: Pirouf Date: Sat, 17 Jan 2026 17:13:57 +0100 Subject: [PATCH 13/14] doc : update README, add acpi pdata doc Signed-off-by: Florent Pirou --- README.md | 90 ++++++++++++++++++++++++++++++------ doc/ar0234/acpi-ipu-pdata.md | 76 ++++++++++++++++++++++++++++++ doc/isx031/acpi-ipu-pdata.md | 78 +++++++++++++++++++++++++++++++ 3 files changed, 231 insertions(+), 13 deletions(-) create mode 100644 doc/ar0234/acpi-ipu-pdata.md create mode 100644 doc/isx031/acpi-ipu-pdata.md diff --git a/README.md b/README.md index 574386d..25b381d 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,20 @@ -# Intel MIPI CSI Camera Reference Driver +# Intel MIPI-GMSL Camera Reference (Linux v4l2 drivers) -This repository contains reference drivers and configurations for Intel MIPI CSI cameras, supporting various sensor modules and Image Processing Units (IPUs). +This repository contains Linux debian reference drivers and configurations for Intel Image Processing Units (IPUs) MIPI-GMSL CSI cameras, supporting multiple Camera sensor modules and serdes ODM/OEM implementations (e.g. Advantech,... 
) ## Supported Sensors -| Sensor Name |Sensor Type | Vendor | Kernel Version | IPU Version | -|-----------------|------------|-----------------|----------------|-------------| -| AR0233 | GMSL | Sensing | K6.12 | IPU6EPMTL | -| AR0820 | GMSL | Sensing | K6.12 | IPU6EPMTL | -| ISX031 | MIPI CSI-2 | D3 Embedded | K6.12 | IPU6EPMTL | -| ISX031 | GMSL | D3 Embedded | K6.12 | IPU6EPMTL | -| ISX031 | GMSL | Sensing | K6.12 | IPU6EPMTL | -| ISX031 | GMSL | Leopard Imaging | K6.12 | IPU6EPMTL | -| AR0234 | MIPI CSI-2 | D3 Embedded | K6.12 | IPU6EPMTL | +| Sensor Name |Sensor Type |Serializer Type |Deserializer Type | Vendor | Kernel Version | IPU Version | +|-----------------|-------------|----------------|------------------|-----------------|--------------------|---------------------| +| AR0233 | GMSL | max9295 | max9296/max96724 | Sensing | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | +| AR0820 | GMSL | max9295 | max9296/max96724 | Sensing | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | +| ISX031 | GMSL | max9295 | max9296/max96724 | D3 Embedded | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | +| ISX031 | GMSL | max9295 | max9296/max96724 | Sensing | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | +| ISX031 | GMSL | max9295 | max9296/max96724 | Leopard Imaging | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | +| ISX031 | MIPI-direct | none | none | D3 Embedded | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | +| AR0234 | MIPI-direct | none | none | D3 Embedded | K6.12/K6.17/K6.18 | IPU6EPMTL/IPU7PTL | -> **Note:** IPU6EPMTL represents MTL and ARL platforms +> **Note:** IPU6EPMTL represents Intel Core Ultra Series 1 and 2 (aka. MTL and ARL) platforms, IPU7PTL represents Intel Core Ultra Series 3 (aka. PTL) platforms. 
## Directory Structure @@ -23,8 +23,72 @@ This repository contains reference drivers and configurations for Intel MIPI CSI - `doc/`: Documentation on Kernel Driver dependency on ipu6-drivers repository - `patch/`: Host Dependent Kernel patches to enable specific sensors - `include/`: Header files for driver compilation +- `patches/`: DKMS Dependent out-of-tree patchset for V4L2-core, IPU6 and IPU7 ISYS amd PSYS kernel modules build +- `helpers/`: Various helpers IPU6/IPU7 ISYS, serdes and sensor scripts for binding GMSL v4l2 subdevices, udev rules, modprobe.d .conf, ... -## Getting Started with Reference Camera +## Getting Started with Debian Linux MIPI-GMSL2 Reference Camera + +1. Clone this repository and checkout to your desired release tag : + +```bash +export $HOME=$(pwd) +cd $HOME +git clone --recurse-submodules https://github.com/intel/Intel-MIPI-CSI-Camera-Reference-Driver.git intel-mipi-gmsl-drivers +cd intel-mipi-gmsl-drivers +git checkout +``` + +2. install debian helper dependencies and build Canonical/Ubuntu 24.04 or GNU/Debian 13 compatible package : + +```bash +sudo apt install equivs devscripts linux-headers-$(uname -r) +mk-build-deps -i --host-arch amd64 --build-arch amd64 -t "apt-get -y -q -o Debug::pkgProblemResolver=yes --no-install-recommends --allow-downgrades" debian/control +dpkg-buildpackage +ls ../intel-mipi-gmsl* +``` + +> **Note:** this step should produce multiple debian files, especially `../intel-mipi-gmsl-dkms_20260112-0eci1_amd64.deb` , `../intel-mipi-gmsl-drivers_20260112-0eci1.tar.gz`, `../intel-mipi-gmsl-drivers_20260112-0eci1_amd64.buildinfo`, `../intel-mipi-gmsl-drivers_20260112-0eci1_amd64.changes`, `../intel-mipi-gmsl-drivers_20260112-0eci1.dsc`. + +3. Install & Build everything at once e.g. Intel IPU6/IPU7 PSYS and ISYS, GMSL2 Serdes and Camera Sensor : + +```bash +sudo apt install ../intel-mipi-gmsl-dkms_20260112-0eci1_amd64.deb +``` + +4. 
Check dkms modules versioning, modprobe.d configuration before loading the IPU ISYS kernel modules : + +```bash +modinfo intel-ipu6-isys +modprobe --show-depends intel-ipu6-isys +modprobe intel-ipu6-isys +``` + +> **Note:** all modules version should match the package version as well as dependencies with `ipu-acpi` kernel modules. + +5. Leverage helper scripts to bind v4l2 media pipeline for each individual sensors modules (Refer to `doc/\/acpi-ipu-pdata.md` to configure ACPI Camera PDATA from BIOS/UEFI menu). For example below the v4l2 media pipeline binding from any ISX031 GMSL sub-devices to IPU ISYS video capture devices: + +```bash +/usr/share/camera/ipu_max9x_bind.sh -s isx031 +``` +> **Note:** ipu6 and ipu7 MIPI CSI2, as well as max9x Serializer and Deserializer, requires `v4l-utils (>=1.30)` to set v4l2 subdevice streams and routes. +The easiest way consist of downloading the latest Canonical Ubuntu `v4l-utils` package or rebuild once and install locally `v4l2-ctl 1.32.0` as followed : + +```bash +cd $HOME +git clone https://git.launchpad.net/ubuntu/+source/v4l-utils +mk-build-deps -i --host-arch amd64 --build-arch amd64 -t "apt-get -y -q -o Debug::pkgProblemResolver=yes --no-install-recommends --allow-downgrades" debian/control +echo 1.0 > debian/source/format +dpkg-buildpackage +apt install ../v4l-utils_1.32.0-2ubuntu1_amd64.deb ../libv4l-0t64_1.32.0-2ubuntu1_amd64.deb ../libv4l2rds0t64_1.32.0-2ubuntu1_amd64.deb ../libv4lconvert0t64_1.32.0-2ubuntu1_amd64.deb +v4l2-ctl --version +``` +6. Final sanity-check if the v4l2 camera video streaming functional. + +```bash +v4l2-ctl -d /dev/video-isx031-a-0 --set-fmt-video=width=1920,height=1536,pixelformat=UYVY && v4l2-ctl -d /dev/video-isx031-a-0 --stream-mmap +``` + +## Getting Started with Linux "BKC-image" Reference Camera 1. 
Install Intel BKC image: - Go to rdc.intel.com diff --git a/doc/ar0234/acpi-ipu-pdata.md b/doc/ar0234/acpi-ipu-pdata.md new file mode 100644 index 0000000..2462346 --- /dev/null +++ b/doc/ar0234/acpi-ipu-pdata.md @@ -0,0 +1,76 @@ +## Description + +This document outlines the configuration parameters for Sensor AR0234, including validated settings and their compatibility across supported platforms. + +## For Sensor PDATA ACPI : MIPI CSI-2 + +``` +System Agent (SA) Configuration -> IPU Device (B0:D5:F0) [Enabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> CVF Support [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 1 [Enabled] +Control Logic Type [Discrete] +CRD Version [CRD-D] +Input Clock [19.2 MHz] +PCH Clock Source [IMGCLKOUT_0] +Number of GPIO Pins 0 + +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 2 [Enabled] +Control Logic Type [Discrete] +CRD Version [CRD-D] +Input Clock [19.2 MHz] +PCH Clock Source [IMGCLKOUT_1] +Number of GPIO Pins 0 + +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 3 [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 4 [Disabled] + +System Agent (SA) Configuration -> MIPI Camera Configuration -> Camera 1 [Enabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Link options +Sensor Model [User Custom] +Video HID INTC10C0 +Lanes Clock division [4 4 2 2] +GPIO control [Control Logic 1] +Camera position [Back] +Flash Support [Disabled] +Privacy LED [Driver default] +Rotation [0] +PPR Value 10 +PPR Unit A +Camera module name YHCE +MIPI port 0 +LaneUsed [x2] +MCLK 19200000 +EEPROM Type [ROM_NONE] +VCM Type [VCM_NONE] +Number of I2C 1 +I2C Channel [I2C0] +Device 0 +I2C Address 10 +Device Type [Sensor] +Flash Driver Selection [Disabled] + +System Agent (SA) Configuration -> MIPI Camera Configuration -> Camera 2 [Enabled] +System Agent (SA) Configuration -> MIPI Camera 
Configuration -> Link options +Sensor Model [User Custom] +Video HID INTC10C0 +Lanes Clock division [4 4 2 2] +GPIO control [Control Logic 2] +Camera position [Back] +Flash Support [Disabled] +Privacy LED [Driver default] +Rotation [0] +PPR Value 10 +PPR Unit A +Camera module name YHCE +MIPI port 4 +LaneUsed [x2] +MCLK 19200000 +EEPROM Type [ROM_NONE] +VCM Type [VCM_NONE] +Number of I2C 1 +I2C Channel [I2C5] +Device 0 +I2C Address 10 +Device Type [Sensor] +Flash Driver Selection [Disabled] +``` \ No newline at end of file diff --git a/doc/isx031/acpi-ipu-pdata.md b/doc/isx031/acpi-ipu-pdata.md new file mode 100644 index 0000000..7426f3b --- /dev/null +++ b/doc/isx031/acpi-ipu-pdata.md @@ -0,0 +1,78 @@ +## Description + +This document outlines the configuration parameters for up to GMSL 2 ISX031 Sensors Modules (from D3, Leopard imaging, Sensing), including validated settings and their compatibility across supported platforms. + +## For ISX031 Sensor PDATA ACPI : GMSL2 max9296 DPHY on Advantech AFE-R360 (MTL) or ASR-A360 (ARL) + +``` +System Agent (SA) Configuration -> IPU Device (B0:D5:F0) [Enabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> CVF Support [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 1 [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 2 [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 3 [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Control Logic 4 [Disabled] + +System Agent (SA) Configuration -> MIPI Camera Configuration -> Camera 1 [Enabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Link options +Sensor Model [User Custom] +Video HID INTC031M +Lanes Clock division [4 4 2 2] +GPIO control [No Control Logic] +Camera position [Back] +Flash Support [Disabled] +Privacy LED [Driver default] +Rotation [0] +PPR Value 4 +PPR Unit 2 +Camera module name isx031 
+MIPI port 0 +LaneUsed [x2] +MCLK 19200000 +EEPROM Type [ROM_NONE] +VCM Type [VCM_NONE] +Number of I2C 3 +I2C Channel [I2C1] +Device 0 +I2C Address 48 +Device Type [IO expander] +Device 1 +I2C Address 42 +Device Type [IO expander] +Device 2 +I2C Address 12 +Device Type [Sensor] +Flash Driver Selection [Disabled] + +System Agent (SA) Configuration -> MIPI Camera Configuration -> Camera 2 [Enabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Link options +Sensor Model [User Custom] +Video HID INTC031M +Lanes Clock division [4 4 2 2] +GPIO control [No Control Logic] +Camera position [Back] +Flash Support [Disabled] +Privacy LED [Driver default] +Rotation [0] +PPR Value 4 +PPR Unit 2 +Camera module name isx031 +MIPI port 4 +LaneUsed [x2] +MCLK 19200000 +EEPROM Type [ROM_NONE] +VCM Type [VCM_NONE] +Number of I2C 3 +I2C Channel [I2C2] +Device 0 +I2C Address 48 +Device Type [IO expander] +Device 1 +I2C Address 42 +Device Type [IO expander] +Device 2 +I2C Address 12 +Device Type [Sensor] +Flash Driver Selection [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Camera 3 [Disabled] +System Agent (SA) Configuration -> MIPI Camera Configuration -> Camera 4 [Disabled] +``` \ No newline at end of file From e57537c4679dce419ec57cb31b28dff1797a873a Mon Sep 17 00:00:00 2001 From: florent pirou Date: Tue, 23 Dec 2025 11:12:03 -0700 Subject: [PATCH 14/14] debian: initial dpkg control and rules Signed-off-by: Florent Pirou --- .gitignore | 8 + debian/changelog | 5 + debian/control | 29 +++ debian/intel-mipi-gmsl-dkms.config.in | 28 +++ debian/intel-mipi-gmsl-dkms.install.in | 13 ++ debian/intel-mipi-gmsl-dkms.modaliases | 9 + debian/intel-mipi-gmsl-dkms.postinst.in | 49 +++++ debian/intel-mipi-gmsl-dkms.templates.in | 7 + debian/rules | 66 +++++++ debian/source/format | 1 + debian/source/options | 12 ++ helpers/99-ipu-gmsl-mipi.rules | 21 +++ helpers/blacklist-intel-ipu.conf | 7 + helpers/ipu_max9x_bind.sh | 222 +++++++++++++++++++++++ 
helpers/ipu_ops.conf | 5 + 15 files changed, 482 insertions(+) create mode 100644 .gitignore create mode 100644 debian/changelog create mode 100644 debian/control create mode 100644 debian/intel-mipi-gmsl-dkms.config.in create mode 100644 debian/intel-mipi-gmsl-dkms.install.in create mode 100644 debian/intel-mipi-gmsl-dkms.modaliases create mode 100644 debian/intel-mipi-gmsl-dkms.postinst.in create mode 100644 debian/intel-mipi-gmsl-dkms.templates.in create mode 100755 debian/rules create mode 100644 debian/source/format create mode 100644 debian/source/options create mode 100755 helpers/99-ipu-gmsl-mipi.rules create mode 100644 helpers/blacklist-intel-ipu.conf create mode 100755 helpers/ipu_max9x_bind.sh create mode 100644 helpers/ipu_ops.conf diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..16713d7 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +*.deb +*.changes +*.buildinfo +debian/.debhelper/* +debian/debhelper-build-stamp +debian/*.substvars +debian/*.debhelper* +debian/files diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 0000000..ef58f4f --- /dev/null +++ b/debian/changelog @@ -0,0 +1,5 @@ +intel-mipi-gmsl-drivers (20260112-0eci1) UNRELEASED; urgency=low + + * initial debian package + + -- ECI Maintainer Thu, 10 Jul 2025 14:46:17 +0000 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..5c16c1c --- /dev/null +++ b/debian/control @@ -0,0 +1,29 @@ +Source: intel-mipi-gmsl-drivers +Section: kernel +Priority: optional +Maintainer: ECI Maintainer +Rules-Requires-Root: no +Build-Depends: + debhelper-compat (= 13), + dh-modaliases, + dkms, + dh-sequence-dkms, + quilt, +Standards-Version: 4.7.0 +Homepage: https://github.com/intel/Intel-MIPI-CSI-Camera-Reference-Driver + +Package: intel-mipi-gmsl-dkms +Architecture: amd64 +Depends: + dkms, + pahole, + v4l-utils, + ${misc:Depends}, +Provides: ipu7-modules +Conflicts: ipu7-modules, intel-ipu6-dkms +Replaces: ipu7-modules, intel-ipu6-dkms 
+XB-Modaliases: ${modaliases} +Description: Intel Integrated Image Processing Unit 6/7 (IPU6/IPU7) drivers + This package provides kernel drivers for GMSL2 cameras through the Intel IPU7 + on Intel Panther Lake and the Intel IPU6 + on Intel Alder Lake, Raptor Lake, Meteor Lake and Arrow lake platforms. diff --git a/debian/intel-mipi-gmsl-dkms.config.in b/debian/intel-mipi-gmsl-dkms.config.in new file mode 100644 index 0000000..0f0df01 --- /dev/null +++ b/debian/intel-mipi-gmsl-dkms.config.in @@ -0,0 +1,28 @@ +#!/bin/bash +# inspired by http://www.fifi.org/doc/debconf-doc/tutorial.html + +# Exit on error +set -e +# Source debconf library. +. /usr/share/debconf/confmodule +db_version 2.0 + +# This conf script is capable of backing up +#db_capb backup + +declare -A FEATURES=@deserlist@ +STATE="max9x" + +while [ "$STATE" != "quit" -a "$STATE" != "done" ]; do + case "$STATE" in + max9x) + # query max9x + db_input critical intel-mipi-gmsl-dkms/max9x || true + if db_go; then + STATE="quit" + else + STATE="done" + fi + ;; + esac +done diff --git a/debian/intel-mipi-gmsl-dkms.install.in b/debian/intel-mipi-gmsl-dkms.install.in new file mode 100644 index 0000000..a167044 --- /dev/null +++ b/debian/intel-mipi-gmsl-dkms.install.in @@ -0,0 +1,13 @@ +Makefile /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +dkms.conf /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +drivers /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +include /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +ipu7-drivers /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +6.12.0 /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +6.17.0 /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +6.18.0 /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +patches /usr/src/@DKMS_PACKAGE_NAME@-@DKMS_PACKAGE_VERSION@ +helpers/blacklist-intel-ipu.conf /etc/modprobe.d +helpers/ipu_ops.conf /etc/modprobe.d +helpers/ipu_max9x_bind.sh /usr/share/camera +helpers/99-ipu-gmsl-mipi.rules /lib/udev/rules.d \ No 
newline at end of file diff --git a/debian/intel-mipi-gmsl-dkms.modaliases b/debian/intel-mipi-gmsl-dkms.modaliases new file mode 100644 index 0000000..d8536b3 --- /dev/null +++ b/debian/intel-mipi-gmsl-dkms.modaliases @@ -0,0 +1,9 @@ +alias pci:v00008086d0000645Dsv*sd*bc*sc*i* intel-ipu7 +alias pci:v00008086d0000B05Dsv*sd*bc*sc*i* intel-ipu7 +alias pci:v00008086d0000D719sv*sd*bc*sc*i* intel-ipu7 +alias pci:v00008086d0000462Esv*sd*bc*sc*i* intel-ipu6-psys +alias pci:v00008086d0000465Dsv*sd*bc*sc*i* intel-ipu6-psys +alias pci:v00008086d00004E19sv*sd*bc*sc*i* intel-ipu6-psys +alias pci:v00008086d00007D19sv*sd*bc*sc*i* intel-ipu6-psys +alias pci:v00008086d00009A19sv*sd*bc*sc*i* intel-ipu6-psys +alias pci:v00008086d0000A75Dsv*sd*bc*sc*i* intel-ipu6-psys diff --git a/debian/intel-mipi-gmsl-dkms.postinst.in b/debian/intel-mipi-gmsl-dkms.postinst.in new file mode 100644 index 0000000..755ba46 --- /dev/null +++ b/debian/intel-mipi-gmsl-dkms.postinst.in @@ -0,0 +1,49 @@ +# Automatically added by dh_dkms/3.0.10-7 +# The original file can be found in template-dkms-mkdeb/debian/postinst +# in the DKMS tarball, check it for copyright notices. + +DKMS_NAME=intel-mipi-gmsl +DKMS_PACKAGE_NAME=$DKMS_NAME-dkms +DKMS_VERSION=@DKMS_PACKAGE_VERSION@ + +postinst_found=0 + +set -e + +. 
/usr/share/debconf/confmodule
+
+db_get ${DKMS_PACKAGE_NAME}/max9x
+MAX9X=${RET}
+
+case "$1" in
+ configure)
+ echo "postinst d4xx build for ${MAX9X}" >&2
+ if [ "$MAX9X" = "max9296" ]; then
+ echo "clear MAKE_OPTS" >&2
+ export MAKE_OPTS=""
+ fi
+ if [ "$MAX9X" = "max96724" ]; then
+ echo "apply MAKE_OPTS=\"-DCONFIG_VIDEO_D4XX_MAX96724 -DCONFIG_VIDEO_D4XX_MAX96712_LEGACY_MODE\"" >&2
+ export MAKE_OPTS="-DCONFIG_VIDEO_D4XX_MAX96724 -DCONFIG_VIDEO_D4XX_MAX96712_LEGACY_MODE"
+ fi
+ if [ "$MAX9X" = "max96712" ]; then
+ echo "apply MAKE_OPTS=\"-DCONFIG_VIDEO_D4XX_MAX96712\"" >&2
+ export MAKE_OPTS="-DCONFIG_VIDEO_D4XX_MAX96712"
+ fi
+ for DKMS_POSTINST in /usr/lib/dkms/common.postinst /usr/share/$DKMS_PACKAGE_NAME/postinst; do
+ if [ -f $DKMS_POSTINST ]; then
+ $DKMS_POSTINST $DKMS_NAME $DKMS_VERSION /usr/share/$DKMS_PACKAGE_NAME "" $2
+ postinst_found=1
+ break
+ fi
+ done
+ if [ "$postinst_found" -eq 0 ]; then
+ echo "ERROR: DKMS version is too old and $DKMS_PACKAGE_NAME was not"
+ echo "built with legacy DKMS support."
+ echo "You must either rebuild $DKMS_PACKAGE_NAME with legacy postinst"
+ echo "support or upgrade DKMS to a more current version."
+ exit 1
+ fi
+ ;;
+esac
+# End automatically added section
diff --git a/debian/intel-mipi-gmsl-dkms.templates.in b/debian/intel-mipi-gmsl-dkms.templates.in
new file mode 100644
index 0000000..2c3dbc8
--- /dev/null
+++ b/debian/intel-mipi-gmsl-dkms.templates.in
@@ -0,0 +1,7 @@
+Template: intel-mipi-gmsl-dkms/max9x
+Type: select
+Choices: @marchlist@
+Default: @marchdefault@
+Description: Select MIPI-Direct or GMSL2 Deserializer [default:max9296]
+ build certain driver in compile options either for max9296 or max9672x.
+ \ No newline at end of file diff --git a/debian/rules b/debian/rules new file mode 100755 index 0000000..4668d81 --- /dev/null +++ b/debian/rules @@ -0,0 +1,66 @@ +#!/usr/bin/make -f + +export DH_VERBOSE = 1 +include /usr/share/dpkg/pkg-info.mk + +# Deserializer arch +deserlist := "max9296 max96724 mipi" +defaultdeser := max9296 + +# special characters +empty := +space := $(empty) $(empty) +comma := , +semicol := ; + +# helper to create comma separated list from space separated list +commasep = $(subst $(space),$(comma)$(space),$1) +semicolsep = $(subst $(space),$(comma),$1) + +# remove double quotes +unquote = $(subst $\",,$1) + +# recursive wildcard +rwildcard=$(foreach d,$(wildcard $(1:=/*)),$(call rwildcard,$d,$2) $(filter $(subst *,%,$2),$d)) + +DKMS_PACKAGE_NAME = $(shell grep '^PACKAGE_NAME=' dkms.conf | cut -d\" -f2) +DKMS_PACKAGE_VERSION = $(DEB_VERSION_UPSTREAM) +DKMS_PACKAGE_REVISION = $(DEB_VERSION_UPSTREAM_REVISION) + +generated_dkms_files = \ + debian/intel-mipi-gmsl-dkms.install \ + debian/intel-mipi-gmsl-dkms.config \ + debian/intel-mipi-gmsl-dkms.templates \ + debian/intel-mipi-gmsl-dkms.postinst \ + $(empty) + +%: + dh $@ --with modaliases + +override_dh_auto_configure: +override_dh_auto_build: +override_dh_auto_test: +override_dh_auto_install: +override_dh_auto_clean: + +override_dh_dkms: dkms.conf #debian/intel-mipi-gmsl-dkms.postinst + dh_dkms -V $(DEB_VERSION_UPSTREAM_REVISION) -- dkms.conf +# cp -a debian/intel-mipi-gmsl-dkms.postinst debian/intel-mipi-gmsl-dkms.postinst.debhelper + +#override_dh_installdebconf-arch: debian/intel-mipi-gmsl-dkms.config debian/intel-mipi-gmsl-dkms.templates +# dh_installdebconf -a + +override_dh_install: debian/intel-mipi-gmsl-dkms.install + dh_install --exclude=.gitignore + +override_dh_clean: + -rm -f $(generated_dkms_files) + dh_clean + +$(generated_dkms_files) : $(generated_dkms_files:=.in) debian/rules + sed -e 's,@DKMS_PACKAGE_NAME@,$(DKMS_PACKAGE_NAME),g' \ + -e 
's,@DKMS_PACKAGE_VERSION@,$(DEB_VERSION_UPSTREAM_REVISION),g' \ + -e 's/@deserlist@/$(call unquote,$(deserlist))/g' \ + -e 's/@marchdefault@/$(defaultdeser)/g' \ + -e 's/@marchlist@/$(call commasep,$(call unquote,$(deserlist)))/g' \ + <$(@:=.in) >$@ diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 0000000..d3827e7 --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +1.0 diff --git a/debian/source/options b/debian/source/options new file mode 100644 index 0000000..9ca393c --- /dev/null +++ b/debian/source/options @@ -0,0 +1,12 @@ +tar-ignore=.git +tar-ignore=.gitignore +tar-ignore=.ci +tar-ignore=ci +tar-ignore=patch/* +tar-ignore=ipu6-drivers/patch/* +tar-ignore=ipu7-drivers/patch/* +tar-ignore=debian/*.tmpl +tar-ignore=.pc +tar-ignore=*.deb +tar-ignore=*.changes +tar-ignore=*.buildinfo diff --git a/helpers/99-ipu-gmsl-mipi.rules b/helpers/99-ipu-gmsl-mipi.rules new file mode 100755 index 0000000..7a8c099 --- /dev/null +++ b/helpers/99-ipu-gmsl-mipi.rules @@ -0,0 +1,21 @@ +# Device rules for 3rd party GMSL2 ISX031 IMX390 and AR0234 camera modules. +# video links for SDK, binding for ipu7 or ipu6 + +SUBSYSTEM=="video4linux", ATTR{name}=="i2c-isx031-*", RUN+="/bin/bash -c '/usr/share/camera/ipu_max9x_bind.sh -s isx031 -n > /dev/kmsg'" +SUBSYSTEM=="video4linux", ATTR{name}=="i2c-imx390-*", RUN+="/bin/bash -c '/usr/share/camera/ipu_max9x_bind.sh -s imx390 -n > /dev/kmsg'" +SUBSYSTEM=="video4linux", ATTR{name}=="i2c-ar0234-*", RUN+="/bin/bash -c '/usr/share/camera/ipu_max9x_bind.sh -s ar0234 -n > /dev/kmsg'" + +# TEMPORARY : Device rules for Intel RealSense MIPI devices. 
+# DFU rules
+SUBSYSTEM=="d4xx-class", KERNEL=="d4xx-dfu*", GROUP="video", MODE="0660"
+
+# video links for SDK, binding for ipu7
+SUBSYSTEM=="video4linux", ATTR{name}=="DS5 mux *", RUN+="/bin/bash -c '/usr/share/camera/rs_ipu_d457_bind.sh -n > /dev/kmsg; /usr/share/camera/rs-enum-ipu.sh -n -q > /dev/kmsg'"
+
+# default max96724 gmsl link_freq override for PTL ipu7 DWC DPHY
+# (e.g. used udevadm info -a -p $(udevadm info -q path -n $(media-ctl -e "i2c-isx031-a-0")) )
+#SUBSYSTEM=="video4linux", ATTR{name}=="i2c-isx031-*", RUN+="/usr/bin/v4l2-ctl --set-ctrl v4l2_cid_link_freq=1 -d '%E{DEVNAME}'"
+#SUBSYSTEM=="video4linux", ATTR{name}=="i2c-imx390-*", RUN+="/usr/bin/v4l2-ctl --set-ctrl v4l2_cid_link_freq=1 -d '%E{DEVNAME}'"
+#SUBSYSTEM=="video4linux", ATTR{name}=="i2c-ar0234-*", RUN+="/usr/bin/v4l2-ctl --set-ctrl v4l2_cid_link_freq=1 -d '%E{DEVNAME}'"
+# (e.g. used udevadm info -a -p $(udevadm info -q path -n $(media-ctl -e "DS5 mux a-0")) )
+#SUBSYSTEM=="video4linux", ATTR{name}=="DS5 mux *", RUN+="/usr/bin/v4l2-ctl --set-ctrl v4l2_cid_link_freq=1 -d '%E{DEVNAME}'"
\ No newline at end of file
diff --git a/helpers/blacklist-intel-ipu.conf b/helpers/blacklist-intel-ipu.conf
new file mode 100644
index 0000000..f59cf47
--- /dev/null
+++ b/helpers/blacklist-intel-ipu.conf
@@ -0,0 +1,7 @@
+# avoid intel CCG & Canonical MIPI-direct only kernel builtin IPU6 / IPU7 clashes with GMSL2 enabled intel-ipu-dkms
+blacklist intel_ipu7_isys
+blacklist intel_ipu7_psys
+blacklist intel_ipu7
+blacklist intel_ipu6_isys
+blacklist intel_ipu6_psys
+blacklist intel_ipu6
diff --git a/helpers/ipu_max9x_bind.sh b/helpers/ipu_max9x_bind.sh
new file mode 100755
index 0000000..869339c
--- /dev/null
+++ b/helpers/ipu_max9x_bind.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+#
+# Dependency: v4l-utils
+v4l2_util=$(which v4l2-ctl)
+media_util=$(which media-ctl)
+if [ -z ${v4l2_util} ]; then
+ echo "v4l2-ctl not found, install with: sudo apt install v4l-utils"
+ exit 1
+fi
+while [[ $# -gt 0 ]]; do
+ case $1 
in + -q|--quiet) + quiet=1 + shift + ;; + -m|--mux) + shift + if [ ${#1} -eq 3 ]; then + mux_param=$1 + fi + shift + ;; + -s|--sensor) + sensor=$2 + shift + ;; + -h|--help) + echo "-q -m -h" + ;; + -f) + fmt="$2" + shift + ;; + *) + quiet=0 + shift + ;; + esac +done + +sensor="${sensor:-isx031}" +if [ ${sensor} = "isx031" ]; then + fmt="${fmt:-[fmt:UYVY8_1X16/1920x1536]}" + # fmt="${fmt:-[fmt:UYVY8_1X16/1600x1280]}" + # fmt="${fmt:-[fmt:UYVY8_1X16/1024x768]}" +elif [ ${sensor} = "imx390" ]; then + fmt="${fmt:-[fmt:SGRBG12_1X12/1920x1200]}" + # fmt="${fmt:-[fmt:SRGGB12_1X12/1936x1096]}" + # fmt="${fmt:-[fmt:SRGGB12_1X12/1280x720]}" + # fmt="${fmt:-[fmt:SRGGB12_1X12/1024x768]}" +elif [ ${sensor} = "ar0234" ]; then + fmt="${fmt:-[fmt:SGRBG10_1X10/1280x960]}" +fi + +declare entities=() +while IFS= read -r line; do + entities+=("$line") +done < <(media-ctl -p | grep entity | sed -e 's/.*: //;s/ (.*$//') + +find_entity() { + local name=$1 + for e in "${entities[@]}"; do + if [[ "${e}" = *"${name}"* ]]; then + echo -n "${e}" + return + fi + done + echo "$1 not found" >&2 + exit 1 +} + +des_node() { + local mux=$1 + case ${mux} in + a|b|c|d) echo -n "max9x a";; + A|B|C|D) echo -n "max9x c";; + esac +} +des_src_pad() { + local mux=$1 + echo -n "\"$(des_node ${mux})\":0" +} +des_sink_pad() { + local mux=$1 + echo -n "\"$(des_node ${mux})\":$(( ( 4 + ${mux_to_index[${mux}]} ) % 2 ))" +} + +ser_node() { + local mux=$1 + case ${mux} in + a) find_entity "max9x a-0";; + b) find_entity "max9x b-0";; + c) find_entity "max9x c-0";; + d) find_entity "max9x d-0";; + A) find_entity "max9x a-2";; + B) find_entity "max9x b-2";; + C) find_entity "max9x c-2";; + D) find_entity "max9x d-2";; + esac +} +ser_src_pad() { + local mux=$1 + echo -n "\"$(ser_node ${mux})\":2" +} +ser_sink_pad() { + local mux=$1 + echo -n "\"$(ser_node ${mux})\":0" +} + +sen_node() { + local mux=$1 + case ${mux} in + a) find_entity "i2c-${sensor} a-0";; + b) find_entity "i2c-${sensor} b-0";; + c) find_entity 
"i2c-${sensor} c-0";; + d) find_entity "i2c-${sensor} d-0";; + A) find_entity "i2c-${sensor} a-2";; + B) find_entity "i2c-${sensor} b-2";; + C) find_entity "i2c-${sensor} c-2";; + D) find_entity "i2c-${sensor} d-2";; + esac +} +sen_src_pad() { + local mux=$1 + echo -n "\"$(sen_node ${mux})\":0" +} + +# mapping for ISX031/IMX390/AR0234 mux entity to IPU7 CSI-2 entity matching. +declare -A media_mux_capture_link=( [a]=0 [A]=0 [b]=16 [B]=16 [c]=32) + +# mapping for ISX031/IMX390/AR0234 mux entity to IPU7 CSI-2 entity matching. +declare -A media_mux_csi2_link=( [A]=0 [a]=0 [b]=1 [c]=2 [d]=3 [E]=4 [F]=5 [G]=0 [g]=0 [h]=1 [i]=2 [j]=3 [K]=4 [L]=5) + +declare -A media_mux_capture_pad=( + [A]=0 + [a]=0 + [B]=1 + [b]=1 + [C]=2 + [c]=2 + [D]=3 + [d]=3 +) + + +# all available DS5 muxes, each one represent physically connected camera. +# muxes prefix a, b, c, d referes to max96724 aggregated link cameras +# muxes suffix 0, 1, 2 is referes to csi2 mipi port mapping +mux_list=${mux_param:-'a-0 b-0 c-0 a-1 b-1 c-1 a-2 b-2 c-2'} + +# Find media device. +# For case with usb camera plugged in during the boot, +# usb media controller will occupy index 0 +mdev=$(${v4l2_util} --list-devices | grep -A1 ipu6 | grep media) +[[ -z "${mdev}" ]] && exit 0 + +out() { + echo -n "${@} " >&2 + "${@}" + echo " RET=$?" >&2 +} + +media_ctl_cmd="${media_util} -d ${mdev}" +# media-ctl -r # <- this can be used to clean-up all bindings from media controller +# cache media-ctl output +dot=$($media_ctl_cmd --print-dot) + +# all available IMX390/ISX031/AR0234 muxes, each one represent physically connected camera. +# muxes prefix a, b, c, d referes to max96724 aggregated link cameras +# muxes suffix 0, 1, 2 is referes to csi2 mipi port mapping + +for mux in $mux_list; do + e="$(sen_node ${mux})" + if [ -z "${e}" ]; then + continue; + fi + + [[ $quiet -eq 0 ]] && echo "Bind MAX9X mux ${mux} .. 
" >&2 + + + csi2="$((${camera:2:1}))" + mux=${camera:0:1} + + # mapping for DS5 mux entity to IPU7 ISYS entity matching. + isys_cap="$((${media_mux_capture_link[${mux}]}))" + + # for DS5 aggregated mux (above a) - use ISYS capture pads with offset 6 + # this offset used as capture sensor set - dept,rgb,ir,imu = 4 + cap_pad=0 + if [ "${mux}" \= "b" ]; then + cap_pad=6 + else + if [ "${mux}" \= "c" ]; then + metadata_enabled=0 + cap_pad=1 + else + if [ "${mux}" \= "d" ]; then + metadata_enabled=0 + cap_pad=7 + fi + fi + fi + isys_cap=$((${isys_cap}+${cap_pad})) + + # out $media_ctl_cmd -l "${des_node_src[${mux}]} -> \"Intel IPU6 ${csi2}\":0[1]" + # out $media_ctl_cmd -l "$(des_src_pad ${mux}) -> \"Intel IPU7 ISYS ${csi2}\":0[1]" + out $media_ctl_cmd -l "\"Intel IPU7 ${csi2}\":$((${cap_pad}+1)) -> \"Intel IPU7 ISYS ${isys_cap} $((${cap_pad}+0))\":0[5]" + + out $media_ctl_cmd -V "$(sen_src_pad ${mux}) ${fmt}" + out $media_ctl_cmd -V "$(ser_sink_pad ${mux}) ${fmt}" + out $media_ctl_cmd -V "$(ser_src_pad ${mux}) ${fmt}" + out $media_ctl_cmd -V "$(des_sink_pad ${mux}) ${fmt}" + out $media_ctl_cmd -V "$(des_src_pad ${mux}) ${fmt}" + + out $media_ctl_cmd -V "\"Intel IPU7 ${csi2}\":0 ${fmt}" + out $media_ctl_cmd -V "\"Intel IPU7 ${csi2} $((${cap_pad}+0))\":0 ${fmt}" + out $media_ctl_cmd -V "\"Intel IPU7 ${isys_cap} $((${cap_pad}+0))\":0 ${fmt}" + + cap_dev=$($media_ctl_cmd -e "Intel IPU7 ISYS ${isys_cap} $((${cap_pad}+0))") + out ln -snf ${cap_dev} /dev/video-${sensor}-${mux} + +done diff --git a/helpers/ipu_ops.conf b/helpers/ipu_ops.conf new file mode 100644 index 0000000..6b8564b --- /dev/null +++ b/helpers/ipu_ops.conf @@ -0,0 +1,5 @@ +options intel-ipu6 isys_freq_override=400 ipu_trace_enable=Y +options intel-ipu6-isys csi2_port_optimized=n +options intel-ipu6-psys enable_power_gating=n +#options intel-ipu7-isys dyndbg +#options intel-ipu7 dyndbg